/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <limits>

#include "cortex_m_ops_common.h"

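// CMSIS-NN is a plain C library, so include its header with C linkage.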
extern "C" {
#include "arm_nnfunctions.h"
}

namespace cortex_m {
namespace native {

using KernelRuntimeContext = torch::executor::KernelRuntimeContext;

namespace {

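// Shape contract: lhs is [B, M, K], rhs_transposed is [B, N, K] (the rhs
// arrives with its last two dims already swapped), and out is [B, M, N].
// All three tensors must be int8.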
bool validate_batch_matmul_arguments(
    KernelRuntimeContext& context,
    const Tensor& lhs,
    const Tensor& rhs_transposed,
    const Tensor& out) {
  if (lhs.scalar_type() != ScalarType::Char ||
      rhs_transposed.scalar_type() != ScalarType::Char ||
      out.scalar_type() != ScalarType::Char) {
    ET_LOG(Error, "quantized_batch_matmul: all tensors must be int8");
    context.fail(Error::InvalidArgument);
    return false;
  }

  if (lhs.dim() != 3 || rhs_transposed.dim() != 3 || out.dim() != 3) {
    ET_LOG(Error, "quantized_batch_matmul: all tensors must be 3-D");
    context.fail(Error::InvalidArgument);
    return false;
  }

  if (lhs.size(0) != rhs_transposed.size(0)) {
    ET_LOG(Error, "quantized_batch_matmul: batch dims must match");
    context.fail(Error::InvalidArgument);
    return false;
  }

  if (lhs.size(2) != rhs_transposed.size(2)) {
    ET_LOG(Error, "quantized_batch_matmul: inner dims must match");
    context.fail(Error::InvalidArgument);
    return false;
  }

  if (out.size(0) != lhs.size(0) || out.size(1) != lhs.size(1) ||
      out.size(2) != rhs_transposed.size(1)) {
    ET_LOG(Error, "quantized_batch_matmul: output shape mismatch");
    context.fail(Error::InvalidArgument);
    return false;
  }

  return true;
}

} // namespace

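// Computes out = lhs x transpose(rhs_transposed) on int8 data with CMSIS-NN,
// requantizing the int32 accumulators back to int8 with a single per-tensor
// multiplier/shift pair. Offsets follow the CMSIS-NN convention of being
// added to the quantized values inside the kernel.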
Tensor& quantized_batch_matmul_out(
    KernelRuntimeContext& context,
    const Tensor& lhs,
    int64_t lhs_offset,
    const Tensor& rhs_transposed,
    int64_t rhs_offset,
    int64_t output_offset,
    int64_t output_multiplier,
    int64_t output_shift,
    Tensor& out) {
  if (!validate_batch_matmul_arguments(context, lhs, rhs_transposed, out)) {
    return out;
  }

  const int32_t batch = static_cast<int32_t>(lhs.size(0));
  const int32_t lhs_rows = static_cast<int32_t>(lhs.size(1));
  const int32_t inner = static_cast<int32_t>(lhs.size(2));
  const int32_t rhs_cols = static_cast<int32_t>(rhs_transposed.size(1));

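  // cmsis_nn_dims is laid out as {n, h, w, c}: the batch count rides in h
  // (n is fixed to 1), matrix rows in w, and the contraction dim K in c.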
  const cmsis_nn_dims lhs_dims = {1, batch, lhs_rows, inner};
  const cmsis_nn_dims rhs_dims = {1, batch, rhs_cols, inner};
  const cmsis_nn_dims out_dims = {1, batch, lhs_rows, rhs_cols};

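  // The rhs already arrives transposed to [B, N, K], so neither adjoint flag
  // is needed. The activation bounds span the full int8 range, i.e. no extra
  // clamping beyond saturation.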
  const cmsis_nn_bmm_params bmm_params = {
      /* adj_x */ false,
      /* adj_y */ false,
      /* fc_params */
      {static_cast<int32_t>(lhs_offset),
       static_cast<int32_t>(rhs_offset),
       static_cast<int32_t>(output_offset),
       /* activation */
       {std::numeric_limits<int8_t>::min(),
        std::numeric_limits<int8_t>::max()}}};

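  // Per-tensor requantization: the kernel scales each int32 accumulator with
  // this fixed-point multiplier/shift pair, adds the output offset, and
  // saturates to int8.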
  cmsis_nn_per_tensor_quant_params quant_params;
  quant_params.multiplier = static_cast<int32_t>(output_multiplier);
  quant_params.shift = static_cast<int32_t>(output_shift);

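  // Some CMSIS-NN builds need a scratch buffer; query the size and allocate
  // it from the kernel's temp allocator only when required.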
  const int32_t buf_size = arm_fully_connected_s8_get_buffer_size(&out_dims);

  cmsis_nn_context ctx;
  ctx.buf = nullptr;
  ctx.size = 0;

  if (buf_size > 0) {
    auto buffer_or_error = context.allocate_temp(buf_size);
    if (!buffer_or_error.ok()) {
      ET_LOG(
          Error,
          "quantized_batch_matmul: failed to allocate scratch buffer (%d bytes)",
          buf_size);
      context.fail(buffer_or_error.error());
      return out;
    }
    ctx.buf = buffer_or_error.get();
    ctx.size = buf_size;
  }

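  // Batched int8 matmul: each batch slice multiplies [M, K] by the
  // transposed [N, K] operand to produce [M, N].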
  const arm_cmsis_nn_status status = arm_batch_matmul_s8(
      &ctx,
      &bmm_params,
      &quant_params,
      &lhs_dims,
      lhs.const_data_ptr<int8_t>(),
      &rhs_dims,
      rhs_transposed.const_data_ptr<int8_t>(),
      &out_dims,
      out.mutable_data_ptr<int8_t>());

  if (status != ARM_CMSIS_NN_SUCCESS) {
    ET_LOG(
        Error,
        "quantized_batch_matmul: arm_batch_matmul_s8 failed with status [%d]",
        status);
    context.fail(Error::Internal);
  }

  return out;
}

} // namespace native
} // namespace cortex_m