diff --git a/backends/cadence/aot/functions_hifi.yaml b/backends/cadence/aot/functions_hifi.yaml
index 3a274adacaf..d7ea1ea8789 100644
--- a/backends/cadence/aot/functions_hifi.yaml
+++ b/backends/cadence/aot/functions_hifi.yaml
@@ -106,6 +106,11 @@
   kernels:
     - arg_meta: null
       kernel_name: torch::executor::where_out
+
+- op: mean.out
+  kernels:
+    - arg_meta: null
+      kernel_name: impl::HiFi::mean_dim_out
 
 # custom ops
 - func: cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
diff --git a/backends/cadence/hifi/kernels/CMakeLists.txt b/backends/cadence/hifi/kernels/CMakeLists.txt
index 0ff3d1fde60..b52d2bdf0a3 100644
--- a/backends/cadence/hifi/kernels/CMakeLists.txt
+++ b/backends/cadence/hifi/kernels/CMakeLists.txt
@@ -13,8 +13,12 @@ add_library(
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_f32_broadcast.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_mode_f32_broadcast.c
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_mul_f32_broadcast.c
+  ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c
 )
 
+# Let files say "include <executorch/path/to/header.h>".
+set(_common_include_directories ${EXECUTORCH_ROOT}/..)
+
 target_include_directories(
   cadence_kernels
   PUBLIC
@@ -23,6 +27,7 @@ target_include_directories(
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/nnlib-hifi4/xa_nnlib/include/nnlib
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/nnlib-hifi4/xa_nnlib/include
   ${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/nnlib-hifi4/xa_nnlib/algo/ndsp/hifi4/include/
+  ${_common_include_directories}
 )
 
 target_link_libraries(cadence_kernels PRIVATE xa_nnlib)
diff --git a/backends/cadence/hifi/kernels/kernels.h b/backends/cadence/hifi/kernels/kernels.h
index 8faf06713b7..a23ae080e92 100644
--- a/backends/cadence/hifi/kernels/kernels.h
+++ b/backends/cadence/hifi/kernels/kernels.h
@@ -51,6 +51,16 @@ extern "C" WORD32 xa_nn_elm_mul_broadcast_4D_f32xf32_f32(FLOAT32 * __restrict__
                                             const WORD32 *const p_inp1_shape,
                                             const FLOAT32 * __restrict__ p_inp2,
                                             const WORD32 *const p_inp2_shape);
+
+extern "C" WORD32 xa_nn_reduce_mean_4D_f32_f32(FLOAT32 * __restrict__ p_out,
+                                               const WORD32 *const p_out_shape,
+                                               const FLOAT32 * __restrict__ p_inp,
+                                               const WORD32 *const p_inp_shape,
+                                               const WORD32 * __restrict__ p_axis,
+                                               WORD32 num_out_dims,
+                                               WORD32 num_inp_dims,
+                                               WORD32 num_axis_dims,
+                                               void * __restrict__ p_scratch_in);
 
 namespace impl {
 namespace HiFi {
diff --git a/backends/cadence/hifi/operators/CMakeLists.txt b/backends/cadence/hifi/operators/CMakeLists.txt
index 15e6c280ef6..b0ca716ed08 100644
--- a/backends/cadence/hifi/operators/CMakeLists.txt
+++ b/backends/cadence/hifi/operators/CMakeLists.txt
@@ -26,6 +26,7 @@ set(_aten_ops__srcs
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sigmoid.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_sub.cpp"
   "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_tanh.cpp"
+  "${EXECUTORCH_ROOT}/backends/cadence/hifi/operators/op_mean.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_bmm.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_cat.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/op_clone.cpp"
@@ -47,6 +48,7 @@ set(_aten_ops__srcs
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/matmul_ops_util.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/reduce_util.cpp"
   "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/repeat_util.cpp"
+  "${EXECUTORCH_ROOT}/kernels/portable/cpu/util/slice_util.cpp"
 )
 add_library(aten_ops_cadence ${_aten_ops__srcs})
 target_link_libraries(aten_ops_cadence PUBLIC executorch)
diff --git a/backends/cadence/hifi/operators/op_mean.cpp b/backends/cadence/hifi/operators/op_mean.cpp
new file mode 100644
index 00000000000..d16608f1da8
--- /dev/null
+++ b/backends/cadence/hifi/operators/op_mean.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <executorch/backends/cadence/hifi/kernels/kernels.h>
+#include <executorch/kernels/portable/cpu/util/reduce_util.h>
+#include <executorch/runtime/kernel/kernel_includes.h>
+#include <executorch/runtime/platform/assert.h>
+
+#include <stdlib.h>
+
+using exec_aten::Tensor;
+using exec_aten::ScalarType;
+using executorch::aten::RuntimeContext;
+using torch::executor::Error;
+using executorch::runtime::ArrayRef;
+using torch::executor::optional;
+
+namespace impl {
+namespace HiFi {
+namespace native {
+
+int prepare_data(
+    const Tensor& in,
+    Tensor& out,
+    optional<ArrayRef<int64_t>> dim_list,
+    int* inp_shape,
+    int* out_shape,
+    int* p_axis,
+    int num_inp_dims,
+    int num_out_dims) {
+  for (int i = 0; i < num_inp_dims; i++) {
+    inp_shape[i] = in.size(i);
+  }
+
+  for (int i = 0; i < num_out_dims; i++) {
+    out_shape[i] = out.size(i);
+  }
+
+  // Normalize negative dims and collect the axes to reduce over.
+  int num_axis_dims = 0;
+  for (const auto& d : dim_list.value()) {
+    if (d < 0) {
+      p_axis[num_axis_dims] = num_inp_dims + d;
+      num_axis_dims++;
+    } else {
+      p_axis[num_axis_dims] = d;
+      num_axis_dims++;
+    }
+  }
+
+  return num_axis_dims;
+}
+
+Tensor& mean_dim_out(
+    RuntimeContext& ctx,
+    const Tensor& in,
+    optional<ArrayRef<int64_t>> dim_list,
+    bool keepdim,
+    optional<ScalarType> dtype,
+    Tensor& out) {
+  ET_KERNEL_CHECK(
+      ctx,
+      check_mean_dim_args(in, dim_list, keepdim, dtype, out),
+      InvalidArgument,
+      out);
+
+  ET_KERNEL_CHECK(
+      ctx,
+      resize_reduction_out(in, dim_list, keepdim, out) == Error::Ok,
+      InvalidArgument,
+      out);
+
+  constexpr auto name = "mean.out";
+  constexpr int kNnlibMaxDim = 4; // the NNLib kernel handles at most 4-D tensors
+
+  // Take the NNLib fast path only for float tensors of rank <= kNnlibMaxDim;
+  // otherwise fall back to the portable reference implementation below.
+  bool optimized = 1;
+
+  if (out.scalar_type() != ScalarType::Float)
+    optimized = 0;
+
+  if (in.dim() > kNnlibMaxDim)
+    optimized = 0;
+
+  if (optimized) {
+    float* __restrict__ p_out = out.mutable_data_ptr<float>();
+    const float* __restrict__ p_inp =
+        (const float* __restrict__)in.const_data_ptr<float>();
+
+    int num_elm = in.numel();
+
+    int num_inp_dims = in.dim();
+    int num_out_dims = out.dim();
+
+    int inp_shape[kNnlibMaxDim];
+    int out_shape[kNnlibMaxDim];
+    int p_axis[kNnlibMaxDim];
+
+    for (int i = 0; i < kNnlibMaxDim; i++) {
+      out_shape[i] = 1;
+      inp_shape[i] = 1;
+      p_axis[i] = 1;
+    }
+
+    int num_axis_dims = prepare_data(
+        in, out, dim_list, inp_shape, out_shape, p_axis, num_inp_dims, num_out_dims);
+
+    if (num_axis_dims == num_inp_dims) {
+      num_out_dims = 1;
+      out_shape[0] = 1;
+    }
+
+    int scratch_size = xa_nn_reduce_getsize_nhwc(
+        -3, inp_shape, num_inp_dims, p_axis, num_axis_dims, 1);
+
+    void* __restrict__ p_scratch_in = (void* __restrict__)malloc(scratch_size);
+
+    xa_nn_reduce_mean_4D_f32_f32(
+        p_out,
+        out_shape,
+        p_inp,
+        inp_shape,
+        p_axis,
+        num_out_dims,
+        num_inp_dims,
+        num_axis_dims,
+        p_scratch_in);
+
+    // The scratch buffer is heap-allocated per call; release it before returning.
+    free(p_scratch_in);
+
+    return out;
+  }
+
+  ET_SWITCH_REALHB_TYPES(in.scalar_type(), ctx, name, CTYPE_IN, [&] {
+    ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, name, CTYPE_OUT, [&] {
+      CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
+      const size_t num = get_reduced_dim_product(in, dim_list);
+
+      for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+        CTYPE_OUT sum = 0;
+        if (in.numel() > 0) {
+          sum = torch::executor::map_reduce_over_dim_list<CTYPE_IN, CTYPE_OUT>(
+              [](CTYPE_IN v) { return static_cast<CTYPE_OUT>(v); },
+              [](CTYPE_OUT outv, CTYPE_OUT acc) { return acc + outv; },
+              in,
+              dim_list,
+              out_ix);
+        }
+        out_data[out_ix] = sum / static_cast<CTYPE_OUT>(num);
+      }
+    });
+  });
+
+  return out;
+}
+
+} // namespace native
+} // namespace HiFi
+} // namespace impl
diff --git a/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c b/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c
new file mode 100644
index 00000000000..5978a92d269
--- /dev/null
+++ b/backends/cadence/hifi/third-party/nnlib/xa_nn_reduce_32_32.c
@@ -0,0 +1,647 @@
+#include "xa_nnlib_common.h"
+#include <string.h>
+//#include "xa_nn_basic_state.h"
+#include "xa_nnlib_common_macros.h"
+
+#define ALIGNMENT_8 8
+
+#define ALIGN_PTR(x, bytes) ((((unsigned)(x))+(bytes-1))&(~(bytes-1)))
+
+static void vecmean16_inpx3(const xtfloatx2 *p_src1, const xtfloat* p_src2, const xtfloat* p_src3, xtfloatx2 *p_dst, int N){
+  int i = 0;
+  ae_valign align_src1, align_dst;
+  ae_valign align_src2, align_src3;
+  align_src1 = AE_LA64_PP(p_src1);
+  align_src2 = AE_LA64_PP(p_src2);
+  align_src3 = AE_LA64_PP(p_src3);
+  align_dst = AE_ZALIGN64();
+
+  for(i=0; i < (N >> 2); i++)
+  {
+    xtfloatx2 j1_h, j1_l, j2_h, j2_l;
+
+    xtfloatx2 wout1, wout2;
+    XT_LASX2IP(wout1, align_src1, p_src1);
+    XT_LASX2IP(wout2, align_src1, p_src1);
+
+    XT_LASX2IP(j1_h, align_src2, (xtfloatx2 *)p_src2);
+    XT_LASX2IP(j1_l, align_src2, (xtfloatx2 *)p_src2);
+    XT_LASX2IP(j2_h, align_src3, (xtfloatx2 *)p_src3);
+    XT_LASX2IP(j2_l, align_src3, (xtfloatx2 *)p_src3);
+
+    j1_h = XT_ADD_SX2(j1_h, j2_h);
+    j1_l = XT_ADD_SX2(j1_l, j2_l);
+    wout1 = XT_ADD_SX2(wout1, j1_h);
+    wout2 = XT_ADD_SX2(wout2, j1_l);
+
+    XT_SASX2IP(wout1, align_dst, p_dst);
+    XT_SASX2IP(wout2, align_dst, p_dst);
+  }
+  AE_SA64POS_FP(align_dst, p_dst); // finalize the stream
+
+  //Remainder Loop
+  for(i=0; i < (N & 3); i++)
+  {
+    xtfloat j1, j2;
+    xtfloat wout1;
+    XT_LSXP(wout1, (xtfloat *)p_src1, sizeof(xtfloat));
+    j1 = (xtfloat) *(p_src2 + i);
+    j2 = (xtfloat) *(p_src3 + i);
+
+    j1 = XT_ADD_S(j1, j2);
+    wout1 = XT_ADD_S(wout1, j1);
+    XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(xtfloat));
+  }
+}
+
+static void vecmean16_inpx2(const xtfloatx2 *p_src1, const xtfloat* p_src2, xtfloatx2 *p_dst, int N){
+  ae_valign align_src1, align_dst;
+  ae_valign align_src2;
+  align_src1 = AE_LA64_PP(p_src1);
+  align_src2 = AE_LA64_PP(p_src2);
+  align_dst = AE_ZALIGN64();
+
+  int i = 0;
+  for(i=0; i < (N >> 2); i++)
+  {
+    xtfloatx2 j1, j2;
+    xtfloatx2 wout1, wout2;
+    XT_LASX2IP(wout1, align_src1, p_src1);
+    XT_LASX2IP(wout2, align_src1, p_src1);
+
+    XT_LASX2IP(j1, align_src2, (xtfloatx2 *)p_src2);
+    XT_LASX2IP(j2, align_src2, (xtfloatx2 *)p_src2);
+
+    wout1 = XT_ADD_SX2(wout1, j1);
+    wout2 = XT_ADD_SX2(wout2, j2);
+
+    XT_SASX2IP(wout1, align_dst, p_dst);
+    XT_SASX2IP(wout2, align_dst, p_dst);
+  }
+  AE_SA64POS_FP(align_dst, p_dst); // finalize the stream
+
+  //Remainder Loop
+  for(i=0; i < (N & 3); i++)
+  {
+    xtfloat j1;
+    xtfloat wout1;
+    XT_LSXP(wout1, (xtfloat *)p_src1, sizeof(xtfloat));
+    j1 = (xtfloat) *(p_src2 + i);
+    wout1 = XT_ADD_S(wout1, j1);
+    XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(xtfloat));
+  }
+}
+
+static void vecmean32_inpx3(const xtfloatx2* p_src1, const xtfloatx2* p_wsrc2, const xtfloatx2* p_wsrc3, xtfloatx2 *p_dst, int N){
+  ae_valign align_src1, align_src2, align_src3, align_dst;
+  align_src1 = AE_LA64_PP(p_src1);
+  align_src2 = AE_LA64_PP(p_wsrc2);
+  align_src3 = AE_LA64_PP(p_wsrc3);
+  align_dst = AE_ZALIGN64();
+
+  int i = 0;
+  for(i=0; i < (N >> 2); i++)
+  {
+    xtfloatx2 j1, j2, j3, j4;
+    xtfloatx2 wj1, wj2;
+    xtfloatx2 wout1, wout2;
+    XT_LASX2IP(wout1, align_src1, p_src1);
+    XT_LASX2IP(wout2, align_src1, p_src1);
+    XT_LASX2IP(j1, align_src2, p_wsrc2);
+    XT_LASX2IP(j2, align_src3, p_wsrc3);
+    XT_LASX2IP(j3, align_src2, p_wsrc2);
+    XT_LASX2IP(j4, align_src3, p_wsrc3);
+
+    wj1 = XT_ADD_SX2(j1, j2);
+    wj2 = XT_ADD_SX2(j3, j4);
+    wout1 = XT_ADD_SX2(wout1, wj1);
+    wout2 = XT_ADD_SX2(wout2, wj2);
+    XT_SASX2IP(wout1, align_dst, p_dst);
+    XT_SASX2IP(wout2, align_dst, p_dst);
+  }
+  AE_SA64POS_FP(align_dst, p_dst); // finalize the stream
+
+  //Remainder Loop
+  for(i=0; i < (N & 3); i++)
+  {
+    xtfloat j1, j2;
+    xtfloat wj1;
+    xtfloat wout1;
+    XT_LSXP(wout1, (xtfloat *)p_src1, 4);
+    XT_LSXP(j1, (xtfloat *)p_wsrc2, 4);
+    XT_LSXP(j2, (xtfloat *)p_wsrc3, 4);
+    wj1 = XT_ADD_S(j1, j2);
+    wout1 = XT_ADD_S(wout1, wj1);
+    XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(xtfloat));
+  }
+}
+
+static void vecmean32_inpx2(const xtfloatx2* p_src1, const xtfloatx2* p_wsrc2, xtfloatx2 *p_dst, int N){
+  ae_valign align_src1, align_src2, align_dst;
+  align_src1 = AE_LA64_PP(p_src1);
+  align_src2 = AE_LA64_PP(p_wsrc2);
+  align_dst = AE_ZALIGN64();
+
+  int i = 0;
+  for(i=0; i < (N >> 2); i++)
+  {
+    xtfloatx2 j1, j2;
+    xtfloatx2 wout1, wout2;
+    XT_LASX2IP(wout1, align_src1, p_src1);
+    XT_LASX2IP(wout2, align_src1, p_src1);
+    XT_LASX2IP(j1, align_src2, p_wsrc2);
+    XT_LASX2IP(j2, align_src2, p_wsrc2);
+    wout1 = XT_ADD_SX2(wout1, j1);
+    wout2 = XT_ADD_SX2(wout2, j2);
+    XT_SASX2IP(wout1, align_dst, p_dst);
+    XT_SASX2IP(wout2, align_dst, p_dst);
+  }
+  AE_SA64POS_FP(align_dst, p_dst); // finalize the stream
+
+  //Remainder Loop
+  for(i=0; i < (N & 3); i++)
+  {
+    xtfloat j1;
+    xtfloat wout1;
+    XT_LSXP(wout1, (xtfloat *)p_src1, 4);
+    XT_LSXP(j1, (xtfloat *)p_wsrc2, 4);
+    wout1 = XT_ADD_S(wout1, j1);
+    XT_SSXP(wout1, (xtfloat *)p_dst, sizeof(WORD32));
+  }
+}
+
+static inline void xa_nn_reduce_sum_4D_f32_f32(const FLOAT32 * __restrict__ p_inp
+                                              ,const WORD32 *const p_4D_inp_shape
+                                              ,const WORD32 * __restrict__ p_axis_data
+                                              ,WORD32 num_inp_dims
+                                              ,WORD32 num_axis_dims
+                                              ,pVOID p_scratch_in)
+{
+  xtfloat *p_in = (xtfloat *)(p_inp);
+  xtfloat *p_scratch = (xtfloat *)(p_scratch_in);
+
+  int temp_inp_n = p_4D_inp_shape[0];
+  int temp_inp_h = p_4D_inp_shape[1];
+  int temp_inp_w = p_4D_inp_shape[2];
+  int temp_inp_c = p_4D_inp_shape[3];
+
+  int itr_axis = 0, itr_n = 0, itr_h = 0, itr_w = 0, itr_c = 0;
+  xtfloat *p_src2, *p_src3;
+  xtfloatx2 *p_src1;
+  xtfloatx2 * p_dst;
+  ae_valign align_src2;
+
+  int axis_dims_count = num_axis_dims;
+  if(axis_dims_count)
+  {
+    switch(p_axis_data[itr_axis])
+    {
+      case 0: {
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        for(itr_n=0; itr_n < (temp_inp_n & ~(2 - 1)); itr_n += 2)
+        {
+          p_src1 = (xtfloatx2 *)p_scratch;
+          p_src2 = p_in + itr_n * plane_size;
+          p_src3 = p_in + (itr_n + 1) * plane_size;
+          p_dst = (xtfloatx2 *)p_scratch;
+          vecmean16_inpx3(p_src1, p_src2, p_src3, p_dst, plane_size);
+        }
+
+        if(temp_inp_n & 1)
+        {
+          p_src1 = (xtfloatx2 *)p_scratch;
+          p_src2 = (p_in + itr_n * plane_size);
+          p_dst = (xtfloatx2 *)p_scratch;
+          vecmean16_inpx2(p_src1, p_src2, p_dst, plane_size);
+        }
+        temp_inp_n = 1;
+      }break;
+      case 1: {
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        int wc_plane_size = temp_inp_w * temp_inp_c;
+        for(itr_n=0; itr_n < (temp_inp_n); itr_n++)
+        {
+          p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+          for(itr_h=0; itr_h < (temp_inp_h & ~(2 - 1)); itr_h += 2)
+          {
+            p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size);
+            p_src3 = p_in + (itr_n * plane_size) + ((itr_h + 1) * wc_plane_size);
+            p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+            vecmean16_inpx3(p_src1, p_src2, p_src3, p_dst, wc_plane_size);
+            p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+          }
+
+          if(temp_inp_h & 1)
+          {
+            p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size);
+            p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+            vecmean16_inpx2(p_src1, p_src2, p_dst, wc_plane_size);
+          }
+        }
+        temp_inp_h = 1;
+      }break;
+      case 2:{
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        int wc_plane_size = temp_inp_w * temp_inp_c;
+        int hc_plane_size = temp_inp_h * temp_inp_c;
+
+        for(itr_n=0; itr_n < (temp_inp_n); itr_n++)
+        {
+          for(itr_h=0; itr_h < (temp_inp_h); itr_h++)
+          {
+            p_src1 = (xtfloatx2 *)(p_scratch + (((itr_n * hc_plane_size) + itr_h * temp_inp_c)));
+            for(itr_w=0; itr_w < (temp_inp_w & ~(2 - 1)); itr_w += 2)
+            {
+              p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c);
+              p_src3 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + ((itr_w + 1) * temp_inp_c);
+              p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c);
+              vecmean16_inpx3(p_src1, p_src2, p_src3, p_dst, temp_inp_c);
+              p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + (itr_h * temp_inp_c));
+            }
+
+            if(temp_inp_w & 1)
+            {
+              p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c);
+              p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c);
+              vecmean16_inpx2(p_src1, p_src2, p_dst, temp_inp_c);
+            }
+          }
+        }
+        temp_inp_w = 1;
+      }break;
+      case 3: {
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        int wc_plane_size = temp_inp_w * temp_inp_c;
+        int hw_plane_size = temp_inp_h * temp_inp_w;
+        int rem_c = (temp_inp_c & 7);
+
+        for(itr_n=0; itr_n < (temp_inp_n); itr_n++)
+        {
+          for(itr_h=0; itr_h < (temp_inp_h); itr_h++)
+          {
+            for(itr_w=0; itr_w < (temp_inp_w); itr_w++)
+            {
+              p_src1 = (xtfloatx2 *)(p_scratch + (((itr_n * hw_plane_size) + (itr_h * temp_inp_w) + itr_w)));
+              p_src2 = p_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c);
+              p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hw_plane_size) + (itr_h * temp_inp_w) + itr_w);
+              align_src2 = AE_LA64_PP(p_src2);
+
+              for(itr_c=0; itr_c < (temp_inp_c >> 3); itr_c++)
+              {
+                xtfloatx2 j11, j12, j21, j22, i1;
+                i1 = XT_LSX((xtfloat *)p_src1, 0);
+                XT_LASX2IP(j11, align_src2, (xtfloatx2 *)p_src2);
+                XT_LASX2IP(j12, align_src2, (xtfloatx2 *)p_src2);
+                XT_LASX2IP(j21, align_src2, (xtfloatx2 *)p_src2);
+                XT_LASX2IP(j22, align_src2, (xtfloatx2 *)p_src2);
+
+                j11 = XT_ADD_SX2(j11, j12);
+                j21 = XT_ADD_SX2(j21, j22);
+
+                xtfloatx2 t1 = XT_SEL32_HH_SX2(j11, j11);
+                xtfloatx2 t2 = XT_SEL32_HH_SX2(j21, j21);
+
+                j11 = XT_ADD_SX2(j11, t1);
+                j21 = XT_ADD_SX2(j21, t2);
+
+                j11 = XT_ADD_SX2(j11, j21);
+                i1 = XT_ADD_SX2(i1, j11);
+
+                XT_SSX(i1, (xtfloat *)p_dst, 0);
+
+                p_src1 = p_dst;
+              }
+              //Remainder Loop
+              for(itr_c=0; itr_c < rem_c ; itr_c++)
+              {
+                xtfloat j1;
+                xtfloat i1;
+                i1 = XT_LSX((xtfloat *)p_src1, 0);
+                j1 = *p_src2++;
+
+                i1 = XT_ADD_S(i1, j1);
+                XT_SSX(i1, (xtfloat *)p_dst, 0);
+              }
+            }
+          }
+        }
+        temp_inp_c = 1;
+      }break;
+      default:
+        break;
+    }
+
+    axis_dims_count--;
+    itr_axis++;
+  }
+
+  while(axis_dims_count)
+  {
+    ae_valign align_src;
+    xtfloat *p_scr_in = p_scratch;
+    xtfloatx2 *p_wsrc2, *p_wsrc3;
+    switch(p_axis_data[itr_axis])
+    {
+      case 0: {
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        for(itr_n=1; itr_n < ((temp_inp_n -1) & ~(2 - 1)); itr_n += 2)
+        {
+          p_src1 = (xtfloatx2 *)p_scratch;
+          p_wsrc2 = (xtfloatx2 *)(p_scr_in + itr_n * plane_size);
+          p_wsrc3 = (xtfloatx2 *)(p_scr_in + (itr_n + 1) * plane_size);
+          p_dst = (xtfloatx2 *)p_scratch;
+          vecmean32_inpx3(p_src1, p_wsrc2, p_wsrc3, p_dst, plane_size);
+        }
+
+        if((temp_inp_n - 1) & 1)
+        {
+          p_src1 = (xtfloatx2 *)p_scratch;
+          p_wsrc2 = (xtfloatx2 *)(p_scr_in + itr_n * plane_size);
+          p_dst = (xtfloatx2 *)p_scratch;
+          vecmean32_inpx2(p_src1, p_wsrc2, p_dst, plane_size);
+        }
+        temp_inp_n = 1;
+      }break;
+      case 1: {
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        int wc_plane_size = temp_inp_w * temp_inp_c;
+        for(itr_n=0; itr_n < (temp_inp_n); itr_n++)
+        {
+          p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * plane_size));
+          for(itr_h = 1; itr_h < ((temp_inp_h - 1) & ~(2 - 1)); itr_h += 2)
+          {
+            p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size));
+            p_wsrc3 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + ((itr_h + 1) * wc_plane_size));
+            p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+            vecmean32_inpx3(p_src1, p_wsrc2, p_wsrc3, p_dst, wc_plane_size);
+            p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+          }
+
+          if((temp_inp_h - 1) & 1)
+          {
+            p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size));
+            p_dst = (xtfloatx2 *)(p_scratch + (itr_n * wc_plane_size));
+            vecmean32_inpx2(p_src1, p_wsrc2, p_dst, plane_size);
+          }
+        }
+        temp_inp_h = 1;
+      }break;
+      case 2:{
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        int wc_plane_size = temp_inp_w * temp_inp_c;
+        int hc_plane_size = temp_inp_h * temp_inp_c;
+        for(itr_n=0; itr_n < (temp_inp_n); itr_n++)
+        {
+          for(itr_h=0; itr_h < (temp_inp_h); itr_h++)
+          {
+            p_src1 = (xtfloatx2 *)(p_scratch + ((itr_n * plane_size) + (itr_h * wc_plane_size)));
+            for(itr_w = 1; itr_w < ((temp_inp_w - 1) & ~(2 - 1)); itr_w += 2)
+            {
+              p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c));
+              p_wsrc3 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + ((itr_w + 1) * temp_inp_c));
+              p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c);
+              vecmean32_inpx3(p_src1, p_wsrc2, p_wsrc3, p_dst, temp_inp_c);
+              p_src1 = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + (itr_h * temp_inp_c));
+            }
+
+            if((temp_inp_w - 1) & 1)
+            {
+              p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c));
+              p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hc_plane_size) + itr_h * temp_inp_c);
+              vecmean32_inpx2(p_src1, p_wsrc2, p_dst, temp_inp_c);
+            }
+          }
+        }
+        temp_inp_w = 1;
+      }break;
+      case 3: {
+        int plane_size = temp_inp_h * temp_inp_w * temp_inp_c;
+        int wc_plane_size = temp_inp_w * temp_inp_c;
+        int hw_plane_size = temp_inp_h * temp_inp_w;
+        int rem_c = ((temp_inp_c) & 3);
+        for(itr_n=0; itr_n < (temp_inp_n); itr_n++)
+        {
+          for(itr_h=0; itr_h < (temp_inp_h); itr_h++)
+          {
+            for(itr_w=0; itr_w < (temp_inp_w); itr_w++)
+            {
+              p_wsrc2 = (xtfloatx2 *)(p_scr_in + (itr_n * plane_size) + (itr_h * wc_plane_size) + (itr_w * temp_inp_c));
+              p_dst = (xtfloatx2 *)(p_scratch + (itr_n * hw_plane_size) + (itr_h * temp_inp_w) + itr_w);
+              align_src = AE_LA64_PP(p_wsrc2);
+              xtfloatx2 i1 = AE_MOVXTFLOATX2_FROMF32X2(AE_MOVDA32(0));
+              for(itr_c = 0; itr_c < (temp_inp_c >> 2); itr_c++)
+              {
+                xtfloatx2 j1, j2;
+                XT_LASX2IP(j1, align_src, p_wsrc2);
+                XT_LASX2IP(j2, align_src, p_wsrc2);
+
+                xtfloatx2 t1 = XT_SEL32_HH_SX2(j1, j1);
+                xtfloatx2 t2 = XT_SEL32_HH_SX2(j2, j2);
+
+                j1 = XT_ADD_SX2(t1, j1);
+                j2 = XT_ADD_SX2(t2, j2);
+
+                i1 = XT_ADD_SX2(i1, j1);
+                i1 = XT_ADD_SX2(i1, j2);
+              }
+
+              //Remainder Loop
+              for(itr_c=0; itr_c < rem_c; itr_c++)
+              {
+                xtfloat j1;
+                XT_LSXP(j1, (xtfloat *)p_wsrc2, sizeof(xtfloat));
+                i1 = XT_ADD_S(i1, j1);
+              }
+              XT_SSX(i1, (xtfloat *)p_dst, 0);
+            }
+          }
+        }
+        temp_inp_c = 1;
+      }break;
+      default:
+        break;
+    }
+    axis_dims_count--;
+    itr_axis++;
+  }
+}
+
+WORD32 xa_nn_reduce_mean_4D_f32_f32(
+  FLOAT32 * __restrict__ p_out,
+  const WORD32 *const p_out_shape,
+  const FLOAT32 * __restrict__ p_inp,
+  const WORD32 *const p_inp_shape,
+  const WORD32 * __restrict__ p_axis,
+  WORD32 num_out_dims,
+  WORD32 num_inp_dims,
+  WORD32 num_axis_dims,
+  void * __restrict__ p_scratch_in)
+{
+  /* NULL pointer checks */
+  XA_NNLIB_ARG_CHK_PTR(p_out, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_axis, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_out_shape, -1);
+  XA_NNLIB_ARG_CHK_PTR(p_inp_shape, -1);
+
+  /* Invalid input checks */
+  XA_NNLIB_ARG_CHK_COND(((num_inp_dims <= 0) || (num_inp_dims > 4)), -1);
+  XA_NNLIB_ARG_CHK_COND(((num_out_dims <= 0) || (num_out_dims > 4)), -1);
+  XA_NNLIB_ARG_CHK_COND(((num_axis_dims < 0) || (num_axis_dims > 4)), -1);
+
+  int axis_itr = 0, inp_itr = 0, out_itr = 0;
+  int num_elm_in_axis = 1;
+  int current, past = -1;
+  for(axis_itr=0; axis_itr < num_axis_dims; axis_itr++)
+  {
+    current = p_axis[axis_itr];
+    XA_NNLIB_ARG_CHK_COND(((current < 0) || (current > (num_inp_dims - 1))), -1);
+    XA_NNLIB_ARG_CHK_COND((p_inp_shape[current] > 1024), -1);
+
+    /* Avoid calculation in case of repeated axis dims */
+    if(current != past)
+    {
+      num_elm_in_axis *= p_inp_shape[current];
+      past = current;
+    }
+  }
+
+  for(inp_itr=0; inp_itr < num_inp_dims; inp_itr++)
+  {
+    XA_NNLIB_ARG_CHK_COND((p_inp_shape[inp_itr] <= 0), -1);
+  }
+
+  int out_length = 1;
+  for(out_itr=0; out_itr < num_out_dims; out_itr++)
+  {
+    XA_NNLIB_ARG_CHK_COND((p_out_shape[out_itr] <= 0), -1);
+    out_length *= p_out_shape[out_itr];
+  }
+
+  /* Pointer alignment checks */
+  XA_NNLIB_ARG_CHK_ALIGN(p_out, sizeof(FLOAT32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp, sizeof(FLOAT32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_axis, sizeof(WORD32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_out_shape, sizeof(WORD32), -1);
+  XA_NNLIB_ARG_CHK_ALIGN(p_inp_shape, sizeof(WORD32), -1);
+
+  FLOAT32 *p_in = (FLOAT32 *)(p_inp);
+  WORD32 *p_scratch = (WORD32 *)(ALIGN_PTR(p_scratch_in, ALIGNMENT_8));
+
+  // Reorder the axis data so that the reduction is computed first across the
+  // largest input shape dim in axis. This is required to minimize the
+  // scratch usage.
+  int inp_length = 1, p_axis_data[4] = {0}, inp_shape_max;
+  if(num_axis_dims)
+  {
+    inp_shape_max = p_inp_shape[p_axis[0]];
+    axis_itr = 1;
+    int max_axis_itr = 0;
+    int temp_p_axis_0 = p_axis[0];
+    for(axis_itr = 0; axis_itr < num_axis_dims; axis_itr++)
+    {
+      p_axis_data[axis_itr] = p_axis[axis_itr];
+    }
+    for(axis_itr = 1; axis_itr < num_axis_dims; axis_itr++)
+    {
+      if(p_inp_shape[p_axis[axis_itr]] > inp_shape_max)
+      {
+        inp_shape_max = p_inp_shape[p_axis[axis_itr]];
+        max_axis_itr = axis_itr;
+      }
+    }
+    p_axis_data[0] = p_axis_data[max_axis_itr];
+    p_axis_data[max_axis_itr] = temp_p_axis_0;
+
+    inp_itr = 0;
+    for(inp_itr=0; inp_itr < num_inp_dims; inp_itr++)
+    {
+      inp_length *= p_inp_shape[inp_itr];
+    }
+
+    memset(p_scratch, 0, ((inp_length / inp_shape_max) * sizeof(WORD32))); //TODO: Alternate approach for memset?
+  }
+
+  // Promoting lesser dim tensors to 4D tensors. Also modifying axis
+  // data accordingly.
+  int p_4D_inp_shape[4] = {1, 1, 1, 1};
+  int itr = num_inp_dims - 1;
+  int count = 3;
+  while(itr >= 0)
+  {
+    p_4D_inp_shape[count] = p_inp_shape[itr];
+    itr--;
+    count--;
+  }
+  for(itr = 0; itr < num_axis_dims; itr++)
+  {
+    p_axis_data[itr] = p_axis_data[itr] + (4 - num_inp_dims);
+  }
+  ae_valign align_out = AE_ZALIGN64();
+
+  if(num_axis_dims)
+  {
+    if(num_elm_in_axis > 1)
+    {
+      xa_nn_reduce_sum_4D_f32_f32(p_in,
+                                  p_4D_inp_shape,
+                                  p_axis_data,
+                                  num_inp_dims,
+                                  num_axis_dims,
+                                  p_scratch);
+      itr = 0;
+      xtfloatx2 *p_src1 = (xtfloatx2 *)(p_scratch);
+
+      float div = 1;
+
+      for(int i = 0; i < num_axis_dims; i++)
+      {
+        div = div * (float)p_4D_inp_shape[p_axis_data[i]];
+      }
+
+      float mul = 1 / div;
+
+      xtfloatx2 multiplier = XT_LSX((xtfloat *)&mul, 0);
+
+      for(itr = 0; itr < (out_length >> 3); itr++)
+      {
+        xtfloatx2 temp1, temp2, temp3, temp4;
+
+        temp2 = XT_LSX2X(p_src1, 8);
+        temp3 = XT_LSX2X(p_src1, 16);
+        temp4 = XT_LSX2X(p_src1, 24);
+        XT_LSX2XP(temp1, p_src1, 32);
+
+        temp1 = XT_MUL_SX2(temp1, multiplier);
+        temp2 = XT_MUL_SX2(temp2, multiplier);
+        temp3 = XT_MUL_SX2(temp3, multiplier);
+        temp4 = XT_MUL_SX2(temp4, multiplier);
+
+        XT_SASX2IP(temp1, align_out, (xtfloatx2 *)p_out);
+        XT_SASX2IP(temp2, align_out, (xtfloatx2 *)p_out);
+        XT_SASX2IP(temp3, align_out, (xtfloatx2 *)p_out);
+        XT_SASX2IP(temp4, align_out, (xtfloatx2 *)p_out);
+      }
+      AE_SA64POS_FP(align_out, p_out);
+
+      for(itr = 0; itr < (out_length & 7); itr++)
+      {
+        xtfloat temp1;
+        XT_LSXP(temp1, (xtfloat *)p_src1, 4);
+        temp1 = XT_MUL_S(temp1, multiplier);
+        XT_SSXP(temp1, (xtfloat *)p_out, 4);
+      }
+    }
+    else
+    {
+      memcpy(p_out, p_inp, inp_length * sizeof(FLOAT32));
+    }
+  }
+  else
+  {
+    memcpy(p_out, p_inp, inp_length * sizeof(FLOAT32));
+  }
+
+  return 0;
+}
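Reviewer note (illustration only, not part of the diff): before reducing, xa_nn_reduce_mean_4D_f32_f32 right-aligns the input shape into 4D, shifts the axis indices by the same offset, and swaps the largest reduced dimension to the front of the axis list so that the first reduction pass shrinks the data, and therefore the scratch buffer, as much as possible. The sketch below restates that bookkeeping as plain host-side C++; the helper name promote_to_4d is mine and does not exist in NNLib.

// Host-side sketch of the shape/axis massaging done by the kernel (assumed helper).
#include <cstdio>
#include <utility>

static void promote_to_4d(const int* shape, int rank, const int* axes, int n_axes,
                          int shape4d[4], int axes4d[4]) {
  for (int i = 0; i < 4; i++) shape4d[i] = 1;
  for (int i = 0; i < rank; i++) shape4d[4 - rank + i] = shape[i];   // right-align dims
  for (int i = 0; i < n_axes; i++) axes4d[i] = axes[i] + (4 - rank); // shift axes the same way
  // Put the largest reduced dim first, mirroring the p_axis_data swap in the kernel.
  int max_it = 0;
  for (int i = 1; i < n_axes; i++)
    if (shape4d[axes4d[i]] > shape4d[axes4d[max_it]]) max_it = i;
  std::swap(axes4d[0], axes4d[max_it]);
}

int main() {
  int shape[3] = {8, 16, 32};   // a rank-3 input
  int axes[2] = {0, 2};         // reduce over dims 0 and 2
  int shape4d[4], axes4d[4];
  promote_to_4d(shape, 3, axes, 2, shape4d, axes4d);
  std::printf("4D shape: %d %d %d %d, axes: %d %d\n",
              shape4d[0], shape4d[1], shape4d[2], shape4d[3], axes4d[0], axes4d[1]);
  // Prints "4D shape: 1 8 16 32, axes: 3 1": dim 2 (size 32) becomes 4D axis 3 and is
  // reduced first because it has the largest reduced extent.
  return 0;
}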
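Reviewer note (illustration only, not part of the diff): numerically, both the HiFi path and the portable fallback in op_mean.cpp compute a reduce-sum over the chosen axes followed by a single multiply by 1 / prod(reduced extents), which is how the kernel scales the scratch buffer before writing p_out. Below is a minimal scalar reference under the assumption of a contiguous NHWC-style layout; mean_4d is a hypothetical helper, not an ExecuTorch or NNLib API.

// Scalar reference: sum over the flagged dims, then one multiply per output element.
#include <cstdio>
#include <vector>

static std::vector<float> mean_4d(const std::vector<float>& in, const int s[4],
                                  const bool reduce[4]) {
  int os[4];
  float inv = 1.0f;
  for (int d = 0; d < 4; d++) {
    os[d] = reduce[d] ? 1 : s[d];
    if (reduce[d]) inv /= (float)s[d];        // 1 / prod(reduced extents)
  }
  std::vector<float> out((size_t)os[0] * os[1] * os[2] * os[3], 0.0f);
  for (int n = 0; n < s[0]; n++)
    for (int h = 0; h < s[1]; h++)
      for (int w = 0; w < s[2]; w++)
        for (int c = 0; c < s[3]; c++) {
          size_t i = ((size_t)(n * s[1] + h) * s[2] + w) * s[3] + c;
          int on = reduce[0] ? 0 : n, oh = reduce[1] ? 0 : h;
          int ow = reduce[2] ? 0 : w, oc = reduce[3] ? 0 : c;
          size_t o = ((size_t)(on * os[1] + oh) * os[2] + ow) * os[3] + oc;
          out[o] += in[i];                     // reduce-sum pass
        }
  for (float& v : out) v *= inv;               // single scaling pass, like the kernel
  return out;
}

int main() {
  int s[4] = {1, 2, 2, 2};
  bool reduce[4] = {false, false, true, false}; // mean over the W dim
  std::vector<float> in = {1, 2, 3, 4, 5, 6, 7, 8};
  std::vector<float> out = mean_4d(in, s, reduce);
  for (float v : out) std::printf("%g ", v);    // prints "2 3 6 7"
  std::printf("\n");
  return 0;
}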