Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
136 changes: 0 additions & 136 deletions backends/cadence/aot/functions.yaml

This file was deleted.

172 changes: 171 additions & 1 deletion backends/cadence/aot/functions_hifi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,61 @@
kernels:
- arg_meta: null
kernel_name: torch::executor::div_out_mode

- op: floor_divide.out
kernels:
- arg_meta: null
kernel_name: torch::executor::floor_divide_out

- op: remainder.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::remainder_Tensor_out

- op: remainder.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::remainder_Scalar_out

- op: fmod.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::fmod_Tensor_out

- op: fmod.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::fmod_Scalar_out

- op: bitwise_and.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_and_Scalar_out

- op: bitwise_and.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_and_Tensor_out

- op: bitwise_or.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_or_Scalar_out

- op: bitwise_or.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_or_Tensor_out

- op: bitwise_xor.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_xor_Scalar_out

- op: bitwise_xor.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::bitwise_xor_Tensor_out

- op: embedding.out
kernels:
Expand All @@ -67,6 +122,11 @@
- arg_meta: null
kernel_name: torch::executor::mul_out

- op: mul.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::mul_scalar_out

- op: permute_copy.out
kernels:
- arg_meta: null
Expand Down Expand Up @@ -102,10 +162,105 @@
- arg_meta: null
kernel_name: torch::executor::where_out

- op: scalar_tensor.out
kernels:
- arg_meta: null
kernel_name: torch::executor::scalar_tensor_out

- op: rsqrt.out
kernels:
- arg_meta: null
kernel_name: torch::executor::rsqrt_out

- op: ge.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::ge_scalar_out

- op: ge.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::ge_tensor_out

- op: gt.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::gt_scalar_out

- op: gt.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::gt_tensor_out

- op: le.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::le_scalar_out

- op: le.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::le_tensor_out

- op: lt.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::lt_scalar_out

- op: lt.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::lt_tensor_out

- op: eq.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::eq_scalar_out

- op: eq.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::eq_tensor_out

- op: ne.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::ne_scalar_out

- op: ne.Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::ne_tensor_out

- op: pow.Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::pow_Scalar_out

- op: pow.Tensor_Scalar_out
kernels:
- arg_meta: null
kernel_name: torch::executor::pow_Tensor_Scalar_out

- op: pow.Tensor_Tensor_out
kernels:
- arg_meta: null
kernel_name: torch::executor::pow_Tensor_Tensor_out

- op: atan2.out
kernels:
- arg_meta: null
kernel_name: torch::executor::atan2_out

- op: gelu.out
kernels:
- arg_meta: null
kernel_name: torch::executor::gelu_out

- op: empty.out
kernels:
Expand Down Expand Up @@ -139,6 +294,21 @@
kernels:
- arg_meta: null
kernel_name: impl::HiFi::quantized_linear_out

- func: cadence::quantized_matmul.out(Tensor X, int X_zero_point, Tensor Y, int Y_zero_point, Tensor? bias, int out_multiplier, int out_shift, int out_zero_point, bool transposed, *, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
kernel_name: impl::HiFi::quantized_matmul_out

- func: cadence::quantized_conv.out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, Tensor weight_zero_point, Tensor bias_scale, float out_scale, int out_zero_point, Tensor out_multiplier, Tensor out_shift, bool channel_last=False, *, Tensor(a!) out) -> Tensor(a!)
kernels:
- arg_meta: null
kernel_name: impl::HiFi::quantized_conv_out

- func: cadence::quantized_relu.out(Tensor X, Tensor X_zero_point, *, Tensor(a!) out) -> Tensor(a!)
kernels:
Expand Down
8 changes: 8 additions & 0 deletions backends/cadence/hifi/kernels/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,14 @@ add_library(
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_where_f32xf32_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_div_broadcast_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_floor_div_broadcast_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_concat_32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_broadcast_32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_greater_lesser_equal_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_remainder_broadcast_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_fmod_broadcast_f32.c
${EXECUTORCH_ROOT}/backends/cadence/hifi/third-party/nnlib/xa_nn_elm_logicalxor_bool_bool.c
)

target_include_directories(
Expand Down
Loading