From e962b89eb04e51d2d05843382f6c543196cd90a6 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Tue, 13 Aug 2024 12:38:12 +0800 Subject: [PATCH 1/4] net: 3snic: Add support for 3snic 3s9xx ethernet card support 3SNIC 910/920/930 including following features: *1. single-root I/O virtualization (SR-IOV) 2. virtual machine multi queue (VMMQ) 3. receive side scaling (RSS) 4. physical function (PF) passthrough VMs 5. the PF promiscuous mode,unicast or multicast MAC filtering, and all multicast mode 6. IPv4/IPv6, checksum offload,TCP Segmentation Offload (TSO), and Large Receive Offload (LRO) 7. in-band one-click logs collection 8. loopback tests 9. port location indicators product specs: Model Port Config PCIe 3S910 2 x 25GE SFP28 Gen3 x8 3S920 4 x 25GE SFP28 Gen4 x16 3S930 2 x 100GE QSFP28 Gen4 x16 Link: https://ramaxel.com/ Link: https://gitee.com/OpenCloudOS/OpenCloudOS-Kernel/pulls/141 Link: https://gitee.com/OpenCloudOS/OpenCloudOS-Kernel/pulls/159 Signed-off-by: weiwei1 Signed-off-by: WangYuli --- .../device_drivers/ethernet/3snic/sssnic.rst | 67 + .../device_drivers/ethernet/index.rst | 1 + drivers/net/ethernet/3snic/Kconfig | 22 + drivers/net/ethernet/3snic/Makefile | 6 + drivers/net/ethernet/3snic/sssnic/Kconfig | 19 + drivers/net/ethernet/3snic/sssnic/Makefile | 6 + .../3snic/sssnic/hw/include/sss_adapter.h | 78 ++ .../3snic/sssnic/hw/include/sss_adm_info.h | 121 ++ .../3snic/sssnic/hw/include/sss_aeq_info.h | 34 + .../3snic/sssnic/hw/include/sss_board_info.h | 32 + .../3snic/sssnic/hw/include/sss_ceq_info.h | 29 + .../3snic/sssnic/hw/include/sss_csr.h | 171 +++ .../3snic/sssnic/hw/include/sss_ctrlq_info.h | 98 ++ .../3snic/sssnic/hw/include/sss_eq_info.h | 77 ++ .../3snic/sssnic/hw/include/sss_hwdev.h | 276 ++++ .../3snic/sssnic/hw/include/sss_hwif.h | 103 ++ .../3snic/sssnic/hw/include/sss_irq_info.h | 28 + .../3snic/sssnic/hw/include/sss_mbx_info.h | 110 ++ .../sssnic/hw/include/sss_mgmt_channel.h | 141 ++ .../3snic/sssnic/hw/include/sss_mgmt_info.h | 123 ++ 
.../3snic/sssnic/hw/include/sss_sriov_info.h | 22 + .../3snic/sssnic/hw/sss_adapter_mgmt.c | 723 +++++++++++ .../3snic/sssnic/hw/sss_adapter_mgmt.h | 100 ++ .../net/ethernet/3snic/sssnic/hw/sss_common.c | 92 ++ .../net/ethernet/3snic/sssnic/hw/sss_common.h | 19 + .../ethernet/3snic/sssnic/hw/sss_hw_main.c | 88 ++ .../ethernet/3snic/sssnic/hw/sss_hwdev_api.c | 136 ++ .../ethernet/3snic/sssnic/hw/sss_hwdev_api.h | 19 + .../ethernet/3snic/sssnic/hw/sss_hwdev_cap.c | 748 +++++++++++ .../ethernet/3snic/sssnic/hw/sss_hwdev_cap.h | 12 + .../3snic/sssnic/hw/sss_hwdev_export.c | 599 +++++++++ .../ethernet/3snic/sssnic/hw/sss_hwdev_init.c | 548 ++++++++ .../ethernet/3snic/sssnic/hw/sss_hwdev_init.h | 15 + .../3snic/sssnic/hw/sss_hwdev_io_flush.c | 141 ++ .../3snic/sssnic/hw/sss_hwdev_io_flush.h | 11 + .../ethernet/3snic/sssnic/hw/sss_hwdev_link.c | 729 +++++++++++ .../ethernet/3snic/sssnic/hw/sss_hwdev_link.h | 14 + .../3snic/sssnic/hw/sss_hwdev_mgmt_channel.c | 770 +++++++++++ .../3snic/sssnic/hw/sss_hwdev_mgmt_channel.h | 127 ++ .../3snic/sssnic/hw/sss_hwdev_mgmt_info.c | 97 ++ .../3snic/sssnic/hw/sss_hwdev_mgmt_info.h | 12 + .../ethernet/3snic/sssnic/hw/sss_hwif_adm.c | 804 ++++++++++++ .../ethernet/3snic/sssnic/hw/sss_hwif_adm.h | 16 + .../3snic/sssnic/hw/sss_hwif_adm_common.h | 79 ++ .../3snic/sssnic/hw/sss_hwif_adm_init.c | 762 +++++++++++ .../3snic/sssnic/hw/sss_hwif_adm_init.h | 13 + .../ethernet/3snic/sssnic/hw/sss_hwif_aeq.c | 568 ++++++++ .../ethernet/3snic/sssnic/hw/sss_hwif_aeq.h | 28 + .../ethernet/3snic/sssnic/hw/sss_hwif_api.c | 293 +++++ .../ethernet/3snic/sssnic/hw/sss_hwif_api.h | 127 ++ .../ethernet/3snic/sssnic/hw/sss_hwif_ceq.c | 441 +++++++ .../ethernet/3snic/sssnic/hw/sss_hwif_ceq.h | 19 + .../ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c | 928 ++++++++++++++ .../ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h | 33 + .../3snic/sssnic/hw/sss_hwif_ctrlq_export.c | 171 +++ .../3snic/sssnic/hw/sss_hwif_ctrlq_init.c | 598 +++++++++ 
.../3snic/sssnic/hw/sss_hwif_ctrlq_init.h | 15 + .../ethernet/3snic/sssnic/hw/sss_hwif_eq.c | 355 +++++ .../ethernet/3snic/sssnic/hw/sss_hwif_eq.h | 91 ++ .../3snic/sssnic/hw/sss_hwif_export.c | 147 +++ .../ethernet/3snic/sssnic/hw/sss_hwif_init.c | 413 ++++++ .../ethernet/3snic/sssnic/hw/sss_hwif_init.h | 13 + .../ethernet/3snic/sssnic/hw/sss_hwif_irq.c | 125 ++ .../ethernet/3snic/sssnic/hw/sss_hwif_irq.h | 12 + .../ethernet/3snic/sssnic/hw/sss_hwif_mbx.c | 656 ++++++++++ .../ethernet/3snic/sssnic/hw/sss_hwif_mbx.h | 94 ++ .../3snic/sssnic/hw/sss_hwif_mbx_export.c | 184 +++ .../3snic/sssnic/hw/sss_hwif_mbx_init.c | 888 +++++++++++++ .../3snic/sssnic/hw/sss_hwif_mbx_init.h | 14 + .../3snic/sssnic/hw/sss_hwif_mgmt_common.h | 21 + .../3snic/sssnic/hw/sss_hwif_mgmt_init.c | 298 +++++ .../3snic/sssnic/hw/sss_hwif_mgmt_init.h | 13 + .../net/ethernet/3snic/sssnic/hw/sss_pci.c | 37 + .../net/ethernet/3snic/sssnic/hw/sss_pci.h | 33 + .../ethernet/3snic/sssnic/hw/sss_pci_error.c | 47 + .../ethernet/3snic/sssnic/hw/sss_pci_error.h | 11 + .../ethernet/3snic/sssnic/hw/sss_pci_global.c | 65 + .../ethernet/3snic/sssnic/hw/sss_pci_global.h | 18 + .../ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h | 14 + .../ethernet/3snic/sssnic/hw/sss_pci_probe.c | 588 +++++++++ .../ethernet/3snic/sssnic/hw/sss_pci_probe.h | 12 + .../ethernet/3snic/sssnic/hw/sss_pci_remove.c | 264 ++++ .../ethernet/3snic/sssnic/hw/sss_pci_remove.h | 21 + .../3snic/sssnic/hw/sss_pci_shutdown.c | 41 + .../3snic/sssnic/hw/sss_pci_shutdown.h | 10 + .../ethernet/3snic/sssnic/hw/sss_pci_sriov.c | 190 +++ .../ethernet/3snic/sssnic/hw/sss_pci_sriov.h | 16 + drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c | 160 +++ .../ethernet/3snic/sssnic/hw/tool/sss_tool.h | 29 + .../3snic/sssnic/hw/tool/sss_tool_chip.c | 802 ++++++++++++ .../3snic/sssnic/hw/tool/sss_tool_chip.h | 15 + .../3snic/sssnic/hw/tool/sss_tool_hw.h | 212 +++ .../3snic/sssnic/hw/tool/sss_tool_main.c | 740 +++++++++++ .../3snic/sssnic/hw/tool/sss_tool_sdk.c | 527 
++++++++ .../3snic/sssnic/hw/tool/sss_tool_sdk.h | 16 + .../3snic/sssnic/hw/tool/sss_tool_sm.c | 383 ++++++ .../3snic/sssnic/hw/tool/sss_tool_sm.h | 21 + .../3snic/sssnic/include/hw/sss_hw_aeq.h | 29 + .../3snic/sssnic/include/hw/sss_hw_ceq.h | 14 + .../3snic/sssnic/include/hw/sss_hw_common.h | 121 ++ .../3snic/sssnic/include/hw/sss_hw_ctrlq.h | 67 + .../3snic/sssnic/include/hw/sss_hw_event.h | 160 +++ .../3snic/sssnic/include/hw/sss_hw_export.h | 228 ++++ .../3snic/sssnic/include/hw/sss_hw_irq.h | 36 + .../3snic/sssnic/include/hw/sss_hw_mbx.h | 332 +++++ .../3snic/sssnic/include/hw/sss_hw_mbx_msg.h | 260 ++++ .../3snic/sssnic/include/hw/sss_hw_mgmt.h | 22 + .../3snic/sssnic/include/hw/sss_hw_sriov.h | 13 + .../sssnic/include/hw/sss_hw_statistics.h | 38 + .../3snic/sssnic/include/hw/sss_hw_svc_cap.h | 281 ++++ .../sssnic/include/hw/sss_hw_uld_driver.h | 47 + .../3snic/sssnic/include/hw/sss_hw_wq.h | 126 ++ .../3snic/sssnic/include/hw/sss_hwif_export.h | 89 ++ .../sssnic/include/kernel/sss_linux_kernel.h | 335 +++++ .../ethernet/3snic/sssnic/include/sss_hw.h | 24 + .../3snic/sssnic/include/sss_kernel.h | 35 + .../3snic/sssnic/include/sss_tool_comm.h | 114 ++ .../3snic/sssnic/include/sss_version.h | 9 + .../net/ethernet/3snic/sssnic/nic/Makefile | 90 ++ .../sssnic/nic/include/sss_nic_cfg_define.h | 608 +++++++++ .../nic/include/sss_nic_cfg_mag_define.h | 460 +++++++ .../nic/include/sss_nic_cfg_rss_define.h | 56 + .../nic/include/sss_nic_cfg_vf_define.h | 27 + .../3snic/sssnic/nic/include/sss_nic_common.h | 21 + .../sssnic/nic/include/sss_nic_dcb_define.h | 52 + .../sssnic/nic/include/sss_nic_dev_define.h | 272 ++++ .../sssnic/nic/include/sss_nic_io_define.h | 108 ++ .../sssnic/nic/include/sss_nic_irq_define.h | 40 + .../sssnic/nic/include/sss_nic_qp_define.h | 48 + .../sssnic/nic/include/sss_nic_rx_define.h | 114 ++ .../sssnic/nic/include/sss_nic_tcam_define.h | 184 +++ .../sssnic/nic/include/sss_nic_tx_define.h | 85 ++ .../ethernet/3snic/sssnic/nic/sss_nic_cfg.c | 
1140 +++++++++++++++++ .../ethernet/3snic/sssnic/nic/sss_nic_cfg.h | 104 ++ .../ethernet/3snic/sssnic/nic/sss_nic_dcb.c | 257 ++++ .../ethernet/3snic/sssnic/nic/sss_nic_dcb.h | 29 + .../3snic/sssnic/nic/sss_nic_ethtool.c | 485 +++++++ .../3snic/sssnic/nic/sss_nic_ethtool.h | 10 + .../3snic/sssnic/nic/sss_nic_ethtool_api.c | 810 ++++++++++++ .../3snic/sssnic/nic/sss_nic_ethtool_api.h | 77 ++ .../3snic/sssnic/nic/sss_nic_ethtool_stats.c | 128 ++ .../3snic/sssnic/nic/sss_nic_ethtool_stats.h | 26 + .../sssnic/nic/sss_nic_ethtool_stats_api.c | 1057 +++++++++++++++ .../sssnic/nic/sss_nic_ethtool_stats_api.h | 109 ++ .../ethernet/3snic/sssnic/nic/sss_nic_event.c | 562 ++++++++ .../ethernet/3snic/sssnic/nic/sss_nic_event.h | 58 + .../3snic/sssnic/nic/sss_nic_filter.c | 495 +++++++ .../3snic/sssnic/nic/sss_nic_filter.h | 13 + .../ethernet/3snic/sssnic/nic/sss_nic_io.c | 953 ++++++++++++++ .../ethernet/3snic/sssnic/nic/sss_nic_io.h | 106 ++ .../ethernet/3snic/sssnic/nic/sss_nic_irq.c | 321 +++++ .../ethernet/3snic/sssnic/nic/sss_nic_irq.h | 15 + .../3snic/sssnic/nic/sss_nic_mag_cfg.c | 765 +++++++++++ .../3snic/sssnic/nic/sss_nic_mag_cfg.h | 79 ++ .../ethernet/3snic/sssnic/nic/sss_nic_main.c | 1077 ++++++++++++++++ .../3snic/sssnic/nic/sss_nic_netdev_ops.c | 799 ++++++++++++ .../3snic/sssnic/nic/sss_nic_netdev_ops.h | 15 + .../3snic/sssnic/nic/sss_nic_netdev_ops_api.c | 1074 ++++++++++++++++ .../3snic/sssnic/nic/sss_nic_netdev_ops_api.h | 69 + .../3snic/sssnic/nic/sss_nic_ntuple.c | 919 +++++++++++++ .../3snic/sssnic/nic/sss_nic_ntuple.h | 27 + .../ethernet/3snic/sssnic/nic/sss_nic_rss.c | 1002 +++++++++++++++ .../ethernet/3snic/sssnic/nic/sss_nic_rss.h | 75 ++ .../3snic/sssnic/nic/sss_nic_rss_cfg.c | 341 +++++ .../3snic/sssnic/nic/sss_nic_rss_cfg.h | 32 + .../ethernet/3snic/sssnic/nic/sss_nic_rx.c | 904 +++++++++++++ .../ethernet/3snic/sssnic/nic/sss_nic_rx.h | 56 + .../3snic/sssnic/nic/sss_nic_rx_init.c | 288 +++++ .../3snic/sssnic/nic/sss_nic_rx_init.h | 34 + 
.../3snic/sssnic/nic/sss_nic_rx_reset.c | 243 ++++ .../3snic/sssnic/nic/sss_nic_rx_reset.h | 16 + .../ethernet/3snic/sssnic/nic/sss_nic_tx.c | 866 +++++++++++++ .../ethernet/3snic/sssnic/nic/sss_nic_tx.h | 26 + .../3snic/sssnic/nic/sss_nic_tx_init.c | 210 +++ .../3snic/sssnic/nic/sss_nic_tx_init.h | 26 + .../3snic/sssnic/nic/sss_nic_vf_cfg.c | 603 +++++++++ .../3snic/sssnic/nic/sss_nic_vf_cfg.h | 46 + .../3snic/sssnic/nic/tool/sss_tool_nic.h | 111 ++ .../3snic/sssnic/nic/tool/sss_tool_nic_dcb.c | 457 +++++++ .../3snic/sssnic/nic/tool/sss_tool_nic_dcb.h | 16 + .../3snic/sssnic/nic/tool/sss_tool_nic_func.c | 108 ++ .../3snic/sssnic/nic/tool/sss_tool_nic_func.h | 10 + .../sssnic/nic/tool/sss_tool_nic_phy_attr.c | 415 ++++++ .../sssnic/nic/tool/sss_tool_nic_phy_attr.h | 37 + .../sssnic/nic/tool/sss_tool_nic_qp_info.c | 323 +++++ .../sssnic/nic/tool/sss_tool_nic_qp_info.h | 28 + .../sssnic/nic/tool/sss_tool_nic_stats.c | 136 ++ .../sssnic/nic/tool/sss_tool_nic_stats.h | 16 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + 190 files changed, 41606 insertions(+) create mode 100644 Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst create mode 100644 drivers/net/ethernet/3snic/Kconfig create mode 100644 drivers/net/ethernet/3snic/Makefile create mode 100644 drivers/net/ethernet/3snic/sssnic/Kconfig create mode 100644 drivers/net/ethernet/3snic/sssnic/Makefile create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h create mode 100644 
drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_common.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_common.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h create mode 100644 
drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h create mode 100644 
drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c create mode 100644 
drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c create mode 100644 drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/sss_hw.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h create mode 100644 drivers/net/ethernet/3snic/sssnic/include/sss_version.h create mode 100644 
drivers/net/ethernet/3snic/sssnic/nic/Makefile create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h create mode 100644 
drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h 
create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c create mode 100644 drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h diff --git a/Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst b/Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst new file mode 100644 index 0000000000000..2bf2856bc0f8d --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/3snic/sssnic.rst @@ -0,0 +1,67 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +==================================================== +Linux Kernel Driver for 3SNIC Intelligent NIC family +==================================================== + +Contents +======== + +- `Overview`_ +- `Supported PCI vendor ID/device IDs`_ +- `Supported features`_ +- `Product specification`_ +- `Support`_ + +Overview: +========= +SSSNIC is a network interface card that can meet the demand of a range +of application scenarios,such as the Data Center Area,cloud computing +and Financial industry,etc. + +The construction of SSSNIC card facilities mainly depends on servers and +switches. 3S910, 920 and 930 are PCIe standard cards adapted to servers, +which provide extended external business interfaces for servers. + +The driver supports a range of link-speed devices (100GE (40GE +compatible) and 25GE (10GE compatible)).A negotiated and extendable +feature set also supported. + +Supported PCI vendor ID/device IDs: +=================================== + +1f3f:9020 - SSSNIC PF + +Supported features: +=================== + +1. Support single-root I/O virtualization (SR-IOV) +2. Support virtual machine multi queue (VMMQ) +3. Support receive side scaling (RSS) +4. Support physical function (PF) passthrough VMs +5. Support the PF promiscuous mode,unicast or multicast MAC filtering, and +all multicast mode +6. Support IPv4/IPv6, checksum offload,TCP Segmentation Offload (TSO), and +Large Receive Offload (LRO) +7. Support in-band one-click logs collection +8. Support loopback tests +9. 
Support port location indicators + +Product specification +===================== + + =================== ======= ============================= =============================================== + PCI ID (pci.ids) OEM Product PCIe port + =================== ======= ============================= =============================================== + 1F3F:9020 3SNIC 3S910(2 x 25GE SFP28 ports) PCIe Gen3 x8(compatible with Gen2/ Gen1) + 1F3F:9020 3SNIC 3S920(4 x 25GE SFP28 ports) PCIe Gen4 x16, compatible with Gen3/ Gen2/ Gen1 + 1F3F:9020 3SNIC 3S930(2 x 100GE QSFP28 ports) PCIe Gen4 x16, compatible with Gen3/ Gen2/ Gen1 + =================== ======= ============================= =============================================== + + +Support +======= + +If an issue is identified with the released source code on the supported kernel +with a supported adapter, email the specific information related to the issue to +https://www.3snic.com. diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index 9827e816084b8..bf5b6a7b99686 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -12,6 +12,7 @@ Contents: 3com/3c509 3com/vortex + 3snic/sssnic.rst amazon/ena altera/altera_tse amd/pds_core diff --git a/drivers/net/ethernet/3snic/Kconfig b/drivers/net/ethernet/3snic/Kconfig new file mode 100644 index 0000000000000..eb71854198d40 --- /dev/null +++ b/drivers/net/ethernet/3snic/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 3SNIC network device configuration +# + +config NET_VENDOR_3SNIC + bool "3SNIC smart NIC devices" + depends on PCI + select NET_DEVLINK + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about 3SNIC cards. 
If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_3SNIC + +source "drivers/net/ethernet/3snic/sssnic/Kconfig" + +endif # NET_VENDOR_3SNIC diff --git a/drivers/net/ethernet/3snic/Makefile b/drivers/net/ethernet/3snic/Makefile new file mode 100644 index 0000000000000..eb9a8b8cf105c --- /dev/null +++ b/drivers/net/ethernet/3snic/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 3SNIC network device drivers. +# + +obj-$(CONFIG_SSSNIC) += sssnic/ diff --git a/drivers/net/ethernet/3snic/sssnic/Kconfig b/drivers/net/ethernet/3snic/sssnic/Kconfig new file mode 100644 index 0000000000000..d515a49cb2685 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 3SNIC network device configuration +# + +config SSSNIC + tristate "3SNIC Ethernet Controller SSSNIC Support" + depends on PCI + depends on ARM64 || X86_64 + default y + help + This driver supports 3SNIC Ethernet Controller SSSNIC device. + For more information about this product, go to the product + description with smart NIC: + + + + To compile this driver as a module, choose M here. The module + will be called sssnic. diff --git a/drivers/net/ethernet/3snic/sssnic/Makefile b/drivers/net/ethernet/3snic/sssnic/Makefile new file mode 100644 index 0000000000000..9aad44b9c46f5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 3SNIC network device drivers. 
+# + +obj-$(CONFIG_SSSNIC) += nic/ diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h new file mode 100644 index 0000000000000..afc5ff37f4a3c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADAPTER_H +#define SSS_ADAPTER_H + +#include +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_svc_cap.h" +#include "sss_sriov_info.h" + +#define SSS_MAX_FUNC 4096 + +struct sss_card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + u8 bus_id; + u8 resvd[7]; + u16 func_num; + atomic_t channel_timeout_cnt; + void *func_handle_array[SSS_MAX_FUNC]; + void *dbgtool_info; +}; + +/* Structure pcidev private */ +struct sss_pci_adapter { + struct pci_dev *pcidev; + void *hwdev; + + struct sss_hal_dev hal_dev; + + /* Record the upper driver object address, + * such as nic_dev and toe_dev, fc_dev + */ + void *uld_dev[SSS_SERVICE_TYPE_MAX]; + + /* Record the upper driver object name */ + char uld_dev_name[SSS_SERVICE_TYPE_MAX][IFNAMSIZ]; + + /* Manage all function device linked by list */ + struct list_head node; + + void __iomem *cfg_reg_bar; + void __iomem *intr_reg_bar; + void __iomem *mgmt_reg_bar; + void __iomem *db_reg_bar; + u64 db_dwqe_len; + u64 db_base_paddr; + + struct sss_card_node *chip_node; + + int init_state; + + struct sss_sriov_info sriov_info; + + atomic_t ref_cnt; + + atomic_t uld_ref_cnt[SSS_SERVICE_TYPE_MAX]; + spinlock_t uld_lock; /* protect uld probe and remove */ + + /* set when uld driver processing event */ + unsigned long uld_run_state; + + unsigned long uld_attach_state; + + /* lock for attach/detach uld */ + struct mutex uld_attach_mutex; + + spinlock_t dettach_uld_lock; /* spin lock for uld_attach_state access */ 
+}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h new file mode 100644 index 0000000000000..fbcf0b007194b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADM_INFO_H +#define SSS_ADM_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_common.h" + +enum sss_adm_msg_type { + /* write to mgmt cpu command with completion */ + SSS_ADM_MSG_WRITE_TO_MGMT_MODULE = 2, + + /* multi read command with completion notification */ + SSS_ADM_MSG_MULTI_READ = 3, + + /* write command without completion notification */ + SSS_ADM_MSG_POLL_WRITE = 4, + + /* read command without completion notification */ + SSS_ADM_MSG_POLL_READ = 5, + + /* read from mgmt cpu command with completion */ + SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE = 6, + + SSS_ADM_MSG_MAX, +}; + +struct sss_adm_msg_state { + u64 head; + u32 desc_buf; + u32 elem_hi; + u32 elem_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct sss_adm_msg_elem { + u64 control; + + u64 next_elem_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_msg_paddr; + } write; + + struct { + u64 hw_wb_reply_paddr; + u64 hw_msg_paddr; + } read; + }; +}; + +struct sss_adm_msg_reply_fmt { + u64 head; + u64 reply; +}; + +struct sss_adm_msg_elem_ctx { + struct sss_adm_msg_elem *elem_vaddr; + + void *adm_msg_vaddr; + + struct sss_adm_msg_reply_fmt *reply_fmt; + + struct completion done; + int state; + + u32 store_pi; + void *hwdev; +}; + +struct sss_adm_msg { + void *hwdev; + + enum sss_adm_msg_type msg_type; + + u32 elem_num; + + u16 elem_size; + u16 reply_size; + + u32 pi; + u32 ci; + + struct semaphore sem; + spinlock_t async_lock; /* protect adm msg async and sync */ + dma_addr_t wb_state_paddr; + + dma_addr_t head_elem_paddr; + + struct sss_adm_msg_state 
*wb_state; + + struct sss_adm_msg_elem *head_node; + + struct sss_adm_msg_elem_ctx *elem_ctx; + struct sss_adm_msg_elem *now_node; + + struct sss_dma_addr_align elem_addr; + + u8 *elem_vaddr_base; + u8 *reply_vaddr_base; + u8 *buf_vaddr_base; + + u64 elem_paddr_base; + u64 reply_paddr_base; + u64 buf_paddr_base; + u64 elem_size_align; + u64 reply_size_align; + u64 buf_size_align; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h new file mode 100644 index 0000000000000..bdcec6ae4ad81 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_AEQ_INFO_H +#define SSS_AEQ_INFO_H + +#include +#include + +#include "sss_eq_info.h" +#include "sss_hw_aeq.h" + +#define SSS_MAX_AEQ 4 + +typedef void (*sss_aeq_hw_event_handler_t)(void *pri_handle, u8 *data, u8 size); +typedef u8 (*sss_aeq_sw_event_handler_t)(void *pri_handle, u8 event, u8 *data); + +struct sss_aeq_info { + void *hwdev; + + sss_aeq_hw_event_handler_t hw_event_handler[SSS_AEQ_EVENT_MAX]; + void *hw_event_data[SSS_AEQ_EVENT_MAX]; + sss_aeq_sw_event_handler_t sw_event_handler[SSS_AEQ_SW_EVENT_MAX]; + void *sw_event_data[SSS_AEQ_SW_EVENT_MAX]; + unsigned long hw_event_handler_state[SSS_AEQ_EVENT_MAX]; + unsigned long sw_event_handler_state[SSS_AEQ_SW_EVENT_MAX]; + + struct sss_eq aeq[SSS_MAX_AEQ]; + u16 num; + u16 rsvd1; + u32 rsvd2; + struct workqueue_struct *workq; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h new file mode 100644 index 0000000000000..749268d67a6bf --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef 
SSS_BOARD_INFO_H +#define SSS_BOARD_INFO_H + +enum sss_board_type_define { + SSS_BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */ + SSS_BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */ + SSS_BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 4X25GE SmartIO container Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container switch Card */ + SSS_BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */ + SSS_BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */ + SSS_BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */ + SSS_BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */ + SSS_BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS */ + SSS_BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */ + SSS_BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */ + SSS_BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */ + SSS_BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */ + SSS_BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */ + SSS_BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */ + SSS_BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G SDI5.0 Card */ + SSS_BOARD_MAX_TYPE = 0xFF +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h new file mode 100644 index 0000000000000..e6806f64cadad --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CEQ_INFO_H +#define SSS_CEQ_INFO_H + +#include + +#include "sss_hw_ceq.h" +#include "sss_eq_info.h" + +#define SSS_MAX_CEQ 32 + +typedef void (*sss_ceq_event_handler_t)(void *dev, u32 data); + +struct sss_ceq_info { + void *hwdev; + + sss_ceq_event_handler_t event_handler[SSS_CEQ_EVENT_MAX]; + void *event_handler_data[SSS_CEQ_EVENT_MAX]; + void *ceq_data[SSS_CEQ_EVENT_MAX]; + unsigned long event_handler_state[SSS_CEQ_EVENT_MAX]; + + struct sss_eq ceq[SSS_MAX_CEQ]; + u16 num; + u16 rsvd1; + u32 rsvd2; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h new file mode 100644 index 0000000000000..08e4389957964 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CSR_H +#define SSS_CSR_H + +#define SSS_CSR_CFG_FLAG 0x40000000 + +#define SSS_MGMT_FLAG 0xC0000000 + +#define SSS_CSR_FLAG_MASK 0x3FFFFFFF + +#define SSS_VF_CFG_REG_OFFSET 0x2000 + +#define SSS_HOST_CSR_BASE_ADDR (SSS_MGMT_FLAG + 0x6000) +#define SSS_CSR_GLOBAL_BASE_ADDR (SSS_MGMT_FLAG + 0x6400) + +/* HW interface registers */ +#define SSS_CSR_HW_ATTR0_ADDR (SSS_CSR_CFG_FLAG + 0x0) +#define SSS_CSR_HW_ATTR1_ADDR (SSS_CSR_CFG_FLAG + 0x4) +#define SSS_CSR_HW_ATTR2_ADDR (SSS_CSR_CFG_FLAG + 0x8) +#define SSS_CSR_HW_ATTR3_ADDR (SSS_CSR_CFG_FLAG + 0xC) +#define SSS_CSR_HW_ATTR4_ADDR (SSS_CSR_CFG_FLAG + 0x10) +#define SSS_CSR_HW_ATTR5_ADDR (SSS_CSR_CFG_FLAG + 0x14) +#define SSS_CSR_HW_ATTR6_ADDR (SSS_CSR_CFG_FLAG + 0x18) + +#define SSS_HW_CSR_MBX_DATA_OFF 0x80 +#define SSS_HW_CSR_MBX_CTRL_OFF (SSS_CSR_CFG_FLAG + 0x0100) +#define SSS_HW_CSR_MBX_INT_OFFSET_OFF (SSS_CSR_CFG_FLAG + 0x0104) +#define SSS_HW_CSR_MBX_RES_H_OFF (SSS_CSR_CFG_FLAG + 0x0108) +#define SSS_HW_CSR_MBX_RES_L_OFF (SSS_CSR_CFG_FLAG + 0x010C) + +#define 
SSS_PPF_ELECT_OFF 0x0 +#define SSS_MPF_ELECT_OFF 0x20 + +#define SSS_CSR_PPF_ELECT_ADDR \ + (SSS_HOST_CSR_BASE_ADDR + SSS_PPF_ELECT_OFF) + +#define SSS_CSR_GLOBAL_MPF_ELECT_ADDR \ + (SSS_HOST_CSR_BASE_ADDR + SSS_MPF_ELECT_OFF) + +#define SSS_CSR_HW_PPF_ELECT_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x60) +#define SSS_CSR_HW_PPF_ELECT_PORT_STRIDE 0x4 + +#define SSS_CSR_FUNC_PPF_ELECT(host_id) \ + (SSS_CSR_HW_PPF_ELECT_BASE_ADDR + \ + (host_id) * SSS_CSR_HW_PPF_ELECT_PORT_STRIDE) + +#define SSS_CSR_DMA_ATTR_TBL_ADDR (SSS_CSR_CFG_FLAG + 0x380) +#define SSS_CSR_DMA_ATTR_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x390) + +/* CLP registers */ +#define SSS_BAR3_CLP_BASE_ADDR (SSS_MGMT_FLAG + 0x0000) + +#define SSS_UCPU_CLP_SIZE_REG (SSS_HOST_CSR_BASE_ADDR + 0x40) +#define SSS_UCPU_CLP_REQBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x44) +#define SSS_UCPU_CLP_RSPBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x48) +#define SSS_UCPU_CLP_REQ_REG (SSS_HOST_CSR_BASE_ADDR + 0x4c) +#define SSS_UCPU_CLP_RSP_REG (SSS_HOST_CSR_BASE_ADDR + 0x50) +#define SSS_CLP_REG(member) (SSS_UCPU_CLP_##member##_REG) + +#define SSS_CLP_REQ_DATA SSS_BAR3_CLP_BASE_ADDR +#define SSS_CLP_RSP_DATA (SSS_BAR3_CLP_BASE_ADDR + 0x1000) +#define SSS_CLP_DATA(member) (SSS_CLP_##member##_DATA) + +/* MSI-X registers */ +#define SSS_CSR_MSIX_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x310) +#define SSS_CSR_MSIX_CTRL_ADDR (SSS_CSR_CFG_FLAG + 0x300) +#define SSS_CSR_MSIX_CNT_ADDR (SSS_CSR_CFG_FLAG + 0x304) +#define SSS_CSR_FUNC_MSI_CLR_WR_ADDR (SSS_CSR_CFG_FLAG + 0x58) + +#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 +#define SSS_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 +#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 +#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 +#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 +#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_SHIFT 22 + +#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U +#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U 
+#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_MASK 0x3FFU + +#define SSS_SET_MSI_CLR_INDIR(val, member) \ + (((val) & SSS_MSI_CLR_INDIR_##member##_MASK) << \ + SSS_MSI_CLR_INDIR_##member##_SHIFT) + +/* EQ registers */ +#define SSS_AEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x210) +#define SSS_CEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x290) + +#define SSS_EQ_INDIR_ID_ADDR(type) \ + ((type == SSS_AEQ) ? SSS_AEQ_INDIR_ID_ADDR : SSS_CEQ_INDIR_ID_ADDR) + +#define SSS_AEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x240) +#define SSS_CEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x2C0) + +#define SSS_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define SSS_AEQ_PHY_HI_ADDR_REG(pg_num) \ + (SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE) + +#define SSS_AEQ_PHY_LO_ADDR_REG(pg_num) \ + (SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SSS_CEQ_PHY_HI_ADDR_REG(pg_num) \ + (SSS_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE) + +#define SSS_CEQ_PHY_LO_ADDR_REG(pg_num) \ + (SSS_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SSS_CSR_AEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x200) +#define SSS_CSR_AEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x204) +#define SSS_CSR_AEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x208) +#define SSS_CSR_AEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x20C) +#define SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x50) + +#define SSS_CSR_CEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x280) +#define SSS_CSR_CEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x284) +#define SSS_CSR_CEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x288) +#define SSS_CSR_CEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x28c) +#define SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x54) + +/* ADM MSG registers */ +#define SSS_CSR_ADM_MSG_BASE (SSS_MGMT_FLAG + 0x2000) + +#define SSS_CSR_ADM_MSG_STRIDE 0x80 + +#define SSS_CSR_ADM_MSG_HEAD_HI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x0 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define 
SSS_CSR_ADM_MSG_HEAD_LO_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x4 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_HI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x8 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_LO_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0xC + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x10 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_CTRL_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x14 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_PI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x1C + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_REQ_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x20 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_0_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x30 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +/* self test register */ +#define SSS_MGMT_HEALTH_STATUS_ADDR (SSS_MGMT_FLAG + 0x983c) + +#define SSS_CHIP_BASE_INFO_ADDR (SSS_MGMT_FLAG + 0xB02C) + +#define SSS_CHIP_ERR_STATUS0_ADDR (SSS_MGMT_FLAG + 0xC0EC) +#define SSS_CHIP_ERR_STATUS1_ADDR (SSS_MGMT_FLAG + 0xC0F0) + +#define SSS_ERR_INFO0_ADDR (SSS_MGMT_FLAG + 0xC0F4) +#define SSS_ERR_INFO1_ADDR (SSS_MGMT_FLAG + 0xC0F8) +#define SSS_ERR_INFO2_ADDR (SSS_MGMT_FLAG + 0xC0FC) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h new file mode 100644 index 0000000000000..02727d453fed4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CTRLQ_INFO_H +#define SSS_CTRLQ_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_mbx_msg.h" +#include "sss_hw_wq.h" +#include "sss_hw_ctrlq.h" + +#define SSS_DEFAULT_WQ_PAGE_SIZE 0x100000 +#define SSS_HW_WQ_PAGE_SIZE 0x1000 +#define SSS_MAX_WQ_PAGE_NUM 8 + +/* ctrlq ack type 
*/ +enum sss_ack_type { + SSS_ACK_TYPE_CTRLQ, + SSS_ACK_TYPE_SHARE_CQN, + SSS_ACK_TYPE_APP_CQN, + + SSS_MOD_ACK_MAX = 15, +}; + +enum sss_ctrlq_type { + SSS_CTRLQ_SYNC, + SSS_CTRLQ_ASYNC, + SSS_MAX_CTRLQ_TYPE = 4 +}; + +enum sss_ctrlq_msg_type { + SSS_MSG_TYPE_NONE, + SSS_MSG_TYPE_SET_ARM, + SSS_MSG_TYPE_DIRECT_RESP, + SSS_MSG_TYPE_SGE_RESP, + SSS_MSG_TYPE_ASYNC, + SSS_MSG_TYPE_PSEUDO_TIMEOUT, + SSS_MSG_TYPE_TIMEOUT, + SSS_MSG_TYPE_FORCE_STOP, + SSS_MSG_TYPE_MAX +}; + +struct sss_ctrlq_cmd_info { + enum sss_ctrlq_msg_type msg_type; + u16 channel; + + struct completion *done; + int *err_code; + int *cmpt_code; + u64 *direct_resp; + u64 msg_id; + + struct sss_ctrl_msg_buf *in_buf; + struct sss_ctrl_msg_buf *out_buf; +}; + +struct sss_ctrlq { + struct sss_wq wq; + + enum sss_ctrlq_type ctrlq_type; + int wrapped; + + /* spinlock for send ctrlq commands */ + spinlock_t ctrlq_lock; + + struct sss_ctrlq_ctxt_info ctrlq_ctxt; + + struct sss_ctrlq_cmd_info *cmd_info; + + void *hwdev; +}; + +struct sss_ctrlq_info { + void *hwdev; + + struct pci_pool *msg_buf_pool; + + /* doorbell area */ + u8 __iomem *db_base; + + /* All ctrlq's CLA of a VF occupy a PAGE when ctrlq wq is 1-level CLA */ + void *wq_block_vaddr; + dma_addr_t wq_block_paddr; + struct sss_ctrlq ctrlq[SSS_MAX_CTRLQ_TYPE]; + + u32 state; + u32 disable_flag; + + u8 lock_channel_en; + u8 num; + u8 rsvd[6]; + unsigned long channel_stop; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h new file mode 100644 index 0000000000000..c8a16dabeacc1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_EQ_INFO_H +#define SSS_EQ_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_irq.h" +#include "sss_hw_svc_cap.h" + +#define 
SSS_EQ_IRQ_NAME_LEN 64 + +enum sss_eq_type { + SSS_AEQ, + SSS_CEQ +}; + +typedef void (*sss_init_desc_handler_t)(void *eq); +typedef u32 (*sss_chip_init_attr_handler_t)(void *eq); + +struct sss_eq { + char *name; + void *hwdev; + enum sss_eq_type type; + u32 page_size; + u32 old_page_size; + u32 len; + + u32 ci; + + u16 wrap; + u16 qid; + + u16 entry_size; + u16 page_num; + + u32 num_entry_per_pg; + + struct sss_irq_desc irq_desc; + char irq_name[SSS_EQ_IRQ_NAME_LEN]; + + struct sss_dma_addr_align *page_array; + + struct work_struct aeq_work; + struct tasklet_struct ceq_tasklet; + + u64 hw_intr_jiffies; + u64 sw_intr_jiffies; + + sss_init_desc_handler_t init_desc_handler; + sss_chip_init_attr_handler_t init_attr_handler; + irq_handler_t irq_handler; +}; + +struct sss_eq_cfg { + enum sss_service_type type; + int id; + int free; /* 1 - alocated, 0- freed */ +}; + +struct sss_eq_info { + struct sss_eq_cfg *eq; + + u8 ceq_num; + + u8 remain_ceq_num; + + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h new file mode 100644 index 0000000000000..2970438cb3ac5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_H +#define SSS_HWDEV_H + +#include +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_mbx_msg.h" +#include "sss_hw_statistics.h" +#include "sss_hw_event.h" + +#include "sss_hwif.h" +#include "sss_mgmt_info.h" +#include "sss_ctrlq_info.h" +#include "sss_aeq_info.h" +#include "sss_ceq_info.h" +#include "sss_mbx_info.h" +#include "sss_mgmt_channel.h" + +#define SSSNIC_CHANNEL_DETECT_PERIOD (5 * 1000) + +enum sss_func_mode { + SSS_FUNC_MOD_MIN, + + /* single host */ + SSS_FUNC_MOD_NORMAL_HOST = 
SSS_FUNC_MOD_MIN, + + /* multi host, bare-metal, sdi side */ + SSS_FUNC_MOD_MULTI_BM_MASTER, + + /* multi host, bare-metal, host side */ + SSS_FUNC_MOD_MULTI_BM_SLAVE, + + /* multi host, vm mode, sdi side */ + SSS_FUNC_MOD_MULTI_VM_MASTER, + + /* multi host, vm mode, host side */ + SSS_FUNC_MOD_MULTI_VM_SLAVE, + + SSS_FUNC_MOD_MAX = SSS_FUNC_MOD_MULTI_VM_SLAVE, +}; + +struct sss_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct sss_mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + + struct sss_page_addr *brm_srch_page_addr; +}; + +struct sss_devlink { + void *hwdev; + u8 active_cfg_id; /* 1 ~ 8 */ + u8 switch_cfg_id; /* 1 ~ 8 */ +}; + +struct sss_heartbeat { + u8 pcie_link_down; + u8 heartbeat_lost; + u16 rsvd; + u32 pcie_link_down_cnt; + struct timer_list heartbeat_timer; + struct work_struct lost_work; +}; + +struct sss_aeq_stat { + u16 busy_cnt; + u16 rsvd; + u64 cur_recv_cnt; + u64 last_recv_cnt; +}; + +struct sss_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct sss_hwdev { + void *adapter_hdl; /* pointer to sss_pci_adapter or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + + /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + void *dev_hdl; + void *chip_node; + + void *service_adapter[SSS_SERVICE_TYPE_MAX]; + + u32 wq_page_size; + int chip_present_flag; + u8 poll; /* use polling mode or int mode */ + u8 rsvd[3]; + struct sss_hwif *hwif; /* include void __iomem *bar */ + struct sss_comm_global_attr glb_attr; + u64 features[SSS_MAX_FEATURE_QWORD]; + + struct sss_mgmt_info *mgmt_info; + + struct sss_ctrlq_info *ctrlq_info; + struct sss_aeq_info *aeq_info; + struct sss_ceq_info *ceq_info; + struct sss_mbx *mbx; // mbx + struct sss_msg_pf_to_mgmt *pf_to_mgmt; // adm + struct sss_clp_pf_to_mgmt *clp_pf_to_mgmt; + + struct sss_hw_stats hw_stats; + u8 *chip_fault_stats; + + sss_event_handler_t event_handler; + void 
*event_handler_data; + + struct sss_board_info board_info; + + struct delayed_work sync_time_task; + struct delayed_work channel_detect_task; + + struct workqueue_struct *workq; + + struct sss_heartbeat heartbeat; + + ulong func_state; + spinlock_t channel_lock; /* protect channel init and deinit */ + + struct sss_devlink *devlink_dev; + + enum sss_func_mode func_mode; + + struct sss_aeq_stat aeq_stat; + + u16 aeq_busy_cnt; +}; + +#define SSS_TO_HWDEV(ptr) ((struct sss_hwdev *)(ptr)->hwdev) +#define SSS_TO_DEV(hwdev) (((struct sss_hwdev *)hwdev)->dev_hdl) +#define SSS_TO_HWIF(hwdev) (((struct sss_hwdev *)hwdev)->hwif) +#define SSS_TO_MGMT_INFO(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info) +#define SSS_TO_AEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->aeq_info) +#define SSS_TO_CEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ceq_info) +#define SSS_TO_CTRLQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ctrlq_info) +#define SSS_TO_IRQ_INFO(hwdev) (&((struct sss_hwdev *)hwdev)->mgmt_info->irq_info) +#define SSS_TO_SVC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap)) +#define SSS_TO_NIC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap)) +#define SSS_TO_MAX_SQ_NUM(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap.max_sq) +#define SSS_TO_PHY_PORT_ID(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_id) +#define SSS_TO_MAX_VF_NUM(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.max_vf) +#define SSS_TO_FUNC_COS_BITMAP(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.cos_valid_bitmap) +#define SSS_TO_PORT_COS_BITMAP(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_cos_valid_bitmap) + +enum sss_servic_bit_define { + SSS_SERVICE_BIT_NIC = 0, + SSS_SERVICE_BIT_ROCE = 1, + SSS_SERVICE_BIT_VBS = 2, + SSS_SERVICE_BIT_TOE = 3, + SSS_SERVICE_BIT_IPSEC = 4, + SSS_SERVICE_BIT_FC = 5, + SSS_SERVICE_BIT_VIRTIO = 6, + SSS_SERVICE_BIT_OVS = 7, + SSS_SERVICE_BIT_NVME = 8, + SSS_SERVICE_BIT_ROCEAA = 
9, + SSS_SERVICE_BIT_CURRENET = 10, + SSS_SERVICE_BIT_PPA = 11, + SSS_SERVICE_BIT_MIGRATE = 12, + SSS_MAX_SERVICE_BIT +}; + +#define SSS_CFG_SERVICE_MASK_NIC (0x1 << SSS_SERVICE_BIT_NIC) +#define SSS_CFG_SERVICE_MASK_ROCE (0x1 << SSS_SERVICE_BIT_ROCE) +#define SSS_CFG_SERVICE_MASK_VBS (0x1 << SSS_SERVICE_BIT_VBS) +#define SSS_CFG_SERVICE_MASK_TOE (0x1 << SSS_SERVICE_BIT_TOE) +#define SSS_CFG_SERVICE_MASK_IPSEC (0x1 << SSS_SERVICE_BIT_IPSEC) +#define SSS_CFG_SERVICE_MASK_FC (0x1 << SSS_SERVICE_BIT_FC) +#define SSS_CFG_SERVICE_MASK_VIRTIO (0x1 << SSS_SERVICE_BIT_VIRTIO) +#define SSS_CFG_SERVICE_MASK_OVS (0x1 << SSS_SERVICE_BIT_OVS) +#define SSS_CFG_SERVICE_MASK_NVME (0x1 << SSS_SERVICE_BIT_NVME) +#define SSS_CFG_SERVICE_MASK_ROCEAA (0x1 << SSS_SERVICE_BIT_ROCEAA) +#define SSS_CFG_SERVICE_MASK_CURRENET (0x1 << SSS_SERVICE_BIT_CURRENET) +#define SSS_CFG_SERVICE_MASK_PPA (0x1 << SSS_SERVICE_BIT_PPA) +#define SSS_CFG_SERVICE_MASK_MIGRATE (0x1 << SSS_SERVICE_BIT_MIGRATE) + +#define SSS_CFG_SERVICE_RDMA_EN SSS_CFG_SERVICE_MASK_ROCE + +#define SSS_IS_NIC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_NIC) +#define SSS_IS_ROCE_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_ROCE) +#define SSS_IS_VBS_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_VBS) +#define SSS_IS_TOE_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_TOE) +#define SSS_IS_IPSEC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_IPSEC) +#define SSS_IS_FC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_FC) +#define SSS_IS_OVS_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_OVS) +#define SSS_IS_RDMA_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_RDMA_EN) +#define SSS_IS_RDMA_ENABLE(dev) \ + 
((dev)->mgmt_info->svc_cap.sf_svc_attr.rdma_en) +#define SSS_IS_PPA_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_PPA) +#define SSS_IS_MIGR_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_MIGRATE) + +#define SSS_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) +#define SSS_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) +#define SSS_MGMT_CPU_NODE_ID(hwdev) \ + ((hwdev)->glb_attr.mgmt_host_node_id) + +#define SSS_GET_FUNC_TYPE(hwdev) ((hwdev)->hwif->attr.func_type) +#define SSS_IS_PF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PF) +#define SSS_IS_VF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_VF) +#define SSS_IS_PPF(dev) \ + (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PPF) + +#define SSS_GET_FUNC_ID(hwdev) ((hwdev)->hwif->attr.func_id) + +#define SSS_IS_BMGW_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_MASTER) +#define SSS_IS_BMGW_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_SLAVE) +#define SSS_IS_VM_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_MASTER) +#define SSS_IS_VM_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_SLAVE) + +#define SSS_IS_MASTER_HOST(hwdev) \ + (SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_VM_MASTER_HOST(hwdev)) + +#define SSS_IS_SLAVE_HOST(hwdev) \ + (SSS_IS_BMGW_SLAVE_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev)) + +#define SSS_IS_MULTI_HOST(hwdev) \ + (SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_BMGW_SLAVE_HOST(hwdev) || \ + SSS_IS_VM_MASTER_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev)) + +#define SSS_SPU_HOST_ID 4 + +#define SSS_SUPPORT_ADM_MSG(hwdev) ((hwdev)->features[0] & SSS_COMM_F_ADM) +#define SSS_SUPPORT_MBX_SEGMENT(hwdev) \ + (SSS_GET_HWIF_PCI_INTF_ID((hwdev)->hwif) == SSS_SPU_HOST_ID) +#define SSS_SUPPORT_CTRLQ_NUM(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CTRLQ_NUM) +#define SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_VIRTIO_VQ_SIZE) +#define 
SSS_SUPPORT_CHANNEL_DETECT(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT) +#define SSS_SUPPORT_CLP(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CLP) + +enum { + SSS_CFG_FREE = 0, + SSS_CFG_BUSY = 1 +}; + +int sss_init_pci(void); +void sss_exit_pci(void); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h new file mode 100644 index 0000000000000..d7e18653e7943 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_H +#define SSS_HWIF_H + +#include +#include + +struct sss_db_pool { + unsigned long *bitmap; + u32 bit_size; + + /* spinlock for allocating doorbell area */ + spinlock_t id_lock; +}; + +struct sss_func_attr { + enum sss_func_type func_type; + + u16 func_id; + u8 pf_id; + u8 pci_intf_id; + + u16 global_vf_off; + u8 mpf_id; + u8 ppf_id; + + u16 irq_num; /* max: 2 ^ 15 */ + u8 aeq_num; /* max: 2 ^ 3 */ + u8 ceq_num; /* max: 2 ^ 7 */ + + u16 sq_num; /* max: 2 ^ 8 */ + u8 dma_attr_num; /* max: 2 ^ 6 */ + u8 msix_flex_en; +}; + +struct sss_hwif { + u8 __iomem *cfg_reg_base; + u8 __iomem *mgmt_reg_base; + u64 db_base_paddr; + u64 db_dwqe_len; + u8 __iomem *db_base_vaddr; + + void *pdev; + + struct sss_db_pool db_pool; + + struct sss_func_attr attr; +}; + +#define SSS_GET_HWIF_AEQ_NUM(hwif) ((hwif)->attr.aeq_num) +#define SSS_GET_HWIF_CEQ_NUM(hwif) ((hwif)->attr.ceq_num) +#define SSS_GET_HWIF_IRQ_NUM(hwif) ((hwif)->attr.irq_num) +#define SSS_GET_HWIF_GLOBAL_ID(hwif) ((hwif)->attr.func_id) +#define SSS_GET_HWIF_PF_ID(hwif) ((hwif)->attr.pf_id) +#define SSS_GET_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_off) +#define SSS_GET_HWIF_PPF_ID(hwif) ((hwif)->attr.ppf_id) +#define SSS_GET_HWIF_MPF_ID(hwif) ((hwif)->attr.mpf_id) +#define SSS_GET_HWIF_PCI_INTF_ID(hwif) ((hwif)->attr.pci_intf_id) +#define 
SSS_GET_HWIF_FUNC_TYPE(hwif) ((hwif)->attr.func_type) +#define SSS_GET_HWIF_MSIX_EN(hwif) ((hwif)->attr.msix_flex_en) + +#define SSS_SET_HWIF_AEQ_NUM(hwif, val) \ + ((hwif)->attr.aeq_num = (val)) + +#define SSS_SET_HWIF_CEQ_NUM(hwif, val) \ + ((hwif)->attr.ceq_num = (val)) + +#define SSS_SET_HWIF_IRQ_NUM(hwif, val) \ + ((hwif)->attr.irq_num = (val)) + +#define SSS_SET_HWIF_GLOBAL_ID(hwif, val) \ + ((hwif)->attr.func_id = (val)) + +#define SSS_SET_HWIF_PF_ID(hwif, val) \ + ((hwif)->attr.pf_id = (val)) + +#define SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, val) \ + ((hwif)->attr.global_vf_off = (val)) + +#define SSS_SET_HWIF_PPF_ID(hwif, val) \ + ((hwif)->attr.ppf_id = (val)) + +#define SSS_SET_HWIF_MPF_ID(hwif, val) \ + ((hwif)->attr.mpf_id = (val)) + +#define SSS_SET_HWIF_PCI_INTF_ID(hwif, val) \ + ((hwif)->attr.pci_intf_id = (val)) + +#define SSS_SET_HWIF_FUNC_TYPE(hwif, val) \ + ((hwif)->attr.func_type = (val)) + +#define SSS_SET_HWIF_DMA_ATTR_NUM(hwif, val) \ + ((hwif)->attr.dma_attr_num = (val)) + +#define SSS_SET_HWIF_MSIX_EN(hwif, val) \ + ((hwif)->attr.msix_flex_en = (val)) + +#define SSS_SET_HWIF_SQ_NUM(hwif, val) \ + ((hwif)->attr.sq_num = (val)) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h new file mode 100644 index 0000000000000..dfc2a68680430 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_IRQ_INFO_H +#define SSS_IRQ_INFO_H + +#include +#include + +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" + +struct sss_irq { + enum sss_service_type type; + int busy; /* 1 - allocated, 0 - freed */ + struct sss_irq_desc desc; +}; + +struct sss_irq_info { + struct sss_irq *irq; + u16 total_num; + u16 free_num; + u16 max_num; /* device max irq number */ + + struct mutex irq_mutex; /* mutex is used to allocate eq */ 
+}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h new file mode 100644 index 0000000000000..542fcb20442a3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MBX_INFO_H +#define SSS_MBX_INFO_H +#include +#include +#include +#include +#include + +#include "sss_hw_mbx.h" + +enum sss_mbx_event_state { + SSS_EVENT_START = 0, + SSS_EVENT_FAIL, + SSS_EVENT_SUCCESS, + SSS_EVENT_TIMEOUT, + SSS_EVENT_END, +}; + +struct sss_mbx_send { + u8 *data; + + u64 *wb_state; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +struct sss_mbx_dma_queue { + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr; + + u16 depth; + u16 pi; + u16 ci; +}; + +struct sss_mbx_msg_info { + u8 msg_id; + u8 state; /* can only use 1 bit */ +}; + +struct sss_msg_desc { + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct sss_mbx_msg_info msg_info; +}; + +struct sss_msg_buffer { + struct sss_msg_desc resp_msg; + struct sss_msg_desc recv_msg; + + atomic_t recv_msg_cnt; +}; + +struct sss_mbx { + void *hwdev; + + u8 lock_channel_en; + u8 rsvd0[3]; + unsigned long channel_stop; + + /* lock for send mbx message and ack message */ + struct mutex mbx_send_lock; + /* lock for send mbx message */ + struct mutex msg_send_lock; + struct sss_mbx_send mbx_send; + + struct sss_mbx_dma_queue sync_msg_queue; + struct sss_mbx_dma_queue async_msg_queue; + + struct workqueue_struct *workq; + + struct sss_msg_buffer mgmt_msg; /* driver and MGMT CPU */ + struct sss_msg_buffer *host_msg; /* PPF message between hosts */ + struct sss_msg_buffer *func_msg; /* PF to VF or VF to PF */ + u16 num_func_msg; + u16 cur_msg_channel; + u8 support_h2h_msg; /* host to host */ + u8 rsvd1[3]; + /* vf receive pf/ppf callback */ + sss_vf_mbx_handler_t 
vf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *vf_mbx_data[SSS_MOD_TYPE_MAX]; + /* pf/ppf receive vf callback */ + sss_pf_mbx_handler_t pf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *pf_mbx_data[SSS_MOD_TYPE_MAX]; + /* ppf receive pf/ppf callback */ + sss_ppf_mbx_handler_t ppf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *ppf_mbx_data[SSS_MOD_TYPE_MAX]; + /* pf receive ppf callback */ + sss_pf_from_ppf_mbx_handler_t pf_recv_ppf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *pf_recv_ppf_mbx_data[SSS_MOD_TYPE_MAX]; + unsigned long ppf_to_pf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long ppf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long pf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long vf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + + enum sss_mbx_event_state event_flag; + /* lock for mbx event flag */ + spinlock_t mbx_lock; + + u8 send_msg_id; + u8 rsvd2[3]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h new file mode 100644 index 0000000000000..4c0c3c482dde4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MGMT_CHANNEL_H +#define SSS_MGMT_CHANNEL_H + +#include +#include +#include +#include +#include + +#include "sss_hw_mbx.h" +#include "sss_hw_mgmt.h" +#include "sss_adm_info.h" + +/* message header define */ +#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_SHIFT 0 +#define SSS_MSG_HEADER_STATUS_SHIFT 13 +#define SSS_MSG_HEADER_SOURCE_SHIFT 15 +#define SSS_MSG_HEADER_AEQ_ID_SHIFT 16 +#define SSS_MSG_HEADER_MSG_ID_SHIFT 18 +#define SSS_MSG_HEADER_CMD_SHIFT 22 + +#define SSS_MSG_HEADER_MSG_LEN_SHIFT 32 +#define SSS_MSG_HEADER_MODULE_SHIFT 43 +#define SSS_MSG_HEADER_SEG_LEN_SHIFT 48 +#define SSS_MSG_HEADER_NO_ACK_SHIFT 54 +#define SSS_MSG_HEADER_DATA_TYPE_SHIFT 55 +#define SSS_MSG_HEADER_SEQID_SHIFT 56 +#define SSS_MSG_HEADER_LAST_SHIFT 62 +#define 
SSS_MSG_HEADER_DIRECTION_SHIFT 63 + +#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_MASK 0x1FFF +#define SSS_MSG_HEADER_STATUS_MASK 0x1 +#define SSS_MSG_HEADER_SOURCE_MASK 0x1 +#define SSS_MSG_HEADER_AEQ_ID_MASK 0x3 +#define SSS_MSG_HEADER_MSG_ID_MASK 0xF +#define SSS_MSG_HEADER_CMD_MASK 0x3FF + +#define SSS_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define SSS_MSG_HEADER_MODULE_MASK 0x1F +#define SSS_MSG_HEADER_SEG_LEN_MASK 0x3F +#define SSS_MSG_HEADER_NO_ACK_MASK 0x1 +#define SSS_MSG_HEADER_DATA_TYPE_MASK 0x1 +#define SSS_MSG_HEADER_SEQID_MASK 0x3F +#define SSS_MSG_HEADER_LAST_MASK 0x1 +#define SSS_MSG_HEADER_DIRECTION_MASK 0x1 + +#define SSS_GET_MSG_HEADER(val, field) \ + (((val) >> SSS_MSG_HEADER_##field##_SHIFT) & \ + SSS_MSG_HEADER_##field##_MASK) +#define SSS_SET_MSG_HEADER(val, field) \ + ((u64)(((u64)(val)) & SSS_MSG_HEADER_##field##_MASK) << \ + SSS_MSG_HEADER_##field##_SHIFT) + +enum sss_msg_ack_type { + SSS_MSG_ACK, + SSS_MSG_NO_ACK, +}; + +enum sss_data_type { + SSS_INLINE_DATA = 0, + SSS_DMA_DATA = 1, +}; + +enum sss_msg_seg_type { + SSS_NOT_LAST_SEG = 0, + SSS_LAST_SEG = 1, +}; + +enum sss_msg_direction_type { + SSS_DIRECT_SEND_MSG = 0, + SSS_RESP_MSG = 1, +}; + +enum sss_msg_src_type { + SSS_MSG_SRC_MGMT = 0, + SSS_MSG_SRC_MBX = 1, +}; + +enum sss_mgmt_msg_cb_t_state { + SSS_CALLBACK_REG = 0, + SSS_CALLBACK_RUNNING, +}; + +enum sss_pf_to_mgmt_event_state { + SSS_ADM_EVENT_UNINIT = 0, + SSS_ADM_EVENT_START, + SSS_ADM_EVENT_SUCCESS, + SSS_ADM_EVENT_FAIL, + SSS_ADM_EVENT_TIMEOUT, + SSS_ADM_EVENT_END, +}; + +struct sss_recv_msg { + void *buf; + + u16 buf_len; + u16 cmd; + + u16 msg_id; + u8 seq_id; + u8 no_ack; + + enum sss_mod_type mod; + + struct completion done; +}; + +struct sss_msg_pf_to_mgmt { + void *hwdev; + spinlock_t async_msg_lock; /* protect msg async and sync */ + + struct semaphore sync_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_buf; + void *ack_buf; + + struct sss_recv_msg recv_msg; + struct sss_recv_msg recv_resp_msg; + 
+ u16 rsvd; + u16 async_msg_id; + u16 sync_msg_id; + struct sss_adm_msg *adm_msg[SSS_ADM_MSG_MAX]; + + sss_mgmt_msg_handler_t recv_handler[SSS_MOD_TYPE_HW_MAX]; + void *recv_data[SSS_MOD_TYPE_HW_MAX]; + unsigned long recv_handler_state[SSS_MOD_TYPE_HW_MAX]; + void *async_msg_cb_data[SSS_MOD_TYPE_HW_MAX]; + + /* lock when sending msg */ + spinlock_t sync_event_lock; /* protect event async and sync */ + enum sss_pf_to_mgmt_event_state event_state; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h new file mode 100644 index 0000000000000..f3b50b0d4f1df --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MGMT_INFO_H +#define SSS_MGMT_INFO_H + +#include + +#include "sss_hw_svc_cap.h" +#include "sss_eq_info.h" +#include "sss_irq_info.h" + +struct sss_dev_sf_svc_attr { + u8 rdma_en; + u8 rsvd[3]; +}; + +enum sss_intr_type { + SSS_INTR_TYPE_MSIX, + SSS_INTR_TYPE_MSI, + SSS_INTR_TYPE_INT, + SSS_INTR_TYPE_NONE, + + /* PXE,OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; + +/* device service capability */ +struct sss_service_cap { + struct sss_dev_sf_svc_attr sf_svc_attr; + u16 svc_type; /* user input service type */ + u16 chip_svc_type; /* HW supported service type, reference to sss_servic_bit_define */ + + u8 host_id; + u8 ep_id; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + + /* Host global resources */ + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; /* max numbers of vf in current host */ + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 host_valid_bitmap; + u8 master_host_id; + u8 srv_multi_host_mode; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + u8 flexq_en; + 
u8 resvd; + + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u16 max_vf; /* max VF number that PF supported */ + u16 pseudo_vf_start_id; + u16 pseudo_vf_num; + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_bfilter_start_addr; + u16 pseudo_vf_bfilter_len; + + u16 pseudo_vf_cfg_num; + u16 virtio_vq_size; + + /* DO NOT get interrupt_type from firmware */ + enum sss_intr_type intr_type; + + u8 sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable */ + u8 lb_mode; + u8 smf_pg; + u8 rsvd[3]; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. + * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
+ * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + + struct sss_nic_service_cap nic_cap; /* NIC capability */ + struct sss_rdma_service_cap rdma_cap; /* RDMA capability */ + struct sss_fc_service_cap fc_cap; /* FC capability */ + struct sss_toe_service_cap toe_cap; /* ToE capability */ + struct sss_ovs_service_cap ovs_cap; /* OVS capability */ + struct sss_ipsec_service_cap ipsec_cap; /* IPsec capability */ + struct sss_ppa_service_cap ppa_cap; /* PPA capability */ + struct sss_vbs_service_cap vbs_cap; /* VBS capability */ +}; + +struct sss_svc_cap_info { + u32 func_id; + struct sss_service_cap cap; +}; + +struct sss_mgmt_info { + void *hwdev; + struct sss_service_cap svc_cap; + struct sss_eq_info eq_info; /* CEQ */ + struct sss_irq_info irq_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h new file mode 100644 index 0000000000000..bfb29200db9f5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_SRIOV_INFO_H +#define SSS_SRIOV_INFO_H + +#include + +enum sss_sriov_state { + SSS_SRIOV_DISABLE, + SSS_SRIOV_ENABLE, + SSS_SRIOV_PRESENT, +}; + +struct sss_sriov_info { + u8 enabled; + u8 rsvd[3]; + unsigned int vf_num; + unsigned long state; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c new file mode 100644 index 0000000000000..e71c40f7bb8c7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_pci_sriov.h" +#include "sss_pci_id_tbl.h" +#include "sss_adapter.h" +#include "sss_adapter_mgmt.h" +#include "sss_pci_global.h" +#include "sss_tool_comm.h" +#include "sss_hw_export.h" +#include "sss_tool_hw.h" +#include "sss_tool.h" + +#ifndef SSS_PF_NUM_MAX +#define SSS_PF_NUM_MAX (16) +#endif + +#define SSS_ADAPTER_CNT_TIMEOUT 10000 +#define SSS_WAIT_ADAPTER_USLEEP_MIN 9900 +#define SSS_WAIT_ADAPTER_USLEEP_MAX 10000 + +#define SSS_CHIP_NODE_HOLD_TIMEOUT (10 * 60 * 1000) +#define SSS_WAIT_CHIP_NODE_CHANGED (10 * 60 * 1000) +#define SSS_PRINT_TIMEOUT_INTERVAL 10000 +#define SSS_MICRO_SECOND 1000 +#define SSS_CHIP_NODE_USLEEP_MIN 900 +#define SSS_CHIP_NODE_USLEEP_MAX 1000 + +#define SSS_CARD_CNT_MAX 64 + +#define SSS_IS_SPU_DEV(pdev) ((pdev)->device == SSS_DEV_ID_SPU) + +enum sss_node_state { + SSS_NODE_CHANGE = BIT(0), +}; + +struct sss_chip_node_lock { + struct mutex chip_mutex; /* lock for chip list */ + unsigned long state; + atomic_t ref_cnt; +}; + +static struct sss_chip_node_lock g_chip_node_lock; + +static unsigned long g_index_bit_map; + +LIST_HEAD(g_chip_list); + +struct list_head *sss_get_chip_list(void) +{ + return &g_chip_list; +} + +void lld_dev_hold(struct sss_hal_dev *dev) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_inc(&pci_adapter->ref_cnt); +} + +void lld_dev_put(struct sss_hal_dev *dev) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_dec(&pci_adapter->ref_cnt); +} + +void sss_chip_node_lock(void) +{ + unsigned long end; + bool timeout = true; + u32 loop_cnt; + + mutex_lock(&g_chip_node_lock.chip_mutex); + + loop_cnt = 0; + end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED); + do { + if (!test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 
0) + pr_warn("Wait for adapter change complete for %us\n", + loop_cnt / SSS_MICRO_SECOND); + + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (timeout && test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + pr_warn("Wait for adapter change complete timeout when trying to get adapter lock\n"); + + loop_cnt = 0; + timeout = true; + end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED); + do { + if (!atomic_read(&g_chip_node_lock.ref_cnt)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for adapter unused for %us, reference count: %d\n", + loop_cnt / SSS_MICRO_SECOND, + atomic_read(&g_chip_node_lock.ref_cnt)); + + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, + SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (timeout && atomic_read(&g_chip_node_lock.ref_cnt)) + pr_warn("Wait for adapter unused timeout\n"); + + mutex_unlock(&g_chip_node_lock.chip_mutex); +} + +void sss_chip_node_unlock(void) +{ + clear_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state); +} + +void sss_hold_chip_node(void) +{ + unsigned long end; + u32 loop_cnt = 0; + + mutex_lock(&g_chip_node_lock.chip_mutex); + + end = jiffies + msecs_to_jiffies(SSS_CHIP_NODE_HOLD_TIMEOUT); + do { + if (!test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + break; + + loop_cnt++; + + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait adapter change complete for %us\n", + loop_cnt / SSS_MICRO_SECOND); + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + pr_warn("Wait adapter change complete timeout when trying to adapter dev\n"); + + atomic_inc(&g_chip_node_lock.ref_cnt); + mutex_unlock(&g_chip_node_lock.chip_mutex); +} 
+ +void sss_put_chip_node(void) +{ + atomic_dec(&g_chip_node_lock.ref_cnt); +} + +void sss_pre_init(void) +{ + mutex_init(&g_chip_node_lock.chip_mutex); + atomic_set(&g_chip_node_lock.ref_cnt, 0); + sss_init_uld_lock(); +} + +struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = pci_get_drvdata(pdev); + + if (!pdev) + return NULL; + + return adapter; +} + +static bool sss_chip_node_exist(struct sss_pci_adapter *adapter, + unsigned char bus_id) +{ + struct sss_card_node *chip_node = NULL; + + sss_chip_node_lock(); + if (bus_id != 0) { + list_for_each_entry(chip_node, &g_chip_list, node) { + if (chip_node->bus_id == bus_id) { + adapter->chip_node = chip_node; + sss_chip_node_unlock(); + return true; + } + } + } else if (SSS_IS_VF_DEV(adapter->pcidev) || + SSS_IS_SPU_DEV(adapter->pcidev)) { + list_for_each_entry(chip_node, &g_chip_list, node) { + if (chip_node) { + adapter->chip_node = chip_node; + sss_chip_node_unlock(); + return true; + } + } + } + sss_chip_node_unlock(); + + return false; +} + +static unsigned char sss_get_pci_bus_id(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pf_pdev = NULL; + unsigned char bus_id = 0; + + if (!pci_is_root_bus(adapter->pcidev->bus)) + bus_id = adapter->pcidev->bus->number; + + if (bus_id == 0) + return bus_id; + + if (adapter->pcidev->is_virtfn) { + pf_pdev = adapter->pcidev->physfn; + bus_id = pf_pdev->bus->number; + } + + return bus_id; +} + +static bool sss_alloc_card_id(u8 *id) +{ + unsigned char i; + + sss_chip_node_lock(); + for (i = 0; i < SSS_CARD_CNT_MAX; i++) { + if (test_and_set_bit(i, &g_index_bit_map) == 0) { + sss_chip_node_unlock(); + *id = i; + return true; + } + } + sss_chip_node_unlock(); + + return false; +} + +static void sss_free_card_id(u8 id) +{ + clear_bit(id, &g_index_bit_map); +} + +int sss_alloc_chip_node(struct sss_pci_adapter *adapter) +{ + struct sss_card_node *chip_node = NULL; + unsigned char card_id; + unsigned char bus_id; + + 
bus_id = sss_get_pci_bus_id(adapter); + + if (sss_chip_node_exist(adapter, bus_id)) + return 0; + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) + return -ENOMEM; + + chip_node->bus_id = bus_id; + + if (!sss_alloc_card_id(&card_id)) { + kfree(chip_node); + sdk_err(&adapter->pcidev->dev, "chip node is exceed\n"); + return -EINVAL; + } + + if (snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", SSS_CHIP_NAME, card_id) < 0) { + sss_free_card_id(card_id); + kfree(chip_node); + return -EINVAL; + } + + INIT_LIST_HEAD(&chip_node->func_list); + sss_chip_node_lock(); + list_add_tail(&chip_node->node, &g_chip_list); + sss_chip_node_unlock(); + adapter->chip_node = chip_node; + sdk_info(&adapter->pcidev->dev, + "Success to add new chip %s to global list\n", chip_node->chip_name); + + return 0; +} + +void sss_free_chip_node(struct sss_pci_adapter *adapter) +{ + struct sss_card_node *chip_node = adapter->chip_node; + int id; + int ret; + + sss_chip_node_lock(); + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&adapter->pcidev->dev, + "Success to delete chip %s from global list\n", + chip_node->chip_name); + ret = sscanf(chip_node->chip_name, SSS_CHIP_NAME "%d", &id); + if (ret < 0) + sdk_err(&adapter->pcidev->dev, "Fail to get nic id\n"); + + sss_free_card_id(id); + kfree(chip_node); + } + sss_chip_node_unlock(); +} + +void sss_add_func_list(struct sss_pci_adapter *adapter) +{ + sss_chip_node_lock(); + list_add_tail(&adapter->node, &adapter->chip_node->func_list); + sss_chip_node_unlock(); +} + +void sss_del_func_list(struct sss_pci_adapter *adapter) +{ + sss_chip_node_lock(); + list_del(&adapter->node); + sss_chip_node_unlock(); +} + +static struct sss_card_node *sss_get_chip_node_by_hwdev(const void *hwdev) +{ + struct sss_card_node *chip_node = NULL; + struct sss_card_node *node_tmp = NULL; + struct sss_pci_adapter *dev = NULL; + + if (!hwdev) + return NULL; + + sss_hold_chip_node(); + + list_for_each_entry(node_tmp, 
&g_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + sss_put_chip_node(); + + return chip_node; +} + +static bool sss_is_func_valid(struct sss_pci_adapter *dev) +{ + if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF) + return false; + + return true; +} + +static int sss_get_dynamic_uld_dev_name(struct sss_pci_adapter *dev, enum sss_service_type type, + char *ifname) +{ + u32 out_size = IFNAMSIZ; + struct sss_uld_info *uld_info = sss_get_uld_info(); + + if (!uld_info[type].ioctl) + return -EFAULT; + + return uld_info[type].ioctl(dev->uld_dev[type], SSS_TOOL_GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +static bool sss_support_service_type(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return !dev->mgmt_info->svc_cap.chip_svc_type; +} + +void sss_get_card_info(const void *hwdev, void *bufin) +{ + struct sss_card_node *chip_node = NULL; + struct sss_tool_card_info *info = (struct sss_tool_card_info *)bufin; + struct sss_pci_adapter *dev = NULL; + void *fun_hwdev = NULL; + u32 i = 0; + + info->pf_num = 0; + + chip_node = sss_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + sss_hold_chip_node(); + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!sss_is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (sss_support_nic(fun_hwdev)) { + if (dev->uld_dev[SSS_SERVICE_TYPE_NIC]) { + info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_NIC); + sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, + info->pf[i].name); + } + } + + if (sss_support_ppa(fun_hwdev, NULL)) { + if (dev->uld_dev[SSS_SERVICE_TYPE_PPA]) { + info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_PPA); + sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_PPA, + info->pf[i].name); + } + } + + if (sss_support_service_type(fun_hwdev)) + strscpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + 
strscpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + + sss_put_chip_node(); +} + +bool sss_is_in_host(void) +{ + struct sss_card_node *node = NULL; + struct sss_pci_adapter *adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(node, &g_chip_list, node) { + list_for_each_entry(adapter, &node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + sss_put_chip_node(); + return true; + } + } + } + sss_put_chip_node(); + + return false; +} + +void sss_get_all_chip_id(void *id_info) +{ + int i = 0; + int id; + int ret; + struct sss_card_id *card_id = (struct sss_card_id *)id_info; + struct sss_card_node *node = NULL; + + sss_hold_chip_node(); + list_for_each_entry(node, &g_chip_list, node) { + ret = sscanf(node->chip_name, SSS_CHIP_NAME "%d", &id); + if (ret < 0) { + pr_err("Fail to get chip id\n"); + continue; + } + card_id->id[i] = (u32)id; + i++; + } + sss_put_chip_node(); + + card_id->num = (u32)i; +} + +void *sss_get_pcidev_hdl(void *hwdev) +{ + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + if (!hwdev) + return NULL; + + return dev->pcidev_hdl; +} + +struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev) +{ + struct sss_pci_adapter *adapter = pci_get_drvdata(hal_dev->pdev); + + return adapter->chip_node; +} + +void sss_get_card_func_info(const char *chip_name, struct sss_card_func_info *card_func) +{ + struct sss_card_node *card_node = NULL; + struct sss_pci_adapter *adapter = NULL; + struct sss_func_pdev_info *info = NULL; + + card_func->pf_num = 0; + + sss_hold_chip_node(); + + list_for_each_entry(card_node, &g_chip_list, node) { + if (strncmp(card_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + info = &card_func->pdev_info[card_func->pf_num]; + info->bar1_size = + 
pci_resource_len(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR); + info->bar1_pa = + pci_resource_start(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR); + + info->bar3_size = + pci_resource_len(adapter->pcidev, SSS_PCI_MGMT_REG_BAR); + info->bar3_pa = + pci_resource_start(adapter->pcidev, SSS_PCI_MGMT_REG_BAR); + + card_func->pf_num++; + if (card_func->pf_num >= SSS_PF_NUM_MAX) { + sss_put_chip_node(); + return; + } + } + } + + sss_put_chip_node(); +} + +int sss_get_pf_id(struct sss_card_node *card_node, u32 port_id, u32 *pf_id, u32 *valid) +{ + struct sss_pci_adapter *adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (SSS_TO_PHY_PORT_ID(adapter->hwdev) == port_id) { + *pf_id = sss_get_func_id(adapter->hwdev); + *valid = 1; + break; + } + } + sss_put_chip_node(); + + return 0; +} + +void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type) +{ + struct sss_pci_adapter *dev = NULL; + void *uld = NULL; + + if (!hal_dev) + return NULL; + + dev = pci_get_drvdata(hal_dev->pdev); + if (!dev) + return NULL; + + spin_lock_bh(&dev->uld_lock); + if (!dev->uld_dev[type] || !test_bit(type, &dev->uld_attach_state)) { + spin_unlock_bh(&dev->uld_lock); + return NULL; + } + uld = dev->uld_dev[type]; + + atomic_inc(&dev->uld_ref_cnt[type]); + spin_unlock_bh(&dev->uld_lock); + + return uld; +} + +void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(hal_dev->pdev); + + atomic_dec(&pci_adapter->uld_ref_cnt[type]); +} + +static bool sss_is_pcidev_match_dev_name(const char *dev_name, struct sss_pci_adapter *dev, + enum sss_service_type type) +{ + enum sss_service_type i; + char nic_uld_name[IFNAMSIZ] = {0}; + int err; + + if (type > SSS_SERVICE_TYPE_MAX) + return false; + + if (type == SSS_SERVICE_TYPE_MAX) { + for (i = SSS_SERVICE_TYPE_OVS; i < 
SSS_SERVICE_TYPE_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], dev_name, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], dev_name, IFNAMSIZ)) + return true; + } + + err = sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, (char *)nic_uld_name); + if (err == 0) { + if (!strncmp(nic_uld_name, dev_name, IFNAMSIZ)) + return true; + } + + return false; +} + +struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + sss_hold_chip_node(); + + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_is_pcidev_match_dev_name(dev_name, dev, type)) { + lld_dev_hold(&dev->hal_dev); + sss_put_chip_node(); + return &dev->hal_dev; + } + } + } + + sss_put_chip_node(); + + return NULL; +} + +static bool sss_is_pcidev_match_chip_name(const char *ifname, struct sss_pci_adapter *dev, + struct sss_card_node *chip_node, enum sss_func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (sss_get_func_type(dev->hwdev) != type) + return false; + return true; + } + + return false; +} + +static struct sss_hal_dev *sss_get_dst_type_lld_dev_by_chip_name(const char *ifname, + enum sss_func_type type) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_is_pcidev_match_chip_name(ifname, dev, chip_node, type)) + return &dev->hal_dev; + } + } + + return NULL; +} + +struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name) +{ + struct sss_hal_dev *dev = NULL; + + sss_hold_chip_node(); + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PPF); + if (dev) + goto out; + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PF); + if (dev) + goto 
out; + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_VF); +out: + if (dev) + lld_dev_hold(dev); + sss_put_chip_node(); + + return dev; +} + +struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + sss_hold_chip_node(); + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (SSS_TO_PHY_PORT_ID(dev->hwdev) == port_id && + !strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) { + lld_dev_hold(&dev->hal_dev); + sss_put_chip_node(); + + return &dev->hal_dev; + } + } + } + sss_put_chip_node(); + + return NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h new file mode 100644 index 0000000000000..65f5101783318 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADAPTER_MGMT_H +#define SSS_ADAPTER_MGMT_H + +#include +#include + +#include "sss_version.h" +#include "sss_adapter.h" + +#define SSS_DRV_VERSION SSS_VERSION_STR + +#define SSS_DRV_NAME "sssnic" +#define SSS_CHIP_NAME "sssnic" + +#define SSS_VF_PCI_CFG_REG_BAR 0 +#define SSS_PF_PCI_CFG_REG_BAR 1 + +#define SSS_PCI_INTR_REG_BAR 2 +#define SSS_PCI_MGMT_REG_BAR 3 /* Only PF have mgmt bar */ +#define SSS_PCI_DB_BAR 4 + +#define SSS_IS_VF_DEV(pdev) ((pdev)->device == SSS_DEV_ID_VF) + +#define SSS_CARD_MAX_SIZE (64) + +struct sss_card_id { + u32 id[SSS_CARD_MAX_SIZE]; + u32 num; +}; + +struct sss_func_pdev_info { + u64 bar0_pa; + u64 bar0_size; + u64 bar1_pa; + u64 bar1_size; + u64 bar3_pa; + u64 bar3_size; + u64 rsvd[4]; +}; + +struct sss_card_func_info { + u32 pf_num; + u32 rsvd; + u64 usr_adm_pa; + struct 
sss_func_pdev_info pdev_info[SSS_CARD_MAX_SIZE]; +}; + +enum { + SSS_NO_PROBE = 1, + SSS_PROBE_START = 2, + SSS_PROBE_OK = 3, + SSS_IN_REMOVE = 4, +}; + +struct list_head *sss_get_chip_list(void); +int sss_alloc_chip_node(struct sss_pci_adapter *adapter); +void sss_free_chip_node(struct sss_pci_adapter *adapter); +void sss_pre_init(void); +struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev); +void sss_add_func_list(struct sss_pci_adapter *adapter); +void sss_del_func_list(struct sss_pci_adapter *adapter); +void sss_hold_chip_node(void); +void sss_put_chip_node(void); + +void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state); + +void lld_dev_hold(struct sss_hal_dev *dev); +void lld_dev_put(struct sss_hal_dev *dev); + +void sss_chip_node_lock(void); +void sss_chip_node_unlock(void); + +void *sss_get_pcidev_hdl(void *hwdev); +void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type); + +void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type); + +struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type); + +struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name); + +struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id); + +void sss_get_all_chip_id(void *id_info); + +void sss_get_card_func_info + (const char *chip_name, struct sss_card_func_info *card_func); + +void sss_get_card_info(const void *hwdev, void *bufin); + +bool sss_is_in_host(void); + +int sss_get_pf_id(struct sss_card_node *chip_node, u32 port_id, u32 *pf_id, u32 *valid); + +struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c new file mode 100644 index 0000000000000..452795f7bcb5b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_common.h" + +#define SSS_MIN_SLEEP_TIME(us) ((us) - (us) / 10) + +/* Sleep more than 20ms using msleep is accurate */ +#define SSS_HANDLER_SLEEP(usleep_min, wait_once_us) \ +do { \ + if ((wait_once_us) >= 20 * USEC_PER_MSEC) \ + msleep((wait_once_us) / USEC_PER_MSEC); \ + else \ + usleep_range((usleep_min), (wait_once_us)); \ +} while (0) + +int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, struct sss_dma_addr_align *addr) +{ + dma_addr_t pa; + dma_addr_t pa_align; + void *va = NULL; + void *va_align = NULL; + + va = dma_zalloc_coherent(dev_hdl, size, &pa, flag); + if (!va) + return -ENOMEM; + + pa_align = ALIGN(pa, align); + if (pa_align == pa) { + va_align = va; + goto same_addr_after_align; + } + + dma_free_coherent(dev_hdl, size, va, pa); + + va = dma_zalloc_coherent(dev_hdl, size + align, &pa, flag); + if (!va) + return -ENOMEM; + + pa_align = ALIGN(pa, align); + va_align = (void *)((u64)va + (pa_align - pa)); + +same_addr_after_align: + addr->origin_paddr = pa; + addr->align_paddr = pa_align; + addr->origin_vaddr = va; + addr->align_vaddr = va_align; + addr->real_size = (u32)size; + + return 0; +} + +void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *addr) +{ + dma_free_coherent(dev_hdl, addr->real_size, addr->origin_vaddr, addr->origin_paddr); +} + +int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum sss_process_ret ret; + unsigned long end; + u32 usleep_min = SSS_MIN_SLEEP_TIME(wait_once_us); + + if (!handler) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(wait_total_ms); + do { + ret = handler(priv_data); + if (ret == SSS_PROCESS_OK) + return 0; + else if (ret == SSS_PROCESS_ERR) + return -EIO; + + SSS_HANDLER_SLEEP(usleep_min, wait_once_us); + } while (time_before(jiffies, end)); 
+ + ret = handler(priv_data); + if (ret == SSS_PROCESS_OK) + return 0; + else if (ret == SSS_PROCESS_ERR) + return -EIO; + + return -ETIMEDOUT; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h new file mode 100644 index 0000000000000..36988f134d964 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_COMMON_H +#define SSS_COMMON_H + +#include + +#include "sss_hw_common.h" + +int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, struct sss_dma_addr_align *mem_align); + +void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *mem_align); + +int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler, + u32 wait_total_ms, u32 wait_once_us); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c new file mode 100644 index 0000000000000..7c81f4bee2f4b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_version.h" +#include "sss_adapter_mgmt.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_pci_probe.h" +#include "sss_pci_remove.h" +#include "sss_pci_shutdown.h" +#include "sss_pci_error.h" +#include "sss_hwdev.h" + +#define SSS_DRV_DESC "Intelligent Network Interface Card Driver" + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Network Interface Card Driver"); +MODULE_VERSION(SSS_DRV_VERSION); +MODULE_LICENSE("GPL"); + +static const 
struct pci_device_id g_pci_table[] = { + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_STANDARD), 0}, + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_SPN120), 0}, + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_VF), 0}, + {0, 0} +}; + +MODULE_DEVICE_TABLE(pci, g_pci_table); + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh g_pci_driver_rh = { + .sriov_configure = sss_pci_configure_sriov, +}; +#endif + +static struct pci_error_handlers g_pci_err_handler = { + .error_detected = sss_detect_pci_error, +}; + +static struct pci_driver g_pci_driver = { + .name = SSS_DRV_NAME, + .id_table = g_pci_table, + .probe = sss_pci_probe, + .remove = sss_pci_remove, + .shutdown = sss_pci_shutdown, +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = sss_pci_configure_sriov, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &g_pci_driver_rh, +#endif + .err_handler = &g_pci_err_handler +}; + +int sss_init_pci(void) +{ + int ret; + + pr_info("%s - version %s\n", SSS_DRV_DESC, SSS_DRV_VERSION); + sss_pre_init(); + + ret = pci_register_driver(&g_pci_driver); + if (ret != 0) + return ret; + + return 0; +} + +void sss_exit_pci(void) +{ + pci_unregister_driver(&g_pci_driver); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c new file mode 100644 index 0000000000000..c825864805f31 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_api.h" + +int sss_chip_sync_time(void *hwdev, u64 mstime) +{ + int ret; + struct sss_cmd_sync_time cmd_time = {0}; + u16 out_len = sizeof(cmd_time); + + cmd_time.mstime = mstime; + ret = 
sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SYNC_TIME, &cmd_time, + sizeof(cmd_time), &cmd_time, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_time)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to sync time, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_time.head.state, out_len); + return -EIO; + } + + return 0; +} + +void sss_chip_disable_mgmt_channel(void *hwdev) +{ + sss_chip_set_pf_status(SSS_TO_HWIF(hwdev), SSS_PF_STATUS_INIT); +} + +int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info) +{ + int ret; + struct sss_cmd_board_info cmd_info = {0}; + u16 out_len = sizeof(cmd_info); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_BOARD_INFO, + &cmd_info, sizeof(cmd_info), &cmd_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_info)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to get board info, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_info.head.state, out_len); + return -EIO; + } + + memcpy(board_info, &cmd_info.info, sizeof(*board_info)); + + return 0; +} + +int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num) +{ + int ret; + struct sss_cmd_feature_nego cmd_feature = {0}; + u16 out_len = sizeof(cmd_feature); + + cmd_feature.func_id = sss_get_global_func_id(hwdev); + cmd_feature.opcode = opcode; + if (opcode == SSS_MGMT_MSG_SET_CMD) + memcpy(cmd_feature.feature, feature, (feature_num * sizeof(u64))); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_FEATURE_NEGO, + &cmd_feature, sizeof(cmd_feature), &cmd_feature, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_feature)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to nego feature, opcode: %d, ret: %d, status: 0x%x, out_len: 0x%x\n", + opcode, ret, cmd_feature.head.state, out_len); + return -EINVAL; + } + + if (opcode == SSS_MGMT_MSG_GET_CMD) + memcpy(feature, cmd_feature.feature, (feature_num * sizeof(u64))); + + return 0; +} + +int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 
func_id) +{ + int ret; + struct sss_cmd_bdf_info cmd_bdf = {0}; + u16 out_len = sizeof(cmd_bdf); + + cmd_bdf.bus = bus_id; + cmd_bdf.device = device_id; + cmd_bdf.function = func_id; + cmd_bdf.function_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SEND_BDF_INFO, + &cmd_bdf, sizeof(cmd_bdf), &cmd_bdf, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bdf)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set bdf info, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_bdf.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_cmd_channel_detect cmd_detect = {0}; + u16 out_len = sizeof(cmd_detect); + + if (!hwdev) + return -EINVAL; + + cmd_detect.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CHANNEL_DETECT, + &cmd_detect, sizeof(cmd_detect), &cmd_detect, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_detect)) { + sdk_err(hwdev->dev_hdl, + "Fail to send channel detect, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, cmd_detect.head.state, out_len); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h new file mode 100644 index 0000000000000..d0471e8a9514d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_API_H +#define SSS_HWDEV_API_H + +#include + +#include "sss_hw_mbx_msg.h" +#include "sss_hwdev.h" + +int sss_chip_sync_time(void *hwdev, u64 mstime); +int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info); +void sss_chip_disable_mgmt_channel(void *hwdev); +int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num); +int sss_chip_set_pci_bdf_num(void *hwdev, u8 
bus_id, u8 device_id, u8 func_id); +int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c new file mode 100644 index 0000000000000..412cc574a563d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c @@ -0,0 +1,748 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwdev_cap.h" + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +/* L2NIC */ +#define SSS_CFG_MAX_QP 256 + +/* RDMA */ +#define SSS_RDMA_RSVD_QP 2 +#define SSS_ROCE_MAX_WQE (8 * K_UNIT - 1) + +#define SSS_RDMA_MAX_SQ_SGE 16 + +#define SSS_ROCE_MAX_RQ_SGE 16 + +#define SSS_RDMA_MAX_SQ_DESC_SIZE 256 + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 48B(max_task_seg_len)) */ +#define SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE 192 + +#define SSS_ROCE_MAX_RQ_DESC_SIZE 256 + +#define SSS_ROCE_QPC_ENTRY_SIZE 512 + +#define SSS_WQEBB_SIZE 64 + +#define SSS_ROCE_RDMARC_ENTRY_SIZE 32 +#define SSS_ROCE_MAX_QP_INIT_RDMA 128 +#define SSS_ROCE_MAX_QP_DEST_RDMA 128 + +#define SSS_ROCE_MAX_SRQ_WQE (16 * K_UNIT - 1) +#define SSS_ROCE_RSVD_SRQ 0 +#define SSS_ROCE_MAX_SRQ_SGE 15 +#define ROCE_SRQC_ENTERY_SIZE 64 + +#define SSS_ROCE_MAX_SRQ 0x400 +#define SSS_ROCE_MAX_CQ 0x800 +#define SSS_ROCE_MAX_QP 0x400 +#define SSS_ROCE_MAX_MPT 0x400 +#define SSS_ROCE_MAX_DRC_QP 0x40 + +#define SSS_RDMA_MAX_CQE (8 * M_UNIT - 1) +#define SSS_RDMA_RSVD_CQ 0 + +#define SSS_RDMA_CQC_ENTRY_SIZE 128 + +#define SSS_RDMA_CQE_SIZE 64 +#define SSS_RDMA_RSVD_MRW 128 +#define SSS_RDMA_MPT_ENTRY_SIZE 64 +#define SSS_RDMA_MTT_NUM (1 * G_UNIT) +#define SSS_LOG_MTT_SEG 5 +#define 
SSS_MTT_ENTRY_SIZE 8 +#define SSS_LOG_RDMARC_SEG 3 + +#define SSS_LOCAL_ACK_DELAY 15 +#define SSS_RDMA_PORT_NUM 1 +#define SSS_ROCE_MAX_MSG_SIZE (2 * G_UNIT) + +#define SSS_DB_PAGE_SIZE_K (4 * K_UNIT) +#define SSS_DWQE_SIZE 256 + +#define SSS_PD_NUM (128 * K_UNIT) +#define SSS_RSVD_PD 0 + +#define SSS_MAX_XRCD (64 * K_UNIT) +#define SSS_RSVD_XRCD 0 + +#define SSS_MAX_GID_PER_PORT 128 +#define SSS_GID_ENTRY_SIZE 32 +#define SSS_RSVD_LKEY ((SSS_RDMA_RSVD_MRW - 1) << 8) +#define SSS_PAGE_SIZE_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) +#define SSS_ROCE_MODE 1 + +#define SSS_MAX_FRPL_LEN 511 +#define SSS_MAX_PKEY 1 + +/* ToE */ +#define SSS_TOE_PCTX_SIZE 1024 +#define SSS_TOE_SCQC_SIZE 64 + +/* FC */ +#define SSS_FC_PQPC_SIZE 256 +#define SSS_FC_CQPC_SIZE 256 +#define SSS_FC_SQE_SIZE 128 +#define SSS_FC_SCQC_SIZE 64 +#define SSS_FC_SCQE_SIZE 64 +#define SSS_FC_SRQC_SIZE 64 +#define SSS_FC_SRQE_SIZE 32 + +/* OVS */ +#define SSS_OVS_PCTX_SIZE 512 + +/* PPA */ +#define SSS_PPA_PCTX_SIZE 512 + +/* IPsec */ +#define SSS_IPSEC_SACTX_SIZE 512 + +/* VirtIO */ +#define SSS_VIRTIO_BASE_VQ_SIZE 2048U +#define SSS_VIRTIO_DEFAULT_VQ_SIZE 8192U + +struct sss_cmd_dev_cap_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 timer_en; + u8 host_valid_bitmap; + u8 rsvd_host; + + u16 svc_type; + u16 max_vf; + u8 flexq_en; + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u8 rsvd_func1; + u32 rsvd_func2; + + u8 sf_svc_attr; + u8 func_sf_en; + u8 lb_mode; + u8 smf_pg; + + u32 max_connect_num; + u16 max_stick2cache_num; + u16 bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + + u8 host_sf_en; + u8 master_host_id; + u8 srv_multi_host_mode; + u8 rsvd2_sr; + + u32 rsvd_func3[5]; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u16 nic_def_queue_num; + u16 rsvd_nic1; + u32 
rsvd_nic2[2]; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + u32 roce_max_drc_qp; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + u8 roce_srq_container_mode; + u8 rsvd_roce1[3]; + u32 rsvd_roce2[5]; + + /* IPsec */ + u32 ipsec_max_sactx; + u16 ipsec_max_cq; + u16 rsvd_ipsec1; + u32 rsvd_ipsec2[2]; + + /* OVS */ + u32 ovs_max_qpc; + u32 rsvd_ovs[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u16 toe_max_srq; + u16 toe_srq_id_start; + u16 toe_max_mpt; + u16 toe_max_cctxt; + u32 rsvd_toe[2]; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u8 rsvd_fc1[2]; + u32 rsvd_fc2[5]; + + /* VBS */ + u16 vbs_max_volq; + u16 rsvd_vbs1; + u32 rsvd_vbs2[3]; + + u16 pseudo_vf_start_id; + u16 pseudo_vf_num; + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_bfilter_start_addr; + u16 pseudo_vf_bfilter_len; + u32 rsvd_glb[8]; +}; + +enum { + SSS_SF_SVC_FT_BIT = (1 << 0), + SSS_SF_SVC_RDMA_BIT = (1 << 1), +}; + +enum sss_cfg_cmd { + SSS_CFG_CMD_GET_CAP_CFG = 0, + SSS_CFG_CMD_GET_HOST_TIMER = 1, +}; + +static void sss_print_pubic_cap(void *dev_hdl, const struct sss_service_cap *svc_cap) +{ + sdk_info(dev_hdl, + "Get public capbility: svc_type: 0x%x, chip_svc_type: 0x%x\n", + svc_cap->svc_type, svc_cap->chip_svc_type); + sdk_info(dev_hdl, + "host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x\n", + svc_cap->host_id, svc_cap->ep_id, svc_cap->er_id, svc_cap->port_id); + sdk_info(dev_hdl, + "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", + svc_cap->host_total_function, svc_cap->host_oq_id_mask_val, svc_cap->max_vf); + sdk_info(dev_hdl, + "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x\n", + svc_cap->pf_num, 
svc_cap->pf_id_start, svc_cap->vf_num, svc_cap->vf_id_start); + sdk_info(dev_hdl, + "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n", + svc_cap->host_valid_bitmap, svc_cap->master_host_id, svc_cap->srv_multi_host_mode); + sdk_info(dev_hdl, + "cos_valid_bitmap: 0x%x, port_cos_valid_bitmap: 0x%x, flexq_en: 0x%x, virtio_vq_size: 0x%x\n", + svc_cap->cos_valid_bitmap, svc_cap->port_cos_valid_bitmap, svc_cap->flexq_en, + svc_cap->virtio_vq_size); + sdk_info(dev_hdl, + "pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x, pseudo_vf_max_pctx: 0x%x\n", + svc_cap->pseudo_vf_start_id, svc_cap->pseudo_vf_num, svc_cap->pseudo_vf_max_pctx); + sdk_info(dev_hdl, + "pseudo_vf_bfilter_start_addr: 0x%x, pseudo_vf_bfilter_len: 0x%x\n", + svc_cap->pseudo_vf_bfilter_start_addr, svc_cap->pseudo_vf_bfilter_len); +} + +static void sss_parse_qmm_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, struct sss_cmd_dev_cap_cfg *cmd_cap) +{ + struct sss_dev_sf_svc_attr *sf_svc_attr = &svc_cap->sf_svc_attr; + + svc_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num; + svc_cap->pseudo_vf_cfg_num = cmd_cap->pseudo_vf_num; + svc_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id; + svc_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx; + svc_cap->pseudo_vf_bfilter_start_addr = cmd_cap->pseudo_vf_bfilter_start_addr; + svc_cap->pseudo_vf_bfilter_len = cmd_cap->pseudo_vf_bfilter_len; + + if (SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev)) + svc_cap->virtio_vq_size = (u16)(SSS_VIRTIO_BASE_VQ_SIZE << svc_cap->virtio_vq_size); + else + svc_cap->virtio_vq_size = SSS_VIRTIO_DEFAULT_VQ_SIZE; + + sf_svc_attr->rdma_en = !!(cmd_cap->sf_svc_attr & SSS_SF_SVC_RDMA_BIT); + + svc_cap->smf_pg = cmd_cap->smf_pg; + svc_cap->lb_mode = cmd_cap->lb_mode; + + svc_cap->timer_en = cmd_cap->timer_en; + svc_cap->bfilter_start_addr = cmd_cap->bfilter_start_addr; + svc_cap->bfilter_len = cmd_cap->bfilter_len; + svc_cap->host_oq_id_mask_val = cmd_cap->host_oq_id_mask_val; + svc_cap->hash_bucket_num = 
cmd_cap->hash_bucket_num; + svc_cap->max_stick2cache_num = cmd_cap->max_stick2cache_num; + svc_cap->max_connect_num = cmd_cap->max_connect_num; +} + +static void sss_parse_pubic_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + svc_cap->svc_type = cmd_cap->svc_type; + svc_cap->chip_svc_type = cmd_cap->svc_type; + + svc_cap->ep_id = cmd_cap->ep_id; + svc_cap->er_id = cmd_cap->er_id; + svc_cap->host_id = cmd_cap->host_id; + svc_cap->port_id = cmd_cap->port_id; + + svc_cap->host_total_function = cmd_cap->host_total_function; + svc_cap->host_valid_bitmap = cmd_cap->host_valid_bitmap; + svc_cap->master_host_id = cmd_cap->master_host_id; + svc_cap->srv_multi_host_mode = cmd_cap->srv_multi_host_mode; + + svc_cap->flexq_en = cmd_cap->flexq_en; + svc_cap->cos_valid_bitmap = cmd_cap->cos_valid_bitmap; + svc_cap->port_cos_valid_bitmap = cmd_cap->port_cos_valid_bitmap; + + if (type != SSS_FUNC_TYPE_VF) { + svc_cap->pf_num = cmd_cap->pf_num; + svc_cap->pf_id_start = cmd_cap->pf_id_start; + svc_cap->vf_num = cmd_cap->vf_num; + svc_cap->vf_id_start = cmd_cap->vf_id_start; + svc_cap->max_vf = cmd_cap->max_vf; + } else { + svc_cap->max_vf = 0; + } + + svc_cap->sf_en = (type == SSS_FUNC_TYPE_PPF) ? 
+ (!!cmd_cap->host_sf_en) : (!!cmd_cap->func_sf_en); + + sss_parse_qmm_cap(hwdev, svc_cap, cmd_cap); + sss_print_pubic_cap(hwdev->dev_hdl, svc_cap); +} + +static void sss_parse_l2nic_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_nic_service_cap *nic_svc_cap = &svc_cap->nic_cap; + + if (!SSS_IS_NIC_TYPE(hwdev)) + return; + + nic_svc_cap->max_rq = cmd_cap->nic_max_rq_id + 1; + nic_svc_cap->max_sq = cmd_cap->nic_max_sq_id + 1; + nic_svc_cap->def_queue_num = cmd_cap->nic_def_queue_num; + + sdk_info(hwdev->dev_hdl, + "Get Nic capbility, max_sq: 0x%x, max_rq: 0x%x, def_queue_num: 0x%x\n", + nic_svc_cap->max_sq, nic_svc_cap->max_rq, nic_svc_cap->def_queue_num); + + /* Check parameters from firmware */ + if (nic_svc_cap->max_sq > SSS_CFG_MAX_QP || + nic_svc_cap->max_rq > SSS_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Exceed limit[1-%d]:sq: %u, rq: %u\n", + SSS_CFG_MAX_QP, nic_svc_cap->max_sq, nic_svc_cap->max_rq); + nic_svc_cap->max_rq = SSS_CFG_MAX_QP; + nic_svc_cap->max_sq = SSS_CFG_MAX_QP; + } +} + +static void sss_parse_fc_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_fc_service_cap *fc_svc_cap = &svc_cap->fc_cap; + struct sss_dev_fc_svc_cap *dev_fc_cap = &fc_svc_cap->dev_fc_cap; + + if (!SSS_IS_FC_TYPE(hwdev)) + return; + + /* FC without virtulization */ + if (type != SSS_FUNC_TYPE_PF && type != SSS_FUNC_TYPE_PPF) + return; + + dev_fc_cap->srq_num = cmd_cap->fc_max_srq; + dev_fc_cap->scq_num = cmd_cap->fc_max_scq; + dev_fc_cap->max_parent_qpc_num = cmd_cap->fc_max_pctx; + dev_fc_cap->max_child_qpc_num = cmd_cap->fc_max_cctx; + dev_fc_cap->child_qpc_id_start = cmd_cap->fc_cctx_id_start; + dev_fc_cap->vp_id_start = cmd_cap->fc_vp_id_start; + dev_fc_cap->vp_id_end = cmd_cap->fc_vp_id_end; + + fc_svc_cap->parent_qpc_size = SSS_FC_PQPC_SIZE; + fc_svc_cap->child_qpc_size = 
SSS_FC_CQPC_SIZE; + fc_svc_cap->sqe_size = SSS_FC_SQE_SIZE; + + fc_svc_cap->scqc_size = SSS_FC_SCQC_SIZE; + fc_svc_cap->scqe_size = SSS_FC_SCQE_SIZE; + + fc_svc_cap->srqc_size = SSS_FC_SRQC_SIZE; + fc_svc_cap->srqe_size = SSS_FC_SRQE_SIZE; + + sdk_info(hwdev->dev_hdl, "Get FC capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_parent_qpc_num: 0x%x, max_child_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x\n", + dev_fc_cap->max_parent_qpc_num, dev_fc_cap->max_child_qpc_num, + dev_fc_cap->scq_num, dev_fc_cap->srq_num); + sdk_info(hwdev->dev_hdl, "child_qpc_id_start: 0x%x, vp_id_start: 0x%x, vp_id_end: 0x%x\n", + dev_fc_cap->child_qpc_id_start, dev_fc_cap->vp_id_start, dev_fc_cap->vp_id_end); +} + +static void sss_init_rdma_cap_param(struct sss_hwdev *hwdev) +{ + struct sss_rdma_service_cap *rdma_svc_cap = &hwdev->mgmt_info->svc_cap.rdma_cap; + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &rdma_svc_cap->dev_rdma_cap.roce_own_cap; + + rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG; + rdma_svc_cap->log_rdmarc = SSS_LOG_RDMARC_SEG; + rdma_svc_cap->reserved_qp = SSS_RDMA_RSVD_QP; + rdma_svc_cap->max_sq_sg = SSS_RDMA_MAX_SQ_SGE; + + /* RoCE */ + roce_own_cap->qpc_entry_size = SSS_ROCE_QPC_ENTRY_SIZE; + roce_own_cap->max_wqe = SSS_ROCE_MAX_WQE; + roce_own_cap->max_rq_sg = SSS_ROCE_MAX_RQ_SGE; + roce_own_cap->max_sq_inline_data_size = SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE; + roce_own_cap->max_rq_desc_size = SSS_ROCE_MAX_RQ_DESC_SIZE; + roce_own_cap->rdmarc_entry_size = SSS_ROCE_RDMARC_ENTRY_SIZE; + roce_own_cap->max_qp_init_rdma = SSS_ROCE_MAX_QP_INIT_RDMA; + roce_own_cap->max_qp_dest_rdma = SSS_ROCE_MAX_QP_DEST_RDMA; + roce_own_cap->max_srq_wqe = SSS_ROCE_MAX_SRQ_WQE; + roce_own_cap->reserved_srq = SSS_ROCE_RSVD_SRQ; + roce_own_cap->max_srq_sge = SSS_ROCE_MAX_SRQ_SGE; + roce_own_cap->srqc_entry_size = ROCE_SRQC_ENTERY_SIZE; + roce_own_cap->max_msg_size = SSS_ROCE_MAX_MSG_SIZE; + + rdma_svc_cap->max_sq_desc_size = SSS_RDMA_MAX_SQ_DESC_SIZE; + rdma_svc_cap->wqebb_size = 
SSS_WQEBB_SIZE; + rdma_svc_cap->max_cqe = SSS_RDMA_MAX_CQE; + rdma_svc_cap->reserved_cq = SSS_RDMA_RSVD_CQ; + rdma_svc_cap->cqc_entry_size = SSS_RDMA_CQC_ENTRY_SIZE; + rdma_svc_cap->cqe_size = SSS_RDMA_CQE_SIZE; + rdma_svc_cap->reserved_mrw = SSS_RDMA_RSVD_MRW; + rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE; + + rdma_svc_cap->max_fmr_map = 0xff; + rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM; + rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG; + rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE; + rdma_svc_cap->log_rdmarc_seg = SSS_LOG_RDMARC_SEG; + rdma_svc_cap->local_ca_ack_delay = SSS_LOCAL_ACK_DELAY; + rdma_svc_cap->port_num = SSS_RDMA_PORT_NUM; + rdma_svc_cap->db_page_size = SSS_DB_PAGE_SIZE_K; + rdma_svc_cap->direct_wqe_size = SSS_DWQE_SIZE; + rdma_svc_cap->pd_num = SSS_PD_NUM; + rdma_svc_cap->reserved_pd = SSS_RSVD_PD; + rdma_svc_cap->max_xrcd = SSS_MAX_XRCD; + rdma_svc_cap->reserved_xrcd = SSS_RSVD_XRCD; + rdma_svc_cap->max_gid_per_port = SSS_MAX_GID_PER_PORT; + rdma_svc_cap->gid_entry_size = SSS_GID_ENTRY_SIZE; + rdma_svc_cap->reserved_lkey = SSS_RSVD_LKEY; + rdma_svc_cap->comp_vector_num = (u32)hwdev->mgmt_info->eq_info.ceq_num; + rdma_svc_cap->page_size_cap = SSS_PAGE_SIZE_CAP; + rdma_svc_cap->flag = (SSS_RDMA_BMME_FLAG_LOCAL_INV | + SSS_RDMA_BMME_FLAG_REMOTE_INV | + SSS_RDMA_BMME_FLAG_FAST_REG_WR | + SSS_RDMA_DEV_CAP_FLAG_XRC | + SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW | + SSS_RDMA_BMME_FLAG_TYPE_2_WIN | + SSS_RDMA_BMME_FLAG_WIN_TYPE_2B | + SSS_RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_svc_cap->max_frpl_len = SSS_MAX_FRPL_LEN; + rdma_svc_cap->max_pkey = SSS_MAX_PKEY; +} + +static void sss_parse_roce_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &svc_cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + if (!SSS_IS_ROCE_TYPE(hwdev)) + return; + + roce_own_cap->max_srq = cmd_cap->roce_max_srq; + roce_own_cap->max_cq = cmd_cap->roce_max_cq; + 
roce_own_cap->max_qp = cmd_cap->roce_max_qp; + roce_own_cap->max_mpt = cmd_cap->roce_max_mpt; + roce_own_cap->max_drc_qp = cmd_cap->roce_max_drc_qp; + + roce_own_cap->wqe_cl_size = cmd_cap->roce_wqe_cl_size; + roce_own_cap->wqe_cl_start = cmd_cap->roce_wqe_cl_start; + roce_own_cap->wqe_cl_end = cmd_cap->roce_wqe_cl_end; + + if (roce_own_cap->max_qp == 0) { + roce_own_cap->max_drc_qp = SSS_ROCE_MAX_DRC_QP; + if (type == SSS_FUNC_TYPE_PF || type == SSS_FUNC_TYPE_PPF) { + roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ; + roce_own_cap->max_cq = SSS_ROCE_MAX_CQ; + roce_own_cap->max_qp = SSS_ROCE_MAX_QP; + roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT; + } else { + roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ / 2; + roce_own_cap->max_cq = SSS_ROCE_MAX_CQ / 2; + roce_own_cap->max_qp = SSS_ROCE_MAX_QP / 2; + roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT / 2; + } + } + + sss_init_rdma_cap_param(hwdev); + + sdk_info(hwdev->dev_hdl, "Get ROCE capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_qps: 0x%x, max_srq: 0x%x, max_cq: 0x%x, max_mpt: 0x%x, max_drct: 0x%x\n", + roce_own_cap->max_qp, roce_own_cap->max_srq, roce_own_cap->max_cq, + roce_own_cap->max_mpt, roce_own_cap->max_drc_qp); + sdk_info(hwdev->dev_hdl, "wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", + roce_own_cap->wqe_cl_start, roce_own_cap->wqe_cl_end, roce_own_cap->wqe_cl_size); +} + +static void sss_parse_rdma_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_rdma_service_cap *rdma_svc_cap = &svc_cap->rdma_cap; + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &rdma_svc_cap->dev_rdma_cap.roce_own_cap; + + if (!SSS_IS_RDMA_ENABLE(hwdev)) + return; + + roce_own_cap->dmtt_cl_start = cmd_cap->roce_dmtt_cl_start; + roce_own_cap->dmtt_cl_end = cmd_cap->roce_dmtt_cl_end; + roce_own_cap->dmtt_cl_size = cmd_cap->roce_dmtt_cl_size; + + roce_own_cap->cmtt_cl_start = cmd_cap->roce_cmtt_cl_start; + roce_own_cap->cmtt_cl_end = 
cmd_cap->roce_cmtt_cl_end; + roce_own_cap->cmtt_cl_size = cmd_cap->roce_cmtt_cl_size; + + rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG; + rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG; + rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE; + rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE; + rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM; + + sdk_info(hwdev->dev_hdl, "Get RDMA capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "cmtt_cl_start: 0x%x, cmtt_cl_end: 0x%x, cmtt_cl_size: 0x%x\n", + roce_own_cap->cmtt_cl_start, roce_own_cap->cmtt_cl_end, + roce_own_cap->cmtt_cl_size); + sdk_info(hwdev->dev_hdl, "dmtt_cl_start: 0x%x, dmtt_cl_end: 0x%x, dmtt_cl_size: 0x%x\n", + roce_own_cap->dmtt_cl_start, roce_own_cap->dmtt_cl_end, + roce_own_cap->dmtt_cl_size); +} + +static void sss_parse_ovs_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ovs_service_cap *ovs_cap = &svc_cap->ovs_cap; + struct sss_dev_ovs_svc_cap *dev_ovs_cap = &ovs_cap->dev_ovs_cap; + + if (!SSS_IS_OVS_TYPE(hwdev)) + return; + + dev_ovs_cap->max_pctx = cmd_cap->ovs_max_qpc; + dev_ovs_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id; + dev_ovs_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num; + dev_ovs_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx; + dev_ovs_cap->dynamic_qp_en = cmd_cap->flexq_en; + ovs_cap->pctx_size = SSS_OVS_PCTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get OVS capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "max_pctxs: 0x%x, pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x\n", + dev_ovs_cap->max_pctx, dev_ovs_cap->pseudo_vf_start_id, + dev_ovs_cap->pseudo_vf_num); + sdk_info(hwdev->dev_hdl, "pseudo_vf_max_pctx: 0x%x, dynamic_qp_en: 0x%x\n", + dev_ovs_cap->pseudo_vf_max_pctx, dev_ovs_cap->dynamic_qp_en); +} + +static void sss_parse_ppa_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct 
sss_ppa_service_cap *ppa_cap = &svc_cap->ppa_cap; + + if (!SSS_IS_PPA_TYPE(hwdev)) + return; + + ppa_cap->qpc_pseudo_vf_start = cmd_cap->pseudo_vf_start_id; + ppa_cap->qpc_pseudo_vf_num = cmd_cap->pseudo_vf_num; + ppa_cap->qpc_pseudo_vf_ctx_num = cmd_cap->pseudo_vf_max_pctx; + ppa_cap->bloomfilter_len = cmd_cap->pseudo_vf_bfilter_len; + ppa_cap->bloomfilter_en = !!cmd_cap->pseudo_vf_bfilter_len; + ppa_cap->pctx_size = SSS_PPA_PCTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get PPA capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "qpc_pseudo_vf_start: 0x%x, qpc_pseudo_vf_num: 0x%x, qpc_pseudo_vf_ctx_num: 0x%x\n", + ppa_cap->qpc_pseudo_vf_start, ppa_cap->qpc_pseudo_vf_num, + ppa_cap->qpc_pseudo_vf_ctx_num); +} + +static void sss_parse_toe_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_toe_service_cap *toe_svc_cap = &svc_cap->toe_cap; + struct sss_dev_toe_svc_cap *dev_toe_cap = &toe_svc_cap->dev_toe_cap; + + if (!SSS_IS_TOE_TYPE(hwdev)) + return; + + dev_toe_cap->max_srq = cmd_cap->toe_max_srq; + dev_toe_cap->max_cq = cmd_cap->toe_max_cq; + dev_toe_cap->srq_id_start = cmd_cap->toe_srq_id_start; + dev_toe_cap->max_pctx = cmd_cap->toe_max_pctx; + dev_toe_cap->max_cctxt = cmd_cap->toe_max_cctxt; + dev_toe_cap->max_mpt = cmd_cap->toe_max_mpt; + + toe_svc_cap->pctx_size = SSS_TOE_PCTX_SIZE; + toe_svc_cap->scqc_size = SSS_TOE_SCQC_SIZE; + + sdk_info(hwdev->dev_hdl, "Get TOE capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_pctx: 0x%x, max_cq: 0x%x, max_srq: 0x%x, srq_id_start: 0x%x, max_mpt: 0x%x\n", + dev_toe_cap->max_pctx, dev_toe_cap->max_cq, dev_toe_cap->max_srq, + dev_toe_cap->srq_id_start, dev_toe_cap->max_mpt); +} + +static void sss_parse_ipsec_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ipsec_service_cap *ipsec_cap = &svc_cap->ipsec_cap; + struct 
sss_dev_ipsec_svc_cap *dev_ipsec_cap = &ipsec_cap->dev_ipsec_cap; + + if (!SSS_IS_IPSEC_TYPE(hwdev)) + return; + + dev_ipsec_cap->max_sactx = cmd_cap->ipsec_max_sactx; + dev_ipsec_cap->max_cq = cmd_cap->ipsec_max_cq; + ipsec_cap->sactx_size = SSS_IPSEC_SACTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get IPSEC capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "max_sactx: 0x%x, max_cq: 0x%x\n", + dev_ipsec_cap->max_sactx, dev_ipsec_cap->max_cq); +} + +static void sss_parse_vbs_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_vbs_service_cap *vbs_cap = &svc_cap->vbs_cap; + + if (!SSS_IS_VBS_TYPE(hwdev)) + return; + + vbs_cap->vbs_max_volq = cmd_cap->vbs_max_volq; + + sdk_info(hwdev->dev_hdl, "Get VBS capbility, type: 0x%x, vbs_max_volq: 0x%x\n", + type, vbs_cap->vbs_max_volq); +} + +static void sss_parse_dev_cap(struct sss_hwdev *hwdev, + struct sss_cmd_dev_cap_cfg *cmd_cap, enum sss_func_type type) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + + sss_parse_pubic_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_l2nic_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_fc_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_toe_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_rdma_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_roce_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ovs_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ipsec_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ppa_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_vbs_cap(hwdev, svc_cap, cmd_cap, type); +} + +static int sss_chip_get_cap(struct sss_hwdev *hwdev, struct sss_cmd_dev_cap_cfg *cmd_cap) +{ + int ret; + u16 out_len = sizeof(*cmd_cap); + + cmd_cap->func_id = sss_get_global_func_id(hwdev); + sdk_info(hwdev->dev_hdl, "Get svc_cap, func_id: %u\n", cmd_cap->func_id); + + ret = sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_CFGM, SSS_CFG_CMD_GET_CAP_CFG, + cmd_cap, sizeof(*cmd_cap), 
cmd_cap, &out_len, 0, + SSS_CHANNEL_COMM); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_cap)) { + sdk_err(hwdev->dev_hdl, + "Fail to get capability, err: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_cap->head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_init_capability(struct sss_hwdev *hwdev) +{ + int ret; + enum sss_func_type type = SSS_GET_FUNC_TYPE(hwdev); + struct sss_cmd_dev_cap_cfg cmd_cap = {0}; + + if (type != SSS_FUNC_TYPE_PF && + type != SSS_FUNC_TYPE_VF && + type != SSS_FUNC_TYPE_PPF) { + sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", type); + return -EINVAL; + } + + ret = sss_chip_get_cap(hwdev, &cmd_cap); + if (ret != 0) + return ret; + + sss_parse_dev_cap(hwdev, &cmd_cap, type); + + sdk_info(hwdev->dev_hdl, "Success to init capability\n"); + return 0; +} + +void sss_deinit_capability(struct sss_hwdev *hwdev) +{ + sdk_info(hwdev->dev_hdl, "Success to deinit capability"); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h new file mode 100644 index 0000000000000..fa4a8809e1fd5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_CAP_H +#define SSS_HWDEV_CAP_H + +#include "sss_hwdev.h" + +int sss_init_capability(struct sss_hwdev *dev); +void sss_deinit_capability(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c new file mode 100644 index 0000000000000..0469392468273 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" 
+#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hw_svc_cap.h" + +#define SSS_DEFAULT_RX_BUF_SIZE_LEVEL ((u16)0xB) + +enum sss_rx_buf_size { + SSS_RX_BUF_SIZE_32B = 0x20, + SSS_RX_BUF_SIZE_64B = 0x40, + SSS_RX_BUF_SIZE_96B = 0x60, + SSS_RX_BUF_SIZE_128B = 0x80, + SSS_RX_BUF_SIZE_192B = 0xC0, + SSS_RX_BUF_SIZE_256B = 0x100, + SSS_RX_BUF_SIZE_384B = 0x180, + SSS_RX_BUF_SIZE_512B = 0x200, + SSS_RX_BUF_SIZE_768B = 0x300, + SSS_RX_BUF_SIZE_1K = 0x400, + SSS_RX_BUF_SIZE_1_5K = 0x600, + SSS_RX_BUF_SIZE_2K = 0x800, + SSS_RX_BUF_SIZE_3K = 0xC00, + SSS_RX_BUF_SIZE_4K = 0x1000, + SSS_RX_BUF_SIZE_8K = 0x2000, + SSS_RX_BUF_SIZE_16K = 0x4000, +}; + +const int sss_rx_buf_size_level[] = { + SSS_RX_BUF_SIZE_32B, + SSS_RX_BUF_SIZE_64B, + SSS_RX_BUF_SIZE_96B, + SSS_RX_BUF_SIZE_128B, + SSS_RX_BUF_SIZE_192B, + SSS_RX_BUF_SIZE_256B, + SSS_RX_BUF_SIZE_384B, + SSS_RX_BUF_SIZE_512B, + SSS_RX_BUF_SIZE_768B, + SSS_RX_BUF_SIZE_1K, + SSS_RX_BUF_SIZE_1_5K, + SSS_RX_BUF_SIZE_2K, + SSS_RX_BUF_SIZE_3K, + SSS_RX_BUF_SIZE_4K, + SSS_RX_BUF_SIZE_8K, + SSS_RX_BUF_SIZE_16K, +}; + +static u16 sss_get_rx_buf_size_level(int buf_size) +{ + u16 i; + u16 cnt = ARRAY_LEN(sss_rx_buf_size_level); + + for (i = 0; i < cnt; i++) { + if (sss_rx_buf_size_level[i] == buf_size) + return i; + } + + return SSS_DEFAULT_RX_BUF_SIZE_LEVEL; /* default 2K */ +} + +static int sss_chip_get_interrupt_cfg(void *hwdev, + struct sss_irq_cfg *intr_cfg, u16 channel) +{ + int ret; + struct sss_cmd_msix_config cmd_msix = {0}; + u16 out_len = sizeof(cmd_msix); + + cmd_msix.opcode = SSS_MGMT_MSG_GET_CMD; + cmd_msix.func_id = sss_get_global_func_id(hwdev); + cmd_msix.msix_index = intr_cfg->msix_id; + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to get intr config, ret: %d, status: 0x%x, out_len: 0x%x, channel: 
0x%x\n", + ret, cmd_msix.head.state, out_len, channel); + return -EINVAL; + } + + intr_cfg->lli_credit = cmd_msix.lli_credit_cnt; + intr_cfg->lli_timer = cmd_msix.lli_timer_cnt; + intr_cfg->pending = cmd_msix.pending_cnt; + intr_cfg->coalesc_timer = cmd_msix.coalesce_timer_cnt; + intr_cfg->resend_timer = cmd_msix.resend_timer_cnt; + + return 0; +} + +int sss_chip_set_msix_attr(void *hwdev, + struct sss_irq_cfg intr_cfg, u16 channel) +{ + int ret; + struct sss_irq_cfg temp_cfg = {0}; + + if (!hwdev) + return -EINVAL; + + temp_cfg.msix_id = intr_cfg.msix_id; + + ret = sss_chip_get_interrupt_cfg(hwdev, &temp_cfg, channel); + if (ret != 0) + return -EINVAL; + + if (intr_cfg.lli_set == 0) { + intr_cfg.lli_credit = temp_cfg.lli_credit; + intr_cfg.lli_timer = temp_cfg.lli_timer; + } + + if (intr_cfg.coalesc_intr_set == 0) { + intr_cfg.pending = temp_cfg.pending; + intr_cfg.coalesc_timer = temp_cfg.coalesc_timer; + intr_cfg.resend_timer = temp_cfg.resend_timer; + } + + return sss_chip_set_eq_msix_attr(hwdev, &intr_cfg, channel); +} +EXPORT_SYMBOL(sss_chip_set_msix_attr); + +void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en) +{ + u32 val; + + if (!hwdev) + return; + + val = SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID) | + SSS_SET_MSI_CLR_INDIR(!!clear_en, RESEND_TIMER_CLR); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_clear_msix_resend_bit); + +int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel) +{ + int ret = 0; + struct sss_cmd_func_reset cmd_reset = {0}; + u16 out_len = sizeof(cmd_reset); + + if (!hwdev) + return -EINVAL; + + cmd_reset.func_id = func_id; + cmd_reset.reset_flag = flag; + sdk_info(SSS_TO_DEV(hwdev), "Func reset, flag: 0x%llx, channel:0x%x\n", flag, channel); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FUNC_RESET, + &cmd_reset, sizeof(cmd_reset), &cmd_reset, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, 
&cmd_reset)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to reset func, flag 0x%llx, ret: %d, status: 0x%x, out_len: 0x%x\n", + flag, ret, cmd_reset.head.state, out_len); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_reset_function); + +int sss_chip_set_root_ctx(void *hwdev, + u32 rq_depth, u32 sq_depth, int rx_size, u16 channel) +{ + int ret; + struct sss_cmd_root_ctxt cmd_root = {0}; + u16 out_len = sizeof(cmd_root); + + if (!hwdev) + return -EINVAL; + + cmd_root.func_id = sss_get_global_func_id(hwdev); + if (rq_depth != 0 || sq_depth != 0 || rx_size != 0) { + cmd_root.rx_buf_sz = sss_get_rx_buf_size_level(rx_size); + cmd_root.rq_depth = (u16)ilog2(rq_depth); + cmd_root.sq_depth = (u16)ilog2(sq_depth); + cmd_root.lro_en = 1; + } + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, + &cmd_root, sizeof(cmd_root), &cmd_root, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_root)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set root ctx, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_root.head.state, out_len, channel); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_set_root_ctx); + +int sss_chip_clean_root_ctx(void *hwdev, u16 channel) +{ + return sss_chip_set_root_ctx(hwdev, 0, 0, 0, channel); +} +EXPORT_SYMBOL(sss_chip_clean_root_ctx); + +static int sss_get_fw_ver(struct sss_hwdev *hwdev, + enum sss_fw_ver_type fw_type, u8 *buf, u8 buf_size, u16 channel) +{ + int ret; + struct sss_cmd_get_fw_version cmd_version = {0}; + u16 out_len = sizeof(cmd_version); + + if (!hwdev || !buf) + return -EINVAL; + + cmd_version.fw_type = fw_type; + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_GET_FW_VERSION, + &cmd_version, sizeof(cmd_version), &cmd_version, + &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_version)) { + sdk_err(hwdev->dev_hdl, + "Fail to get fw version, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_version.head.state, out_len, channel); 
+ return -EIO; + } + + ret = snprintf(buf, buf_size, "%s", cmd_version.ver); + if (ret < 0) + return -EINVAL; + + return 0; +} + +int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel) +{ + return sss_get_fw_ver(hwdev, SSS_FW_VER_TYPE_MPU, buf, + buf_size, channel); +} +EXPORT_SYMBOL(sss_get_mgmt_version); + +int sss_chip_set_func_used_state(void *hwdev, + u16 service_type, bool state, u16 channel) +{ + int ret; + struct sss_cmd_func_svc_used_state cmd_state = {0}; + u16 out_len = sizeof(cmd_state); + + if (!hwdev) + return -EINVAL; + + cmd_state.func_id = sss_get_global_func_id(hwdev); + cmd_state.svc_type = service_type; + cmd_state.used_state = !!state; + + ret = sss_sync_send_msg_ch(hwdev, + SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + &cmd_state, sizeof(cmd_state), &cmd_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_state)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set func used state, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n\n", + ret, cmd_state.head.state, out_len, channel); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_set_func_used_state); + +bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability) +{ + struct sss_hwdev *dev = hwdev; + + if (!capability || !hwdev) + return false; + + if (SSS_IS_NIC_TYPE(dev)) { + memcpy(capability, SSS_TO_NIC_CAP(hwdev), sizeof(*capability)); + return true; + } else { + return false; + } +} +EXPORT_SYMBOL(sss_get_nic_capability); + +bool sss_support_nic(void *hwdev) +{ + return (hwdev && SSS_IS_NIC_TYPE((struct sss_hwdev *)hwdev)); +} +EXPORT_SYMBOL(sss_support_nic); + +bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!SSS_IS_PPA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->mgmt_info->svc_cap.ppa_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(sss_support_ppa); + +u16 sss_get_max_sq_num(void *hwdev) +{ + if 
(!hwdev) { + pr_err("Get max sq num: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_MAX_SQ_NUM(hwdev); +} +EXPORT_SYMBOL(sss_get_max_sq_num); + +u8 sss_get_phy_port_id(void *hwdev) +{ + if (!hwdev) { + pr_err("Get phy port id: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_PHY_PORT_ID(hwdev); +} +EXPORT_SYMBOL(sss_get_phy_port_id); + +u16 sss_get_max_vf_num(void *hwdev) +{ + if (!hwdev) { + pr_err("Get max vf num: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_MAX_VF_NUM(hwdev); +} +EXPORT_SYMBOL(sss_get_max_vf_num); + +u16 sss_nic_intr_num(void *hwdev) +{ + struct sss_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sss_hwdev *)hwdev)->hwif; + + return hwif->attr.irq_num; +} +EXPORT_SYMBOL(sss_nic_intr_num); + +int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap) +{ + if (!hwdev) { + pr_err("Get cos valid bitmap: hwdev is NULL\n"); + return -EINVAL; + } + + *func_cos_bitmap = SSS_TO_FUNC_COS_BITMAP(hwdev); + *port_cos_bitmap = SSS_TO_PORT_COS_BITMAP(hwdev); + + return 0; +} +EXPORT_SYMBOL(sss_get_cos_valid_bitmap); + +u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type, + struct sss_irq_desc *alloc_array, u16 alloc_num) +{ + int i; + int j; + u16 need_num = alloc_num; + u16 act_num = 0; + struct sss_irq_info *irq_info = NULL; + struct sss_irq *irq = NULL; + + if (!hwdev || !alloc_array) + return 0; + + irq_info = SSS_TO_IRQ_INFO(hwdev); + irq = irq_info->irq; + + mutex_lock(&irq_info->irq_mutex); + if (irq_info->free_num == 0) { + sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n"); + mutex_unlock(&irq_info->irq_mutex); + return 0; + } + + if (alloc_num > irq_info->free_num) { + sdk_warn(SSS_TO_DEV(hwdev), "Adjust need_num to %u\n", irq_info->free_num); + need_num = irq_info->free_num; + } + + for (i = 0; i < need_num; i++) { + for (j = 0; j < irq_info->total_num; j++) { + if (irq[j].busy != SSS_CFG_FREE) + continue; + + if (irq_info->free_num == 0) { + 
sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n"); + mutex_unlock(&irq_info->irq_mutex); + memset(alloc_array, 0, sizeof(*alloc_array) * alloc_num); + return 0; + } + + irq[j].type = service_type; + irq[j].busy = SSS_CFG_BUSY; + + alloc_array[i].irq_id = irq[j].desc.irq_id; + alloc_array[i].msix_id = irq[j].desc.msix_id; + irq_info->free_num--; + act_num++; + + break; + } + } + + mutex_unlock(&irq_info->irq_mutex); + return act_num; +} +EXPORT_SYMBOL(sss_alloc_irq); + +void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id) +{ + int i; + struct sss_irq_info *irq_info = NULL; + struct sss_irq *irq = NULL; + + if (!hwdev) + return; + + irq_info = SSS_TO_IRQ_INFO(hwdev); + irq = irq_info->irq; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < irq_info->total_num; i++) { + if (irq_id != irq[i].desc.irq_id || + service_type != irq[i].type) + continue; + + if (irq[i].busy == SSS_CFG_FREE) + continue; + + irq[i].busy = SSS_CFG_FREE; + irq_info->free_num++; + if (irq_info->free_num > irq_info->total_num) { + sdk_err(SSS_TO_DEV(hwdev), "Free_num out of range :[0, %u]\n", + irq_info->total_num); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + + if (i >= irq_info->total_num) + sdk_warn(SSS_TO_DEV(hwdev), "Irq %u don`t need to free\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(sss_free_irq); + +void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Register event: hwdev is NULL\n"); + return; + } + + dev->event_handler = callback; + dev->event_handler_data = data; +} +EXPORT_SYMBOL(sss_register_dev_event); + +void sss_unregister_dev_event(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Unregister event: hwdev is NULL\n"); + return; + } + + dev->event_handler = NULL; + dev->event_handler_data = NULL; +} +EXPORT_SYMBOL(sss_unregister_dev_event); + +int 
sss_get_dev_present_flag(const void *hwdev) +{ + return hwdev && !!((struct sss_hwdev *)hwdev)->chip_present_flag; +} +EXPORT_SYMBOL(sss_get_dev_present_flag); + +u8 sss_get_max_pf_num(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_MAX_PF_NUM((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_max_pf_num); + +int sss_get_chip_present_state(void *hwdev, bool *present_state) +{ + if (!hwdev || !present_state) + return -EINVAL; + + *present_state = sss_chip_get_present_state(hwdev); + + return 0; +} +EXPORT_SYMBOL(sss_get_chip_present_state); + +void sss_fault_event_report(void *hwdev, u16 src, u16 level) +{ + if (!hwdev) + return; + + sdk_info(SSS_TO_DEV(hwdev), + "Fault event report, src: %u, level: %u\n", src, level); +} +EXPORT_SYMBOL(sss_fault_event_report); + +int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type, + void *service_adapter) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || !service_adapter || service_type >= SSS_SERVICE_TYPE_MAX) + return -EINVAL; + + if (dev->service_adapter[service_type]) + return -EINVAL; + + dev->service_adapter[service_type] = service_adapter; + + return 0; +} +EXPORT_SYMBOL(sss_register_service_adapter); + +void sss_unregister_service_adapter(void *hwdev, + enum sss_service_type service_type) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || service_type >= SSS_SERVICE_TYPE_MAX) + return; + + dev->service_adapter[service_type] = NULL; +} +EXPORT_SYMBOL(sss_unregister_service_adapter); + +void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || service_type >= SSS_SERVICE_TYPE_MAX) + return NULL; + + return dev->service_adapter[service_type]; +} +EXPORT_SYMBOL(sss_get_service_adapter); + +void sss_do_event_callback(void *hwdev, struct sss_event_info *event) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Event callback: hwdev is NULL\n"); + return; + } + + if (!dev->event_handler) { + 
sdk_info(dev->dev_hdl, "Event callback: handler is NULL\n"); + return; + } + + dev->event_handler(dev->event_handler_data, event); +} +EXPORT_SYMBOL(sss_do_event_callback); + +void sss_update_link_stats(void *hwdev, bool link_state) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (link_state) + atomic_inc(&dev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&dev->hw_stats.link_event_stats.link_down_stats); +} +EXPORT_SYMBOL(sss_update_link_stats); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c new file mode 100644 index 0000000000000..50c45a623a90c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_adapter.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_mgmt_info.h" +#include "sss_hwdev_mgmt_channel.h" +#include "sss_hwdev_cap.h" +#include "sss_hwdev_link.h" +#include "sss_hwdev_io_flush.h" +#include "sss_hwif_init.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" +#include "sss_hwif_mgmt_init.h" + +enum sss_host_mode { + SSS_HOST_MODE_NORMAL = 0, + SSS_HOST_MODE_VM, + SSS_HOST_MODE_BM, + SSS_HOST_MODE_MAX, +}; + +#define SSS_HWDEV_WQ_NAME "sssnic_hardware" +#define SSS_WQ_MAX_REQ 10 + +#define SSS_DETECT_PCIE_LINK_DOWN_RETRY 2 + +#define SSS_CHN_BUSY_TIMEOUT 25 + +#define SSS_HEARTBEAT_TIMER_EXPIRES 5000 +#define SSS_HEARTBEAT_PERIOD 1000 + +#define SSS_GET_PCIE_LINK_STATUS(hwdev) \ + ((hwdev)->heartbeat.pcie_link_down ? 
\ + SSS_EVENT_PCIE_LINK_DOWN : SSS_EVENT_HEART_LOST) + +#define SSS_SET_FUNC_HOST_MODE(hwdev, mode) \ +do { \ + if ((mode) >= SSS_FUNC_MOD_MIN && (mode) <= SSS_FUNC_MOD_MAX) { \ + (hwdev)->func_mode = (mode); \ + } else \ + (hwdev)->func_mode = SSS_FUNC_MOD_NORMAL_HOST; \ +} while (0) + +#define SSS_SYNFW_TIME_PERIOD (60 * 60 * 1000) +#define SSS_CHANNEL_DETECT_PERIOD (5 * 1000) + +#define SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT) + +typedef void (*sss_set_mode_handler_t)(struct sss_hwdev *hwdev); + +static struct sss_hwdev *sss_alloc_hwdev(void) +{ + struct sss_hwdev *hwdev; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return NULL; + + hwdev->chip_fault_stats = vzalloc(SSS_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) { + kfree(hwdev); + return NULL; + } + + return hwdev; +} + +static void sss_free_hwdev(struct sss_hwdev *hwdev) +{ + vfree(hwdev->chip_fault_stats); + kfree(hwdev); +} + +static void sss_init_hwdev_param(struct sss_hwdev *hwdev, + struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + hwdev->adapter_hdl = adapter; + hwdev->pcidev_hdl = pdev; + hwdev->dev_hdl = &pdev->dev; + hwdev->chip_node = adapter->chip_node; + spin_lock_init(&hwdev->channel_lock); +} + +static void sss_set_chip_present_flag(struct sss_hwdev *hwdev, bool present) +{ + hwdev->chip_present_flag = !!present; +} + +static bool sss_is_chip_abnormal(struct sss_hwdev *hwdev) +{ + u32 pcie_status; + + if (!sss_get_dev_present_flag(hwdev)) + return false; + + pcie_status = sss_chip_get_pcie_link_status(hwdev); + if (pcie_status == SSS_PCIE_LINK_DOWN) { + hwdev->heartbeat.pcie_link_down_cnt++; + sdk_warn(hwdev->dev_hdl, "Pcie link down\n"); + if (hwdev->heartbeat.pcie_link_down_cnt >= SSS_DETECT_PCIE_LINK_DOWN_RETRY) { + sss_set_chip_present_flag(hwdev, false); + sss_force_complete_all(hwdev); + hwdev->heartbeat.pcie_link_down = true; + return true; + } + + return false; + } + + if 
(pcie_status != SSS_PCIE_LINK_UP) { + hwdev->heartbeat.heartbeat_lost = true; + return true; + } + + hwdev->heartbeat.pcie_link_down_cnt = 0; + + return false; +} + +static void sss_update_aeq_stat(struct sss_hwdev *hwdev) +{ + if (hwdev->aeq_stat.last_recv_cnt != hwdev->aeq_stat.cur_recv_cnt) { + hwdev->aeq_stat.last_recv_cnt = hwdev->aeq_stat.cur_recv_cnt; + hwdev->aeq_stat.busy_cnt = 0; + } else { + hwdev->aeq_stat.busy_cnt++; + } +} + +static void sss_update_channel_status(struct sss_hwdev *hwdev) +{ + struct sss_card_node *node = hwdev->chip_node; + + if (!node) + return; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_PPF || + !SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) || + atomic_read(&node->channel_timeout_cnt)) + return; + + if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) { + sss_update_aeq_stat(hwdev); + + if (hwdev->aeq_stat.busy_cnt > SSS_CHN_BUSY_TIMEOUT) { + sdk_err(hwdev->dev_hdl, "Detect channel busy\n"); + atomic_inc(&node->channel_timeout_cnt); + } + } +} + +static void sss_heartbeat_timer_handler(struct timer_list *t) +{ + struct sss_hwdev *hwdev = from_timer(hwdev, t, heartbeat.heartbeat_timer); + + if (sss_is_chip_abnormal(hwdev)) { + queue_work(hwdev->workq, &hwdev->heartbeat.lost_work); + } else { + mod_timer(&hwdev->heartbeat.heartbeat_timer, + jiffies + msecs_to_jiffies(SSS_HEARTBEAT_PERIOD)); + } + + sss_update_channel_status(hwdev); +} + +static void sss_heartbeat_lost_handler(struct work_struct *work) +{ + u16 fault_level; + u16 pcie_src; + struct sss_event_info event_info = {0}; + struct sss_hwdev *hwdev = container_of(work, struct sss_hwdev, + heartbeat.lost_work); + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + + if (hwdev->event_handler) { + event_info.type = SSS_GET_PCIE_LINK_STATUS(hwdev); + event_info.service = SSS_EVENT_SRV_COMM; + hwdev->event_handler(hwdev->event_handler_data, &event_info); + } + + if (hwdev->heartbeat.pcie_link_down) { + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + fault_level = 
SSS_FAULT_LEVEL_HOST; + pcie_src = SSS_FAULT_SRC_PCIE_LINK_DOWN; + } else { + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + sss_get_global_func_id(hwdev)); + fault_level = SSS_FAULT_LEVEL_FATAL; + pcie_src = SSS_FAULT_SRC_HOST_HEARTBEAT_LOST; + } + + sss_dump_chip_err_info(hwdev); +} + +static void sss_create_heartbeat_timer(struct sss_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat.heartbeat_timer, sss_heartbeat_timer_handler, 0); + hwdev->heartbeat.heartbeat_timer.expires = + jiffies + msecs_to_jiffies(SSS_HEARTBEAT_TIMER_EXPIRES); + add_timer(&hwdev->heartbeat.heartbeat_timer); + + INIT_WORK(&hwdev->heartbeat.lost_work, sss_heartbeat_lost_handler); +} + +static void sss_destroy_heartbeat_timer(struct sss_hwdev *hwdev) +{ + destroy_work(&hwdev->heartbeat.lost_work); + del_timer_sync(&hwdev->heartbeat.heartbeat_timer); +} + +static void sss_set_bm_host_mode(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif); + + if (host_id == svc_cap->master_host_id) + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_MASTER); + else + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_SLAVE); +} + +static void sss_set_vm_host_mode(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif); + + if (host_id == svc_cap->master_host_id) + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_MASTER); + else + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_SLAVE); +} + +static void sss_set_normal_host_mode(struct sss_hwdev *hwdev) +{ + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); +} + +static int sss_enable_multi_host(struct sss_hwdev *hwdev) +{ + if (!SSS_IS_PPF(hwdev) || !SSS_IS_MULTI_HOST(hwdev)) + return 0; + + if (SSS_IS_SLAVE_HOST(hwdev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), true); + + return 0; +} + +static int 
sss_disable_multi_host(struct sss_hwdev *hwdev) +{ + if (!SSS_IS_PPF(hwdev) || !SSS_IS_MULTI_HOST(hwdev)) + return 0; + + if (SSS_IS_SLAVE_HOST(hwdev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false); + + return 0; +} + +static int sss_init_host_mode(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + sss_set_mode_handler_t handler[SSS_HOST_MODE_MAX] = { + sss_set_normal_host_mode, + sss_set_vm_host_mode, + sss_set_bm_host_mode + }; + + if (SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) { + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); + return 0; + } + + if (svc_cap->srv_multi_host_mode >= SSS_HOST_MODE_MAX) { + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); + return 0; + } + + handler[svc_cap->srv_multi_host_mode](hwdev); + + ret = sss_enable_multi_host(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init function mode\n"); + return ret; + } + + return 0; +} + +static void sss_deinit_host_mode(struct sss_hwdev *hwdev) +{ + sss_disable_multi_host(hwdev); +} + +static u64 sss_get_real_time(void) +{ + struct timeval val = {0}; + + do_gettimeofday(&val); + + return (u64)val.tv_sec * MSEC_PER_SEC + + (u64)val.tv_usec / USEC_PER_MSEC; +} + +static void sss_auto_sync_time_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct sss_hwdev *hwdev = container_of(delay, + struct sss_hwdev, sync_time_task); + int ret; + + ret = sss_chip_sync_time(hwdev, sss_get_real_time()); + if (ret != 0) + sdk_err(hwdev->dev_hdl, + "Fail to sync UTC time to firmware, errno:%d.\n", ret); + + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD)); +} + +static void sss_auto_channel_detect_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct sss_hwdev *hwdev = container_of(delay, + struct sss_hwdev, channel_detect_task); + struct sss_card_node *chip_node 
= NULL; + + sss_chip_comm_channel_detect(hwdev); + + chip_node = hwdev->chip_node; + if (!atomic_read(&chip_node->channel_timeout_cnt)) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD)); +} + +static void sss_hwdev_init_work(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + INIT_DELAYED_WORK(&hwdev->sync_time_task, sss_auto_sync_time_work); + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD)); + + if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + INIT_DELAYED_WORK(&hwdev->channel_detect_task, + sss_auto_channel_detect_work); + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD)); + } +} + +static void sss_hwdev_deinit_work(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + hwdev->features[0] &= ~(SSS_COMM_F_CHANNEL_DETECT); + cancel_delayed_work_sync(&hwdev->channel_detect_task); + } + + cancel_delayed_work_sync(&hwdev->sync_time_task); +} + +int sss_init_hwdev(struct sss_pci_adapter *adapter) +{ + struct sss_hwdev *hwdev; + int ret; + + hwdev = sss_alloc_hwdev(); + if (!hwdev) + return -ENOMEM; + + sss_init_hwdev_param(hwdev, adapter); + adapter->hwdev = hwdev; + + ret = sss_hwif_init(adapter); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwif\n"); + goto init_hwif_err; + } + + sss_set_chip_present_flag(hwdev, true); + + hwdev->workq = alloc_workqueue(SSS_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, SSS_WQ_MAX_REQ); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Fail to alloc hardware workq\n"); + goto alloc_workq_err; + } + + sss_create_heartbeat_timer(hwdev); + + ret = sss_init_mgmt_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt info\n"); + goto init_mgmt_info_err; + } + + ret = sss_init_mgmt_channel(hwdev); + if (ret != 0) { + 
sdk_err(hwdev->dev_hdl, "Fail to init mgmt channel\n"); + goto init_mgmt_channel_err; + } + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + ret = sss_init_devlink(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init devlink\n"); + goto init_devlink_err; + } +#endif + + ret = sss_init_capability(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init capability\n"); + goto init_cap_err; + } + + ret = sss_init_host_mode(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init capability\n"); + goto init_multi_host_fail; + } + + sss_hwdev_init_work(hwdev); + + ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD, + hwdev->features, SSS_MAX_FEATURE_QWORD); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set comm features\n"); + goto set_feature_err; + } + + return 0; + +set_feature_err: + sss_hwdev_deinit_work(hwdev); + + sss_deinit_host_mode(hwdev); +init_multi_host_fail: + sss_deinit_capability(hwdev); + +init_cap_err: +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + sss_deinit_devlink(hwdev); + +init_devlink_err: +#endif + sss_deinit_mgmt_channel(hwdev); + +init_mgmt_channel_err: + sss_deinit_mgmt_info(hwdev); + +init_mgmt_info_err: + sss_destroy_heartbeat_timer(hwdev); + destroy_workqueue(hwdev->workq); + +alloc_workq_err: + sss_hwif_deinit(hwdev); + +init_hwif_err: + sss_free_hwdev(hwdev); + adapter->hwdev = NULL; + + return -EFAULT; +} + +void sss_deinit_hwdev(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + u64 drv_features[SSS_MAX_FEATURE_QWORD] = {0}; + + sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD, + drv_features, SSS_MAX_FEATURE_QWORD); + + sss_hwdev_deinit_work(dev); + + if (SSS_IS_MULTI_HOST(dev)) + sss_disable_multi_host(dev); + + sss_hwdev_flush_io(dev, SSS_CHANNEL_COMM); + + sss_deinit_capability(dev); + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + sss_deinit_devlink(dev); +#endif + + sss_deinit_mgmt_channel(dev); + + sss_deinit_mgmt_info(dev); + sss_destroy_heartbeat_timer(hwdev); + 
destroy_workqueue(dev->workq); + + sss_hwif_deinit(dev); + sss_free_hwdev(dev); +} + +void sss_hwdev_stop(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + sss_set_chip_present_flag(hwdev, false); + sdk_info(dev->dev_hdl, "Set card absent\n"); + sss_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} + +void sss_hwdev_detach(void *hwdev) +{ + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) { + sss_set_chip_present_flag(hwdev, false); + sss_force_complete_all(hwdev); + } +} + +void sss_hwdev_shutdown(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (SSS_IS_SLAVE_HOST(dev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h new file mode 100644 index 0000000000000..43f35f29588c7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_INIT_H +#define SSS_HWDEV_INIT_H + +#include "sss_adapter.h" + +int sss_init_hwdev(struct sss_pci_adapter *adapter); +void sss_deinit_hwdev(void *hwdev); +void sss_hwdev_detach(void *hwdev); +void sss_hwdev_stop(void *hwdev); +void sss_hwdev_shutdown(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c new file mode 100644 index 0000000000000..aeb2a64d758b4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_ctrlq_init.h" +#include 
"sss_hwif_api.h" +#include "sss_hwif_mbx.h" +#include "sss_common.h" + +#define SSS_FLR_TIMEOUT 1000 +#define SSS_FLR_TIMEOUT_ONCE 10000 + +static enum sss_process_ret sss_check_flr_finish_handler(void *priv_data) +{ + struct sss_hwif *hwif = priv_data; + enum sss_pf_status status; + + status = sss_chip_get_pf_status(hwif); + if (status == SSS_PF_STATUS_FLR_FINISH_FLAG) { + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_ACTIVE_FLAG); + return SSS_PROCESS_OK; + } + + return SSS_PROCESS_DOING; +} + +static int sss_wait_for_flr_finish(struct sss_hwif *hwif) +{ + return sss_check_handler_timeout(hwif, sss_check_flr_finish_handler, + SSS_FLR_TIMEOUT, SSS_FLR_TIMEOUT_ONCE); +} + +static int sss_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (sss_get_dev_present_flag(hwdev) == 0) + return -EPERM; + + return sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size, channel); +} + +static int sss_chip_flush_doorbell(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + struct sss_cmd_clear_doorbell clear_db = {0}; + u16 out_len = sizeof(clear_db); + int ret; + + clear_db.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FLUSH_DOORBELL, + &clear_db, sizeof(clear_db), + &clear_db, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &clear_db)) { + sdk_warn(hwdev->dev_hdl, + "Fail to flush doorbell, ret: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + ret, clear_db.head.state, out_len, channel); + if (ret == 0) + return -EFAULT; + } + + return ret; +} + +static int sss_chip_flush_resource(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + struct sss_cmd_clear_resource clr_res = {0}; + int ret; + + clr_res.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + ret = sss_msg_to_mgmt_no_ack(hwdev, SSS_MOD_TYPE_COMM, + SSS_COMM_MGMT_CMD_START_FLUSH, &clr_res, + sizeof(clr_res), 
channel); + if (ret != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to notice flush message, ret: %d, channel: 0x%x\n", + ret, channel); + } + + return ret; +} + +int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + int err; + int ret = 0; + + if (hwdev->chip_present_flag == 0) + return 0; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + msleep(100); + + err = sss_wait_ctrlq_stop(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to wait ctrlq stop\n"); + ret = err; + } + + sss_chip_disable_doorbell(hwif); + + err = sss_chip_flush_doorbell(hwdev, channel); + if (err != 0) + ret = err; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_FLR_START_FLAG); + else + msleep(100); + + err = sss_chip_flush_resource(hwdev, channel); + if (err != 0) + ret = err; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) { + err = sss_wait_for_flr_finish(hwif); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + } + + sss_chip_enable_doorbell(hwif); + + err = sss_reinit_ctrlq_ctx(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to reinit ctrlq ctx\n"); + ret = err; + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h new file mode 100644 index 0000000000000..4b15cd0d23f6e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_IO_FLUSH_H +#define SSS_HWDEV_IO_FLUSH_H + +#include "sss_hwdev.h" + +int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c new file mode 100644 index 0000000000000..7b51496e27a95 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_hwdev_link.h" +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#include "sss_hw_common.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" + +#define SSS_FW_MAGIC_NUM 0x5a5a1100 +#define SSS_FW_IMAGE_HEAD_SIZE 4096 +#define SSS_FW_FRAGMENT_MAX_LEN 1536 +#define SSS_FW_CFG_DEFAULT_INDEX 0xFF +#define SSS_FW_UPDATE_MGMT_TIMEOUT 3000000U +#define SSS_FW_TYPE_MAX_NUM 0x40 +#define SSS_FW_CFG_MAX_INDEX 8 +#define SSS_FW_CFG_MIN_INDEX 1 + +#ifndef DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT +#define DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT BIT(0) +#endif + +enum sss_devlink_param_id { + SSS_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + SSS_DEVLINK_PARAM_ID_ACTIVATE_FW, + SSS_DEVLINK_PARAM_ID_SWITCH_CFG, +}; + +enum sss_firmware_type { + SSS_UP_FW_UPDATE_MIN_TYPE1 = 0x0, + SSS_UP_FW_UPDATE_UP_TEXT = 0x0, + SSS_UP_FW_UPDATE_UP_DATA = 0x1, + SSS_UP_FW_UPDATE_UP_DICT = 0x2, + SSS_UP_FW_UPDATE_TILE_PCPTR = 0x3, + SSS_UP_FW_UPDATE_TILE_TEXT = 0x4, + SSS_UP_FW_UPDATE_TILE_DATA = 0x5, + SSS_UP_FW_UPDATE_TILE_DICT = 0x6, + SSS_UP_FW_UPDATE_PPE_STATE = 0x7, + SSS_UP_FW_UPDATE_PPE_BRANCH = 0x8, + SSS_UP_FW_UPDATE_PPE_EXTACT = 0x9, + SSS_UP_FW_UPDATE_MAX_TYPE1 = 0x9, + SSS_UP_FW_UPDATE_CFG0 = 0xa, + SSS_UP_FW_UPDATE_CFG1 = 0xb, + SSS_UP_FW_UPDATE_CFG2 = 0xc, + SSS_UP_FW_UPDATE_CFG3 = 0xd, + SSS_UP_FW_UPDATE_MAX_TYPE1_CFG = 0xd, + + SSS_UP_FW_UPDATE_MIN_TYPE2 = 0x14, + SSS_UP_FW_UPDATE_MAX_TYPE2 = 0x14, + + SSS_UP_FW_UPDATE_MIN_TYPE3 = 0x18, + SSS_UP_FW_UPDATE_PHY = 0x18, + SSS_UP_FW_UPDATE_BIOS = 0x19, + SSS_UP_FW_UPDATE_HLINK_ONE = 0x1a, + SSS_UP_FW_UPDATE_HLINK_TWO = 0x1b, + SSS_UP_FW_UPDATE_HLINK_THR = 0x1c, + SSS_UP_FW_UPDATE_MAX_TYPE3 = 0x1c, + + SSS_UP_FW_UPDATE_MIN_TYPE4 = 0x20, 
+ SSS_UP_FW_UPDATE_L0FW = 0x20, + SSS_UP_FW_UPDATE_L1FW = 0x21, + SSS_UP_FW_UPDATE_BOOT = 0x22, + SSS_UP_FW_UPDATE_SEC_DICT = 0x23, + SSS_UP_FW_UPDATE_HOT_PATCH0 = 0x24, + SSS_UP_FW_UPDATE_HOT_PATCH1 = 0x25, + SSS_UP_FW_UPDATE_HOT_PATCH2 = 0x26, + SSS_UP_FW_UPDATE_HOT_PATCH3 = 0x27, + SSS_UP_FW_UPDATE_HOT_PATCH4 = 0x28, + SSS_UP_FW_UPDATE_HOT_PATCH5 = 0x29, + SSS_UP_FW_UPDATE_HOT_PATCH6 = 0x2a, + SSS_UP_FW_UPDATE_HOT_PATCH7 = 0x2b, + SSS_UP_FW_UPDATE_HOT_PATCH8 = 0x2c, + SSS_UP_FW_UPDATE_HOT_PATCH9 = 0x2d, + SSS_UP_FW_UPDATE_HOT_PATCH10 = 0x2e, + SSS_UP_FW_UPDATE_HOT_PATCH11 = 0x2f, + SSS_UP_FW_UPDATE_HOT_PATCH12 = 0x30, + SSS_UP_FW_UPDATE_HOT_PATCH13 = 0x31, + SSS_UP_FW_UPDATE_HOT_PATCH14 = 0x32, + SSS_UP_FW_UPDATE_HOT_PATCH15 = 0x33, + SSS_UP_FW_UPDATE_HOT_PATCH16 = 0x34, + SSS_UP_FW_UPDATE_HOT_PATCH17 = 0x35, + SSS_UP_FW_UPDATE_HOT_PATCH18 = 0x36, + SSS_UP_FW_UPDATE_HOT_PATCH19 = 0x37, + SSS_UP_FW_UPDATE_MAX_TYPE4 = 0x37, + + SSS_UP_FW_UPDATE_MIN_TYPE5 = 0x3a, + SSS_UP_FW_UPDATE_OPTION_ROM = 0x3a, + SSS_UP_FW_UPDATE_MAX_TYPE5 = 0x3a, + + SSS_UP_FW_UPDATE_MIN_TYPE6 = 0x3e, + SSS_UP_FW_UPDATE_MAX_TYPE6 = 0x3e, + + SSS_UP_FW_UPDATE_MIN_TYPE7 = 0x40, + SSS_UP_FW_UPDATE_MAX_TYPE7 = 0x40, +}; + +#define SSS_IMAGE_MPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_UP_TEXT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_UP_DATA) | \ + BIT_ULL(SSS_UP_FW_UPDATE_UP_DICT)) + +#define SSS_IMAGE_NPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_TILE_PCPTR) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_TEXT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_DATA) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_DICT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_STATE) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_BRANCH) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_EXTACT)) + +#define SSS_IMAGE_COLD_ALL_IN (SSS_IMAGE_MPU_ALL_IN | SSS_IMAGE_NPU_ALL_IN) + +#define SSS_IMAGE_CFG_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_CFG0) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG1) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG2) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG3)) + +#define SSS_CHECK_IMAGE_INTEGRATY(mask) \ + (((mask) & 
SSS_IMAGE_COLD_ALL_IN) == SSS_IMAGE_COLD_ALL_IN && \ + ((mask) & SSS_IMAGE_CFG_ALL_IN) != 0) + +#define SSS_LINK_HWDEV(link) \ + ((struct sss_hwdev *)((struct sss_devlink *)devlink_priv(link))->hwdev) + +struct sss_firmware_section { + u32 section_len; + u32 section_offset; + u32 section_version; + u32 section_type; + u32 section_crc; + u32 section_flag; +}; + +struct sss_firmware_image { + u32 fw_version; + u32 fw_len; + u32 fw_magic; + struct { + u32 section_cnt : 16; + u32 rsvd : 16; + } fw_info; + struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM]; + u32 device_id; + u32 rsvd0[101]; + u32 rsvd1[534]; + u32 bin_data; +}; + +struct sss_host_image { + struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM]; + struct { + u32 total_len; + u32 fw_version; + } image_info; + u32 section_cnt; + u32 device_id; +}; + +struct sss_cmd_update_firmware { + struct sss_mgmt_msg_head head; + + struct { + u32 sl : 1; + u32 sf : 1; + u32 flag : 1; + u32 bit_signed : 1; + u32 reserved : 12; + u32 fragment_len : 16; + } ctl_info; + + struct { + u32 section_crc; + u32 section_type; + } section_info; + + u32 total_len; + u32 section_len; + u32 section_version; + u32 section_offset; + u32 data[384]; +}; + +struct sss_cmd_activate_firmware { + struct sss_mgmt_msg_head head; + u8 index; /* 0 ~ 7 */ + u8 data[7]; +}; + +struct sss_cmd_switch_config { + struct sss_mgmt_msg_head head; + u8 index; /* 0 ~ 7 */ + u8 data[7]; +}; + +static bool sss_check_image_valid(struct sss_hwdev *hwdev, + struct sss_firmware_image *image, u32 image_size) +{ + u32 i; + u32 length = 0; + u32 cnt; + + if (image->fw_magic != SSS_FW_MAGIC_NUM) { + sdk_err(hwdev->dev_hdl, "Err fw magic: 0x%x read from file\n", image->fw_magic); + return false; + } + + cnt = image->fw_info.section_cnt; + if (cnt > SSS_FW_TYPE_MAX_NUM) { + sdk_err(hwdev->dev_hdl, "Err fw type num: 0x%x read from file\n", cnt); + return false; + } + + for (i = 0; i < cnt; i++) + length += image->section_info[i].section_len; + + if 
(length != image->fw_len || + (u32)(image->fw_len + SSS_FW_IMAGE_HEAD_SIZE) != image_size) { + sdk_err(hwdev->dev_hdl, "Err data size: 0x%x read from file\n", length); + return false; + } + + return true; +} + +static void sss_init_host_image(struct sss_host_image *host_image, + struct sss_firmware_image *image) +{ + int i; + + for (i = 0; i < image->fw_info.section_cnt; i++) { + memcpy(&host_image->section_info[i], &image->section_info[i], + sizeof(image->section_info[i])); + } + + host_image->image_info.fw_version = image->fw_version; + host_image->section_cnt = image->fw_info.section_cnt; + host_image->device_id = image->device_id; + host_image->image_info.total_len = image->fw_len; +} + +static bool sss_check_image_integrity(struct sss_hwdev *hwdev, + struct sss_host_image *host_image) +{ + u32 i; + u32 section_type; + u64 mask = 0; + + for (i = 0; i < host_image->section_cnt; i++) { + section_type = host_image->section_info[i].section_type; + if (mask & (1ULL << section_type)) { + sdk_err(hwdev->dev_hdl, "Duplicate section type: %u\n", section_type); + return false; + } + mask |= (1ULL << section_type); + } + + if (SSS_CHECK_IMAGE_INTEGRATY(mask)) + return true; + + sdk_err(hwdev->dev_hdl, + "Fail to check file integrity, valid: 0x%llx, current: 0x%llx\n", + (SSS_IMAGE_COLD_ALL_IN | SSS_IMAGE_CFG_ALL_IN), mask); + + return false; +} + +static bool sss_check_image_device_id(struct sss_hwdev *hwdev, u32 dev_id) +{ + struct sss_cmd_board_info info = {0}; + + if (sss_chip_get_board_info(hwdev, &info.info) != 0) { + sdk_err(hwdev->dev_hdl, "Fail to get board info\n"); + return false; + } + + if (dev_id == info.info.board_type) + return true; + + sdk_err(hwdev->dev_hdl, + "The image device type: 0x%x don't match the fw dev id: 0x%x\n", + dev_id, info.info.board_type); + + return false; +} + +static void sss_init_update_cmd_param(struct sss_cmd_update_firmware *cmd_update, + struct sss_firmware_section *info, int remain, + u32 send_offset) +{ + 
cmd_update->ctl_info.sl = (remain <= SSS_FW_FRAGMENT_MAX_LEN) ? true : false; + cmd_update->ctl_info.sf = (remain == info->section_len) ? true : false; + cmd_update->ctl_info.bit_signed = info->section_flag & 0x1; + cmd_update->ctl_info.fragment_len = min(remain, SSS_FW_FRAGMENT_MAX_LEN); + + cmd_update->section_info.section_crc = info->section_crc; + cmd_update->section_info.section_type = info->section_type; + + cmd_update->section_version = info->section_version; + cmd_update->section_len = info->section_len; + cmd_update->section_offset = send_offset; +} + +static int sss_chip_update_firmware(struct sss_hwdev *hwdev, + struct sss_cmd_update_firmware *cmd_update) +{ + int ret; + u16 out_len = sizeof(*cmd_update); + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, + SSS_COMM_MGMT_CMD_UPDATE_FW, cmd_update, sizeof(*cmd_update), + cmd_update, &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_update)) { + sdk_err(hwdev->dev_hdl, + "Fail to update fw, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_update->head.state, out_len); + return (cmd_update->head.state != 0) ? 
+ cmd_update->head.state : -EIO; + } + + return 0; +} + +static int sss_update_firmware(struct sss_hwdev *hwdev, const u8 *data, + struct sss_host_image *host_image) +{ + int ret; + int remain; + u32 i; + u32 send_offset; + u32 offset; + bool flag = false; + struct sss_cmd_update_firmware *cmd_update = NULL; + + cmd_update = kzalloc(sizeof(*cmd_update), GFP_KERNEL); + if (!cmd_update) + return -ENOMEM; + + for (i = 0; i < host_image->section_cnt; i++) { + offset = host_image->section_info[i].section_offset; + remain = (int)(host_image->section_info[i].section_len); + send_offset = 0; + + while (remain > 0) { + if (flag) { + cmd_update->total_len = 0; + } else { + cmd_update->total_len = host_image->image_info.total_len; + flag = true; + } + + sss_init_update_cmd_param(cmd_update, &host_image->section_info[i], + remain, send_offset); + + memcpy(cmd_update->data, + ((data + SSS_FW_IMAGE_HEAD_SIZE) + offset) + send_offset, + cmd_update->ctl_info.fragment_len); + + ret = sss_chip_update_firmware(hwdev, cmd_update); + if (ret != 0) { + kfree(cmd_update); + return ret; + } + + send_offset += cmd_update->ctl_info.fragment_len; + remain = (int)(host_image->section_info[i].section_len - send_offset); + } + } + + kfree(cmd_update); + + return 0; +} + +static int sss_flash_update_notify(struct devlink *devlink, + const struct firmware *fw, struct sss_host_image *image, + struct netlink_ext_ack *extack) +{ + struct sss_devlink *devlink_dev = devlink_priv(devlink); + struct sss_hwdev *hwdev = devlink_dev->hwdev; + int ret; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_begin_notify(devlink); +#endif + devlink_flash_update_status_notify(devlink, "Flash firmware begin", NULL, 0, 0); + sdk_info(hwdev->dev_hdl, "Flash firmware begin\n"); + ret = sss_update_firmware(hwdev, fw->data, image); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to flash firmware, ret: %d\n", ret); + NL_SET_ERR_MSG_MOD(extack, "Fail to flash firmware"); + 
devlink_flash_update_status_notify(devlink, "Fail to flash firmware", NULL, 0, 0); + } else { + sdk_info(hwdev->dev_hdl, "Flash firmware end\n"); + devlink_flash_update_status_notify(devlink, "Flash firmware end", NULL, 0, 0); + } +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_end_notify(devlink); +#endif + + return ret; +} + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM +static int sss_devlink_flash_update(struct devlink *link, const char *file_name, + const char *component, struct netlink_ext_ack *extack) +#else +static int sss_devlink_flash_update(struct devlink *link, + struct devlink_flash_update_params *param, + struct netlink_ext_ack *extack) +#endif +{ + int ret; + struct sss_host_image *host_image = NULL; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + const struct firmware *fw = NULL; +#else + const struct firmware *fw = param->fw; +#endif + + host_image = kzalloc(sizeof(*host_image), GFP_KERNEL); + if (!host_image) { + ret = -ENOMEM; + goto alloc_host_image_err; + } + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + ret = request_firmware_direct(&fw, file_name, hwdev->dev_hdl); +#else +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + ret = request_firmware_direct(&fw, param->file_name, hwdev->dev_hdl); +#else + ret = 0; +#endif +#endif + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to request firmware\n"); + goto request_fw_err; + } + + if (!sss_check_image_valid(hwdev, (struct sss_firmware_image *)fw->data, + (u32)(fw->size))) { + sdk_err(hwdev->dev_hdl, "Fail to check image valid\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image valid"); + ret = -EINVAL; + goto check_image_err; + } + + sss_init_host_image(host_image, (struct sss_firmware_image *)fw->data); + + if (!sss_check_image_integrity(hwdev, host_image)) { + sdk_err(hwdev->dev_hdl, "Fail to check image integrity\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image integrity"); + ret = -EINVAL; + 
goto check_image_err; + } + + if (!sss_check_image_device_id(hwdev, host_image->device_id)) { + sdk_err(hwdev->dev_hdl, "Fail to check image device id\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image device id"); + ret = -EINVAL; + goto check_image_err; + } + + ret = sss_flash_update_notify(link, fw, host_image, extack); + +check_image_err: +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + release_firmware(fw); +#endif + +request_fw_err: + kfree(host_image); + +alloc_host_image_err: + link_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + link_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + + return ret; +} + +static const struct devlink_ops g_devlink_ops = { +#ifdef DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS + .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT, +#endif + .flash_update = sss_devlink_flash_update, +}; + +static int sss_chip_activate_firmware(struct sss_hwdev *hwdev, u8 cfg_num) +{ + int ret; + struct sss_cmd_activate_firmware cmd_activate = {0}; + u16 out_len = sizeof(cmd_activate); + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF && + SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return -EOPNOTSUPP; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + cmd_activate.index = cfg_num; + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_ACTIVE_FW, + &cmd_activate, sizeof(cmd_activate), &cmd_activate, + &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_activate)) { + sdk_err(hwdev->dev_hdl, + "Fail to activate firmware, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_activate.head.state, out_len); + return (cmd_activate.head.state != 0) ? 
+ cmd_activate.head.state : -EIO; + } + + return 0; +} + +static int sss_devlink_get_activate_fw_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + struct sss_devlink *link_dev = devlink_priv(link); + + param_ctx->val.vu8 = link_dev->active_cfg_id; + + return 0; +} + +static int sss_devlink_set_activate_fw_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + int ret; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + + link_dev->active_cfg_id = param_ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Begin activate firmware\n"); + + ret = sss_chip_activate_firmware(hwdev, link_dev->active_cfg_id - 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to activate firmware, ret: %d\n", ret); + return ret; + } + + sdk_info(hwdev->dev_hdl, "End activate firmware\n"); + + return 0; +} + +static int sss_chip_switch_config(struct sss_hwdev *hwdev, u8 cfg_num) +{ + int ret; + struct sss_cmd_switch_config cmd_switch = {0}; + u16 out_len = sizeof(cmd_switch); + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF) + return -EOPNOTSUPP; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + cmd_switch.index = cfg_num; + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_SWITCH_CFG, + &cmd_switch, sizeof(cmd_switch), &cmd_switch, + &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_switch)) { + sdk_err(hwdev->dev_hdl, + "Fail to switch cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_switch.head.state, out_len); + return (cmd_switch.head.state != 0) ? 
+ cmd_switch.head.state : -EIO; + } + + return 0; +} + +static int sss_devlink_get_switch_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + struct sss_devlink *link_dev = devlink_priv(link); + + param_ctx->val.vu8 = link_dev->switch_cfg_id; + + return 0; +} + +static int sss_devlink_set_switch_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + int ret; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + + link_dev->switch_cfg_id = param_ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Begin switch cfg"); + + ret = sss_chip_switch_config(hwdev, link_dev->switch_cfg_id - 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to switch cfg, ret: %d\n", ret); + return ret; + } + + sdk_info(hwdev->dev_hdl, "End Switch cfg\n"); + + return 0; +} + +static int sss_devlink_validate_firmware_config(struct devlink *link, u32 id, + union devlink_param_value param_val, + struct netlink_ext_ack *ext_ack) +{ + struct sss_hwdev *hwdev = SSS_LINK_HWDEV(link); + + if (param_val.vu8 < SSS_FW_CFG_MIN_INDEX || + param_val.vu8 > SSS_FW_CFG_MAX_INDEX) { + sdk_err(hwdev->dev_hdl, "Firmware cfg id out of range [1,8]\n"); + NL_SET_ERR_MSG_MOD(ext_ack, "Firmware cfg id out of range [1,8]\n"); + return -ERANGE; + } + + return 0; +} + +static const struct devlink_param g_devlink_param[] = { + DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_ACTIVATE_FW, + "activate_fw", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + sss_devlink_get_activate_fw_config, + sss_devlink_set_activate_fw_config, + sss_devlink_validate_firmware_config), + DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_SWITCH_CFG, + "switch_cfg", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + sss_devlink_get_switch_config, + sss_devlink_set_switch_config, + sss_devlink_validate_firmware_config), +}; + +int sss_init_devlink(struct sss_hwdev *hwdev) +{ + int ret; + struct devlink *link = NULL; + struct 
pci_dev *pdev = hwdev->pcidev_hdl; + +#ifdef HAS_DEVLINK_ALLOC_SETS_DEV + link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink), &pdev->dev); +#else + link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink)); +#endif + if (!link) { + sdk_err(hwdev->dev_hdl, "Fail to alloc devlink\n"); + return -ENOMEM; + } + + hwdev->devlink_dev = devlink_priv(link); + hwdev->devlink_dev->hwdev = hwdev; + hwdev->devlink_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + hwdev->devlink_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + +#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED + ret = devlink_params_register(devlink, g_devlink_param, + ARRAY_SIZE(g_devlink_param)); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n"); + goto register_err; + } +#endif + +#ifdef NO_DEVLINK_REGISTER_SETS_DEV +#ifdef DEVLINK_REGISTER_RETURN_VOID + devlink_register(link); + ret = 0; +#else + ret = devlink_register(link); +#endif + +#else + ret = devlink_register(link, &pdev->dev); +#endif + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to register devlink\n"); +#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED + devlink_params_unregister(devlink, g_devlink_param, + ARRAY_SIZE(g_devlink_param)); +#endif + goto register_err; + } + +#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED + ret = devlink_params_register(link, g_devlink_param, + ARRAY_SIZE(g_devlink_param)); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n"); + goto register_param_err; + } +#endif + devlink_params_publish(link); + + return 0; + +#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED +register_param_err: + devlink_unregister(link); +#endif + +register_err: + devlink_free(link); + + return -EFAULT; +} + +void sss_deinit_devlink(struct sss_hwdev *hwdev) +{ + struct devlink *link = priv_to_devlink(hwdev->devlink_dev); + + devlink_params_unpublish(link); + devlink_params_unregister(link, g_devlink_param, + ARRAY_SIZE(g_devlink_param)); + devlink_unregister(link); + 
devlink_free(link); +} +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h new file mode 100644 index 0000000000000..32714685d1612 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_LINK_H +#define SSS_HWDEV_LINK_H + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_hw_mbx_msg.h" + +int sss_init_devlink(struct sss_hwdev *hwdev); +void sss_deinit_devlink(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c new file mode 100644 index 0000000000000..42f0c1fa15abb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_mgmt_channel.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_hwif_aeq.h" +#include "sss_hwif_export.h" +#include "sss_hwif_api.h" +#include "sss_hwif_adm_init.h" +#include "sss_hwif_mgmt_init.h" +#include "sss_hwif_ctrlq_init.h" +#include "sss_csr.h" + +#define SSS_DRV_FEATURE_DEF \ + (SSS_COMM_F_ADM | SSS_COMM_F_CLP | SSS_COMM_F_MBX_SEGMENT | \ + SSS_COMM_F_CTRLQ_NUM | SSS_COMM_F_VIRTIO_VQ_SIZE) + +#define SSS_COMM_SUPPORT_CLP(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CLP) + +#define SSS_DMA_ATTR_INDIR_ID_SHIFT 0 +#define SSS_DMA_ATTR_INDIR_ID_MASK 0x3FF + +#define SSS_SET_DMA_ATTR_INDIR_ID(val, member) \ + (((u32)(val) & SSS_DMA_ATTR_INDIR_##member##_MASK) << \ + SSS_DMA_ATTR_INDIR_##member##_SHIFT) + +#define 
SSS_CLEAR_DMA_ATTR_INDIR_ID(val, member) \ + ((val) & (~(SSS_DMA_ATTR_INDIR_##member##_MASK \ + << SSS_DMA_ATTR_INDIR_##member##_SHIFT))) + +#define SSS_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define SSS_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define SSS_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define SSS_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define SSS_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define SSS_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define SSS_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define SSS_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define SSS_SET_DMA_ATTR_ENTRY(val, member) \ + (((u32)(val) & SSS_DMA_ATTR_ENTRY_##member##_MASK) << \ + SSS_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define SSS_PCIE_ST_DISABLE 0 +#define SSS_PCIE_AT_DISABLE 0 +#define SSS_PCIE_PH_DISABLE 0 + +#define SSS_PCIE_MSIX_ATTR_ENTRY 0 + +#define SSS_PCIE_SNOOP 0 +#define SSS_PCIE_NO_SNOOP 1 + +#define SSS_PCIE_TPH_DISABLE 0 +#define SSS_PCIE_TPH_ENABLE 1 + +#define SSS_FAULT_LEVEL_STR_FATAL "fatal" +#define SSS_FAULT_LEVEL_STR_RESET "reset" +#define SSS_FAULT_LEVEL_STR_HOST "host" +#define SSS_FAULT_LEVEL_STR_FLR "flr" +#define SSS_FAULT_LEVEL_STR_GENERAL "general" +#define SSS_FAULT_LEVEL_STR_SUGGESTION "suggestion" +#define SSS_FAULT_LEVEL_STR_UNKNOWN "Unknown" + +#define SSS_FAULT_TYPE_STR_CHIP "chip" +#define SSS_FAULT_TYPE_STR_NPU "ucode" +#define SSS_FAULT_TYPE_STR_MEM_RD "mem rd timeout" +#define SSS_FAULT_TYPE_STR_MEM_WR "mem wr timeout" +#define SSS_FAULT_TYPE_STR_REG_RD "reg rd timeout" +#define SSS_FAULT_TYPE_STR_REG_WR "reg wr timeout" +#define SSS_FAULT_TYPE_STR_PHY "phy fault" +#define SSS_FAULT_TYPE_STR_TSENSOR "tsensor fault" +#define SSS_FAULT_TYPE_STR_UNKNOWN "Unknown" + +#define SSS_COMM_RESET_TYPE \ + ((1 << SSS_RESET_TYPE_COMM) | (1 << SSS_RESET_TYPE_COMM_CMD_CH) | \ + (1 << SSS_RESET_TYPE_FLUSH_BIT) | (1 << SSS_RESET_TYPE_MQM) | \ + (1 << SSS_RESET_TYPE_SMF) | (1 << SSS_RESET_TYPE_PF_BW_CFG)) + +#define SSS_FOUR_REG_LEN 16 + +#define 
SSS_X_CSR_INDEX 30 +#define SSS_DUMP_16B_PER_LINE 16 +#define SSS_DUMP_4_VAR_PER_LINE 4 + +typedef void (*sss_print_err_handler_t)(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event); + +typedef void (*sss_mgmt_event_handler_t)(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +struct sss_mgmt_event { + u16 event_type; + sss_mgmt_event_handler_t handler; +}; + +static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +static void sss_show_watchdog_mgmt_register_info(struct sss_hwdev *hwdev, + struct sss_watchdog_info *watchdog_info) +{ + u32 i; + u64 *reg = NULL; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%llx\n", + watchdog_info->cur_time_h, watchdog_info->cur_time_l, + watchdog_info->task_id, watchdog_info->sp); + + sdk_err(hwdev->dev_hdl, + "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%llx, bottom: 0x%llx\n", + watchdog_info->cur_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n", + watchdog_info->pc, watchdog_info->elr, watchdog_info->spsr, watchdog_info->far, + watchdog_info->esr, watchdog_info->xzr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + reg = &watchdog_info->x30; + for (i = 0; i <= SSS_X_CSR_INDEX; i++) + sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n", + SSS_X_CSR_INDEX - i, reg[i]); +} + +static void sss_show_watchdog_stack_info(struct sss_hwdev *hwdev, + struct sss_watchdog_info *watchdog_info) +{ + u32 i; + u32 j; + u32 tmp; + u32 stack_len; + u32 *dump_addr = NULL; + + if (watchdog_info->stack_actlen <= SSS_STACK_DATA_LEN) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = SSS_STACK_DATA_LEN; + } + + 
sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16 bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / SSS_DUMP_16B_PER_LINE); i++) { + dump_addr = (u32 *)(watchdog_info->stack_data + (u32)(i * SSS_DUMP_16B_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 0x1), *(dump_addr + 0x2), *(dump_addr + 0x3)); + } + + tmp = (stack_len % SSS_DUMP_16B_PER_LINE) / SSS_DUMP_4_VAR_PER_LINE; + for (j = 0; j < tmp; j++) { + dump_addr = (u32 *)(watchdog_info->stack_data + + (u32)(i * SSS_DUMP_16B_PER_LINE + j * SSS_DUMP_4_VAR_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } +} + +static void sss_show_watchdog_timeout_info(struct sss_hwdev *hwdev, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct sss_watchdog_info *watchdog_info = buf_in; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld\n", + in_size, sizeof(*watchdog_info)); + return; + } + + sss_show_watchdog_mgmt_register_info(hwdev, watchdog_info); + sss_show_watchdog_stack_info(hwdev, watchdog_info); + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->head.state = 0; +} + +static void sss_watchdog_timeout_event_handler(void *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct sss_event_info event_info = {0}; + struct sss_hwdev *dev = hwdev; + + sss_show_watchdog_timeout_info(dev, buf_in, in_size, buf_out, out_size); + + if (dev->event_handler) { + event_info.type = SSS_EVENT_MGMT_WATCHDOG; + dev->event_handler(dev->event_handler_data, &event_info); + } +} + +static void sss_show_exc_info(struct sss_hwdev *hwdev, struct sss_exc_info *exc_info) +{ + u32 i; + + /* key information */ + sdk_err(hwdev->dev_hdl, "==================== Exception Info Begin ====================\n"); + sdk_err(hwdev->dev_hdl, "Exception CpuTick : 0x%08x 0x%08x\n", + exc_info->cpu_tick.tick_cnt_h, 
exc_info->cpu_tick.tick_cnt_l); + sdk_err(hwdev->dev_hdl, "Exception Cause : %u\n", exc_info->exc_cause); + sdk_err(hwdev->dev_hdl, "Os Version : %s\n", exc_info->os_ver); + sdk_err(hwdev->dev_hdl, "App Version : %s\n", exc_info->app_ver); + sdk_err(hwdev->dev_hdl, "CPU Type : 0x%08x\n", exc_info->cpu_type); + sdk_err(hwdev->dev_hdl, "CPU ID : 0x%08x\n", exc_info->cpu_id); + sdk_err(hwdev->dev_hdl, "Thread Type : 0x%08x\n", exc_info->thread_type); + sdk_err(hwdev->dev_hdl, "Thread ID : 0x%08x\n", exc_info->thread_id); + sdk_err(hwdev->dev_hdl, "Byte Order : 0x%08x\n", exc_info->byte_order); + sdk_err(hwdev->dev_hdl, "Nest Count : 0x%08x\n", exc_info->nest_cnt); + sdk_err(hwdev->dev_hdl, "Fatal Error Num : 0x%08x\n", exc_info->fatal_errno); + sdk_err(hwdev->dev_hdl, "Current SP : 0x%016llx\n", exc_info->uw_sp); + sdk_err(hwdev->dev_hdl, "Stack Bottom : 0x%016llx\n", exc_info->stack_bottom); + + /* register field */ + sdk_err(hwdev->dev_hdl, "Register contents when exception occur.\n"); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TTBR0", + exc_info->reg_info.ttbr0, "TTBR1", exc_info->reg_info.ttbr1); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TCR", + exc_info->reg_info.tcr, "MAIR", exc_info->reg_info.mair); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "SCTLR", + exc_info->reg_info.sctlr, "VBAR", exc_info->reg_info.vbar); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "CURRENTE1", + exc_info->reg_info.current_el, "SP", exc_info->reg_info.sp); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "ELR", + exc_info->reg_info.elr, "SPSR", exc_info->reg_info.spsr); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "FAR", + exc_info->reg_info.far_r, "ESR", exc_info->reg_info.esr); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx\n", "XZR", exc_info->reg_info.xzr); + + for (i = 0; i < SSS_XREGS_NUM - 1; i += 0x2) + sdk_err(hwdev->dev_hdl, 
"XREGS[%02u]%-5s: 0x%016llx \t XREGS[%02u]%-5s: 0x%016llx", + i, " ", exc_info->reg_info.xregs[i], + (u32)(i + 0x1U), " ", exc_info->reg_info.xregs[(u32)(i + 0x1U)]); + + sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t ", SSS_XREGS_NUM - 1, " ", + exc_info->reg_info.xregs[SSS_XREGS_NUM - 1]); +} + +static void sss_lastword_report_event_handler(void *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct sss_lastword_info *lastword_info = buf_in; + struct sss_exc_info *exc_info = &lastword_info->stack_info; + u32 stack_len = lastword_info->stack_actlen; + struct sss_hwdev *dev = hwdev; + u32 *curr_reg = NULL; + u32 reg_i; + u32 cnt; + + if (in_size != sizeof(*lastword_info)) { + sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: %u, should be %ld\n", + in_size, sizeof(*lastword_info)); + return; + } + + sss_show_exc_info(dev, exc_info); + + /* call stack dump */ + sdk_err(dev->dev_hdl, "Dump stack when exceptioin occurs, 16Bytes per line.\n"); + + cnt = stack_len / SSS_FOUR_REG_LEN; + for (reg_i = 0; reg_i < cnt; reg_i++) { + curr_reg = (u32 *)(lastword_info->stack_data + + ((u64)(u32)(reg_i * SSS_FOUR_REG_LEN))); + sdk_err(dev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *curr_reg, *(curr_reg + 0x1), *(curr_reg + 0x2), *(curr_reg + 0x3)); + } + + sdk_err(dev->dev_hdl, "==================== Exception Info End ====================\n"); +} + +const struct sss_mgmt_event g_mgmt_event_handler[] = { + { + .event_type = SSS_COMM_MGMT_CMD_FAULT_REPORT, + .handler = sss_fault_event_handler, + }, + + { + .event_type = SSS_COMM_MGMT_CMD_WATCHDOG_INFO, + .handler = sss_watchdog_timeout_event_handler, + }, + + { + .event_type = SSS_COMM_MGMT_CMD_LASTWORD_GET, + .handler = sss_lastword_report_event_handler, + }, +}; + +static void sss_print_chip_fault(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + u8 err_level; + char *level_str = NULL; + char *fault_level[SSS_FAULT_LEVEL_MAX] = { + SSS_FAULT_LEVEL_STR_FATAL, 
SSS_FAULT_LEVEL_STR_RESET, + SSS_FAULT_LEVEL_STR_HOST, SSS_FAULT_LEVEL_STR_FLR, + SSS_FAULT_LEVEL_STR_GENERAL, SSS_FAULT_LEVEL_STR_SUGGESTION + }; + + err_level = fault_event->info.chip.err_level; + if (err_level < SSS_FAULT_LEVEL_MAX) + level_str = fault_level[err_level]; + else + level_str = SSS_FAULT_LEVEL_STR_UNKNOWN; + + if (err_level == SSS_FAULT_LEVEL_SERIOUS_FLR) + sdk_err(hwdev->dev_hdl, "Err_level: %u [%s], func_id: %u\n", + err_level, level_str, fault_event->info.chip.func_id); + + sdk_err(hwdev->dev_hdl, "Node_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + fault_event->info.chip.node_id, fault_event->info.chip.err_type, + err_level, level_str, + fault_event->info.chip.err_csr_addr, fault_event->info.chip.err_csr_value); +} + +static void sss_print_ucode_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", + fault_event->info.ucode.cause_id, fault_event->info.ucode.core_id, + fault_event->info.ucode.c_id, fault_event->info.ucode.epc); +} + +static void sss_print_mem_rw_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_id: 0x%08x\n", + fault_event->info.mem_timeout.err_csr_ctrl, + fault_event->info.mem_timeout.err_csr_data, + fault_event->info.mem_timeout.ctrl_tab, fault_event->info.mem_timeout.mem_id); +} + +static void sss_print_reg_rw_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", fault_event->info.reg_timeout.err_csr); +} + +static void sss_print_phy_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + fault_event->info.phy_fault.op_type, fault_event->info.phy_fault.port_id, + 
fault_event->info.phy_fault.dev_ad, fault_event->info.phy_fault.csr_addr, + fault_event->info.phy_fault.op_data); +} + +static void sss_print_fault_info(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + struct sss_fault_event_stats *event_stats = &hwdev->hw_stats.fault_event_stats; + char *type = NULL; + char *fault_type[SSS_FAULT_TYPE_MAX] = { + SSS_FAULT_TYPE_STR_CHIP, SSS_FAULT_TYPE_STR_NPU, + SSS_FAULT_TYPE_STR_MEM_RD, SSS_FAULT_TYPE_STR_MEM_WR, + SSS_FAULT_TYPE_STR_REG_RD, SSS_FAULT_TYPE_STR_REG_WR, + SSS_FAULT_TYPE_STR_PHY, SSS_FAULT_TYPE_STR_TSENSOR + }; + sss_print_err_handler_t print_handler[] = { + sss_print_chip_fault, sss_print_ucode_err, + sss_print_mem_rw_err, sss_print_mem_rw_err, + sss_print_reg_rw_err, sss_print_reg_rw_err, + sss_print_phy_err + }; + + if (fault_event->type < SSS_FAULT_TYPE_MAX) { + type = fault_type[fault_event->type]; + atomic_inc(&event_stats->fault_type_stat[fault_event->type]); + } else { + type = SSS_FAULT_TYPE_STR_UNKNOWN; + } + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", + sss_get_global_func_id(hwdev)); + sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", fault_event->type, type); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + fault_event->info.val[0x0], fault_event->info.val[0x1], + fault_event->info.val[0x2], fault_event->info.val[0x3]); + + sss_dump_chip_err_info(hwdev); + + if (fault_event->type >= ARRAY_LEN(print_handler)) + return; + + print_handler[fault_event->type](hwdev, fault_event); +} + +static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_hwdev *hwdev = data; + struct sss_cmd_fault_event *cmd_event = in_buf; + struct sss_event_info info; + struct sss_fault_event *fault_event = (void *)info.event_data; + + if (in_size != sizeof(*cmd_event)) { + sdk_err(hwdev->dev_hdl, "Invalid size: %u.\n", in_size); + return; + } + + 
sss_print_fault_info(hwdev, &cmd_event->fault_event); + + if (hwdev->event_handler) { + info.type = SSS_EVENT_FAULT; + info.service = SSS_EVENT_SRV_COMM; + memcpy(info.event_data, &cmd_event->fault_event, sizeof(cmd_event->fault_event)); + fault_event->fault_level = (cmd_event->fault_event.type == SSS_FAULT_TYPE_CHIP) ? + cmd_event->fault_event.info.chip.err_level : + SSS_FAULT_LEVEL_FATAL; + hwdev->event_handler(hwdev->event_handler_data, &info); + } +} + +static void sss_pf_handle_mgmt_event(void *data, u16 event_type, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + u32 i; + u32 num = ARRAY_LEN(g_mgmt_event_handler); + + for (i = 0; i < num; i++) { + if (event_type == g_mgmt_event_handler[i].event_type && + g_mgmt_event_handler[i].handler) { + g_mgmt_event_handler[i].handler(data, in_buf, in_size, + out_buf, out_size); + return; + } + } + + *out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + sdk_warn(SSS_TO_DEV(data), "Unsupported mgmt event %u.\n", event_type); +} + +static int sss_hwdev_init_mbx(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_mbx(hwdev); + if (ret != 0) + return ret; + + sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MBX_FROM_FUNC, sss_recv_mbx_aeq_handler); + sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MSG_FROM_MGMT, sss_mgmt_msg_aeqe_handler); + + set_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state); + + return 0; +} + +static void sss_hwdev_deinit_mbx(struct sss_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MBX_FROM_FUNC); + + if (!SSS_IS_VF(hwdev)) { + sss_unregister_pf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM); + } else { + sss_unregister_vf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT); + } + + sss_hwif_deinit_mbx(hwdev); +} + +static int sss_chip_get_global_attr(struct 
sss_hwdev *hwdev) +{ + int ret = 0; + struct sss_cmd_get_glb_attr attr_cmd = {0}; + u16 out_len = sizeof(attr_cmd); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR, + &attr_cmd, sizeof(attr_cmd), &attr_cmd, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr_cmd)) { + sdk_err(((struct sss_hwdev *)hwdev)->dev_hdl, + "Fail to get global attr, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, attr_cmd.head.state, out_len); + return -EIO; + } + + memcpy(&hwdev->glb_attr, &attr_cmd.attr, sizeof(hwdev->glb_attr)); + + return 0; +} + +static int sss_chip_get_feature(struct sss_hwdev *hwdev) +{ + int i; + int ret; + u64 feature[SSS_MAX_FEATURE_QWORD] = {SSS_DRV_FEATURE_DEF, 0, 0, 0}; + + ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_GET_CMD, + hwdev->features, SSS_MAX_FEATURE_QWORD); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to get comm feature\n"); + return ret; + } + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_PPF) + feature[0] |= SSS_COMM_F_CHANNEL_DETECT; + + for (i = 0; i < SSS_MAX_FEATURE_QWORD; i++) + hwdev->features[i] &= feature[i]; + + return 0; +} + +static int sss_get_global_info(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_chip_get_board_info(hwdev, &hwdev->board_info); + if (ret != 0) + return ret; + + ret = sss_chip_get_feature(hwdev); + if (ret != 0) + return ret; + + ret = sss_chip_get_global_attr(hwdev); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_hwdev_deinit_adm(struct sss_hwdev *hwdev) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return; + + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_unregister_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT); + + sss_hwif_deinit_adm(hwdev); +} + +static int sss_hwdev_init_adm(struct sss_hwdev *hwdev) +{ + int ret; + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + ret = 
sss_hwif_init_adm(hwdev); + if (ret != 0) + return ret; + + sss_register_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM, hwdev, + sss_pf_handle_mgmt_event); + + set_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state); + + return 0; +} + +static int sss_chip_set_dma_attr_table(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_cmd_dma_attr_config attr = {0}; + u16 out_len = sizeof(attr); + + attr.ph = SSS_PCIE_PH_DISABLE; + attr.at = SSS_PCIE_AT_DISABLE; + attr.st = SSS_PCIE_ST_DISABLE; + attr.no_snooping = SSS_PCIE_SNOOP; + attr.tph_en = SSS_PCIE_TPH_DISABLE; + attr.func_id = sss_get_global_func_id(hwdev); + attr.entry_id = SSS_PCIE_MSIX_ATTR_ENTRY; + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_DMA_ATTR, &attr, sizeof(attr), + &attr, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr)) { + sdk_err(hwdev->dev_hdl, + "Fail to set dma attr, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, attr.head.state, out_len); + return -EIO; + } + + return 0; +} + +static int sss_chip_init_dma_attr(struct sss_hwdev *hwdev) +{ + u32 set; + u32 get; + u32 dst; + + set = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR); + set = SSS_CLEAR_DMA_ATTR_INDIR_ID(set, ID); + set |= SSS_SET_DMA_ATTR_INDIR_ID(SSS_PCIE_MSIX_ATTR_ENTRY, ID); + + sss_chip_write_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR, set); + + /* make sure reset dma attr */ + wmb(); + + dst = SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_TPH_DISABLE, TPH_EN) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_SNOOP, NO_SNOOPING) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_ST_DISABLE, ST) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_AT_DISABLE, AT) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_PH_DISABLE, PH); + get = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_TBL_ADDR); + + if (get == dst) + return 0; + + return sss_chip_set_dma_attr_table(hwdev); +} + +static void sss_chip_set_pf_state(struct sss_hwdev *hwdev) +{ + sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_ACTIVE_FLAG); +} + +static void sss_chip_reset_pf_state(struct sss_hwdev *hwdev) 
+{ + sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_INIT); +} + +static int sss_init_basic_mgmt_channel(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_aeq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init comm aeqs\n"); + return ret; + } + + ret = sss_hwdev_init_mbx(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mbx\n"); + goto init_mbx_err; + } + + ret = sss_init_aeq_msix_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeqs msix attr\n"); + goto init_aeq_msix_attr_err; + } + + return 0; + +init_aeq_msix_attr_err: + sss_hwdev_deinit_mbx(hwdev); + +init_mbx_err: + sss_hwif_deinit_aeq(hwdev); + + return ret; +} + +static void sss_free_base_mgmt_channel(struct sss_hwdev *hwdev) +{ + sss_hwdev_deinit_mbx(hwdev); + sss_hwif_deinit_aeq(hwdev); +} + +int sss_init_mgmt_channel(struct sss_hwdev *hwdev) +{ + int ret; + + /* init aeq, mbx */ + ret = sss_init_basic_mgmt_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init basic mgmt channel\n"); + return ret; + } + + ret = sss_chip_reset_function(hwdev, sss_get_global_func_id(hwdev), + SSS_COMM_RESET_TYPE, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to reset func\n"); + goto out; + } + + ret = sss_get_global_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwdev attr\n"); + goto out; + } + + ret = sss_hwdev_init_adm(hwdev); + if (ret != 0) + goto out; + + ret = sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + true, SSS_CHANNEL_COMM); + if (ret != 0) + goto set_use_state_err; + + ret = sss_chip_init_dma_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init dma attr table\n"); + goto init_dma_attr_err; + } + + ret = sss_init_ctrlq_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq channel\n"); + goto init_ctrlq_channel_err; + } + + sss_chip_set_pf_state(hwdev); + + ret = sss_aeq_register_swe_cb(hwdev, hwdev, SSS_STL_EVENT, 
sss_sw_aeqe_handler); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, + "Fail to register sw aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + return 0; + +register_ucode_aeqe_err: + sss_chip_reset_pf_state(hwdev); + sss_deinit_ctrlq_channel(hwdev); + +init_ctrlq_channel_err: +init_dma_attr_err: + sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + false, SSS_CHANNEL_COMM); + +set_use_state_err: + sss_hwdev_deinit_adm(hwdev); + +out: + sss_free_base_mgmt_channel(hwdev); + + return ret; +} + +void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev) +{ + sss_aeq_unregister_swe_cb(hwdev, SSS_STL_EVENT); + + sss_chip_reset_pf_state(hwdev); + + sss_deinit_ctrlq_channel(hwdev); + + sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + false, SSS_CHANNEL_COMM); + + sss_hwdev_deinit_adm(hwdev); + + sss_free_base_mgmt_channel(hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h new file mode 100644 index 0000000000000..f8ab14532b73d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_MGMT_CHANNEL_H +#define SSS_HWDEV_MGMT_CHANNEL_H + +#include "sss_hwdev.h" + +#define SSS_STACK_DATA_LEN 1024 +#define SSS_XREGS_NUM 31 +#define SSS_MPU_LASTWORD_SIZE 1024 + +struct sss_watchdog_info { + struct sss_mgmt_msg_head head; + + u32 cur_time_h; + u32 cur_time_l; + u32 task_id; + u32 rsvd; + + u64 pc; + u64 elr; + u64 spsr; + u64 far; + u64 esr; + u64 xzr; + u64 x30; + u64 x29; + u64 x28; + u64 x27; + u64 x26; + u64 x25; + u64 x24; + u64 x23; + u64 x22; + u64 x21; + u64 x20; + u64 x19; + u64 x18; + u64 x17; + u64 x16; + u64 x15; + u64 x14; + u64 x13; + u64 x12; + u64 x11; + u64 x10; + u64 x09; + u64 x08; + u64 x07; + u64 x06; + u64 x05; + u64 x04; + u64 x03; + u64 x02; + u64 x01; + u64 x00; + + u64 stack_top; + u64 
stack_bottom; + u64 sp; + u32 cur_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 stack_data[SSS_STACK_DATA_LEN]; +}; + +struct sss_cpu_tick { + u32 tick_cnt_h; /* The cycle count higher 32 bits */ + u32 tick_cnt_l; /* The cycle count lower 32 bits */ +}; + +struct sss_ax_exc_reg_info { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 sctlr; + u64 vbar; + u64 current_el; + u64 sp; + u64 elr; + u64 spsr; + u64 far_r; + u64 esr; + u64 xzr; + u64 xregs[SSS_XREGS_NUM]; /* 0~30: x30~x0 */ +}; + +struct sss_exc_info { + char os_ver[48]; /* OS version */ + char app_ver[64]; /* Product version */ + u32 exc_cause; /* Cause of exception */ + u32 thread_type; /* The thread type before the exception */ + u32 thread_id; /* Thread PID before exception */ + u16 byte_order; /* Byte order */ + u16 cpu_type; /* CPU type */ + u32 cpu_id; /* CPU ID */ + struct sss_cpu_tick cpu_tick; /* CPU Tick */ + u32 nest_cnt; /* The exception nested count */ + u32 fatal_errno; /* Fatal error code */ + u64 uw_sp; /* The stack pointer before the exception */ + u64 stack_bottom; /* Bottom of the stack before the exception */ + + /* The in-core register context information,*/ + /* 82\57 must be at 152 bytes; if it has changed, */ + /* the OS_EXC_REGINFO_OFFSET macro in sre_platform.eh must be updated */ + struct sss_ax_exc_reg_info reg_info; +}; + +struct sss_lastword_info { + struct sss_mgmt_msg_head head; + struct sss_exc_info stack_info; + + /* Stack details, Actual stack size(<=1024) */ + u32 stack_actlen; + + /* More than 1024, it will be truncated */ + u8 stack_data[SSS_MPU_LASTWORD_SIZE]; +}; + +int sss_init_mgmt_channel(struct sss_hwdev *hwdev); +void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c new file mode 100644 index 0000000000000..9672cce1341b9 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_hw_svc_cap.h" +#include "sss_hwif_irq.h" + +static int sss_init_ceq_info(struct sss_hwdev *hwdev) +{ + u8 i; + struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info; + struct sss_eq_cfg *ceq = NULL; + + ceq_info->ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif); + ceq_info->remain_ceq_num = ceq_info->ceq_num; + mutex_init(&ceq_info->eq_mutex); + + sdk_info(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0x%x, remain_ceq_num = 0x%x\n", + ceq_info->ceq_num, ceq_info->remain_ceq_num); + + if (ceq_info->ceq_num == 0) { + sdk_err(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0\n"); + return -EFAULT; + } + + ceq = kcalloc(ceq_info->ceq_num, sizeof(*ceq), GFP_KERNEL); + if (!ceq) + return -ENOMEM; + + for (i = 0; i < ceq_info->ceq_num; i++) { + ceq[i].id = i + 1; + ceq[i].free = SSS_CFG_FREE; + ceq[i].type = SSS_SERVICE_TYPE_MAX; + } + ceq_info->eq = ceq; + + return 0; +} + +static void sss_deinit_ceq_info(struct sss_hwdev *hwdev) +{ + struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info; + + kfree(ceq_info->eq); +} + +int sss_init_mgmt_info(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_mgmt_info *mgmt_info; + + mgmt_info = kzalloc(sizeof(*mgmt_info), GFP_KERNEL); + if (!mgmt_info) + return -ENOMEM; + + mgmt_info->hwdev = hwdev; + hwdev->mgmt_info = mgmt_info; + + ret = sss_init_ceq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq info, ret: %d\n", ret); + goto init_ceq_info_err; + } + + ret = sss_init_irq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init irq info, ret: %d\n", ret); + goto init_irq_info_err; + } + + return 0; + +init_irq_info_err: + sss_deinit_ceq_info(hwdev); + +init_ceq_info_err: + 
kfree(mgmt_info); + hwdev->mgmt_info = NULL; + + return ret; +} + +void sss_deinit_mgmt_info(struct sss_hwdev *hwdev) +{ + sss_deinit_irq_info(hwdev); + sss_deinit_ceq_info(hwdev); + + kfree(hwdev->mgmt_info); + hwdev->mgmt_info = NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h new file mode 100644 index 0000000000000..78beeba092afe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_MGMT_INFO_H +#define SSS_HWDEV_MGMT_INFO_H + +#include "sss_hwdev.h" + +int sss_init_mgmt_info(struct sss_hwdev *dev); +void sss_deinit_mgmt_info(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c new file mode 100644 index 0000000000000..dd89fa9641a2f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c @@ -0,0 +1,804 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_aeq.h" + +#define SSS_ADM_MSG_ELEM_DESC_SIZE 8 +#define SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE 8 +#define SSS_ADM_MSG_ELEM_WB_ADDR_SIZE 8 + +#define SSS_ADM_MSG_ELEM_ALIGNMENT 8 + +#define SSS_ADM_MSG_STATE_TIMEOUT 10000 + +/* adm_msg_state header */ +#define SSS_ADM_MSG_STATE_HEAD_VALID_SHIFT 0 +#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_SHIFT 16 + +#define SSS_ADM_MSG_STATE_HEAD_VALID_MASK 0xFFU +#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_MASK 0xFFU + +#define 
COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define SSS_ADM_MSG_STATE_HEAD_GET(val, member) \ + (((val) >> SSS_ADM_MSG_STATE_HEAD_##member##_SHIFT) & \ + SSS_ADM_MSG_STATE_HEAD_##member##_MASK) + +enum sss_adm_msg_data_format { + SSS_SGL_TYPE = 1, +}; + +enum sss_adm_msg_opt { + SSS_ADM_MSG_WRITE = 0, + SSS_ADM_MSG_READ = 1, +}; + +enum sss_adm_msg_bypass { + SSS_NO_BYPASS = 0, + SSS_BYPASS = 1, +}; + +enum sss_adm_msg_reply_aeq { + SSS_NO_TRIGGER = 0, + SSS_TRIGGER = 1, +}; + +enum sss_adm_msg_chn_code { + SSS_ADM_MSG_CHANNEL_0 = 0, +}; + +enum sss_adm_msg_chn_rsvd { + SSS_VALID_MSG_CHANNEL = 0, + SSS_INVALID_MSG_CHANNEL = 1, +}; + +#define SSS_ADM_MSG_DESC_LEN 7 + +struct sss_msg_head { + u8 state; + u8 version; + u8 reply_aeq_num; + u8 rsvd0[5]; +}; + +#define SSS_MGMT_MSG_SIZE_MIN 20 +#define SSS_MGMT_MSG_SIZE_STEP 16 +#define SSS_MGMT_MSG_RSVD_FOR_DEV 8 + +#define SSS_MSG_TO_MGMT_LEN_MAX 2016 + +#define SSS_SYNC_MSG_ID_MASK 0x7 +#define SSS_SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) +#define SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt) \ + ((pf_to_mgmt)->sync_msg_id = \ + ((pf_to_mgmt)->sync_msg_id + 1) & SSS_SYNC_MSG_ID_MASK) + +#define SSS_MGMT_MSG_TIMEOUT 20000 /* millisecond */ + +#define SSS_MSG_CB_USLEEP_MIN 900 +#define SSS_MSG_CB_USLEEP_MAX 1000 + +#define SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_len, mod, cmd, msg_id) \ + (SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | \ + SSS_SET_MSG_HEADER(mod, MODULE) | \ + SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | \ + SSS_SET_MSG_HEADER(SSS_MSG_ACK, NO_ACK) | \ + SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) | \ + SSS_SET_MSG_HEADER(0, SEQID) | \ + SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | \ + SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | \ + SSS_SET_MSG_HEADER(SSS_DIRECT_SEND_MSG, DIRECTION) | \ + SSS_SET_MSG_HEADER(cmd, CMD) | \ + SSS_SET_MSG_HEADER(SSS_MSG_SRC_MGMT, SOURCE) | \ + SSS_SET_MSG_HEADER(func_id, SRC_GLB_FUNC_ID) | \ + SSS_SET_MSG_HEADER(msg_id, MSG_ID)) + 
+#define SSSNIC_API_CMD_RESP_HEAD_VALID_SHIFT 0 +#define SSSNIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define SSSNIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU +#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define SSSNIC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & SSSNIC_API_CMD_RESP_HEAD_VALID_MASK) == \ + SSSNIC_API_CMD_RESP_HEAD_VALID_CODE) + +#define SSSNIC_API_CMD_RESP_HEAD_GET(val, member) \ + (((val) >> SSSNIC_API_CMD_RESP_HEAD_##member##_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_##member##_MASK) + +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + ((u16)(((val) >> SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) + +static u8 sss_xor_chksum_set(void *data) +{ + int id; + u8 checksum = 0; + u8 *val = data; + + for (id = 0; id < SSS_ADM_MSG_DESC_LEN; id++) + checksum ^= val[id]; + + return checksum; +} + +static void sss_chip_set_pi(struct sss_adm_msg *adm_msg) +{ + enum sss_adm_msg_type msg_type = adm_msg->msg_type; + struct sss_hwif *hwif = SSS_TO_HWDEV(adm_msg)->hwif; + u32 hw_pi_addr = SSS_CSR_ADM_MSG_PI_ADDR(msg_type); + + sss_chip_write_reg(hwif, hw_pi_addr, adm_msg->pi); +} + +static u32 sss_chip_get_ci(struct sss_adm_msg *adm_msg) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + return SSS_GET_ADM_MSG_STATE(val, CI); +} + +static void sss_dump_adm_msg_reg(struct sss_adm_msg *adm_msg) 
+{ + void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl; + u32 addr; + u32 val; + u16 pci_cmd = 0; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + sdk_err(dev, "Msg type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + adm_msg->msg_type, SSS_GET_ADM_MSG_STATE(val, CPLD_ERR), + SSS_GET_ADM_MSG_STATE(val, CHKSUM_ERR), + SSS_GET_ADM_MSG_STATE(val, FSM)); + + sdk_err(dev, "Adm msg hw current ci: 0x%x\n", + SSS_GET_ADM_MSG_STATE(val, CI)); + + addr = SSS_CSR_ADM_MSG_PI_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + sdk_err(dev, "Adm msg hw current pi: 0x%x\n", val); + pci_read_config_word(SSS_TO_HWDEV(adm_msg)->pcidev_hdl, PCI_COMMAND, &pci_cmd); + sdk_err(dev, "PCI command reg: 0x%x\n", pci_cmd); +} + +static int sss_adm_msg_busy(struct sss_adm_msg *adm_msg) +{ + void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + u64 resp_header; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_MULTI_READ: + case SSS_ADM_MSG_POLL_READ: + resp_header = be64_to_cpu(ctx->reply_fmt->head); + if (ctx->state && !SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", + ctx->state, adm_msg->pi, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + sss_dump_adm_msg_reg(adm_msg); + return -EBUSY; + } + break; + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + adm_msg->ci = sss_chip_get_ci(adm_msg); + + if (adm_msg->ci == SSS_MASK_ID(adm_msg, adm_msg->pi + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", + adm_msg->msg_type, adm_msg->ci, + adm_msg->pi); + sss_dump_adm_msg_reg(adm_msg); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", adm_msg->msg_type); + return -EINVAL; + } + + return 0; +} + 
+static void sss_prepare_elem_ctrl(u64 *elem_ctrl, enum sss_adm_msg_type msg_type) +{ + u64 control; + u8 chksum; + u16 elem_len = 0; + + switch (msg_type) { + case SSS_ADM_MSG_POLL_READ: + elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + SSS_ADM_MSG_ELEM_WB_ADDR_SIZE + + SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT); + break; + + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + + SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT); + break; + default: + break; + } + + control = SSS_ADM_MSG_ELEM_CTRL_SET(SSS_SIZE_TO_8B(elem_len), ELEM_LEN) | + SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = sss_xor_chksum_set(&control); + + control |= SSS_ADM_MSG_ELEM_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *elem_ctrl = cpu_to_be64(control); +} + +static void sss_prepare_elem_desc(struct sss_adm_msg *adm_msg, + u8 node_id, u16 cmd_size) +{ + u32 priv; + struct sss_adm_msg_elem *elem = adm_msg->now_node; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_POLL_READ: + priv = SSS_READ_ADM_MSG_PRIV_DATA(adm_msg->msg_type, ctx->store_pi); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_READ, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + break; + case SSS_ADM_MSG_POLL_WRITE: + priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + break; + case 
SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_NO_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + + break; + default: + sdk_err(((struct sss_hwdev *)adm_msg->hwdev)->dev_hdl, "Unknown Chain type: %d\n", + adm_msg->msg_type); + return; + } + + elem->desc |= SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_CHANNEL_0, MSG_CHANNEL) | + SSS_ADM_MSG_DESC_SET(SSS_VALID_MSG_CHANNEL, MSG_VALID); + + elem->desc |= SSS_ADM_MSG_DESC_SET(node_id, DEST) | + SSS_ADM_MSG_DESC_SET(SSS_SIZE_TO_4B(cmd_size), SIZE); + + elem->desc |= SSS_ADM_MSG_DESC_SET(sss_xor_chksum_set(&elem->desc), XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + elem->desc = cpu_to_be64(elem->desc); +} + +static void sss_prepare_elem_ctx(struct sss_adm_msg *adm_msg, + const void *cmd, u16 cmd_size) +{ + struct sss_adm_msg_elem_ctx *elem_ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + memcpy(elem_ctx->adm_msg_vaddr, cmd, cmd_size); +} + +static void sss_prepare_elem(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct sss_adm_msg_elem *now_node = adm_msg->now_node; + + sss_prepare_elem_ctrl(&now_node->control, adm_msg->msg_type); + sss_prepare_elem_desc(adm_msg, node_id, cmd_size); + sss_prepare_elem_ctx(adm_msg, cmd, cmd_size); +} + +static inline void sss_adm_msg_increase_pi(struct sss_adm_msg *adm_msg) +{ + adm_msg->pi = SSS_MASK_ID(adm_msg, adm_msg->pi + 1); +} + +static void sss_issue_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_chip_set_pi(adm_msg); +} + +static void sss_update_adm_msg_state(struct sss_adm_msg *adm_msg) +{ + struct sss_adm_msg_state *wb_state; + enum sss_adm_msg_type msg_type; + u64 status_header; + u32 desc_buf; + + wb_state = adm_msg->wb_state; + + 
desc_buf = be32_to_cpu(wb_state->desc_buf); + if (SSS_GET_ADM_MSG_STATE(desc_buf, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_state->head); + msg_type = SSS_ADM_MSG_STATE_HEAD_GET(status_header, MSG_ID); + if (msg_type >= SSS_ADM_MSG_MAX) + return; + + if (msg_type != adm_msg->msg_type) + return; + + adm_msg->ci = SSS_GET_ADM_MSG_STATE(desc_buf, CI); +} + +static enum sss_process_ret sss_wait_for_state_poll_handler(void *priv_data) +{ + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + sss_update_adm_msg_state(adm_msg); + /* SYNC ADM MSG cmd should start after prev cmd finished */ + if (adm_msg->ci == adm_msg->pi) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static enum sss_process_ret check_cmd_resp_handler(void *priv_data) +{ + struct sss_adm_msg_elem_ctx *ctxt = priv_data; + u64 resp_header; + u8 resp_status; + + if (!SSS_TO_HWDEV(ctxt)->chip_present_flag) { + pr_err("Fail to resp chip present"); + return SSS_PROCESS_ERR; + } + + resp_header = be64_to_cpu(ctxt->reply_fmt->head); + rmb(); /* read the latest header */ + + if (SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + resp_status = SSSNIC_API_CMD_RESP_HEAD_GET(resp_header, STATUS); + if (resp_status) { + pr_err("Api chain response data err, status: %u\n", + resp_status); + return SSS_PROCESS_ERR; + } + + return SSS_PROCESS_OK; + } + + return SSS_PROCESS_DOING; +} + +static int sss_wait_for_state_poll(struct sss_adm_msg *adm_msg) +{ + return sss_check_handler_timeout(adm_msg, sss_wait_for_state_poll_handler, + SSS_ADM_MSG_STATE_TIMEOUT, 100); /* wait 100 us once */ +} + +static int wait_for_resp_polling(struct sss_adm_msg_elem_ctx *ctx) +{ + return sss_check_handler_timeout(ctx, check_cmd_resp_handler, + POLLING_COMPLETION_TIMEOUT_DEFAULT, + USEC_PER_MSEC); +} + +static void copy_resp_data(struct sss_adm_msg_elem_ctx *ctx, void *ack, + u16 ack_size) +{ + struct sss_adm_msg_reply_fmt *resp = ctx->reply_fmt; 
+ + memcpy(ack, &resp->reply, ack_size); + ctx->state = 0; +} + +static int sss_wait_for_adm_msg_completion(struct sss_adm_msg *adm_msg, + struct sss_adm_msg_elem_ctx *ctx, + void *ack, u16 ack_size) +{ + int ret = 0; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_POLL_READ: + ret = wait_for_resp_polling(ctx); + if (ret == 0) + copy_resp_data(ctx, ack, ack_size); + else + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "API CMD poll response timeout\n"); + break; + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + ret = sss_wait_for_state_poll(adm_msg); + break; + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + /* No need to wait */ + break; + default: + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unknown API CMD Chain type: %d\n", + adm_msg->msg_type); + ret = -EINVAL; + } + + if (ret) { + sss_dump_adm_msg_reg(adm_msg); + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Adm msg wait timeout,type :%d\n", + adm_msg->msg_type); + } + + return ret; +} + +static inline void sss_update_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + ctx->state = 1; + ctx->store_pi = adm_msg->pi; + if (ctx->reply_fmt) { + ctx->reply_fmt->head = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +static void sss_adm_msg_lock(struct sss_adm_msg *adm_msg) +{ + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_lock(&adm_msg->async_lock); + else + down(&adm_msg->sem); +} + +static void sss_adm_msg_unlock(struct sss_adm_msg *adm_msg) +{ + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_unlock(&adm_msg->async_lock); + else + up(&adm_msg->sem); +} + +static int sss_adm_msg_io(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + + sss_adm_msg_lock(adm_msg); + + ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + if (sss_adm_msg_busy(adm_msg)) { + sss_adm_msg_unlock(adm_msg); + 
return -EBUSY; + } + + sss_update_adm_msg_ctx(adm_msg); + + sss_prepare_elem(adm_msg, node_id, cmd, cmd_size); + + sss_adm_msg_increase_pi(adm_msg); + + wmb(); /* make sure issue correctly the command */ + + sss_issue_adm_msg(adm_msg); + + adm_msg->now_node = adm_msg->elem_ctx[adm_msg->pi].elem_vaddr; + + sss_adm_msg_unlock(adm_msg); + + return sss_wait_for_adm_msg_completion(adm_msg, ctx, ack, ack_size); +} + +int sss_adm_msg_write(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size) +{ + return sss_adm_msg_io(adm_msg, node_id, cmd, cmd_size, NULL, 0); +} + +int sss_adm_msg_read(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 size, void *ack, u16 ack_size) +{ + return sss_adm_msg_io(adm_msg, node_id, cmd, size, ack, ack_size); +} + +static void sss_set_adm_event_flag(struct sss_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_state = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static u16 sss_align_adm_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(SSS_MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > SSS_MGMT_MSG_SIZE_MIN) + msg_size = SSS_MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - SSS_MGMT_MSG_SIZE_MIN), SSS_MGMT_MSG_SIZE_STEP); + else + msg_size = SSS_MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +static void sss_encapsulate_adm_msg(u8 *adm_msg, u64 *header, + const void *body, int body_len) +{ + u8 *adm_msg_new = adm_msg; + + memset(adm_msg_new, 0, SSS_MGMT_MSG_RSVD_FOR_DEV); + + adm_msg_new += SSS_MGMT_MSG_RSVD_FOR_DEV; + memcpy(adm_msg_new, header, sizeof(*header)); + + adm_msg_new += sizeof(*header); + memcpy(adm_msg_new, body, (size_t)(u32)body_len); +} + +#define SSS_MAX_PF_MGMT_BUF_MAX 2048L + +int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sss_adm_msg 
*adm_mag = NULL; + + if (!hwdev || !cmd || (ack_size && !ack) || size > SSS_MAX_PF_MGMT_BUF_MAX) + return -EINVAL; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_READ]; + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sss_adm_msg_read(adm_mag, dest, cmd, size, ack, ack_size); +} + +int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sss_adm_msg *adm_mag = NULL; + + if (!hwdev || !size || !cmd || size > SSS_MAX_PF_MGMT_BUF_MAX) + return -EINVAL; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_WRITE]; + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sss_adm_msg_write(adm_mag, dest, cmd, size); +} + +#define SSS_MSG_NO_RESP 0xFFFF + +static int sss_send_adm_msg(struct sss_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg_body, u16 msg_body_len) +{ + struct sss_hwif *hwif = SSS_TO_HWDEV(pf_to_mgmt)->hwif; + void *msg_buf = pf_to_mgmt->sync_buf; + u16 adm_msg_len = sss_align_adm_msg_len(msg_body_len); + u32 func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + u8 node_id = SSS_MGMT_CPU_NODE_ID(SSS_TO_HWDEV(pf_to_mgmt)); + u64 header; + struct sss_adm_msg *adm_mag; + + if (sss_get_dev_present_flag(pf_to_mgmt->hwdev) == 0) + return -EFAULT; + + if (adm_msg_len > SSS_MSG_TO_MGMT_LEN_MAX) + return -EFAULT; + + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_START); + + header = SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_body_len, mod, + cmd, SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt)); + + sss_encapsulate_adm_msg((u8 *)msg_buf, &header, msg_body, msg_body_len); + + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_WRITE_TO_MGMT_MODULE]; + + return 
sss_adm_msg_write(adm_mag, node_id, msg_buf, adm_msg_len); +} + +static inline void sss_check_msg_body(u8 mod, void *buf_in) +{ + struct sss_msg_head *msg_head = NULL; + + /* set aeq fix num to 3, need to ensure response aeq id < 3 */ + if (mod == SSS_MOD_TYPE_COMM || mod == SSS_MOD_TYPE_L2NIC) { + msg_head = buf_in; + + if (msg_head->reply_aeq_num >= SSS_MAX_AEQ) + msg_head->reply_aeq_num = 0; + } +} + +int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + struct sss_recv_msg *recv_msg = NULL; + struct completion *recv_done = NULL; + ulong timeo; + int err; + ulong ret; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + sss_check_msg_body(mod, buf_in); + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_buf */ + down(&pf_to_mgmt->sync_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg; + recv_done = &recv_msg->done; + + init_completion(recv_done); + + err = sss_send_adm_msg(pf_to_mgmt, mod, cmd, buf_in, in_size); + if (err != 0) { + sdk_err(dev, "Fail to send adm msg to mgmt, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? 
timeout : SSS_MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (ret == 0) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sss_dump_aeq_info((struct sss_hwdev *)hwdev); + err = -ETIMEDOUT; + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (pf_to_mgmt->event_state == SSS_ADM_EVENT_TIMEOUT) { + spin_unlock(&pf_to_mgmt->sync_event_lock); + err = -ETIMEDOUT; + goto unlock_sync_msg; + } + spin_unlock(&pf_to_mgmt->sync_event_lock); + + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_END); + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) { + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->buf_len) { + sdk_err(dev, + "Invalid resp msg len: %u out of range: %u, mod %d, cmd %u\n", + recv_msg->buf_len, *out_size, mod, cmd); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->buf_len) + memcpy(buf_out, recv_msg->buf, recv_msg->buf_len); + + *out_size = recv_msg->buf_len; + } + +unlock_sync_msg: + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_lock); + + return err; +} + +int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data, + sss_mgmt_msg_handler_t handler) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + + if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX) + return -EFAULT; + + mgmt_msg = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + if (!mgmt_msg) + return -EINVAL; + + mgmt_msg->recv_data[mod_type] = data; + mgmt_msg->recv_handler[mod_type] = handler; + + set_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]); + + return 0; +} +EXPORT_SYMBOL(sss_register_mgmt_msg_handler); + +void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + + if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX) + return; + + mgmt_msg 
= ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + if (!mgmt_msg) + return; + + clear_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]); + + while (test_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod_type])) + usleep_range(SSS_MSG_CB_USLEEP_MIN, SSS_MSG_CB_USLEEP_MAX); + + mgmt_msg->recv_data[mod_type] = NULL; + mgmt_msg->recv_handler[mod_type] = NULL; +} +EXPORT_SYMBOL(sss_unregister_mgmt_msg_handler); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h new file mode 100644 index 0000000000000..54cfe231e6313 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_H +#define SSS_HWIF_ADM_H + +#include +int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size); + +int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); + +int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h new file mode 100644 index 0000000000000..fc0d99e326ade --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_COMMON_H +#define SSS_HWIF_ADM_COMMON_H + +#define SSS_ADM_MSG_AEQ_ID 2 + +#define SSS_WRITE_ADM_MSG_PRIV_DATA(id) (((u8)(id)) << 16) +#define SSS_READ_ADM_MSG_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) + +#define SSS_MASK_ID(adm_msg, id) \ + ((id) & ((adm_msg)->elem_num - 1)) + +#define SSS_SIZE_TO_4B(size) \ + (ALIGN((u32)(size), 4U) >> 2) +#define SSS_SIZE_TO_8B(size) \ + (ALIGN((u32)(size), 8U) >> 3) + +/* 
ADM_STATUS_0 CSR: 0x0030+adm msg id*0x080 */ +#define SSS_ADM_MSG_STATE_CI_MASK 0xFFFFFFU +#define SSS_ADM_MSG_STATE_CI_SHIFT 0 + +#define SSS_ADM_MSG_STATE_FSM_MASK 0xFU +#define SSS_ADM_MSG_STATE_FSM_SHIFT 24 + +#define SSS_ADM_MSG_STATE_CHKSUM_ERR_MASK 0x3U +#define SSS_ADM_MSG_STATE_CHKSUM_ERR_SHIFT 28 + +#define SSS_ADM_MSG_STATE_CPLD_ERR_MASK 0x1U +#define SSS_ADM_MSG_STATE_CPLD_ERR_SHIFT 30 + +#define SSS_GET_ADM_MSG_STATE(val, member) \ + (((val) >> SSS_ADM_MSG_STATE_##member##_SHIFT) & \ + SSS_ADM_MSG_STATE_##member##_MASK) + +/* adm_msg_elem.desc structure */ +#define SSS_ADM_MSG_DESC_SGL_TYPE_SHIFT 0 +#define SSS_ADM_MSG_DESC_RD_WR_SHIFT 1 +#define SSS_ADM_MSG_DESC_MGMT_BYPASS_SHIFT 2 +#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_SHIFT 3 +#define SSS_ADM_MSG_DESC_MSG_VALID_SHIFT 4 +#define SSS_ADM_MSG_DESC_MSG_CHANNEL_SHIFT 6 +#define SSS_ADM_MSG_DESC_PRIV_DATA_SHIFT 8 +#define SSS_ADM_MSG_DESC_DEST_SHIFT 32 +#define SSS_ADM_MSG_DESC_SIZE_SHIFT 40 +#define SSS_ADM_MSG_DESC_XOR_CHKSUM_SHIFT 56 + +#define SSS_ADM_MSG_DESC_SGL_TYPE_MASK 0x1U +#define SSS_ADM_MSG_DESC_RD_WR_MASK 0x1U +#define SSS_ADM_MSG_DESC_MGMT_BYPASS_MASK 0x1U +#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_MASK 0x1U +#define SSS_ADM_MSG_DESC_MSG_VALID_MASK 0x3U +#define SSS_ADM_MSG_DESC_MSG_CHANNEL_MASK 0x3U +#define SSS_ADM_MSG_DESC_PRIV_DATA_MASK 0xFFFFFFU +#define SSS_ADM_MSG_DESC_DEST_MASK 0x1FU +#define SSS_ADM_MSG_DESC_SIZE_MASK 0x7FFU +#define SSS_ADM_MSG_DESC_XOR_CHKSUM_MASK 0xFFU + +#define SSS_ADM_MSG_DESC_SET(val, member) \ + ((((u64)(val)) & SSS_ADM_MSG_DESC_##member##_MASK) << \ + SSS_ADM_MSG_DESC_##member##_SHIFT) + +/* adm_msg_elem structure */ +#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_SHIFT 0 +#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_SHIFT 56 + +#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define 
SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define SSS_ADM_MSG_ELEM_CTRL_SET(val, member) \ + ((((u64)(val)) & SSS_ADM_MSG_ELEM_CTRL_##member##_MASK) << \ + SSS_ADM_MSG_ELEM_CTRL_##member##_SHIFT) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c new file mode 100644 index 0000000000000..eac95315138d0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c @@ -0,0 +1,762 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_mgmt_common.h" + +/* ADM_MSG_REQ CSR: 0x0020+adm_id*0x080 */ +#define SSS_ADM_MSG_REQ_RESTART_SHIFT 1 +#define SSS_ADM_MSG_REQ_WB_TRIGGER_SHIFT 2 + +#define SSS_ADM_MSG_REQ_RESTART_MASK 0x1U +#define SSS_ADM_MSG_REQ_WB_TRIGGER_MASK 0x1U + +#define SSS_SET_ADM_MSG_REQ(val, member) \ + (((val) & SSS_ADM_MSG_REQ_##member##_MASK) << \ + SSS_ADM_MSG_REQ_##member##_SHIFT) + +#define SSS_GET_ADM_MSG_REQ(val, member) \ + (((val) >> SSS_ADM_MSG_REQ_##member##_SHIFT) & \ + SSS_ADM_MSG_REQ_##member##_MASK) + +#define SSS_CLEAR_ADM_MSG_REQ(val, member) \ + ((val) & (~(SSS_ADM_MSG_REQ_##member##_MASK \ + << SSS_ADM_MSG_REQ_##member##_SHIFT))) + +/* ADM_MSG_CTRL CSR: 0x0014+adm_id*0x080 */ +#define SSS_ADM_MSG_CTRL_RESTART_EN_SHIFT 1 +#define SSS_ADM_MSG_CTRL_XOR_ERR_SHIFT 2 +#define SSS_ADM_MSG_CTRL_AEQE_EN_SHIFT 4 +#define SSS_ADM_MSG_CTRL_AEQ_ID_SHIFT 8 +#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_SHIFT 28 +#define SSS_ADM_MSG_CTRL_ELEM_SIZE_SHIFT 30 + +#define SSS_ADM_MSG_CTRL_RESTART_EN_MASK 0x1U +#define 
SSS_ADM_MSG_CTRL_XOR_ERR_MASK 0x1U +#define SSS_ADM_MSG_CTRL_AEQE_EN_MASK 0x1U +#define SSS_ADM_MSG_CTRL_AEQ_ID_MASK 0x3U +#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_MASK 0x3U +#define SSS_ADM_MSG_CTRL_ELEM_SIZE_MASK 0x3U + +#define SSS_SET_ADM_MSG_CTRL(val, member) \ + (((val) & SSS_ADM_MSG_CTRL_##member##_MASK) << \ + SSS_ADM_MSG_CTRL_##member##_SHIFT) + +#define SSS_CLEAR_ADM_MSG_CTRL(val, member) \ + ((val) & (~(SSS_ADM_MSG_CTRL_##member##_MASK \ + << SSS_ADM_MSG_CTRL_##member##_SHIFT))) + +#define SSS_ADM_MSG_BUF_SIZE 2048ULL + +#define SSS_ADM_MSG_NODE_ALIGN_SIZE 512ULL +#define SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE 64ULL + +#define SSS_ADM_MSG_REPLY_ALIGNMENT 128ULL + +#define SSS_ADM_MSG_TIMEOUT 10000 + +#define SSS_ADM_MSG_ELEM_SIZE_SHIFT 6U + +#define SSS_ADM_MSG_ELEM_NUM 32 +#define SSS_ADM_MSG_ELEM_SIZE 128 +#define SSS_ADM_MSG_REPLY_DATA_SIZE 128 + +#define SSS_MGMT_WQ_NAME "sssnic_mgmt" + +#define SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, elem_id) \ + ((adm_msg)->elem_paddr_base + (adm_msg)->elem_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id) \ + ((adm_msg)->elem_vaddr_base + (adm_msg)->elem_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id) \ + ((adm_msg)->buf_paddr_base + (adm_msg)->buf_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id) \ + ((adm_msg)->buf_vaddr_base + (adm_msg)->buf_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id) \ + ((adm_msg)->reply_paddr_base + (adm_msg)->reply_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id) \ + ((adm_msg)->reply_vaddr_base + (adm_msg)->reply_size_align * (elem_id)) + +typedef void (*sss_alloc_elem_buf_handler_t)(struct sss_adm_msg *adm_msg, u32 elem_id); + +struct sss_adm_msg_attr { + struct sss_hwdev *hwdev; + enum sss_adm_msg_type msg_type; + + u32 elem_num; + u16 reply_size; + u16 elem_size; +}; + +static enum sss_process_ret sss_adm_msg_reset_handler(void *priv_data) +{ + 
u32 val; + u32 addr; + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + if (!SSS_GET_ADM_MSG_REQ(val, RESTART)) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static enum sss_process_ret sss_adm_msg_ready_handler(void *priv_data) +{ + u32 val; + u32 addr; + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + if (SSS_GET_ADM_MSG_STATE(val, CI) == adm_msg->ci) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static void sss_chip_clean_adm_msg(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type); + + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + val = SSS_CLEAR_ADM_MSG_CTRL(val, RESTART_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, XOR_ERR) & + SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, XOR_CHK_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static void sss_chip_set_adm_msg_wb_addr(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_STATE_HI_ADDR(adm_msg->msg_type); + val = upper_32_bits(adm_msg->wb_state_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + addr = SSS_CSR_ADM_MSG_STATE_LO_ADDR(adm_msg->msg_type); + val = lower_32_bits(adm_msg->wb_state_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static int sss_chip_reset_adm_msg(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + val = SSS_CLEAR_ADM_MSG_REQ(val, 
RESTART); + val |= SSS_SET_ADM_MSG_REQ(1, RESTART); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + return sss_check_handler_timeout(adm_msg, sss_adm_msg_reset_handler, + SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC); +} + +static void sss_chip_init_elem_size(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + u32 size; + + addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + val = SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE); + + size = (u32)ilog2(adm_msg->elem_size >> SSS_ADM_MSG_ELEM_SIZE_SHIFT); + val |= SSS_SET_ADM_MSG_CTRL(0, AEQE_EN) | + SSS_SET_ADM_MSG_CTRL(size, ELEM_SIZE); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static void sss_chip_set_elem_num(struct sss_adm_msg *adm_msg) +{ + u32 addr; + + addr = SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(adm_msg->msg_type); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, adm_msg->elem_num); +} + +static void sss_chip_init_elem_head(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_HEAD_HI_ADDR(adm_msg->msg_type); + val = upper_32_bits(adm_msg->head_elem_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + addr = SSS_CSR_ADM_MSG_HEAD_LO_ADDR(adm_msg->msg_type); + val = lower_32_bits(adm_msg->head_elem_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static int sss_wait_adm_msg_ready(struct sss_adm_msg *adm_msg) +{ + return sss_check_handler_timeout(adm_msg, sss_adm_msg_ready_handler, + SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC); +} + +static int sss_chip_init_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_chip_clean_adm_msg(adm_msg); + + sss_chip_set_adm_msg_wb_addr(adm_msg); + + if (sss_chip_reset_adm_msg(adm_msg) != 0) { + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Fail to restart adm cmd\n"); + return -EBUSY; + } + + sss_chip_init_elem_size(adm_msg); + sss_chip_set_elem_num(adm_msg); + 
sss_chip_init_elem_head(adm_msg); + + return sss_wait_adm_msg_ready(adm_msg); +} + +static void sss_init_ctx_buf_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id); + paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id); + + ctx->adm_msg_vaddr = vaddr; + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + elem->write.hw_msg_paddr = cpu_to_be64(paddr); +} + +static void sss_init_ctx_reply_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + paddr = SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id); + vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id); + + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + elem->read.hw_wb_reply_paddr = cpu_to_be64(paddr); + ctx->reply_fmt = vaddr; + ctx->adm_msg_vaddr = &elem->read.hw_msg_paddr; +} + +static void sss_init_ctx_buf_reply_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 buf_paddr; + void *buf_vaddr; + void *rsp_vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + rsp_vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id); + buf_paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id); + buf_vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id); + + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + ctx->reply_fmt = rsp_vaddr; + ctx->adm_msg_vaddr = buf_vaddr; + elem->read.hw_msg_paddr = cpu_to_be64(buf_paddr); +} + +static void sss_alloc_reply_buf(struct sss_adm_msg *adm_msg, + struct sss_adm_msg_elem *elem, u32 cell_idx) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + void *resp_vaddr; + u64 resp_paddr; + + 
resp_vaddr = (u8 *)((u64)adm_msg->reply_vaddr_base + + adm_msg->reply_size_align * cell_idx); + resp_paddr = adm_msg->reply_paddr_base + + adm_msg->reply_size_align * cell_idx; + + ctx = &adm_msg->elem_ctx[cell_idx]; + + ctx->reply_fmt = resp_vaddr; + elem->read.hw_wb_reply_paddr = cpu_to_be64(resp_paddr); +} + +static int sss_init_elem_ctx(struct sss_adm_msg *adm_msg, u32 elem_id) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + struct sss_adm_msg_elem *elem; + sss_alloc_elem_buf_handler_t handler[] = { + NULL, + NULL, + sss_init_ctx_buf_addr, + sss_init_ctx_reply_addr, + sss_init_ctx_buf_addr, + sss_init_ctx_buf_reply_addr, + sss_init_ctx_buf_addr + }; + elem = (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + + if (adm_msg->msg_type == SSS_ADM_MSG_MULTI_READ || + adm_msg->msg_type == SSS_ADM_MSG_POLL_READ) + sss_alloc_reply_buf(adm_msg, elem, elem_id); + + ctx = &adm_msg->elem_ctx[elem_id]; + ctx->elem_vaddr = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + ctx->hwdev = adm_msg->hwdev; + + if (adm_msg->msg_type >= ARRAY_LEN(handler)) + goto out; + + if (!handler[adm_msg->msg_type]) + goto out; + + handler[adm_msg->msg_type](adm_msg, elem_id); + + return 0; + +out: + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unsupport adm msg type %u\n", adm_msg->msg_type); + return -EINVAL; +} + +static int sss_init_adm_msg_elem(struct sss_adm_msg *adm_msg) +{ + u32 i; + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem *elem = NULL; + struct sss_adm_msg_elem *pre_elt = NULL; + int ret; + + for (i = 0; i < adm_msg->elem_num; i++) { + ret = sss_init_elem_ctx(adm_msg, i); + if (ret != 0) + return ret; + + paddr = SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, i); + vaddr = SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, i); + + if (!pre_elt) { + adm_msg->head_node = vaddr; + adm_msg->head_elem_paddr = (dma_addr_t)paddr; + } else { + pre_elt->next_elem_paddr = cpu_to_be64(paddr); + } + + elem = vaddr; + elem->next_elem_paddr = 0; + + pre_elt = elem; + 
} + + elem->next_elem_paddr = cpu_to_be64(adm_msg->head_elem_paddr); + adm_msg->now_node = adm_msg->head_node; + + return 0; +} + +static int sss_alloc_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + size_t ctx_size; + + ctx_size = adm_msg->elem_num * sizeof(*adm_msg->elem_ctx); + + adm_msg->elem_ctx = kzalloc(ctx_size, GFP_KERNEL); + if (!adm_msg->elem_ctx) + return -ENOMEM; + + return 0; +} + +static void sss_free_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + kfree(adm_msg->elem_ctx); + adm_msg->elem_ctx = NULL; +} + +static int sss_alloc_adm_msg_wb_state(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + adm_msg->wb_state = dma_zalloc_coherent(dev_hdl, sizeof(*adm_msg->wb_state), + &adm_msg->wb_state_paddr, GFP_KERNEL); + if (!adm_msg->wb_state) { + sdk_err(dev_hdl, "Fail to alloc dma wb status\n"); + return -ENOMEM; + } + + return 0; +} + +static void sss_free_adm_msg_wb_state(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + dma_free_coherent(dev_hdl, sizeof(*adm_msg->wb_state), + adm_msg->wb_state, adm_msg->wb_state_paddr); +} + +static int sss_alloc_elem_buf(struct sss_adm_msg *adm_msg) +{ + int ret; + size_t buf_size; + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + adm_msg->buf_size_align = ALIGN(SSS_ADM_MSG_BUF_SIZE, + SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE); + adm_msg->elem_size_align = ALIGN((u64)adm_msg->elem_size, + SSS_ADM_MSG_NODE_ALIGN_SIZE); + adm_msg->reply_size_align = ALIGN((u64)adm_msg->reply_size, + SSS_ADM_MSG_REPLY_ALIGNMENT); + buf_size = (adm_msg->buf_size_align + adm_msg->elem_size_align + + adm_msg->reply_size_align) * adm_msg->elem_num; + + ret = sss_dma_zalloc_coherent_align(dev_hdl, buf_size, SSS_ADM_MSG_NODE_ALIGN_SIZE, + GFP_KERNEL, &adm_msg->elem_addr); + if (ret != 0) { + sdk_err(dev_hdl, "Fail to alloc adm msg elem buffer\n"); + return ret; + } + + adm_msg->elem_vaddr_base = adm_msg->elem_addr.align_vaddr; + adm_msg->elem_paddr_base = 
adm_msg->elem_addr.align_paddr; + + adm_msg->reply_vaddr_base = (u8 *)((u64)adm_msg->elem_vaddr_base + + adm_msg->elem_size_align * adm_msg->elem_num); + adm_msg->reply_paddr_base = adm_msg->elem_paddr_base + + adm_msg->elem_size_align * adm_msg->elem_num; + + adm_msg->buf_vaddr_base = (u8 *)((u64)adm_msg->reply_vaddr_base + + adm_msg->reply_size_align * adm_msg->elem_num); + adm_msg->buf_paddr_base = adm_msg->reply_paddr_base + + adm_msg->reply_size_align * adm_msg->elem_num; + + return 0; +} + +static void sss_free_elem_buf(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + sss_dma_free_coherent_align(dev_hdl, &adm_msg->elem_addr); +} + +static int sss_alloc_adm_msg_buf(struct sss_adm_msg *adm_msg) +{ + int ret; + + ret = sss_alloc_adm_msg_ctx(adm_msg); + if (ret != 0) + return ret; + + ret = sss_alloc_adm_msg_wb_state(adm_msg); + if (ret != 0) + goto alloc_wb_err; + + ret = sss_alloc_elem_buf(adm_msg); + if (ret != 0) + goto alloc_elem_buf_err; + + return 0; + +alloc_elem_buf_err: + sss_free_adm_msg_wb_state(adm_msg); + +alloc_wb_err: + sss_free_adm_msg_ctx(adm_msg); + + return ret; +} + +static void sss_free_adm_msg_buf(struct sss_adm_msg *adm_msg) +{ + sss_free_elem_buf(adm_msg); + + sss_free_adm_msg_wb_state(adm_msg); + + sss_free_adm_msg_ctx(adm_msg); +} + +static void sss_init_adm_msg_param(struct sss_adm_msg *adm_msg, + struct sss_hwdev *hwdev, u8 msg_type) +{ + adm_msg->hwdev = hwdev; + adm_msg->elem_num = SSS_ADM_MSG_ELEM_NUM; + adm_msg->reply_size = SSS_ADM_MSG_REPLY_DATA_SIZE; + adm_msg->elem_size = SSS_ADM_MSG_ELEM_SIZE; + adm_msg->msg_type = msg_type; + adm_msg->pi = 0; + adm_msg->ci = 0; + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_lock_init(&adm_msg->async_lock); + else + sema_init(&adm_msg->sem, 1); +} + +static int create_adm_msg(struct sss_hwdev *hwdev, struct sss_adm_msg **adm_msg, u8 msg_type) +{ + struct sss_adm_msg *msg; + int ret; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + 
if (!msg)
+		return -ENOMEM;
+
+	sss_init_adm_msg_param(msg, hwdev, msg_type);
+
+	ret = sss_alloc_adm_msg_buf(msg);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to init adm msg buf\n");
+		/* fix: the original returned here without freeing msg */
+		kfree(msg);
+		return ret;
+	}
+
+	ret = sss_init_adm_msg_elem(msg);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to init adm msg elem\n");
+		goto init_msg_err;
+	}
+
+	ret = sss_chip_init_adm_msg(msg);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n");
+		goto init_msg_err;
+	}
+
+	*adm_msg = msg;
+
+	return 0;
+
+init_msg_err:
+	/* fix: the original freed the buffers but leaked msg itself */
+	sss_free_adm_msg_buf(msg);
+	kfree(msg);
+
+	return ret;
+}
+
+/* Free an adm msg created by create_adm_msg(): buffers first, then the
+ * container itself.
+ */
+void sss_destroy_adm_msg(struct sss_adm_msg *adm_msg)
+{
+	sss_free_adm_msg_buf(adm_msg);
+	kfree(adm_msg);
+}
+
+/* Create one adm msg per channel type in adm_msg[]. On failure, tear down
+ * only the entries that were successfully created and return the error.
+ */
+static int sss_init_adm_msg(struct sss_hwdev *hwdev,
+			    struct sss_adm_msg **adm_msg)
+{
+	int ret;
+	u8 i;
+	u8 adm_msg_type;
+	void *dev = hwdev->dev_hdl;
+
+	if (!SSS_SUPPORT_ADM_MSG(hwdev))
+		return 0;
+
+	for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE;
+	     adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++) {
+		ret = create_adm_msg(hwdev, &adm_msg[adm_msg_type], adm_msg_type);
+		if (ret) {
+			sdk_err(dev, "Failed to create adm msg %d\n", adm_msg_type);
+			goto create_adm_msg_err;
+		}
+	}
+
+	return 0;
+
+create_adm_msg_err:
+	/* fix: the original indexed with adm_msg_type (the entry that just
+	 * failed, never assigned) instead of i, repeatedly destroying the
+	 * wrong slot and leaking every adm msg created so far.
+	 */
+	for (i = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE; i < adm_msg_type; i++)
+		sss_destroy_adm_msg(adm_msg[i]);
+
+	return ret;
+}
+
+static void sss_deinit_adm_msg(const struct sss_hwdev *hwdev,
+			       struct sss_adm_msg **adm_msg)
+{
+	u8 adm_msg_type;
+
+	if (!SSS_SUPPORT_ADM_MSG(hwdev))
+		return;
+
+	for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE;
+	     adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++)
+		sss_destroy_adm_msg(adm_msg[adm_msg_type]);
+}
+
+static int sss_alloc_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg)
+{
+	struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg;
+	struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg;
+
+	recv_msg->seq_id = SSS_MGMT_SEQ_ID_MAX;
+	resp_msg->seq_id = SSS_MGMT_SEQ_ID_MAX;
+
+	recv_msg->buf 
= kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!recv_msg->buf) + return -ENOMEM; + + resp_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!resp_msg->buf) + goto alloc_resp_msg_err; + + mgmt_msg->ack_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!mgmt_msg->ack_buf) + goto alloc_ack_buf_err; + + mgmt_msg->sync_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!mgmt_msg->sync_buf) + goto alloc_sync_buf_err; + + mgmt_msg->async_msg_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL); + if (!mgmt_msg->async_msg_buf) + goto alloc_async_msg_buf_err; + + return 0; + +alloc_async_msg_buf_err: + kfree(mgmt_msg->sync_buf); + mgmt_msg->sync_buf = NULL; +alloc_sync_buf_err: + kfree(mgmt_msg->ack_buf); + mgmt_msg->ack_buf = NULL; + +alloc_ack_buf_err: + kfree(resp_msg->buf); + resp_msg->buf = NULL; + +alloc_resp_msg_err: + kfree(recv_msg->buf); + recv_msg->buf = NULL; + + return -ENOMEM; +} + +static void sss_free_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg) +{ + struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg; + struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg; + + kfree(mgmt_msg->async_msg_buf); + kfree(mgmt_msg->sync_buf); + kfree(mgmt_msg->ack_buf); + kfree(resp_msg->buf); + kfree(recv_msg->buf); +} + +int sss_hwif_init_adm(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_msg_pf_to_mgmt *mgmt_msg; + + mgmt_msg = kzalloc(sizeof(*mgmt_msg), GFP_KERNEL); + if (!mgmt_msg) + return -ENOMEM; + + spin_lock_init(&mgmt_msg->async_msg_lock); + spin_lock_init(&mgmt_msg->sync_event_lock); + sema_init(&mgmt_msg->sync_lock, 1); + mgmt_msg->hwdev = hwdev; + hwdev->pf_to_mgmt = mgmt_msg; + + mgmt_msg->workq = create_singlethread_workqueue(SSS_MGMT_WQ_NAME); + if (!mgmt_msg->workq) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt workq\n"); + ret = -ENOMEM; + goto alloc_mgmt_wq_err; + } + + ret = sss_alloc_msg_buf(mgmt_msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc msg buffer\n"); + goto alloc_msg_buf_err; + } + + ret = 
sss_init_adm_msg(hwdev, mgmt_msg->adm_msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n"); + goto init_all_adm_err; + } + + return 0; + +init_all_adm_err: + sss_free_msg_buf(mgmt_msg); + +alloc_msg_buf_err: + destroy_workqueue(mgmt_msg->workq); + +alloc_mgmt_wq_err: + kfree(mgmt_msg); + hwdev->pf_to_mgmt = NULL; + + return ret; +} + +void sss_hwif_deinit_adm(struct sss_hwdev *hwdev) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = hwdev->pf_to_mgmt; + + destroy_workqueue(mgmt_msg->workq); + + sss_deinit_adm_msg(hwdev, mgmt_msg->adm_msg); + + sss_free_msg_buf(mgmt_msg); + + kfree(mgmt_msg); + hwdev->pf_to_mgmt = NULL; +} + +void sss_complete_adm_event(struct sss_hwdev *hwdev) +{ + struct sss_recv_msg *recv_msg = + &hwdev->pf_to_mgmt->recv_resp_msg; + + spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + if (hwdev->pf_to_mgmt->event_state == SSS_ADM_EVENT_START) { + complete(&recv_msg->done); + hwdev->pf_to_mgmt->event_state = SSS_ADM_EVENT_TIMEOUT; + } + spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h new file mode 100644 index 0000000000000..c2c3092fbdc00 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_INIT_H +#define SSS_HWIF_ADM_INIT_H + +#include "sss_hwdev.h" + +int sss_hwif_init_adm(struct sss_hwdev *hwdev); +void sss_hwif_deinit_adm(struct sss_hwdev *hwdev); +void sss_complete_adm_event(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c new file mode 100644 index 0000000000000..93bda1133420c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic 
Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_eq_info.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_hw_aeq.h" +#include "sss_hw_export.h" +#include "sss_hwif_aeq.h" +#include "sss_hw_common.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" +#include "sss_csr.h" + +#define SSS_DEF_AEQ_DEPTH 0x10000 + +#define SSS_MIN_AEQ_DEPTH 64 +#define SSS_MAX_AEQ_DEPTH \ + ((SSS_MAX_EQ_PAGE_SIZE / SSS_AEQE_SIZE) * SSS_AEQ_MAX_PAGE) + +#define SSS_AEQE_DESC_SIZE 4 +#define SSS_AEQE_DATA_SIZE (SSS_AEQE_SIZE - SSS_AEQE_DESC_SIZE) + +struct sss_aeq_elem { + u8 aeqe_data[SSS_AEQE_DATA_SIZE]; + u32 desc; +}; + +#define SSS_GET_AEQ_ELEM(aeq, id) \ + ((struct sss_aeq_elem *)SSS_GET_EQ_ELEM((aeq), (id))) + +#define SSS_GET_CUR_AEQ_ELEM(aeq) SSS_GET_AEQ_ELEM((aeq), (aeq)->ci) + +#define SSS_GET_AEQ_SW_EVENT(type) \ + (((type) >= SSS_ERR_MAX) ? 
\ + SSS_STF_EVENT : SSS_STL_EVENT) + +#define SSS_AEQ_CTRL_0_INTR_ID_SHIFT 0 +#define SSS_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define SSS_AEQ_CTRL_0_PCI_INTF_ID_SHIFT 20 +#define SSS_AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define SSS_AEQ_CTRL_0_INTR_ID_MASK 0x3FFU +#define SSS_AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define SSS_AEQ_CTRL_0_PCI_INTF_ID_MASK 0x7U +#define SSS_AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define SSS_SET_AEQ_CTRL_0(val, member) \ + (((val) & SSS_AEQ_CTRL_0_##member##_MASK) << \ + SSS_AEQ_CTRL_0_##member##_SHIFT) + +#define SSS_CLEAR_AEQ_CTRL_0(val, member) \ + ((val) & (~(SSS_AEQ_CTRL_0_##member##_MASK << \ + SSS_AEQ_CTRL_0_##member##_SHIFT))) + +#define SSS_AEQ_CTRL_1_SIZE_SHIFT 0 +#define SSS_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define SSS_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define SSS_AEQ_CTRL_1_SIZE_MASK 0x1FFFFFU +#define SSS_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define SSS_AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define SSS_SET_AEQ_CTRL_1(val, member) \ + (((val) & SSS_AEQ_CTRL_1_##member##_MASK) << \ + SSS_AEQ_CTRL_1_##member##_SHIFT) + +#define SSS_CLEAR_AEQ_CTRL_1(val, member) \ + ((val) & (~(SSS_AEQ_CTRL_1_##member##_MASK << \ + SSS_AEQ_CTRL_1_##member##_SHIFT))) + +#define SSS_ELEM_SIZE_IN_32B(aeq) (((aeq)->entry_size) >> 5) +#define SSS_SET_EQ_HW_E_SIZE(aeq) ((u32)ilog2(SSS_ELEM_SIZE_IN_32B(aeq))) + +#define SSS_AEQ_WQ_NAME "sss_eqs" + +#define SSS_AEQ_NAME "sss_aeq" + +#define SSS_AEQ_TO_INFO(eq) \ + container_of((eq) - (eq)->qid, struct sss_aeq_info, aeq[0]) + +#define SSS_AEQ_DMA_ATTR_DEF 0 + +enum sss_aeq_cb_state { + SSS_AEQ_HW_CB_REG = 0, + SSS_AEQ_HW_CB_RUNNING, + SSS_AEQ_SW_CB_REG, + SSS_AEQ_SW_CB_RUNNING, +}; + +static u32 aeq_depth = SSS_DEF_AEQ_DEPTH; +module_param(aeq_depth, uint, 0444); +MODULE_PARM_DESC(aeq_depth, + "aeq depth, valid range is " __stringify(SSS_MIN_AEQ_DEPTH) + " - " __stringify(SSS_MAX_AEQ_DEPTH)); + +static void sss_chip_set_aeq_intr(struct sss_eq *aeq) +{ + u32 val; + struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif; + + val = 
sss_chip_read_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR); + + val = SSS_CLEAR_AEQ_CTRL_0(val, INTR_ID) & + SSS_CLEAR_AEQ_CTRL_0(val, DMA_ATTR) & + SSS_CLEAR_AEQ_CTRL_0(val, PCI_INTF_ID) & + SSS_CLEAR_AEQ_CTRL_0(val, INTR_MODE); + + val |= SSS_SET_AEQ_CTRL_0(SSS_EQ_IRQ_ID(aeq), INTR_ID) | + SSS_SET_AEQ_CTRL_0(SSS_AEQ_DMA_ATTR_DEF, DMA_ATTR) | + SSS_SET_AEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) | + SSS_SET_AEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE); + + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR, val); +} + +static void sss_chip_set_aeq_size(struct sss_eq *aeq) +{ + u32 val; + struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif; + + val = SSS_SET_AEQ_CTRL_1(aeq->len, SIZE) | + SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_E_SIZE(aeq), ELEM_SIZE) | + SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_PAGE_SIZE(aeq), PAGE_SIZE); + + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, val); +} + +static u32 sss_chip_init_aeq_attr(void *aeq) +{ + sss_chip_set_aeq_intr(aeq); + sss_chip_set_aeq_size(aeq); + + return 0; +} + +static void sss_init_aeqe_desc(void *data) +{ + u32 i; + u32 init_val; + struct sss_aeq_elem *aeqe = NULL; + struct sss_eq *aeq = (struct sss_eq *)data; + + init_val = cpu_to_be32(SSS_EQ_WRAPPED(aeq)); + for (i = 0; i < aeq->len; i++) { + aeqe = SSS_GET_AEQ_ELEM(aeq, i); + aeqe->desc = init_val; + } + + /* write all aeq desc */ + wmb(); +} + +static irqreturn_t sss_aeq_intr_handle(int irq, void *data) +{ + struct sss_eq *aeq = (struct sss_eq *)data; + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + + sss_chip_clear_msix_resend_bit(aeq->hwdev, SSS_EQ_IRQ_ID(aeq), + SSS_EQ_MSIX_RESEND_TIMER_CLEAR); + + queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work); + + return IRQ_HANDLED; +} + +static void sss_aeq_event_handle(struct sss_eq *aeq, u32 desc) +{ + u32 size; + u32 event; + u8 data[SSS_AEQE_DATA_SIZE]; + enum sss_aeq_hw_event hw_event; + enum sss_aeq_sw_event sw_event; + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + struct sss_aeq_elem *aeqe; + + 
aeqe = SSS_GET_CUR_AEQ_ELEM(aeq); + hw_event = SSS_GET_EQE_DESC(desc, TYPE); + SSS_TO_HWDEV(aeq)->aeq_stat.cur_recv_cnt++; + + if (SSS_GET_EQE_DESC(desc, SRC)) { + event = hw_event; + sw_event = SSS_GET_AEQ_SW_EVENT(event); + + memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE); + sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE); + set_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]); + + if (aeq_info->sw_event_handler[sw_event] && + test_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[sw_event])) + aeq_info->sw_event_handler[sw_event](aeq_info->sw_event_data[sw_event], + hw_event, data); + + clear_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]); + + return; + } + + if (hw_event < SSS_AEQ_EVENT_MAX) { + memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE); + sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE); + + size = SSS_GET_EQE_DESC(desc, SIZE); + set_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]); + + if (aeq_info->hw_event_handler[hw_event] && + test_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[hw_event])) + aeq_info->hw_event_handler[hw_event](aeq_info->hw_event_data[hw_event], + data, size); + + clear_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]); + + return; + } + sdk_warn(SSS_TO_HWDEV(aeq)->dev_hdl, "Unknown aeq event %d\n", hw_event); +} + +static bool sss_aeq_irq_handle(struct sss_eq *aeq) +{ + struct sss_aeq_elem *elem = NULL; + u32 desc; + u32 i; + u32 eqe_cnt = 0; + + for (i = 0; i < SSS_TASK_PROCESS_EQE_LIMIT; i++) { + elem = SSS_GET_CUR_AEQ_ELEM(aeq); + + /* Data in HW is in Big endian Format */ + desc = be32_to_cpu(elem->desc); + + /* HW updates wrap bit, when it adds eq element event */ + if (SSS_GET_EQE_DESC(desc, WRAPPED) == aeq->wrap) + return false; + + dma_rmb(); + + sss_aeq_event_handle(aeq, desc); + + sss_increase_eq_ci(aeq); + + if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + sss_chip_set_eq_ci(aeq, SSS_EQ_NOT_ARMED); + } + } + + 
return true; +} + +static void sss_aeq_irq_work(struct work_struct *work) +{ + bool unfinish; + struct sss_eq *aeq = container_of(work, struct sss_eq, aeq_work); + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + + unfinish = sss_aeq_irq_handle(aeq); + sss_chip_set_eq_ci(aeq, SSS_EQ_ARM_STATE(unfinish)); + + if (unfinish) + queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work); +} + +static void sss_init_aeq_para(struct sss_eq *aeq, u16 qid) +{ + aeq->init_desc_handler = sss_init_aeqe_desc; + aeq->init_attr_handler = sss_chip_init_aeq_attr; + aeq->irq_handler = sss_aeq_intr_handle; + aeq->name = SSS_AEQ_NAME; + INIT_WORK(&aeq->aeq_work, sss_aeq_irq_work); + + aeq->qid = qid; + aeq->len = aeq_depth; + aeq->type = SSS_AEQ; + aeq->entry_size = SSS_AEQE_SIZE; +} + +static int sss_init_aeq(struct sss_hwdev *hwdev, + u16 aeq_num, struct sss_irq_desc *irq) +{ + u16 i; + u16 qid; + int ret; + struct sss_aeq_info *aeq_info = NULL; + + aeq_info = kzalloc(sizeof(*aeq_info), GFP_KERNEL); + if (!aeq_info) + return -ENOMEM; + + hwdev->aeq_info = aeq_info; + aeq_info->hwdev = hwdev; + aeq_info->num = aeq_num; + + aeq_info->workq = alloc_workqueue(SSS_AEQ_WQ_NAME, WQ_MEM_RECLAIM, SSS_MAX_AEQ); + if (!aeq_info->workq) { + ret = -ENOMEM; + sdk_err(hwdev->dev_hdl, "Fail to alloc aeq workqueue\n"); + goto alloc_workq_err; + } + + if (aeq_depth < SSS_MIN_AEQ_DEPTH || aeq_depth > SSS_MAX_AEQ_DEPTH) { + sdk_warn(hwdev->dev_hdl, "Invalid aeq_depth value %u, adjust to %d\n", + aeq_depth, SSS_DEF_AEQ_DEPTH); + aeq_depth = SSS_DEF_AEQ_DEPTH; + } + + for (qid = 0; qid < aeq_num; qid++) { + sss_init_aeq_para(&aeq_info->aeq[qid], qid); + ret = sss_init_eq(hwdev, &aeq_info->aeq[qid], &irq[qid]); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeq %u\n", qid); + goto init_aeq_err; + } + } + + for (qid = 0; qid < aeq_num; qid++) + sss_chip_set_msix_state(hwdev, irq[qid].msix_id, SSS_MSIX_ENABLE); + + return 0; + +init_aeq_err: + for (i = 0; i < qid; i++) + 
sss_deinit_eq(&aeq_info->aeq[i]); + + destroy_workqueue(aeq_info->workq); + +alloc_workq_err: + kfree(aeq_info); + hwdev->aeq_info = NULL; + + return ret; +} + +void sss_deinit_aeq(struct sss_hwdev *hwdev) +{ + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + enum sss_aeq_hw_event aeq_event; + enum sss_aeq_sw_event sw_aeq_event; + u16 qid; + + for (qid = 0; qid < aeq_info->num; qid++) + sss_deinit_eq(&aeq_info->aeq[qid]); + + for (sw_aeq_event = SSS_STL_EVENT; + sw_aeq_event < SSS_AEQ_SW_EVENT_MAX; sw_aeq_event++) + sss_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (aeq_event = SSS_HW_FROM_INT; + aeq_event < SSS_AEQ_EVENT_MAX; aeq_event++) + sss_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeq_info->workq); + + kfree(aeq_info); + hwdev->aeq_info = NULL; +} + +void sss_get_aeq_irq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 *irq_num) +{ + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + u16 qid; + + for (qid = 0; qid < aeq_info->num; qid++) { + irq_array[qid].irq_id = aeq_info->aeq[qid].irq_desc.irq_id; + irq_array[qid].msix_id = + aeq_info->aeq[qid].irq_desc.msix_id; + } + + *irq_num = aeq_info->num; +} + +void sss_dump_aeq_info(struct sss_hwdev *hwdev) +{ + struct sss_aeq_elem *aeqe = NULL; + struct sss_eq *aeq = NULL; + u32 addr; + u32 ci; + u32 pi; + u32 ctrl0; + u32 id; + int qid; + + for (qid = 0; qid < hwdev->aeq_info->num; qid++) { + aeq = &hwdev->aeq_info->aeq[qid]; + /* Indirect access should set qid first */ + sss_chip_write_reg(SSS_TO_HWDEV(aeq)->hwif, + SSS_EQ_INDIR_ID_ADDR(aeq->type), aeq->qid); + wmb(); /* make sure set qid firstly */ + + addr = SSS_CSR_AEQ_CTRL_0_ADDR; + ctrl0 = sss_chip_read_reg(hwdev->hwif, addr); + id = sss_chip_read_reg(hwdev->hwif, SSS_EQ_INDIR_ID_ADDR(aeq->type)); + + addr = SSS_EQ_CI_REG_ADDR(aeq); + ci = sss_chip_read_reg(hwdev->hwif, addr); + addr = SSS_EQ_PI_REG_ADDR(aeq); + pi = sss_chip_read_reg(hwdev->hwif, addr); + aeqe = SSS_GET_CUR_AEQ_ELEM(aeq); + sdk_err(hwdev->dev_hdl, 
+ "Aeq id: %d, id: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x swci:0x%x\n", + qid, id, ctrl0, ci, pi, work_busy(&aeq->aeq_work), + aeq->wrap, be32_to_cpu(aeqe->desc), aeq->ci); + } + + sss_dump_chip_err_info(hwdev); +} + +int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || !event_handler || event >= SSS_AEQ_EVENT_MAX) + return -EINVAL; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + aeq_info->hw_event_handler[event] = event_handler; + aeq_info->hw_event_data[event] = pri_handle; + set_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]); + + return 0; +} + +void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || event >= SSS_AEQ_EVENT_MAX) + return; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + clear_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]); + while (test_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + aeq_info->hw_event_handler[event] = NULL; +} + +int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum sss_aeq_sw_event event, + sss_aeq_sw_event_handler_t sw_event_handler) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || !sw_event_handler || event >= SSS_AEQ_SW_EVENT_MAX) + return -EINVAL; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + aeq_info->sw_event_handler[event] = sw_event_handler; + aeq_info->sw_event_data[event] = pri_handle; + set_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[event]); + + return 0; +} + +void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || event >= SSS_AEQ_SW_EVENT_MAX) + return; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + clear_bit(SSS_AEQ_SW_CB_REG, 
&aeq_info->sw_event_handler_state[event]); + while (test_bit(SSS_AEQ_SW_CB_RUNNING, + &aeq_info->sw_event_handler_state[event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + aeq_info->sw_event_handler[event] = NULL; +} + +int sss_hwif_init_aeq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 aeq_num; + u16 act_num = 0; + int ret; + struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0}; + + aeq_num = SSS_GET_HWIF_AEQ_NUM(hwdev->hwif); + if (aeq_num > SSS_MAX_AEQ) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %d\n", SSS_MAX_AEQ); + aeq_num = SSS_MAX_AEQ; + } + + act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array, aeq_num); + if (act_num == 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq, aeq_num: %u\n", aeq_num); + return -ENOMEM; + } + + if (act_num < aeq_num) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %u\n", act_num); + aeq_num = act_num; + } + + ret = sss_init_aeq(hwdev, aeq_num, irq_array); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeq\n"); + goto init_aeqs_err; + } + + return 0; + +init_aeqs_err: + for (i = 0; i < aeq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id); + + return ret; +} + +void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 irq_num; + struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0}; + + sss_get_aeq_irq(hwdev, irq_array, &irq_num); + + sss_deinit_aeq(hwdev); + + for (i = 0; i < irq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id); +} + +int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev) +{ + int i; + int ret; + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + struct sss_irq_cfg intr_info = {0}; + + sss_init_eq_intr_info(&intr_info); + + for (i = aeq_info->num - 1; i >= 0; i--) { + intr_info.msix_id = SSS_EQ_IRQ_ID(&aeq_info->aeq[i]); + ret = sss_chip_set_eq_msix_attr(hwdev, &intr_info, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set msix attr for aeq %d\n", i); + return -EFAULT; + } + } + + return 
0; +} + +u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data) +{ + struct sss_hwdev *hwdev = (struct sss_hwdev *)dev; + + if (!hwdev) + return 0; + + sdk_err(hwdev->dev_hdl, "Received ucode aeq event, type: 0x%x, data: 0x%llx\n", + aeq_event, *((u64 *)data)); + + if (aeq_event < SSS_ERR_MAX) + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[aeq_event]); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h new file mode 100644 index 0000000000000..105c8e9857231 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_AEQ_H +#define SSS_HWIF_AEQ_H + +#include "sss_hw_irq.h" +#include "sss_hw_aeq.h" +#include "sss_hwdev.h" +#include "sss_aeq_info.h" + +void sss_deinit_aeq(struct sss_hwdev *hwdev); +void sss_get_aeq_irq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 *irq_num); +void sss_dump_aeq_info(struct sss_hwdev *hwdev); +int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler); +void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event); +int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum sss_aeq_sw_event event, + sss_aeq_sw_event_handler_t sw_event_handler); +void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event); +int sss_hwif_init_aeq(struct sss_hwdev *hwdev); +void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev); +int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev); +u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c new file mode 100644 index 0000000000000..1c7c907dea313 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c @@ -0,0 +1,293 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +#define SSS_GET_REG_FLAG(reg) ((reg) & (~(SSS_CSR_FLAG_MASK))) +#define SSS_GET_REG_ADDR(reg) ((reg) & (SSS_CSR_FLAG_MASK)) + +#define SSS_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +#define SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val) ((val) & (~(1U << (host_id)))) +#define SSS_SET_SLAVE_HOST_STATUS(host_id, enable) (((u8)(enable) & 1U) << (host_id)) + +#define SSS_MULT_HOST_SLAVE_STATUS_ADDR (SSS_MGMT_FLAG + 0xDF30) + +u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg) +{ + if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG) + return be32_to_cpu(readl(hwif->mgmt_reg_base + + SSS_GET_REG_ADDR(reg))); + else + return be32_to_cpu(readl(hwif->cfg_reg_base + + SSS_GET_REG_ADDR(reg))); +} + +void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val) +{ + if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG) + writel(cpu_to_be32(val), + hwif->mgmt_reg_base + SSS_GET_REG_ADDR(reg)); + else + writel(cpu_to_be32(val), + hwif->cfg_reg_base + SSS_GET_REG_ADDR(reg)); +} + +bool sss_chip_get_present_state(void *hwdev) +{ + u32 val; + + val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR); + if (val == SSS_PCIE_LINK_DOWN) { + sdk_warn(SSS_TO_DEV(hwdev), "Card is not present\n"); + return false; + } + + return true; +} + +u32 sss_chip_get_pcie_link_status(void *hwdev) +{ + u32 val; + + if (!hwdev) + return SSS_PCIE_LINK_DOWN; + + val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR); + if (val == SSS_PCIE_LINK_DOWN) + return val; + + return !SSS_GET_AF1(val, MGMT_INIT_STATUS); +} + +void sss_chip_set_pf_status(struct sss_hwif *hwif, + enum sss_pf_status status) +{ + u32 val; + + if (SSS_GET_HWIF_FUNC_TYPE(hwif) == 
SSS_FUNC_TYPE_VF) + return; + + val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + val = SSS_CLEAR_AF6(val, PF_STATUS); + val |= SSS_SET_AF6(status, PF_STATUS); + + sss_chip_write_reg(hwif, SSS_CSR_HW_ATTR6_ADDR, val); +} + +enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif) +{ + u32 val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + + return SSS_GET_AF6(val, PF_STATUS); +} + +void sss_chip_enable_doorbell(struct sss_hwif *hwif) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_HW_ATTR4_ADDR; + val = sss_chip_read_reg(hwif, addr); + + val = SSS_CLEAR_AF4(val, DOORBELL_CTRL); + val |= SSS_SET_AF4(DB_ENABLE, DOORBELL_CTRL); + + sss_chip_write_reg(hwif, addr, val); +} + +void sss_chip_disable_doorbell(struct sss_hwif *hwif) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_HW_ATTR4_ADDR; + val = sss_chip_read_reg(hwif, addr); + + val = SSS_CLEAR_AF4(val, DOORBELL_CTRL); + val |= SSS_SET_AF4(DB_DISABLE, DOORBELL_CTRL); + + sss_chip_write_reg(hwif, addr, val); +} + +void sss_free_db_id(struct sss_hwif *hwif, u32 id) +{ + struct sss_db_pool *pool = &hwif->db_pool; + + if (id >= pool->bit_size) + return; + + spin_lock(&pool->id_lock); + clear_bit((int)id, pool->bitmap); + spin_unlock(&pool->id_lock); +} + +int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id) +{ + struct sss_db_pool *pool = &hwif->db_pool; + u32 pg_id; + + spin_lock(&pool->id_lock); + pg_id = (u32)find_first_zero_bit(pool->bitmap, pool->bit_size); + if (pg_id == pool->bit_size) { + spin_unlock(&pool->id_lock); + return -ENOMEM; + } + set_bit(pg_id, pool->bitmap); + spin_unlock(&pool->id_lock); + + *id = pg_id; + + return 0; +} + +void sss_dump_chip_err_info(struct sss_hwdev *hwdev) +{ + u32 value; + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return; + + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_BASE_INFO_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip base info: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_MGMT_HEALTH_STATUS_ADDR); + sdk_warn(hwdev->dev_hdl, "Mgmt 
CPU health status: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status0: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status1: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info0: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info1: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO2_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info2: 0x%08x\n", value); +} + +u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id) +{ + u32 addr; + u32 val; + + if (!hwdev) + return 0; + + addr = SSS_CSR_FUNC_PPF_ELECT(host_id); + val = sss_chip_read_reg(hwdev->hwif, addr); + + return SSS_GET_PPF_ELECT_PORT(val, ID); +} + +static void sss_init_eq_msix_cfg(void *hwdev, + struct sss_cmd_msix_config *cmd_msix, + struct sss_irq_cfg *info) +{ + cmd_msix->opcode = SSS_MGMT_MSG_SET_CMD; + cmd_msix->func_id = sss_get_global_func_id(hwdev); + cmd_msix->msix_index = (u16)info->msix_id; + cmd_msix->lli_credit_cnt = info->lli_credit; + cmd_msix->lli_timer_cnt = info->lli_timer; + cmd_msix->pending_cnt = info->pending; + cmd_msix->coalesce_timer_cnt = info->coalesc_timer; + cmd_msix->resend_timer_cnt = info->resend_timer; +} + +int sss_chip_set_eq_msix_attr(void *hwdev, + struct sss_irq_cfg *intr_info, u16 ch) +{ + int ret; + struct sss_cmd_msix_config cmd_msix = {0}; + u16 out_len = sizeof(cmd_msix); + + sss_init_eq_msix_cfg(hwdev, &cmd_msix, intr_info); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, ch); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set eq msix cfg, ret: %d, status: 0x%x, out_len: 
0x%x, ch: 0x%x\n", + ret, cmd_msix.head.state, out_len, ch); + return -EINVAL; + } + + return 0; +} + +int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size) +{ + int ret; + struct sss_cmd_wq_page_size cmd_page = {0}; + u16 out_len = sizeof(cmd_page); + + cmd_page.opcode = SSS_MGMT_MSG_SET_CMD; + cmd_page.func_id = func_id; + cmd_page.page_size = SSS_PAGE_SIZE_HW(page_size); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CFG_PAGESIZE, + &cmd_page, sizeof(cmd_page), &cmd_page, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_page)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set wq page size, ret: %d, status: 0x%x, out_len: 0x%0x\n", + ret, cmd_page.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid, + u32 attr0, u32 attr1) +{ + int ret; + struct sss_cmd_ceq_ctrl_reg cmd_ceq = {0}; + u16 out_len = sizeof(cmd_ceq); + + cmd_ceq.func_id = sss_get_global_func_id(hwdev); + cmd_ceq.qid = qid; + cmd_ceq.ctrl0 = attr0; + cmd_ceq.ctrl1 = attr1; + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + &cmd_ceq, sizeof(cmd_ceq), &cmd_ceq, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ceq)) { + sdk_err(hwdev->dev_hdl, + "Fail to set ceq %u ctrl, ret: %d status: 0x%x, out_len: 0x%x\n", + qid, ret, cmd_ceq.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +void sss_chip_set_slave_host_status(void *dev, u8 host_id, bool enable) +{ + u32 val; + struct sss_hwdev *hwdev = dev; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + val = sss_chip_read_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR); + val = SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val); + val |= SSS_SET_SLAVE_HOST_STATUS(host_id, !!enable); + + sss_chip_write_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR, val); + + sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n", + host_id, enable, val); +} diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h new file mode 100644 index 0000000000000..f299bf0fa6d90 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_API_H +#define SSS_HWIF_API_H + +#include "sss_hwdev.h" + +enum sss_pf_status { + SSS_PF_STATUS_INIT = 0X0, + SSS_PF_STATUS_ACTIVE_FLAG = 0x11, + SSS_PF_STATUS_FLR_START_FLAG = 0x12, + SSS_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +enum sss_doorbell_ctrl { + DB_ENABLE, + DB_DISABLE, +}; + +enum sss_outbound_ctrl { + OUTBOUND_ENABLE, + OUTBOUND_DISABLE, +}; + +#define SSS_PCIE_LINK_DOWN 0xFFFFFFFF +#define SSS_PCIE_LINK_UP 0 + +#define SSS_AF1_PPF_ID_SHIFT 0 +#define SSS_AF1_AEQ_PER_FUNC_SHIFT 8 +#define SSS_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define SSS_AF1_PF_INIT_STATUS_SHIFT 31 + +#define SSS_AF1_PPF_ID_MASK 0x3F +#define SSS_AF1_AEQ_PER_FUNC_MASK 0x3 +#define SSS_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define SSS_AF1_PF_INIT_STATUS_MASK 0x1 + +#define SSS_GET_AF1(val, member) \ + (((val) >> SSS_AF1_##member##_SHIFT) & SSS_AF1_##member##_MASK) + +#define SSS_AF4_DOORBELL_CTRL_SHIFT 0 +#define SSS_AF4_DOORBELL_CTRL_MASK 0x1 + +#define SSS_GET_AF4(val, member) \ + (((val) >> SSS_AF4_##member##_SHIFT) & SSS_AF4_##member##_MASK) + +#define SSS_SET_AF4(val, member) \ + (((val) & SSS_AF4_##member##_MASK) << SSS_AF4_##member##_SHIFT) + +#define SSS_CLEAR_AF4(val, member) \ + ((val) & (~(SSS_AF4_##member##_MASK << SSS_AF4_##member##_SHIFT))) + +#define SSS_AF6_PF_STATUS_SHIFT 0 +#define SSS_AF6_PF_STATUS_MASK 0xFFFF + +#define SSS_AF6_FUNC_MAX_SQ_SHIFT 23 +#define SSS_AF6_FUNC_MAX_SQ_MASK 0x1FF + +#define SSS_AF6_MSIX_FLEX_EN_SHIFT 22 +#define SSS_AF6_MSIX_FLEX_EN_MASK 0x1 + +#define SSS_SET_AF6(val, member) \ + ((((u32)(val)) & SSS_AF6_##member##_MASK) << \ + SSS_AF6_##member##_SHIFT) + +#define SSS_GET_AF6(val, member) \ 
+ (((u32)(val) >> SSS_AF6_##member##_SHIFT) & SSS_AF6_##member##_MASK) + +#define SSS_CLEAR_AF6(val, member) \ + ((u32)(val) & (~(SSS_AF6_##member##_MASK << \ + SSS_AF6_##member##_SHIFT))) + +#define SSS_PPF_ELECT_PORT_ID_SHIFT 0 + +#define SSS_PPF_ELECT_PORT_ID_MASK 0x3F + +#define SSS_GET_PPF_ELECT_PORT(val, member) \ + (((val) >> SSS_PPF_ELECT_PORT_##member##_SHIFT) & \ + SSS_PPF_ELECT_PORT_##member##_MASK) + +#define SSS_PPF_ELECTION_ID_SHIFT 0 + +#define SSS_PPF_ELECTION_ID_MASK 0x3F + +#define SSS_SET_PPF(val, member) \ + (((val) & SSS_PPF_ELECTION_##member##_MASK) << \ + SSS_PPF_ELECTION_##member##_SHIFT) + +#define SSS_GET_PPF(val, member) \ + (((val) >> SSS_PPF_ELECTION_##member##_SHIFT) & \ + SSS_PPF_ELECTION_##member##_MASK) + +#define SSS_CLEAR_PPF(val, member) \ + ((val) & (~(SSS_PPF_ELECTION_##member##_MASK << \ + SSS_PPF_ELECTION_##member##_SHIFT))) + +#define SSS_DB_DWQE_SIZE 0x00400000 + +/* db/dwqe page size: 4K */ +#define SSS_DB_PAGE_SIZE 0x00001000ULL +#define SSS_DWQE_OFFSET 0x00000800ULL + +#define SSS_DB_MAX_AREAS (SSS_DB_DWQE_SIZE / SSS_DB_PAGE_SIZE) + +#define SSS_DB_ID(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / SSS_DB_PAGE_SIZE)) + +u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg); +void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val); +bool sss_chip_get_present_state(void *hwdev); +u32 sss_chip_get_pcie_link_status(void *hwdev); +void sss_chip_set_pf_status(struct sss_hwif *hwif, enum sss_pf_status status); +enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif); +void sss_chip_enable_doorbell(struct sss_hwif *hwif); +void sss_chip_disable_doorbell(struct sss_hwif *hwif); +int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id); +void sss_free_db_id(struct sss_hwif *hwif, u32 id); +void sss_dump_chip_err_info(struct sss_hwdev *hwdev); +u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id); +int sss_chip_set_eq_msix_attr(void *hwdev, struct sss_irq_cfg *info, u16 channel); +int 
sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size); +int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid, + u32 attr0, u32 attr1); +void sss_chip_set_slave_host_status(void *hwdev, u8 host_id, bool enable); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c new file mode 100644 index 0000000000000..ffc3d4bdb4292 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_eq_info.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_hw_ceq.h" +#include "sss_hw_export.h" +#include "sss_hwif_ceq.h" +#include "sss_hw_common.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +#define SSS_DEF_CEQ_DEPTH 8192 + +#define SSS_CEQ_NAME "sss_ceq" + +#define SSS_CEQ_CTRL_0_INTR_ID_SHIFT 0 +#define SSS_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define SSS_CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define SSS_CEQ_CTRL_0_PCI_INTF_ID_SHIFT 24 +#define SSS_CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 +#define SSS_CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define SSS_CEQ_CTRL_0_INTR_ID_MASK 0x3FFU +#define SSS_CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define SSS_CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define SSS_CEQ_CTRL_0_PCI_INTF_ID_MASK 0x3U +#define SSS_CEQ_CTRL_0_PAGE_SIZE_MASK 0xF +#define SSS_CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define SSS_SET_CEQ_CTRL_0(val, member) \ + (((val) & SSS_CEQ_CTRL_0_##member##_MASK) << \ + SSS_CEQ_CTRL_0_##member##_SHIFT) + +#define SSS_CEQ_CTRL_1_LEN_SHIFT 0 +#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 + +#define SSS_CEQ_CTRL_1_LEN_MASK 0xFFFFFU +#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU + +#define SSS_SET_CEQ_CTRL_1(val, member) \ + (((val) & SSS_CEQ_CTRL_1_##member##_MASK) << \ + 
SSS_CEQ_CTRL_1_##member##_SHIFT) + +#define SSS_CEQ_DMA_ATTR_DEF 0 + +#define SSS_MIN_CEQ_DEPTH 64 +#define SSS_MAX_CEQ_DEPTH \ + ((SSS_MAX_EQ_PAGE_SIZE / SSS_CEQE_SIZE) * SSS_CEQ_MAX_PAGE) + +#define SSS_GET_CEQ_ELEM(ceq, id) ((u32 *)SSS_GET_EQ_ELEM((ceq), (id))) + +#define SSS_GET_CUR_CEQ_ELEM(ceq) SSS_GET_CEQ_ELEM((ceq), (ceq)->ci) + +#define SSS_CEQE_TYPE_SHIFT 23 +#define SSS_CEQE_TYPE_MASK 0x7 + +#define SSS_CEQE_TYPE(type) \ + (((type) >> SSS_CEQE_TYPE_SHIFT) & SSS_CEQE_TYPE_MASK) + +#define SSS_CEQE_DATA_MASK 0x3FFFFFF +#define SSS_CEQE_DATA(data) ((data) & SSS_CEQE_DATA_MASK) + +#define SSS_CEQ_TO_INFO(eq) \ + container_of((eq) - (eq)->qid, struct sss_ceq_info, ceq[0]) + +#define CEQ_LMT_KICK_DEF 0 + +enum sss_ceq_cb_state { + SSS_CEQ_CB_REG = 0, + SSS_CEQ_CB_RUNNING, +}; + +static u32 ceq_depth = SSS_DEF_CEQ_DEPTH; +module_param(ceq_depth, uint, 0444); +MODULE_PARM_DESC(ceq_depth, + "ceq depth, valid range is " __stringify(SSS_MIN_CEQ_DEPTH) + " - " __stringify(SSS_MAX_CEQ_DEPTH)); + +static u32 tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT; +module_param(tasklet_depth, uint, 0444); +MODULE_PARM_DESC(tasklet_depth, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +void sss_init_ceqe_desc(void *data) +{ + u32 i; + u32 init_val; + u32 *ceqe = NULL; + struct sss_eq *ceq = (struct sss_eq *)data; + + init_val = cpu_to_be32(SSS_EQ_WRAPPED(ceq)); + for (i = 0; i < ceq->len; i++) { + ceqe = SSS_GET_CEQ_ELEM(ceq, i); + *(ceqe) = init_val; + } + + /* write all ceq desc */ + wmb(); +} + +static u32 sss_chip_init_ceq_attr(void *data) +{ + u32 val; + u32 len; + struct sss_eq *ceq = (struct sss_eq *)data; + struct sss_hwif *hwif = SSS_TO_HWDEV(ceq)->hwif; + + val = SSS_SET_CEQ_CTRL_0(SSS_EQ_IRQ_ID(ceq), INTR_ID) | + SSS_SET_CEQ_CTRL_0(SSS_CEQ_DMA_ATTR_DEF, DMA_ATTR) | + SSS_SET_CEQ_CTRL_0(CEQ_LMT_KICK_DEF, LIMIT_KICK) | + SSS_SET_CEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) | + SSS_SET_CEQ_CTRL_0(SSS_SET_EQ_HW_PAGE_SIZE(ceq), 
PAGE_SIZE) | + SSS_SET_CEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE); + len = SSS_SET_CEQ_CTRL_1(ceq->len, LEN); + + return sss_chip_set_ceq_attr(SSS_TO_HWDEV(ceq), ceq->qid, val, len); +} + +irqreturn_t sss_ceq_intr_handle(int irq, void *data) +{ + struct sss_eq *ceq = (struct sss_eq *)data; + + ceq->hw_intr_jiffies = jiffies; + + sss_chip_clear_msix_resend_bit(ceq->hwdev, SSS_EQ_IRQ_ID(ceq), + SSS_EQ_MSIX_RESEND_TIMER_CLEAR); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +static void sss_ceqe_handler(struct sss_eq *ceq, u32 ceqe) +{ + u32 ceqe_data = SSS_CEQE_DATA(ceqe); + enum sss_ceq_event ceq_event = SSS_CEQE_TYPE(ceqe); + struct sss_ceq_info *ceq_info = SSS_CEQ_TO_INFO(ceq); + + if (ceq_event >= SSS_CEQ_EVENT_MAX) { + sdk_err(SSS_TO_HWDEV(ceq)->dev_hdl, "Unknown ceq_event:%d, ceqe_data: 0x%x\n", + ceq_event, ceqe_data); + return; + } + + set_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]); + + if (ceq_info->event_handler[ceq_event] && + test_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event])) + ceq_info->event_handler[ceq_event](ceq_info->event_handler_data[ceq_event], + ceqe_data); + + clear_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]); +} + +static bool sss_ceq_irq_handle(struct sss_eq *ceq) +{ + u32 elem; + u32 eqe_cnt = 0; + u32 i; + + for (i = 0; i < tasklet_depth; i++) { + elem = *(SSS_GET_CUR_CEQ_ELEM(ceq)); + elem = be32_to_cpu(elem); + + /* HW updates wrap bit, when it adds eq element event */ + if (SSS_GET_EQE_DESC(elem, WRAPPED) == ceq->wrap) + return false; + + sss_ceqe_handler(ceq, elem); + + sss_increase_eq_ci(ceq); + + if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + sss_chip_set_eq_ci(ceq, SSS_EQ_NOT_ARMED); + } + } + + return true; +} + +static void sss_ceq_tasklet(ulong ceq_data) +{ + bool unfinish; + struct sss_eq *ceq = (struct sss_eq *)ceq_data; + + ceq->sw_intr_jiffies = jiffies; + unfinish = sss_ceq_irq_handle(ceq); + sss_chip_set_eq_ci(ceq, 
SSS_EQ_ARM_STATE(unfinish)); + + if (unfinish) + tasklet_schedule(&ceq->ceq_tasklet); +} + +static void sss_init_ceq_para(struct sss_eq *ceq, u16 qid) +{ + ceq->init_desc_handler = sss_init_ceqe_desc; + ceq->init_attr_handler = sss_chip_init_ceq_attr; + ceq->irq_handler = sss_ceq_intr_handle; + ceq->name = SSS_CEQ_NAME; + tasklet_init(&ceq->ceq_tasklet, sss_ceq_tasklet, (ulong)ceq); + + ceq->qid = qid; + ceq->len = ceq_depth; + ceq->type = SSS_CEQ; + ceq->entry_size = SSS_CEQE_SIZE; +} + +static int sss_init_ceq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 irq_num) +{ + u16 i; + u16 qid; + int ret; + struct sss_ceq_info *ceq_info = NULL; + + ceq_info = kzalloc(sizeof(*ceq_info), GFP_KERNEL); + if (!ceq_info) + return -ENOMEM; + + ceq_info->hwdev = hwdev; + ceq_info->num = irq_num; + hwdev->ceq_info = ceq_info; + + if (tasklet_depth == 0) { + sdk_warn(hwdev->dev_hdl, + "Invalid tasklet_depth can not be zero, adjust to %d\n", + SSS_TASK_PROCESS_EQE_LIMIT); + tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT; + } + + if (ceq_depth < SSS_MIN_CEQ_DEPTH || ceq_depth > SSS_MAX_CEQ_DEPTH) { + sdk_warn(hwdev->dev_hdl, + "Invalid ceq_depth %u out of range, adjust to %d\n", + ceq_depth, SSS_DEF_CEQ_DEPTH); + ceq_depth = SSS_DEF_CEQ_DEPTH; + } + + for (qid = 0; qid < irq_num; qid++) { + sss_init_ceq_para(&ceq_info->ceq[qid], qid); + ret = sss_init_eq(hwdev, &ceq_info->ceq[qid], &irq_array[qid]); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq %u\n", qid); + goto init_ceq_err; + } + } + + for (qid = 0; qid < irq_num; qid++) + sss_chip_set_msix_state(hwdev, irq_array[qid].msix_id, SSS_MSIX_ENABLE); + + return 0; + +init_ceq_err: + for (i = 0; i < qid; i++) + sss_deinit_eq(&ceq_info->ceq[i]); + + kfree(ceq_info); + hwdev->ceq_info = NULL; + + return ret; +} + +static void sss_get_ceq_irq(struct sss_hwdev *hwdev, struct sss_irq_desc *irq, + u16 *irq_num) +{ + u16 i; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + + for (i = 0; i < ceq_info->num; i++) 
{ + irq[i].msix_id = ceq_info->ceq[i].irq_desc.msix_id; + irq[i].irq_id = ceq_info->ceq[i].irq_desc.irq_id; + } + + *irq_num = ceq_info->num; +} + +int sss_hwif_init_ceq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 ceq_num; + u16 act_num = 0; + int ret; + struct sss_irq_desc irq_desc[SSS_MAX_CEQ] = {0}; + + ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif); + if (ceq_num > SSS_MAX_CEQ) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", SSS_MAX_CEQ); + ceq_num = SSS_MAX_CEQ; + } + + act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc, ceq_num); + if (act_num == 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq, ceq_num: %u\n", ceq_num); + return -EINVAL; + } + + if (act_num < ceq_num) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", act_num); + ceq_num = act_num; + } + + ret = sss_init_ceq(hwdev, irq_desc, ceq_num); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq, ret:%d\n", ret); + goto init_ceq_err; + } + + return 0; + +init_ceq_err: + for (i = 0; i < act_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc[i].irq_id); + + return ret; +} + +static void sss_deinit_ceq(struct sss_hwdev *hwdev) +{ + u16 i; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + enum sss_ceq_event event; + + for (i = 0; i < ceq_info->num; i++) + sss_deinit_eq(&ceq_info->ceq[i]); + + for (event = SSS_NIC_CTRLQ; event < SSS_CEQ_EVENT_MAX; event++) + sss_ceq_unregister_cb(hwdev, event); + + kfree(ceq_info); + hwdev->ceq_info = NULL; +} + +void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev) +{ + int i; + u16 irq_num = 0; + struct sss_irq_desc irq[SSS_MAX_CEQ] = {0}; + + sss_get_ceq_irq(hwdev, irq, &irq_num); + + sss_deinit_ceq(hwdev); + + for (i = 0; i < irq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq[i].irq_id); +} + +void sss_dump_ceq_info(struct sss_hwdev *hwdev) +{ + struct sss_eq *ceq_info = NULL; + u32 addr; + u32 ci; + u32 pi; + int qid; + + for (qid = 0; qid < hwdev->ceq_info->num; qid++) { + ceq_info = &hwdev->ceq_info->ceq[qid]; + 
/* Indirect access should set qid first */ + sss_chip_write_reg(SSS_TO_HWDEV(ceq_info)->hwif, + SSS_EQ_INDIR_ID_ADDR(ceq_info->type), ceq_info->qid); + wmb(); /* make sure set qid firstly */ + + addr = SSS_EQ_CI_REG_ADDR(ceq_info); + ci = sss_chip_read_reg(hwdev->hwif, addr); + addr = SSS_EQ_PI_REG_ADDR(ceq_info); + pi = sss_chip_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, + "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", + qid, ci, ceq_info->ci, pi, tasklet_state(&ceq_info->ceq_tasklet), + ceq_info->wrap, be32_to_cpu(*(SSS_GET_CUR_CEQ_ELEM(ceq_info)))); + + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - ceq_info->hw_intr_jiffies)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - ceq_info->sw_intr_jiffies)); + } + + sss_dump_chip_err_info(hwdev); +} + +int sss_ceq_register_cb(void *hwdev, void *data, + enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler) +{ + struct sss_ceq_info *ceq_info = NULL; + + if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX) + return -EINVAL; + + ceq_info = SSS_TO_CEQ_INFO(hwdev); + ceq_info->event_handler_data[ceq_event] = data; + ceq_info->event_handler[ceq_event] = event_handler; + set_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]); + + return 0; +} + +void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event) +{ + struct sss_ceq_info *ceq_info = NULL; + + if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX) + return; + + ceq_info = SSS_TO_CEQ_INFO(hwdev); + clear_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]); + while (test_bit(SSS_CEQ_CB_RUNNING, + &ceq_info->event_handler_state[ceq_event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + ceq_info->event_handler[ceq_event] = NULL; +} + +int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev) +{ + u16 i; + int ret; + struct sss_ceq_info *ceq_info = 
hwdev->ceq_info; + struct sss_irq_cfg intr_info = {0}; + + sss_init_eq_intr_info(&intr_info); + + for (i = 0; i < ceq_info->num; i++) { + intr_info.msix_id = SSS_EQ_IRQ_ID(&ceq_info->ceq[i]); + ret = sss_chip_set_msix_attr(hwdev, intr_info, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set msix attr for ceq %u\n", i); + return -EFAULT; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h new file mode 100644 index 0000000000000..29e65016b1170 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CEQ_H +#define SSS_HWIF_CEQ_H + +#include "sss_hw_ceq.h" +#include "sss_ceq_info.h" +#include "sss_hwdev.h" + +int sss_ceq_register_cb(void *hwdev, void *data, + enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler); +void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event); +int sss_hwif_init_ceq(struct sss_hwdev *hwdev); +void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev); +void sss_dump_ceq_info(struct sss_hwdev *hwdev); +int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c new file mode 100644 index 0000000000000..43386b7984b9d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c @@ -0,0 +1,928 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_hwif_aeq.h" 
+#include "sss_hwif_ceq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_CMD_TIMEOUT 5000 /* millisecond */ + +#define SSS_CTRLQ_WQE_HEAD_LEN 32 + +#define SSS_HI_8_BITS(data) (((data) >> 8) & 0xFF) +#define SSS_LO_8_BITS(data) ((data) & 0xFF) + +#define SSS_CTRLQ_DB_INFO_HI_PI_SHIFT 0 +#define SSS_CTRLQ_DB_INFO_HI_PI_MASK 0xFFU +#define SSS_CTRLQ_DB_INFO_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_DB_INFO_##member##_MASK) << \ + SSS_CTRLQ_DB_INFO_##member##_SHIFT) + +#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 +#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_SHIFT 24 +#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_SHIFT 27 +#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U +#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_MASK 0x7U +#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_MASK 0x1FU +#define SSS_CTRLQ_DB_HEAD_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_DB_HEAD_##member##_MASK) << \ + SSS_CTRLQ_DB_HEAD_##member##_SHIFT) + +#define SSS_CTRLQ_CTRL_PI_SHIFT 0 +#define SSS_CTRLQ_CTRL_CMD_SHIFT 16 +#define SSS_CTRLQ_CTRL_MOD_SHIFT 24 +#define SSS_CTRLQ_CTRL_ACK_TYPE_SHIFT 29 +#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define SSS_CTRLQ_CTRL_PI_MASK 0xFFFFU +#define SSS_CTRLQ_CTRL_CMD_MASK 0xFFU +#define SSS_CTRLQ_CTRL_MOD_MASK 0x1FU +#define SSS_CTRLQ_CTRL_ACK_TYPE_MASK 0x3U +#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define SSS_CTRLQ_CTRL_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_CTRL_##member##_MASK) << \ + SSS_CTRLQ_CTRL_##member##_SHIFT) + +#define SSS_CTRLQ_CTRL_GET(val, member) \ + (((val) >> SSS_CTRLQ_CTRL_##member##_SHIFT) & \ + SSS_CTRLQ_CTRL_##member##_MASK) + +#define SSS_CTRLQ_WQE_HEAD_BD_LEN_SHIFT 0 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_SHIFT 15 +#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_SHIFT 22 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_SHIFT 23 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_SHIFT 27 +#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_SHIFT 29 +#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_SHIFT 31 + +#define SSS_CTRLQ_WQE_HEAD_BD_LEN_MASK 0xFFU +#define 
SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_MASK 0x3U +#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_MASK 0x3U +#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_MASK 0x1U + +#define SSS_CTRLQ_WQE_HEAD_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_WQE_HEAD_##member##_MASK) << \ + SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) + +#define SSS_GET_CTRLQ_WQE_HEAD(val, member) \ + (((val) >> SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) & \ + SSS_CTRLQ_WQE_HEAD_##member##_MASK) + +#define SSS_STORE_DATA_ARM_SHIFT 31 + +#define SSS_STORE_DATA_ARM_MASK 0x1U + +#define SSS_STORE_DATA_SET(val, member) \ + (((val) & SSS_STORE_DATA_##member##_MASK) << \ + SSS_STORE_DATA_##member##_SHIFT) + +#define SSS_STORE_DATA_CLEAR(val, member) \ + ((val) & (~(SSS_STORE_DATA_##member##_MASK << \ + SSS_STORE_DATA_##member##_SHIFT))) + +#define SSS_WQE_ERRCODE_VAL_SHIFT 0 + +#define SSS_WQE_ERRCODE_VAL_MASK 0x7FFFFFFF + +#define SSS_GET_WQE_ERRCODE(val, member) \ + (((val) >> SSS_WQE_ERRCODE_##member##_SHIFT) & \ + SSS_WQE_ERRCODE_##member##_MASK) + +#define SSS_CEQE_CTRLQ_TYPE_SHIFT 0 + +#define SSS_CEQE_CTRLQ_TYPE_MASK 0x7 + +#define SSS_GET_CEQE_CTRLQ(val, member) \ + (((val) >> SSS_CEQE_CTRLQ_##member##_SHIFT) & \ + SSS_CEQE_CTRLQ_##member##_MASK) + +#define SSS_WQE_COMPLETE(ctrl_info) SSS_CTRLQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define SSS_WQE_HEAD(wqe) ((struct sss_ctrlq_head *)(wqe)) + +#define SSS_CTRLQ_DB_PI_OFF(pi) (((u16)SSS_LO_8_BITS(pi)) << 3) + +#define SSS_CTRLQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base)) + SSS_CTRLQ_DB_PI_OFF(pi)) + +#define SSS_FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define SSS_WQE_LCMD_SIZE 64 +#define SSS_WQE_SCMD_SIZE 64 + +#define SSS_COMPLETE_LEN 3 + +#define SSS_CTRLQ_WQE_SIZE 64 + +#define SSS_CTRLQ_TO_INFO(ctrlq) \ + container_of((ctrlq) - (ctrlq)->ctrlq_type, struct sss_ctrlq_info, ctrlq[0]) + +#define SSS_CTRLQ_COMPLETE_CODE 11 + +enum 
SSS_ctrlq_scmd_type { + SSS_CTRLQ_SET_ARM_CMD = 2, +}; + +enum sss_ctrl_sect_len { + SSS_CTRL_SECT_LEN = 1, + SSS_CTRL_DIRECT_SECT_LEN = 2, +}; + +enum sss_bd_len { + SSS_BD_LCMD_LEN = 2, + SSS_BD_SCMD_LEN = 3, +}; + +enum sss_data_fmt { + SSS_DATA_SGE, + SSS_DATA_DIRECT, +}; + +enum sss_completion_fmt { + SSS_COMPLETE_DIRECT, + SSS_COMPLETE_SGE, +}; + +enum sss_completion_request { + SSS_CEQ_SET = 1, +}; + +enum sss_ctrlq_comm_msg_type { + SSS_SYNC_MSG_DIRECT_REPLY, + SSS_SYNC_MSG_SGE_REPLY, + SSS_ASYNC_MSG, +}; + +#define SSS_SCMD_DATA_LEN 16 + +enum sss_db_src_type { + SSS_DB_SRC_CTRLQ_TYPE, + SSS_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum sss_ctrlq_db_type { + SSS_DB_SQ_RQ_TYPE, + SSS_DB_CTRLQ_TYPE, +}; + +struct sss_ctrlq_db { + u32 head; + u32 info; +}; + +/* hardware define: ctrlq wqe */ +struct sss_ctrlq_head { + u32 info; + u32 store_data; +}; + +struct sss_scmd_bd { + u32 data_len; + u32 rsvd; + u8 data[SSS_SCMD_DATA_LEN]; +}; + +struct sss_lcmd_bd { + struct sss_sge sge; + u32 rsvd1; + u64 store_async_buf; + u64 rsvd3; +}; + +struct sss_wqe_state { + u32 info; +}; + +struct sss_wqe_ctrl { + u32 info; +}; + +struct sss_sge_reply { + struct sss_sge sge; + u32 rsvd; +}; + +struct sss_ctrlq_completion { + union { + struct sss_sge_reply sge_reply; + u64 direct_reply; + }; +}; + +struct sss_ctrlq_wqe_scmd { + struct sss_ctrlq_head head; + u64 rsvd; + struct sss_wqe_state state; + struct sss_wqe_ctrl ctrl; + struct sss_ctrlq_completion completion; + struct sss_scmd_bd bd; +}; + +struct sss_ctrlq_wqe_lcmd { + struct sss_ctrlq_head head; + struct sss_wqe_state state; + struct sss_wqe_ctrl ctrl; + struct sss_ctrlq_completion completion; + struct sss_lcmd_bd bd; +}; + +struct sss_ctrlq_inline_wqe { + struct sss_ctrlq_wqe_scmd wqe_scmd; +}; + +struct sss_ctrlq_wqe { + union { + struct sss_ctrlq_inline_wqe inline_wqe; + struct sss_ctrlq_wqe_lcmd wqe_lcmd; + }; +}; + +typedef int (*sss_ctrlq_type_handler_t)(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci); + +void 
*sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci) +{ + if (sss_wq_is_empty(wq)) + return NULL; + + return sss_wq_read_one_wqebb(wq, ci); +} + +static void *sss_ctrlq_get_wqe(struct sss_wq *wq, u16 *pi) +{ + if (!sss_wq_free_wqebb(wq)) + return NULL; + + return sss_wq_get_one_wqebb(wq, pi); +} + +static void sss_ctrlq_set_completion(struct sss_ctrlq_completion *complete, + struct sss_ctrl_msg_buf *out_buf) +{ + struct sss_sge_reply *sge_reply = &complete->sge_reply; + + sss_set_sge(&sge_reply->sge, out_buf->dma_addr, SSS_CTRLQ_BUF_LEN); +} + +static void sss_ctrlq_set_lcmd_bufdesc(struct sss_ctrlq_wqe_lcmd *wqe, + struct sss_ctrl_msg_buf *in_buf) +{ + sss_set_sge(&wqe->bd.sge, in_buf->dma_addr, in_buf->size); +} + +static void sss_ctrlq_fill_db(struct sss_ctrlq_db *db, + enum sss_ctrlq_type ctrlq_type, u16 pi) +{ + db->info = SSS_CTRLQ_DB_INFO_SET(SSS_HI_8_BITS(pi), HI_PI); + + db->head = SSS_CTRLQ_DB_HEAD_SET(SSS_DB_CTRLQ_TYPE, QUEUE_TYPE) | + SSS_CTRLQ_DB_HEAD_SET(ctrlq_type, CTRLQ_TYPE) | + SSS_CTRLQ_DB_HEAD_SET(SSS_DB_SRC_CTRLQ_TYPE, SRC_TYPE); +} + +static void sss_ctrlq_set_db(struct sss_ctrlq *ctrlq, + enum sss_ctrlq_type ctrlq_type, u16 pi) +{ + struct sss_ctrlq_db db = {0}; + u8 *db_base = SSS_TO_HWDEV(ctrlq)->ctrlq_info->db_base; + + sss_ctrlq_fill_db(&db, ctrlq_type, pi); + + /* The data that is written to HW should be in Big Endian Format */ + db.info = sss_hw_be32(db.info); + db.head = sss_hw_be32(db.head); + + wmb(); /* make sure write db info to reg */ + writeq(*((u64 *)&db), SSS_CTRLQ_DB_ADDR(db_base, pi)); +} + +static void sss_ctrlq_fill_wqe(void *dst, const void *src) +{ + memcpy((u8 *)dst + SSS_FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + SSS_FIRST_DATA_TO_WRITE_LAST, + SSS_CTRLQ_WQE_SIZE - SSS_FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void sss_ctrlq_prepare_wqe_ctrl(struct sss_ctrlq_wqe *wqe, + int wrapped, u8 mod, u8 cmd, u16 pi, + enum sss_completion_fmt 
complete_fmt, + enum sss_data_fmt data_fmt, + enum sss_bd_len buf_len) +{ + struct sss_wqe_ctrl *ctrl = NULL; + enum sss_ctrl_sect_len ctrl_len; + struct sss_ctrlq_wqe_lcmd *wqe_lcmd = NULL; + struct sss_ctrlq_wqe_scmd *wqe_scmd = NULL; + u32 saved_data = SSS_WQE_HEAD(wqe)->store_data; + + if (data_fmt == SSS_DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->state.info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = SSS_CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->state.info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = SSS_CTRL_DIRECT_SECT_LEN; + } + + ctrl->info = SSS_CTRLQ_CTRL_SET(pi, PI) | + SSS_CTRLQ_CTRL_SET(cmd, CMD) | + SSS_CTRLQ_CTRL_SET(mod, MOD) | + SSS_CTRLQ_CTRL_SET(SSS_ACK_TYPE_CTRLQ, ACK_TYPE); + + SSS_WQE_HEAD(wqe)->info = + SSS_CTRLQ_WQE_HEAD_SET(buf_len, BD_LEN) | + SSS_CTRLQ_WQE_HEAD_SET(complete_fmt, COMPLETE_FMT) | + SSS_CTRLQ_WQE_HEAD_SET(data_fmt, DATA_FMT) | + SSS_CTRLQ_WQE_HEAD_SET(SSS_CEQ_SET, COMPLETE_REQ) | + SSS_CTRLQ_WQE_HEAD_SET(SSS_COMPLETE_LEN, COMPLETE_SECT_LEN) | + SSS_CTRLQ_WQE_HEAD_SET(ctrl_len, CTRL_LEN) | + SSS_CTRLQ_WQE_HEAD_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == SSS_CTRLQ_SET_ARM_CMD && mod == SSS_MOD_TYPE_COMM) { + saved_data &= SSS_STORE_DATA_CLEAR(saved_data, ARM); + SSS_WQE_HEAD(wqe)->store_data = saved_data | + SSS_STORE_DATA_SET(1, ARM); + } else { + saved_data &= SSS_STORE_DATA_CLEAR(saved_data, ARM); + SSS_WQE_HEAD(wqe)->store_data = saved_data; + } +} + +static void sss_ctrlq_set_lcmd_wqe(struct sss_ctrlq_wqe *wqe, + enum sss_ctrlq_comm_msg_type cmd_type, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, int wrapped, + u8 mod, u8 cmd, u16 pi) +{ + struct sss_ctrlq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum sss_completion_fmt complete_fmt = SSS_COMPLETE_DIRECT; + + switch (cmd_type) { + case SSS_SYNC_MSG_DIRECT_REPLY: + wqe_lcmd->completion.direct_reply = 0; + break; + case SSS_SYNC_MSG_SGE_REPLY: + if (out_buf) { + complete_fmt = SSS_COMPLETE_SGE; + 
sss_ctrlq_set_completion(&wqe_lcmd->completion, out_buf); + } + break; + case SSS_ASYNC_MSG: + wqe_lcmd->completion.direct_reply = 0; + wqe_lcmd->bd.store_async_buf = (u64)(in_buf); + break; + } + + sss_ctrlq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, pi, complete_fmt, + SSS_DATA_SGE, SSS_BD_LCMD_LEN); + + sss_ctrlq_set_lcmd_bufdesc(wqe_lcmd, in_buf); +} + +static void sss_ctrlq_update_cmd_state(struct sss_ctrlq *ctrlq, u16 pi, + struct sss_ctrlq_wqe *wqe) +{ + struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[pi]; + struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd; + u32 state = sss_hw_cpu32(lcmd->state.info); + + if (info->direct_resp) + *info->direct_resp = + sss_hw_cpu32(lcmd->completion.direct_reply); + + if (info->err_code) + *info->err_code = SSS_GET_WQE_ERRCODE(state, VAL); +} + +static int sss_ctrlq_check_sync_timeout(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 pi) +{ + struct sss_ctrlq_wqe_lcmd *wqe_lcmd; + struct sss_wqe_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = sss_hw_cpu32((ctrl)->info); + if (!SSS_WQE_COMPLETE(ctrl_info)) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe do not complete\n"); + return -EFAULT; + } + + sss_ctrlq_update_cmd_state(ctrlq, pi, wqe); + + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Success to check ctrlq sync cmd\n"); + return 0; +} + +static void sss_reset_cmd_info(struct sss_ctrlq_cmd_info *cmd_info, + const struct sss_ctrlq_cmd_info *store_cmd_info) +{ + if (cmd_info->err_code == store_cmd_info->err_code) + cmd_info->err_code = NULL; + + if (cmd_info->done == store_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == store_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int sss_ctrlq_ceq_handler_state(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_cmd_info *cmd_info, + struct sss_ctrlq_cmd_info *store_cmd_info, + u64 curr_msg_id, u16 curr_pi, + struct sss_ctrlq_wqe *curr_wqe, + u32 timeout) +{ + ulong timeo; + int 
ret; + ulong end = jiffies + msecs_to_jiffies(timeout); + + if (SSS_TO_HWDEV(ctrlq)->poll) { + while (time_before(jiffies, end)) { + sss_ctrlq_ceq_handler(SSS_TO_HWDEV(ctrlq), 0); + if (store_cmd_info->done->done != 0) + return 0; + usleep_range(9, 10); /* sleep 9 us ~ 10 us */ + } + } else { + timeo = msecs_to_jiffies(timeout); + if (wait_for_completion_timeout(store_cmd_info->done, timeo)) + return 0; + } + + spin_lock_bh(&ctrlq->ctrlq_lock); + + if (cmd_info->cmpt_code == store_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*store_cmd_info->cmpt_code == SSS_CTRLQ_COMPLETE_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq direct sync command complete\n"); + spin_unlock_bh(&ctrlq->ctrlq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->msg_id) { + ret = sss_ctrlq_check_sync_timeout(ctrlq, curr_wqe, curr_pi); + if (ret != 0) + cmd_info->msg_type = SSS_MSG_TYPE_TIMEOUT; + else + cmd_info->msg_type = SSS_MSG_TYPE_PSEUDO_TIMEOUT; + } else { + ret = -ETIMEDOUT; + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync command curr_msg_id dismatch with cmd_info msg_id\n"); + } + + sss_reset_cmd_info(cmd_info, store_cmd_info); + + spin_unlock_bh(&ctrlq->ctrlq_lock); + + if (ret == 0) + return 0; + + sss_dump_ceq_info(SSS_TO_HWDEV(ctrlq)); + + return -ETIMEDOUT; +} + +static int sss_wait_ctrlq_sync_cmd_completion(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_cmd_info *cmd_info, + struct sss_ctrlq_cmd_info *store_cmd_info, + u64 curr_msg_id, u16 curr_pi, + struct sss_ctrlq_wqe *curr_wqe, u32 timeout) +{ + return sss_ctrlq_ceq_handler_state(ctrlq, cmd_info, store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, timeout); +} + +static int sss_ctrlq_msg_lock(struct sss_ctrlq *ctrlq, u16 channel) +{ + struct sss_ctrlq_info *ctrlq_info = SSS_CTRLQ_TO_INFO(ctrlq); + + spin_lock_bh(&ctrlq->ctrlq_lock); + + if (ctrlq_info->lock_channel_en && test_bit(channel, &ctrlq_info->channel_stop)) { + spin_unlock_bh(&ctrlq->ctrlq_lock); + return -EAGAIN; + } + + return 0; +} + 
+static void sss_ctrlq_msg_unlock(struct sss_ctrlq *ctrlq) +{ + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +static void sss_ctrlq_set_cmd_buf(struct sss_ctrlq_cmd_info *cmd_info, + struct sss_hwdev *hwdev, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf) +{ + cmd_info->in_buf = in_buf; + cmd_info->out_buf = out_buf; + + if (in_buf) + atomic_inc(&in_buf->ref_cnt); + + if (out_buf) + atomic_inc(&out_buf->ref_cnt); +} + +int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod, + u8 cmd, struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sss_wq *wq = &ctrlq->wq; + struct sss_ctrlq_wqe *curr_wqe = NULL; + struct sss_ctrlq_wqe wqe; + struct sss_ctrlq_cmd_info *cmd_info = NULL; + struct sss_ctrlq_cmd_info store_cmd_info; + struct completion done; + u16 curr_pi, next_pi; + int wrapped; + int errcode = 0; + int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int ret; + u32 real_timeout; + + ret = sss_ctrlq_msg_lock(ctrlq, channel); + if (ret != 0) + return ret; + + curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi); + if (!curr_wqe) { + sss_ctrlq_msg_unlock(ctrlq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = ctrlq->wrapped; + + next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ; + if (next_pi >= wq->q_depth) { + ctrlq->wrapped = (ctrlq->wrapped == 0) ? 
1 : 0; + next_pi -= (u16)wq->q_depth; + } + + cmd_info = &ctrlq->cmd_info[curr_pi]; + + init_completion(&done); + + cmd_info->msg_type = SSS_MSG_TYPE_DIRECT_RESP; + cmd_info->done = &done; + cmd_info->err_code = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, NULL); + + memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info)); + + sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_DIRECT_REPLY, in_buf, NULL, + wrapped, mod, cmd, curr_pi); + + /* CTRLQ WQE is not shadow, therefore wqe will be written to wq */ + sss_ctrlq_fill_wqe(curr_wqe, &wqe); + + (cmd_info->msg_id)++; + curr_msg_id = cmd_info->msg_id; + + sss_ctrlq_set_db(ctrlq, SSS_CTRLQ_SYNC, next_pi); + + sss_ctrlq_msg_unlock(ctrlq); + + real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT; + ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync cmd direct resp timeout, mod: %u, cmd: %u, pi: 0x%x\n", + mod, cmd, curr_pi); + ret = -ETIMEDOUT; + } + + if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n", + mod, cmd); + ret = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (ret != 0) ? 
ret : errcode; +} + +int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sss_wq *wq = &ctrlq->wq; + struct sss_ctrlq_wqe *curr_wqe = NULL, wqe; + struct sss_ctrlq_cmd_info *cmd_info = NULL, store_cmd_info; + struct completion done; + u16 curr_pi, next_pi; + int wrapped, errcode = 0; + int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int ret; + u32 real_timeout; + + ret = sss_ctrlq_msg_lock(ctrlq, channel); + if (ret != 0) + return ret; + + curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi); + if (!curr_wqe) { + sss_ctrlq_msg_unlock(ctrlq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = ctrlq->wrapped; + + next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ; + if (next_pi >= wq->q_depth) { + ctrlq->wrapped = (ctrlq->wrapped == 0) ? 1 : 0; + next_pi -= (u16)wq->q_depth; + } + + cmd_info = &ctrlq->cmd_info[curr_pi]; + + init_completion(&done); + + cmd_info->msg_type = SSS_MSG_TYPE_SGE_RESP; + cmd_info->done = &done; + cmd_info->err_code = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, out_buf); + + memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info)); + + sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_SGE_REPLY, in_buf, out_buf, + wrapped, mod, cmd, curr_pi); + + sss_ctrlq_fill_wqe(curr_wqe, &wqe); + + (cmd_info->msg_id)++; + curr_msg_id = cmd_info->msg_id; + + sss_ctrlq_set_db(ctrlq, ctrlq->ctrlq_type, next_pi); + + sss_ctrlq_msg_unlock(ctrlq); + + real_timeout = timeout ? 
timeout : SSS_CTRLQ_CMD_TIMEOUT; + ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync cmd detail resp timeout, mod: %u, cmd: %u, pi: 0x%x\n", + mod, cmd, curr_pi); + ret = -ETIMEDOUT; + } + + if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n", + mod, cmd); + ret = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (ret != 0) ? ret : errcode; +} + +void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev, + struct sss_ctrlq_cmd_info *info) +{ + if (info->in_buf) + sss_free_ctrlq_msg_buf(hwdev, info->in_buf); + + if (info->out_buf) + sss_free_ctrlq_msg_buf(hwdev, info->out_buf); + + info->out_buf = NULL; + info->in_buf = NULL; +} + +static void sss_erase_wqe_complete_bit(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *wqe_ctrl = NULL; + u32 head = sss_hw_cpu32(SSS_WQE_HEAD(wqe)->info); + enum sss_data_fmt format = SSS_GET_CTRLQ_WQE_HEAD(head, DATA_FMT); + + wqe_ctrl = (format == SSS_DATA_SGE) ? 
&wqe->wqe_lcmd.ctrl : + &wqe->inline_wqe.wqe_scmd.ctrl; + + wqe_ctrl->info = 0; + ctrlq->cmd_info[ci].msg_type = SSS_MSG_TYPE_NONE; + + /* write ctrlq wqe msg type */ + wmb(); + + sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ); +} + +static void sss_ctrlq_update_cmd_info(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[ci]; + struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd; + u32 status; + + spin_lock(&ctrlq->ctrlq_lock); + + if (info->direct_resp) + *info->direct_resp = + sss_hw_cpu32(lcmd->completion.direct_reply); + + if (info->err_code) { + status = sss_hw_cpu32(lcmd->state.info); + *info->err_code = SSS_GET_WQE_ERRCODE(status, VAL); + } + + if (info->cmpt_code) { + *info->cmpt_code = SSS_CTRLQ_COMPLETE_CODE; + info->cmpt_code = NULL; + } + + /* read all before set info done */ + smp_rmb(); + + if (info->done) { + complete(info->done); + info->done = NULL; + } + + spin_unlock(&ctrlq->ctrlq_lock); +} + +static int sss_ctrlq_arm_ceq_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_default_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + dma_rmb(); + + sss_ctrlq_update_cmd_info(ctrlq, wqe, ci); + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_async_cmd_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return 
-EBUSY; + + dma_rmb(); + + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_pseudo_timeout_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_timeout_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + u32 i; + u32 *data = (u32 *)wqe; + u32 num = SSS_CTRLQ_WQE_HEAD_LEN / sizeof(u32); + + sdk_warn(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq timeout, ci: %u\n", ci); + + for (i = 0; i < num; i += 0x4) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + *(data + i), *(data + i + 0x1), *(data + i + 0x2), + *(data + i + 0x3)); + } + + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_force_stop_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + return sss_ctrlq_async_cmd_handler(ctrlq, wqe, ci); +} + +void sss_ctrlq_ceq_handler(void *dev, u32 data) +{ + u16 ci; + int ret; + enum sss_ctrlq_type type = SSS_GET_CEQE_CTRLQ(data, TYPE); + struct sss_ctrlq *ctrlq = &SSS_TO_CTRLQ_INFO(dev)->ctrlq[type]; + struct sss_ctrlq_wqe *ctrlq_wqe = NULL; + struct sss_ctrlq_cmd_info *info = NULL; + + sss_ctrlq_type_handler_t handler[] = { + NULL, + sss_ctrlq_arm_ceq_handler, + sss_ctrlq_default_handler, + sss_ctrlq_default_handler, + sss_ctrlq_async_cmd_handler, + sss_ctrlq_pseudo_timeout_handler, + sss_ctrlq_timeout_handler, + sss_ctrlq_force_stop_handler, + }; + + while ((ctrlq_wqe = sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) != NULL) { + info = &ctrlq->cmd_info[ci]; + + if (info->msg_type < SSS_MSG_TYPE_NONE || + info->msg_type >= SSS_MSG_TYPE_MAX) { + ret = sss_ctrlq_default_handler(ctrlq, ctrlq_wqe, ci); + if (ret) + break; + 
+ continue; + } + + if (!handler[info->msg_type]) + break; + + ret = handler[info->msg_type](ctrlq, ctrlq_wqe, ci); + if (ret) + break; + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h new file mode 100644 index 0000000000000..219ef90baf44e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CTRLQ_H +#define SSS_HWIF_CTRLQ_H + +#include "sss_hw_wq.h" + +#define SSS_CTRLQ_BUF_LEN 2048U + +#define SSS_CTRLQ_SEND_CMPT_CODE 10 + +#define SSS_CTRLQ_FORCE_STOP_CMPT_CODE 12 + +#define SSS_WQEBB_NUM_FOR_CTRLQ 1 + +enum sss_ctrlq_state { + SSS_CTRLQ_ENABLE = BIT(0), +}; + +void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci); +void sss_ctrlq_ceq_handler(void *handle, u32 ceqe_data); +void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev, + struct sss_ctrlq_cmd_info *cmd_info); +int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod, + u8 cmd, struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel); +int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c new file mode 100644 index 0000000000000..8f579544c0cdc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_pci.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include 
"sss_hwif_ctrlq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_ENABLE_TIMEOUT 300 + +static int sss_wait_ctrlq_enable(struct sss_ctrlq_info *ctrlq_info) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(SSS_CTRLQ_ENABLE_TIMEOUT); + do { + if (ctrlq_info->state & SSS_CTRLQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && + SSS_TO_HWDEV(ctrlq_info)->chip_present_flag && + !ctrlq_info->disable_flag); + + ctrlq_info->disable_flag = 1; + + return -EBUSY; +} + +static int sss_check_ctrlq_param(const void *hwdev, const struct sss_ctrl_msg_buf *in_buf) +{ + if (!hwdev || !in_buf) { + pr_err("Invalid ctrlq param: hwdev: %p or in_buf: %p\n", hwdev, in_buf); + return -EINVAL; + } + + if (in_buf->size == 0 || in_buf->size > SSS_CTRLQ_BUF_LEN) { + pr_err("Invalid ctrlq buf size: 0x%x\n", in_buf->size); + return -EINVAL; + } + + return 0; +} + +struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev) +{ + struct sss_ctrlq_info *ctrlq_info = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + void *dev = NULL; + + if (!hwdev) { + pr_err("Alloc ctrlq msg buf: hwdev is NULL\n"); + return NULL; + } + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + + msg_buf = kzalloc(sizeof(*msg_buf), GFP_ATOMIC); + if (!msg_buf) + return NULL; + + msg_buf->buf = pci_pool_alloc(ctrlq_info->msg_buf_pool, GFP_ATOMIC, + &msg_buf->dma_addr); + if (!msg_buf->buf) { + sdk_err(dev, "Fail to allocate ctrlq pci pool\n"); + goto alloc_pci_buf_err; + } + + msg_buf->size = SSS_CTRLQ_BUF_LEN; + atomic_set(&msg_buf->ref_cnt, 1); + + return msg_buf; + +alloc_pci_buf_err: + kfree(msg_buf); + return NULL; +} +EXPORT_SYMBOL(sss_alloc_ctrlq_msg_buf); + +void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf) +{ + struct sss_ctrlq_info *ctrlq_info = SSS_TO_CTRLQ_INFO(hwdev); + + if (!hwdev || !msg_buf) { + pr_err("Invalid ctrlq param: hwdev: %p or msg_buf: %p\n", hwdev, msg_buf); + return; + } + + if 
(atomic_dec_and_test(&msg_buf->ref_cnt) == 0) + return; + + pci_pool_free(ctrlq_info->msg_buf_pool, msg_buf->buf, msg_buf->dma_addr); + kfree(msg_buf); +} +EXPORT_SYMBOL(sss_free_ctrlq_msg_buf); + +int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, u64 *out_param, + u32 timeout, u16 channel) +{ + int ret; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ret = sss_check_ctrlq_param(hwdev, in_buf); + if (ret != 0) { + pr_err("Invalid ctrlq parameters\n"); + return ret; + } + + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) + return -EPERM; + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + ret = sss_wait_ctrlq_enable(ctrlq_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n"); + return ret; + } + + ret = sss_ctrlq_sync_cmd_direct_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC], + mod, cmd, in_buf, out_param, timeout, channel); + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return ret; +} +EXPORT_SYMBOL(sss_ctrlq_direct_reply); + +int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + int ret; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ret = sss_check_ctrlq_param(hwdev, in_buf); + if (ret != 0) + return ret; + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) + return -EPERM; + + ret = sss_wait_ctrlq_enable(ctrlq_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n"); + return ret; + } + + ret = sss_ctrlq_sync_cmd_detail_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC], + mod, cmd, in_buf, out_buf, + out_param, timeout, channel); + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return ret; +} +EXPORT_SYMBOL(sss_ctrlq_detail_reply); diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c new file mode 100644 index 0000000000000..2414a5624176a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_pci.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_export.h" +#include "sss_hwif_ceq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_DEPTH 4096 + +#define SSS_CTRLQ_PFN_SHIFT 12 +#define SSS_CTRLQ_PFN(addr) ((addr) >> SSS_CTRLQ_PFN_SHIFT) + +#define SSS_CTRLQ_CEQ_ID 0 + +#define SSS_CTRLQ_WQ_CLA_SIZE 512 + +#define SSS_CTRLQ_WQEBB_SIZE 64 + +#define SSS_CTRLQ_IDLE_TIMEOUT 5000 + +#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_SHIFT 0 +#define SSS_CTRLQ_CTX_CEQ_ID_SHIFT 53 +#define SSS_CTRLQ_CTX_CEQ_ARM_SHIFT 61 +#define SSS_CTRLQ_CTX_CEQ_EN_SHIFT 62 +#define SSS_CTRLQ_CTX_HW_BUSY_BIT_SHIFT 63 + +#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define SSS_CTRLQ_CTX_CEQ_ID_MASK 0xFF +#define SSS_CTRLQ_CTX_CEQ_ARM_MASK 0x1 +#define SSS_CTRLQ_CTX_CEQ_EN_MASK 0x1 +#define SSS_CTRLQ_CTX_HW_BUSY_BIT_MASK 0x1 + +#define SSS_SET_CTRLQ_CTX_INFO(val, member) \ + (((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \ + << SSS_CTRLQ_CTX_##member##_SHIFT) + +#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_SHIFT 0 +#define SSS_CTRLQ_CTX_CI_SHIFT 52 + +#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define SSS_CTRLQ_CTX_CI_MASK 0xFFF + +#define SSS_SET_CTRLQ_CTX_BLOCK_INFO(val, member) \ + (((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \ + << SSS_CTRLQ_CTX_##member##_SHIFT) + +#define SSS_CTRLQ_CLA_WQ_PAGE_NUM (SSS_CTRLQ_WQ_CLA_SIZE / sizeof(u64)) + 
+#define SSS_GET_WQ_PAGE_SIZE(page_order) (SSS_HW_WQ_PAGE_SIZE * (1U << (page_order))) + +#define SSS_CTRLQ_DMA_POOL_NAME "sss_ctrlq" + +#define SSS_CTRLQ_WRAP_ENABLE 1 + +#define SSS_SET_WQE_PAGE_PFN(pfn) \ + (SSS_SET_CTRLQ_CTX_INFO(1, CEQ_ARM) | \ + SSS_SET_CTRLQ_CTX_INFO(1, CEQ_EN) | \ + SSS_SET_CTRLQ_CTX_INFO((pfn), NOW_WQE_PAGE_PFN) | \ + SSS_SET_CTRLQ_CTX_INFO(SSS_CTRLQ_CEQ_ID, CEQ_ID) | \ + SSS_SET_CTRLQ_CTX_INFO(1, HW_BUSY_BIT)) + +#define SSS_SET_WQ_BLOCK_PFN(wq, pfn) \ + (SSS_SET_CTRLQ_CTX_BLOCK_INFO((pfn), WQ_BLOCK_PFN) | \ + SSS_SET_CTRLQ_CTX_BLOCK_INFO((u16)(wq)->ci, CI)) + +static u32 wq_page_num = SSS_MAX_WQ_PAGE_NUM; +module_param(wq_page_num, uint, 0444); +MODULE_PARM_DESC(wq_page_num, + "Set wq page num, wq page size is 4K * (2 ^ wq_page_num) - default is 8"); + +static int sss_init_ctrq_block(struct sss_ctrlq_info *ctrlq_info) +{ + u8 i; + + if (SSS_WQ_IS_0_LEVEL_CLA(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq)) + return 0; + + /* ctrlq wq's CLA table is up to 512B */ + if (ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq.page_num > SSS_CTRLQ_CLA_WQ_PAGE_NUM) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq wq page out of range: %lu\n", + SSS_CTRLQ_CLA_WQ_PAGE_NUM); + return -EINVAL; + } + + ctrlq_info->wq_block_vaddr = + dma_zalloc_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE, + &ctrlq_info->wq_block_paddr, GFP_KERNEL); + if (!ctrlq_info->wq_block_vaddr) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to alloc ctrlq wq block\n"); + return -ENOMEM; + } + + for (i = 0; i < ctrlq_info->num; i++) + memcpy((u8 *)ctrlq_info->wq_block_vaddr + SSS_CTRLQ_WQ_CLA_SIZE * i, + ctrlq_info->ctrlq[i].wq.block_vaddr, + ctrlq_info->ctrlq[i].wq.page_num * sizeof(u64)); + + return 0; +} + +static void sss_deinit_ctrq_block(struct sss_ctrlq_info *ctrlq_info) +{ + if (ctrlq_info->wq_block_vaddr) { + dma_free_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE, + ctrlq_info->wq_block_vaddr, ctrlq_info->wq_block_paddr); + ctrlq_info->wq_block_vaddr = NULL; + } +} + 
+static int sss_create_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info) +{ + u8 i; + int ret; + u8 q_type; + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ret = sss_create_wq(SSS_TO_HWDEV(ctrlq_info), &ctrlq_info->ctrlq[q_type].wq, + SSS_CTRLQ_DEPTH, SSS_CTRLQ_WQEBB_SIZE); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to create ctrlq wq\n"); + goto destroy_wq; + } + } + + /* 1-level CLA must put all ctrlq's wq page addr in one wq block */ + ret = sss_init_ctrq_block(ctrlq_info); + if (ret != 0) + goto destroy_wq; + + return 0; + +destroy_wq: + for (i = 0; i < q_type; i++) + sss_destroy_wq(&ctrlq_info->ctrlq[i].wq); + sss_deinit_ctrq_block(ctrlq_info); + + return ret; +} + +static void sss_destroy_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info) +{ + u8 type; + + sss_deinit_ctrq_block(ctrlq_info); + + for (type = 0; type < ctrlq_info->num; type++) + sss_destroy_wq(&ctrlq_info->ctrlq[type].wq); +} + +static int sss_init_ctrlq_info(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_ctxt_info *ctx, + dma_addr_t wq_block_paddr) +{ + struct sss_wq *wq = &ctrlq->wq; + u64 pfn = SSS_CTRLQ_PFN(wq->page[0].align_paddr); + + ctrlq->cmd_info = kcalloc(ctrlq->wq.q_depth, sizeof(*ctrlq->cmd_info), + GFP_KERNEL); + if (!ctrlq->cmd_info) + return -ENOMEM; + + ctrlq->wrapped = SSS_CTRLQ_WRAP_ENABLE; + spin_lock_init(&ctrlq->ctrlq_lock); + + ctx->curr_wqe_page_pfn = SSS_SET_WQE_PAGE_PFN(pfn); + pfn = SSS_WQ_IS_0_LEVEL_CLA(wq) ? 
pfn : SSS_CTRLQ_PFN(wq_block_paddr); + ctx->wq_block_pfn = SSS_SET_WQ_BLOCK_PFN(wq, pfn); + + return 0; +} + +static void sss_deinit_ctrlq_info(struct sss_ctrlq *ctrlq) +{ + kfree(ctrlq->cmd_info); +} + +static void sss_flush_ctrlq_sync_cmd(struct sss_ctrlq_cmd_info *info) +{ + if (info->msg_type != SSS_MSG_TYPE_DIRECT_RESP && + info->msg_type != SSS_MSG_TYPE_SGE_RESP) + return; + + info->msg_type = SSS_MSG_TYPE_FORCE_STOP; + + if (info->cmpt_code && *info->cmpt_code == SSS_CTRLQ_SEND_CMPT_CODE) + *info->cmpt_code = SSS_CTRLQ_FORCE_STOP_CMPT_CODE; + + if (info->done) { + complete(info->done); + info->cmpt_code = NULL; + info->direct_resp = NULL; + info->err_code = NULL; + info->done = NULL; + } +} + +static void sss_flush_ctrlq_cmd(struct sss_ctrlq *ctrlq) +{ + u16 ci = 0; + + spin_lock_bh(&ctrlq->ctrlq_lock); + while (sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) { + sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ); + sss_flush_ctrlq_sync_cmd(&ctrlq->cmd_info[ci]); + } + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +static void sss_free_all_ctrlq_cmd_buff(struct sss_ctrlq *ctrlq) +{ + u16 i; + + for (i = 0; i < ctrlq->wq.q_depth; i++) + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[i]); +} + +static int sss_chip_set_ctrlq_ctx(struct sss_hwdev *hwdev, u8 qid, + struct sss_ctrlq_ctxt_info *ctxt) +{ + int ret; + struct sss_cmd_ctrlq_ctxt cmd_ctx = {0}; + u16 out_len = sizeof(cmd_ctx); + + memcpy(&cmd_ctx.ctxt, ctxt, sizeof(*ctxt)); + cmd_ctx.ctrlq_id = qid; + cmd_ctx.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT, + &cmd_ctx, sizeof(cmd_ctx), &cmd_ctx, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) { + sdk_err(hwdev->dev_hdl, + "Fail to set ctrlq ctx, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ctx.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_init_ctrlq_ctx(struct sss_hwdev *hwdev) +{ + u8 q_type; + int ret; + struct sss_ctrlq_info 
*ctrlq_info = hwdev->ctrlq_info; + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ret = sss_chip_set_ctrlq_ctx(hwdev, q_type, &ctrlq_info->ctrlq[q_type].ctrlq_ctxt); + if (ret != 0) + return ret; + } + + ctrlq_info->disable_flag = 0; + ctrlq_info->state |= SSS_CTRLQ_ENABLE; + + return 0; +} + +int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev) +{ + u8 ctrlq_type; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[ctrlq_type]); + sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[ctrlq_type]); + ctrlq_info->ctrlq[ctrlq_type].wrapped = 1; + sss_wq_reset(&ctrlq_info->ctrlq[ctrlq_type].wq); + } + + return sss_init_ctrlq_ctx(hwdev); +} + +static int sss_init_ctrlq(struct sss_hwdev *hwdev) +{ + u8 i; + u8 q_type; + int ret = -ENOMEM; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ctrlq_info = kzalloc(sizeof(*ctrlq_info), GFP_KERNEL); + if (!ctrlq_info) + return -ENOMEM; + + ctrlq_info->hwdev = hwdev; + hwdev->ctrlq_info = ctrlq_info; + + if (SSS_SUPPORT_CTRLQ_NUM(hwdev)) { + ctrlq_info->num = hwdev->glb_attr.ctrlq_num; + if (hwdev->glb_attr.ctrlq_num > SSS_MAX_CTRLQ_TYPE) { + sdk_warn(hwdev->dev_hdl, "Adjust ctrlq num to %d\n", SSS_MAX_CTRLQ_TYPE); + ctrlq_info->num = SSS_MAX_CTRLQ_TYPE; + } + } else { + ctrlq_info->num = SSS_MAX_CTRLQ_TYPE; + } + + ctrlq_info->msg_buf_pool = dma_pool_create(SSS_CTRLQ_DMA_POOL_NAME, hwdev->dev_hdl, + SSS_CTRLQ_BUF_LEN, SSS_CTRLQ_BUF_LEN, 0ULL); + if (!ctrlq_info->msg_buf_pool) { + sdk_err(hwdev->dev_hdl, "Fail to create ctrlq buffer pool\n"); + goto create_pool_err; + } + + ret = sss_create_ctrlq_wq(ctrlq_info); + if (ret != 0) + goto create_wq_err; + + ret = sss_alloc_db_addr(hwdev, (void __iomem *)&ctrlq_info->db_base); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc doorbell addr\n"); + goto init_db_err; + } + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ctrlq_info->ctrlq[q_type].hwdev 
= hwdev; + ctrlq_info->ctrlq[q_type].ctrlq_type = q_type; + ret = sss_init_ctrlq_info(&ctrlq_info->ctrlq[q_type], + &ctrlq_info->ctrlq[q_type].ctrlq_ctxt, + ctrlq_info->wq_block_paddr); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq i :%d\n", q_type); + goto init_ctrlq_info_err; + } + } + + ret = sss_init_ctrlq_ctx(hwdev); + if (ret != 0) + goto init_ctrlq_info_err; + + return 0; + +init_ctrlq_info_err: + for (i = 0; i < q_type; i++) + sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]); + + sss_free_db_addr(hwdev, ctrlq_info->db_base); +init_db_err: + sss_destroy_ctrlq_wq(ctrlq_info); +create_wq_err: + dma_pool_destroy(ctrlq_info->msg_buf_pool); +create_pool_err: + kfree(ctrlq_info); + hwdev->ctrlq_info = NULL; + + return ret; +} + +void sss_deinit_ctrlq(struct sss_hwdev *hwdev) +{ + u8 i; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + ctrlq_info->state &= ~SSS_CTRLQ_ENABLE; + + for (i = 0; i < ctrlq_info->num; i++) { + sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[i]); + sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[i]); + sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]); + } + + sss_free_db_addr(hwdev, ctrlq_info->db_base); + sss_destroy_ctrlq_wq(ctrlq_info); + + dma_pool_destroy(ctrlq_info->msg_buf_pool); + + kfree(ctrlq_info); + hwdev->ctrlq_info = NULL; +} + +static int sss_set_ctrlq_depth(void *hwdev) +{ + int ret; + struct sss_cmd_root_ctxt cmd_ctx = {0}; + u16 out_len = sizeof(cmd_ctx); + + cmd_ctx.set_ctrlq_depth = 1; + cmd_ctx.ctrlq_depth = (u8)ilog2(SSS_CTRLQ_DEPTH); + cmd_ctx.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, &cmd_ctx, + sizeof(cmd_ctx), &cmd_ctx, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set ctrlq depth, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ctx.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_hwif_init_ctrlq(struct sss_hwdev *hwdev) +{ + int ret; + + ret = 
sss_init_ctrlq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq\n"); + return ret; + } + + sss_ceq_register_cb(hwdev, hwdev, SSS_NIC_CTRLQ, sss_ctrlq_ceq_handler); + + ret = sss_set_ctrlq_depth(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set ctrlq depth\n"); + goto set_depth_err; + } + + set_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state); + + return 0; + +set_depth_err: + sss_deinit_ctrlq(hwdev); + + return ret; +} + +static void sss_hwif_deinit_ctrlq(struct sss_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_ceq_unregister_cb(hwdev, SSS_NIC_CTRLQ); + sss_deinit_ctrlq(hwdev); +} + +static bool sss_ctrlq_is_idle(struct sss_ctrlq *ctrlq) +{ + return sss_wq_is_empty(&ctrlq->wq); +} + +static enum sss_process_ret sss_check_ctrlq_stop_handler(void *priv_data) +{ + struct sss_hwdev *hwdev = priv_data; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + enum sss_ctrlq_type ctrlq_type; + + /* Stop waiting when card unpresent */ + if (!hwdev->chip_present_flag) + return SSS_PROCESS_OK; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type])) + return SSS_PROCESS_DOING; + } + + return SSS_PROCESS_OK; +} + +static int sss_init_ctrlq_page_size(struct sss_hwdev *hwdev) +{ + int ret; + + if (wq_page_num > SSS_MAX_WQ_PAGE_NUM) { + sdk_info(hwdev->dev_hdl, + "Invalid wq_page_num %u out of range, adjust to %d\n", + wq_page_num, SSS_MAX_WQ_PAGE_NUM); + wq_page_num = SSS_MAX_WQ_PAGE_NUM; + } + + hwdev->wq_page_size = SSS_GET_WQ_PAGE_SIZE(wq_page_num); + ret = sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev), + hwdev->wq_page_size); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set wq page size\n"); + return ret; + } + + return 0; +} + +static void sss_deinit_ctrlq_page_size(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != 
SSS_FUNC_TYPE_VF) + sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev), + SSS_HW_WQ_PAGE_SIZE); +} + +int sss_init_ctrlq_channel(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_ceq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwdev ceq.\n"); + return ret; + } + + ret = sss_init_ceq_msix_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq msix attr\n"); + goto init_msix_err; + } + + ret = sss_init_ctrlq_page_size(hwdev); + if (ret != 0) + goto init_size_err; + + ret = sss_hwif_init_ctrlq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwif ctrlq\n"); + goto init_ctrlq_err; + } + + return 0; + +init_ctrlq_err: + sss_deinit_ctrlq_page_size(hwdev); +init_size_err: +init_msix_err: + sss_hwif_deinit_ceq(hwdev); + + return ret; +} + +void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev) +{ + sss_hwif_deinit_ctrlq(hwdev); + + sss_deinit_ctrlq_page_size(hwdev); + + sss_hwif_deinit_ceq(hwdev); +} + +void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev) +{ + u16 cnt; + u16 ci; + u16 i; + u16 id; + struct sss_wq *wq = NULL; + struct sss_ctrlq *ctrlq = NULL; + struct sss_ctrlq_cmd_info *info = NULL; + + ctrlq = &hwdev->ctrlq_info->ctrlq[SSS_CTRLQ_SYNC]; + + spin_lock_bh(&ctrlq->ctrlq_lock); + wq = &ctrlq->wq; + id = wq->pi + wq->q_depth - wq->ci; + cnt = (u16)SSS_WQ_MASK_ID(wq, id); + ci = wq->ci; + + for (i = 0; i < cnt; i++) { + info = &ctrlq->cmd_info[SSS_WQ_MASK_ID(wq, ci + i)]; + sss_flush_ctrlq_sync_cmd(info); + } + + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev) +{ + enum sss_ctrlq_type ctrlq_type; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + int ret; + + if (!(ctrlq_info->state & SSS_CTRLQ_ENABLE)) + return 0; + + ctrlq_info->state &= ~SSS_CTRLQ_ENABLE; + + ret = sss_check_handler_timeout(hwdev, sss_check_ctrlq_stop_handler, + SSS_CTRLQ_IDLE_TIMEOUT, USEC_PER_MSEC); + if (ret == 0) + return 0; + + for (ctrlq_type = 0; 
ctrlq_type < ctrlq_info->num; ctrlq_type++) { + if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type])) + sdk_err(hwdev->dev_hdl, "Ctrlq %d is busy\n", ctrlq_type); + } + + ctrlq_info->state |= SSS_CTRLQ_ENABLE; + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h new file mode 100644 index 0000000000000..8aa0788c25bec --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CTRLQ_INIT_H +#define SSS_HWIF_CTRLQ_INIT_H + +#include "sss_hwdev.h" + +int sss_init_ctrlq_channel(struct sss_hwdev *hwdev); +void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev); +int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev); +int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev); +void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c new file mode 100644 index 0000000000000..d735f1bf68d78 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_csr.h" +#include "sss_hwif_eq.h" + +#define SSS_EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 +#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 +#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_SHIFT 30 +#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_SHIFT 24 + +#define SSS_EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU +#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U +#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_MASK 0x3U 
+#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_MASK 0xFFU + +#define SSS_SET_EQ_CI_SIMPLE_INDIR(val, member) \ + (((val) & SSS_EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ + SSS_EQ_CI_SIMPLE_INDIR_##member##_SHIFT) + +#define SSS_EQ_WRAPPED_SHIFT 20 + +#define SSS_EQ_CI(eq) ((eq)->ci | \ + ((u32)(eq)->wrap << SSS_EQ_WRAPPED_SHIFT)) + +#define SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define SSS_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SSS_AEQ) ? \ + SSS_AEQ_PHY_HI_ADDR_REG(pg_num) : \ + SSS_CEQ_PHY_HI_ADDR_REG(pg_num))) + +#define SSS_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SSS_AEQ) ? \ + SSS_AEQ_PHY_LO_ADDR_REG(pg_num) : \ + SSS_CEQ_PHY_LO_ADDR_REG(pg_num))) + +#define SSS_GET_EQ_PAGES_NUM(eq, size) \ + ((u16)(ALIGN((u32)((eq)->len * (eq)->entry_size), \ + (size)) / (size))) + +#define SSS_GET_EQ_MAX_PAGES(eq) \ + ((eq)->type == SSS_AEQ ? SSS_AEQ_MAX_PAGE : \ + SSS_CEQ_MAX_PAGE) + +#define SSS_GET_EQE_NUM(eq, pg_size) ((pg_size) / (u32)(eq)->entry_size) + +#define SSS_EQE_NUM_IS_ALIGN(eq) ((eq)->num_entry_per_pg & ((eq)->num_entry_per_pg - 1)) + +void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state) +{ + u32 val; + + if (eq->qid != 0 && SSS_TO_HWDEV(eq)->poll) + arm_state = SSS_EQ_NOT_ARMED; + + val = SSS_SET_EQ_CI_SIMPLE_INDIR(arm_state, ARMED) | + SSS_SET_EQ_CI_SIMPLE_INDIR(SSS_EQ_CI(eq), CI); + + if (eq->type == SSS_AEQ) + val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, AEQ_ID); + else + val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, CEQ_ID); + + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq), val); +} + +static void sss_chip_set_eq_page_addr(struct sss_eq *eq, + u16 page_id, struct sss_dma_addr_align *dma_addr) +{ + u32 addr; + + addr = SSS_EQ_HI_PHYS_ADDR_REG(eq->type, page_id); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, addr, + upper_32_bits(dma_addr->align_paddr)); + + addr = SSS_EQ_LO_PHYS_ADDR_REG(eq->type, 
page_id); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, addr, + lower_32_bits(dma_addr->align_paddr)); +} + +static int sss_chip_init_eq_attr(struct sss_eq *eq) +{ + u32 i; + int ret; + + for (i = 0; i < eq->page_num; i++) + sss_chip_set_eq_page_addr(eq, i, &eq->page_array[i]); + + ret = eq->init_attr_handler(eq); + if (ret != 0) + return ret; + + sss_chip_set_eq_ci(eq, SSS_EQ_ARMED); + + return 0; +} + +static u32 sss_init_eqe_desc(struct sss_eq *eq) +{ + eq->num_entry_per_pg = SSS_GET_EQE_NUM(eq, eq->page_size); + if (SSS_EQE_NUM_IS_ALIGN(eq)) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number element in eq page is not align\n"); + return -EINVAL; + } + + eq->init_desc_handler(eq); + + return 0; +} + +static int sss_alloc_eq_dma_page(struct sss_eq *eq, u16 id) +{ + int ret; + + ret = sss_dma_zalloc_coherent_align(SSS_TO_HWDEV(eq)->dev_hdl, eq->page_size, + SSS_MIN_EQ_PAGE_SIZE, GFP_KERNEL, &eq->page_array[id]); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Alloc eq page fail, pg index: %u\n", id); + return ret; + } + + return 0; +} + +static void sss_free_eq_dma_page(struct sss_eq *eq, u16 max_id) +{ + int i; + + for (i = 0; i < max_id; i++) + sss_dma_free_coherent_align(SSS_TO_DEV(eq->hwdev), &eq->page_array[i]); +} + +static int sss_alloc_eq_page(struct sss_eq *eq) +{ + u16 page_id; + int ret; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq); + + eq->page_array = kcalloc(eq->page_num, sizeof(*eq->page_array), GFP_KERNEL); + if (!eq->page_array) + return -ENOMEM; + + for (page_id = 0; page_id < eq->page_num; page_id++) { + ret = sss_alloc_eq_dma_page(eq, page_id); + if (ret != 0) + goto alloc_dma_err; + } + + ret = sss_init_eqe_desc(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eqe\n"); + goto alloc_dma_err; + } + + return 0; + +alloc_dma_err: + sss_free_eq_dma_page(eq, page_id); + kfree(eq->page_array); + eq->page_array = NULL; + + return ret; +} + +static void sss_free_eq_page(struct sss_eq *eq) +{ + u16 i; + struct sss_hwdev *hwdev = 
SSS_TO_HWDEV(eq); + + for (i = 0; i < eq->page_num; i++) + sss_dma_free_coherent_align(hwdev->dev_hdl, &eq->page_array[i]); + + kfree(eq->page_array); + eq->page_array = NULL; +} + +static inline u32 sss_get_eq_page_size(const struct sss_eq *eq) +{ + u32 total_size; + u32 count; + + total_size = ALIGN((eq->len * eq->entry_size), + SSS_MIN_EQ_PAGE_SIZE); + if (total_size <= (SSS_GET_EQ_MAX_PAGES(eq) * SSS_MIN_EQ_PAGE_SIZE)) + return SSS_MIN_EQ_PAGE_SIZE; + + count = (u32)(ALIGN((total_size / SSS_GET_EQ_MAX_PAGES(eq)), + SSS_MIN_EQ_PAGE_SIZE) / SSS_MIN_EQ_PAGE_SIZE); + + /* round up to nearest power of two */ + count = 1U << (u8)fls((int)(count - 1)); + + return ((u32)SSS_MIN_EQ_PAGE_SIZE) * count; +} + +static int sss_request_eq_irq(struct sss_eq *eq, struct sss_irq_desc *entry) +{ + struct pci_dev *pdev = SSS_TO_HWDEV(eq)->pcidev_hdl; + + snprintf(eq->irq_name, sizeof(eq->irq_name), "%s%u@pci:%s", + eq->name, eq->qid, pci_name(pdev)); + + return request_irq(entry->irq_id, eq->irq_handler, 0UL, eq->irq_name, eq); +} + +static void sss_chip_reset_eq(struct sss_eq *eq) +{ + struct sss_hwdev *hwdev = eq->hwdev; + struct sss_hwif *hwif = hwdev->hwif; + + sss_chip_write_reg(hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid); + + /* make sure set qid firstly*/ + wmb(); + + if (eq->type == SSS_AEQ) + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0); + else + sss_chip_set_ceq_attr(hwdev, eq->qid, 0, 0); + + /* make sure write ctrl reg secondly */ + wmb(); + + sss_chip_write_reg(hwif, SSS_EQ_PI_REG_ADDR(eq), 0); +} + +static int sss_init_eq_page_size(struct sss_eq *eq) +{ + eq->page_size = sss_get_eq_page_size(eq); + eq->old_page_size = eq->page_size; + eq->page_num = SSS_GET_EQ_PAGES_NUM(eq, eq->page_size); + + if (eq->page_num > SSS_GET_EQ_MAX_PAGES(eq)) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number pages: %u too many pages for eq\n", + eq->page_num); + return -EINVAL; + } + + return 0; +} + +void sss_increase_eq_ci(struct sss_eq *eq) +{ + if (!eq) + return; + + eq->ci++; 
+ + if (eq->ci == eq->len) { + eq->ci = 0; + eq->wrap = !eq->wrap; + } +} + +int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq, + struct sss_irq_desc *entry) +{ + int ret = 0; + + eq->hwdev = hwdev; + eq->irq_desc.irq_id = entry->irq_id; + eq->irq_desc.msix_id = entry->msix_id; + + ret = sss_init_eq_page_size(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eq params\n"); + return ret; + } + + ret = sss_alloc_eq_page(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc eq page\n"); + return ret; + } + + sss_chip_reset_eq(eq); + + ret = sss_chip_init_eq_attr(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eq attr\n"); + goto out; + } + + ret = sss_request_eq_irq(eq, entry); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to request eq irq, err: %d\n", ret); + goto out; + } + + sss_chip_set_msix_state(hwdev, SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE); + + return 0; + +out: + sss_free_eq_page(eq); + return ret; +} + +void sss_deinit_eq(struct sss_eq *eq) +{ + struct sss_irq_desc *irq = &eq->irq_desc; + + sss_chip_set_msix_state(SSS_TO_HWDEV(eq), SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE); + + synchronize_irq(irq->irq_id); + + free_irq(irq->irq_id, eq); + + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid); + + /* make sure disable msix */ + wmb(); + + if (eq->type == SSS_AEQ) { + cancel_work_sync(&eq->aeq_work); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0); + } else { + tasklet_kill(&eq->ceq_tasklet); + sss_chip_set_ceq_attr(SSS_TO_HWDEV(eq), eq->qid, 0, 0); + } + + eq->ci = sss_chip_read_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_PI_REG_ADDR(eq)); + sss_chip_set_eq_ci(eq, SSS_EQ_NOT_ARMED); + + sss_free_eq_page(eq); +} + +void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info) +{ + intr_info->coalesc_intr_set = SSS_EQ_INTR_COALESC; + intr_info->coalesc_timer = SSS_EQ_INTR_COALESC_TIMER_CFG; + intr_info->resend_timer = SSS_EQ_INTR_RESEND_TIMER_CFG; +} diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h new file mode 100644 index 0000000000000..45db82abb497c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_EQ_H +#define SSS_HWIF_EQ_H + +#include +#include +#include + +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_csr.h" + +#define SSS_EQ_UPDATE_CI_STEP 64 + +#define SSS_TASK_PROCESS_EQE_LIMIT 1024 + +#define SSS_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ +#define SSS_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ + +#define SSS_EQ_USLEEP_LOW_LIMIT 900 +#define SSS_EQ_USLEEP_HIG_LIMIT 1000 + +#define SSS_EQ_IRQ_ID(eq) ((eq)->irq_desc.msix_id) + +#define SSS_GET_EQ_ELEM(eq, id) \ + (((u8 *)(eq)->page_array[(id) / (eq)->num_entry_per_pg].align_vaddr) + \ + (u32)(((id) & ((eq)->num_entry_per_pg - 1)) * (eq)->entry_size)) + +#define SSS_EQ_VALID_SHIFT 31 +#define SSS_EQ_WRAPPED(eq) ((u32)(eq)->wrap << SSS_EQ_VALID_SHIFT) + +#define SSS_AEQ_MAX_PAGE 4 +#define SSS_CEQ_MAX_PAGE 8 + +#define SSS_AEQE_SIZE 64 +#define SSS_CEQE_SIZE 4 + +#define SSS_EQ_CI_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_CI_ADDR : SSS_CSR_CEQ_CI_ADDR) + +#define SSS_EQ_PI_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? 
\ + SSS_CSR_AEQ_PI_ADDR : SSS_CSR_CEQ_PI_ADDR) + +#define SSS_EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define SSS_EQ_ELEM_DESC_TYPE_SHIFT 0 +#define SSS_EQ_ELEM_DESC_SRC_SHIFT 7 +#define SSS_EQ_ELEM_DESC_SIZE_SHIFT 8 +#define SSS_EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define SSS_EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define SSS_EQ_ELEM_DESC_SRC_MASK 0x1U +#define SSS_EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define SSS_EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define SSS_GET_EQE_DESC(val, member) \ + (((val) >> SSS_EQ_ELEM_DESC_##member##_SHIFT) & \ + SSS_EQ_ELEM_DESC_##member##_MASK) + +#define SSS_PAGE_IN_4K(page_size) ((page_size) >> 12) +#define SSS_SET_EQ_HW_PAGE_SIZE(eq) ((u32)ilog2(SSS_PAGE_IN_4K((eq)->page_size))) + +enum sss_eq_intr_mode { + SSS_INTR_MODE_ARMED, + SSS_INTR_MODE_ALWAY, +}; + +enum sss_eq_ci_arm_state { + SSS_EQ_NOT_ARMED, + SSS_EQ_ARMED, +}; + +#define SSS_EQ_ARM_STATE(unfinish) \ + ((unfinish) ? SSS_EQ_NOT_ARMED : SSS_EQ_ARMED) + +#define SSS_EQ_INTR_COALESC 1 +#define SSS_EQ_INTR_COALESC_TIMER_CFG 0xFF +#define SSS_EQ_INTR_RESEND_TIMER_CFG 7 + +void sss_increase_eq_ci(struct sss_eq *eq); +int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq, + struct sss_irq_desc *entry); +void sss_deinit_eq(struct sss_eq *eq); +void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state); +void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c new file mode 100644 index 0000000000000..c4639c18297b9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_irq.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" + +int sss_alloc_db_addr(void *hwdev, void __iomem **db_base) +{ + struct 
sss_hwif *hwif = NULL; + u32 id = 0; + + int ret; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = SSS_TO_HWIF(hwdev); + + ret = sss_alloc_db_id(hwif, &id); + if (ret != 0) + return -EFAULT; + + *db_base = hwif->db_base_vaddr + id * SSS_DB_PAGE_SIZE; + + return 0; +} +EXPORT_SYMBOL(sss_alloc_db_addr); + +void sss_free_db_addr(void *hwdev, const void __iomem *db_base) +{ + struct sss_hwif *hwif = NULL; + u32 id; + + if (!hwdev || !db_base) + return; + + hwif = SSS_TO_HWIF(hwdev); + id = SSS_DB_ID(db_base, hwif->db_base_vaddr); + + sss_free_db_id(hwif, id); +} +EXPORT_SYMBOL(sss_free_db_addr); + +void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id, + enum sss_msix_auto_mask flag) +{ + u32 val; + + if (!hwdev) + return; + + val = (flag == SSS_CLR_MSIX_AUTO_MASK) ? + SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_CLR) : + SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_SET); + + val |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_set_msix_auto_mask); + +void sss_chip_set_msix_state(void *hwdev, u16 msix_id, + enum sss_msix_state flag) +{ + u32 val; + + if (!hwdev) + return; + + val = (flag == SSS_MSIX_ENABLE) ? 
SSS_SET_MSI_CLR_INDIR(1, INT_MSK_CLR) : + SSS_SET_MSI_CLR_INDIR(1, INT_MSK_SET); + val |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_set_msix_state); + +u16 sss_get_global_func_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_GLOBAL_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_global_func_id); + +u8 sss_get_pf_id_of_vf(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_pf_id_of_vf); + +u8 sss_get_pcie_itf_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_pcie_itf_id); + +enum sss_func_type sss_get_func_type(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_func_type); + +enum sss_func_type sss_get_func_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_FUNC_ID((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_func_id); + +u16 sss_get_glb_pf_vf_offset(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_GLOBAL_VF_OFFSET(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_glb_pf_vf_offset); + +u8 sss_get_ppf_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PPF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_ppf_id); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c new file mode 100644 index 0000000000000..5451b85ffac88 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_common.h" +#include 
"sss_hwdev.h" +#include "sss_hwif_init.h" +#include "sss_hwif_api.h" + +#define SSS_WAIT_CHIP_READY_TIMEOUT 10000 + +#define SSS_WAIT_DB_READY_TIMEOUT 60000 + +#define SSS_MAX_MSIX_ENTRY 2048 + +#define SSS_AF0_FUNC_GLOBAL_ID_SHIFT 0 +#define SSS_AF0_PF_ID_SHIFT 12 +#define SSS_AF0_PCI_INTF_ID_SHIFT 17 +#define SSS_AF0_VF_IN_PF_SHIFT 20 +#define SSS_AF0_FUNC_TYPE_SHIFT 28 + +#define SSS_AF0_FUNC_GLOBAL_ID_MASK 0xFFF +#define SSS_AF0_PF_ID_MASK 0x1F +#define SSS_AF0_PCI_INTF_ID_MASK 0x7 +#define SSS_AF0_VF_IN_PF_MASK 0xFF +#define SSS_AF0_FUNC_TYPE_MASK 0x1 + +#define SSS_GET_AF0(val, member) \ + (((val) >> SSS_AF0_##member##_SHIFT) & SSS_AF0_##member##_MASK) + +#define SSS_AF2_CEQ_PER_FUNC_SHIFT 0 +#define SSS_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 +#define SSS_AF2_IRQ_PER_FUNC_SHIFT 16 + +#define SSS_AF2_CEQ_PER_FUNC_MASK 0x1FF +#define SSS_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 +#define SSS_AF2_IRQ_PER_FUNC_MASK 0x7FF + +#define SSS_GET_AF2(val, member) \ + (((val) >> SSS_AF2_##member##_SHIFT) & SSS_AF2_##member##_MASK) + +#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 +#define SSS_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 + +#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF +#define SSS_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF + +#define SSS_GET_AF3(val, member) \ + (((val) >> SSS_AF3_##member##_SHIFT) & SSS_AF3_##member##_MASK) + +#define SSS_AF5_OUTBOUND_CTRL_SHIFT 0 +#define SSS_AF5_OUTBOUND_CTRL_MASK 0x1 + +#define SSS_GET_AF5(val, member) \ + (((val) >> SSS_AF5_##member##_SHIFT) & SSS_AF5_##member##_MASK) + +#define SSS_SET_AF5(val, member) \ + (((val) & SSS_AF5_##member##_MASK) << SSS_AF5_##member##_SHIFT) + +#define SSS_CLEAR_AF5(val, member) \ + ((val) & (~(SSS_AF5_##member##_MASK << SSS_AF5_##member##_SHIFT))) + +#define SSS_MPF_ELECTION_ID_SHIFT 0 + +#define SSS_MPF_ELECTION_ID_MASK 0x1F + +#define SSS_SET_MPF(val, member) \ + (((val) & SSS_MPF_ELECTION_##member##_MASK) << \ + SSS_MPF_ELECTION_##member##_SHIFT) + +#define SSS_GET_MPF(val, member) \ + (((val) >> 
SSS_MPF_ELECTION_##member##_SHIFT) & \ + SSS_MPF_ELECTION_##member##_MASK) + +#define SSS_CLEAR_MPF(val, member) \ + ((val) & (~(SSS_MPF_ELECTION_##member##_MASK << \ + SSS_MPF_ELECTION_##member##_SHIFT))) + +static enum sss_process_ret sss_check_pcie_link_handle(void *data) +{ + u32 status; + + status = sss_chip_get_pcie_link_status(data); + if (status == SSS_PCIE_LINK_DOWN) + return SSS_PROCESS_ERR; + else if (status == SSS_PCIE_LINK_UP) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static int sss_wait_pcie_link_up(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_check_handler_timeout(hwdev, sss_check_pcie_link_handle, + SSS_WAIT_CHIP_READY_TIMEOUT, USEC_PER_MSEC); + if (ret == -ETIMEDOUT) + sdk_err(hwdev->dev_hdl, "Wait for chip ready timeout\n"); + + return ret; +} + +static int sss_chip_get_func_attr0(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR0_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_GLOBAL_ID(hwif, SSS_GET_AF0(attr, FUNC_GLOBAL_ID)); + SSS_SET_HWIF_PF_ID(hwif, SSS_GET_AF0(attr, PF_ID)); + SSS_SET_HWIF_PCI_INTF_ID(hwif, SSS_GET_AF0(attr, PCI_INTF_ID)); + SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_GET_AF0(attr, FUNC_TYPE)); + + return 0; +} + +static int sss_chip_get_func_attr1(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR1_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_AF1(attr, PPF_ID)); + SSS_SET_HWIF_AEQ_NUM(hwif, BIT(SSS_GET_AF1(attr, AEQ_PER_FUNC))); + + return 0; +} + +static int sss_chip_get_func_attr2(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR2_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_CEQ_NUM(hwif, (u8)SSS_GET_AF2(attr, CEQ_PER_FUNC)); + SSS_SET_HWIF_IRQ_NUM(hwif, SSS_GET_AF2(attr, IRQ_PER_FUNC)); + if (SSS_GET_HWIF_IRQ_NUM(hwif) > SSS_MAX_MSIX_ENTRY) + SSS_SET_HWIF_IRQ_NUM(hwif, SSS_MAX_MSIX_ENTRY); + 
SSS_SET_HWIF_DMA_ATTR_NUM(hwif, BIT(SSS_GET_AF2(attr, DMA_ATTR_PER_FUNC))); + + return 0; +} + +static int sss_chip_get_func_attr3(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR3_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, SSS_GET_AF3(attr, GLOBAL_VF_ID_OF_PF)); + + return 0; +} + +static int sss_chip_get_func_attr6(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_SQ_NUM(hwif, SSS_GET_AF6(attr, FUNC_MAX_SQ)); + SSS_SET_HWIF_MSIX_EN(hwif, SSS_GET_AF6(attr, MSIX_FLEX_EN)); + + return 0; +} + +static int sss_hwif_init_func_attr(struct sss_hwif *hwif) +{ + int ret; + + ret = sss_chip_get_func_attr0(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr1(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr2(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr3(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr6(hwif); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_chip_init_ppf(struct sss_hwif *hwif) +{ + u32 val; + + val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR); + val = SSS_CLEAR_PPF(val, ID); + val |= SSS_SET_PPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID); + + sss_chip_write_reg(hwif, SSS_CSR_PPF_ELECT_ADDR, val); + + /* Check PPF */ + val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR); + SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_PPF(val, ID)); + if (SSS_GET_HWIF_PPF_ID(hwif) == SSS_GET_HWIF_GLOBAL_ID(hwif)) + SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_FUNC_TYPE_PPF); +} + +static void sss_chip_get_mpf(struct sss_hwif *hwif) +{ + u32 mpf; + + mpf = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR); + SSS_SET_HWIF_MPF_ID(hwif, SSS_GET_MPF(mpf, ID)); +} + +static void sss_chip_init_mpf(struct sss_hwif *hwif) +{ + u32 val; + + val = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR); + val = 
SSS_CLEAR_MPF(val, ID); + val |= SSS_SET_MPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID); + + sss_chip_write_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR, val); +} + +static int sss_hwif_alloc_db_pool(struct sss_hwif *hwif) +{ + struct sss_db_pool *pool = &hwif->db_pool; + u32 bit_size; + + bit_size = (hwif->db_dwqe_len > SSS_DB_DWQE_SIZE) ? SSS_DB_MAX_AREAS : + ((u32)(hwif->db_dwqe_len / SSS_DB_PAGE_SIZE)); + pool->bitmap = bitmap_zalloc(bit_size, GFP_KERNEL); + if (!pool->bitmap) { + pr_err("Fail to allocate db area.\n"); + return -ENOMEM; + } + pool->bit_size = bit_size; + spin_lock_init(&pool->id_lock); + + return 0; +} + +static void sss_hwif_free_db_pool(struct sss_db_pool *pool) +{ + kfree(pool->bitmap); +} + +static void sss_chip_disable_all_msix(struct sss_hwdev *hwdev) +{ + u16 i; + u16 irq_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif); + + for (i = 0; i < irq_num; i++) + sss_chip_set_msix_state(hwdev, i, SSS_MSIX_DISABLE); +} + +static enum sss_process_ret sss_chip_check_db_ready(void *data) +{ + int outbound_status; + int db_status; + struct sss_hwif *hwif = data; + u32 db_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR4_ADDR); + u32 outband_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR5_ADDR); + + db_status = SSS_GET_AF4(db_attr, DOORBELL_CTRL); + outbound_status = SSS_GET_AF5(outband_attr, OUTBOUND_CTRL); + + if (db_status == DB_ENABLE && outbound_status == OUTBOUND_ENABLE) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static int sss_wait_db_ready(struct sss_hwif *hwif) +{ + return sss_check_handler_timeout(hwif, sss_chip_check_db_ready, + SSS_WAIT_DB_READY_TIMEOUT, USEC_PER_MSEC); +} + +static void sss_hwif_init_bar_base(struct sss_pci_adapter *adapter) +{ + struct sss_hwif *hwif = SSS_TO_HWIF(adapter->hwdev); + + hwif->db_dwqe_len = adapter->db_dwqe_len; + hwif->db_base_vaddr = adapter->db_reg_bar; + hwif->db_base_paddr = adapter->db_base_paddr; + + hwif->mgmt_reg_base = adapter->mgmt_reg_bar; + hwif->cfg_reg_base = (adapter->mgmt_reg_bar) ? 
+ adapter->cfg_reg_bar : + ((u8 *)adapter->cfg_reg_bar + SSS_VF_CFG_REG_OFFSET); +} + +static int sss_hwif_wait_chip_ready(struct sss_hwdev *hwdev) +{ + int ret; + u32 db_attr; + u32 outband_attr; + + ret = sss_wait_pcie_link_up(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Pcie is not link up\n"); + return ret; + } + + ret = sss_wait_db_ready(hwdev->hwif); + if (ret != 0) { + db_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR4_ADDR); + outband_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR5_ADDR); + sdk_err(hwdev->dev_hdl, "Hw doorbell is disabled, db 0x%x outbound 0x%x\n", + db_attr, outband_attr); + return ret; + } + + return 0; +} + +static void sss_hwif_init_pf(struct sss_hwdev *hwdev) +{ + struct sss_hwif *hwif = hwdev->hwif; + + if (!SSS_IS_VF(hwdev)) { + sss_chip_init_ppf(hwif); + + if (SSS_IS_PPF(hwdev)) + sss_chip_init_mpf(hwif); + sss_chip_get_mpf(hwif); + } + + sss_chip_disable_all_msix(hwdev); + + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, + "Global_func_id: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", + SSS_GET_HWIF_GLOBAL_ID(hwif), SSS_GET_HWIF_FUNC_TYPE(hwif), + SSS_GET_HWIF_PCI_INTF_ID(hwif), SSS_GET_HWIF_PPF_ID(hwif), + SSS_GET_HWIF_MPF_ID(hwif)); +} + +int sss_hwif_init(struct sss_pci_adapter *adapter) +{ + struct sss_hwdev *hwdev = adapter->hwdev; + struct sss_hwif *hwif = NULL; + int ret; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwif->pdev = hwdev->pcidev_hdl; + hwdev->hwif = hwif; + + sss_hwif_init_bar_base(adapter); + + ret = sss_hwif_alloc_db_pool(hwif); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init db pool.\n"); + goto alloc_db_pool_err; + } + + ret = sss_hwif_wait_chip_ready(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Chip is not ready\n"); + goto wait_chip_ready_err; + } + + ret = sss_hwif_init_func_attr(hwif); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail init hwif attr\n"); + goto wait_chip_ready_err; + } + + 
sss_hwif_init_pf(hwdev); + + return 0; + +wait_chip_ready_err: + sss_dump_chip_err_info(hwdev); + sss_hwif_free_db_pool(&hwif->db_pool); +alloc_db_pool_err: + kfree(hwif); + hwdev->hwif = NULL; + + return ret; +} + +void sss_hwif_deinit(struct sss_hwdev *hwdev) +{ + sss_hwif_free_db_pool(&hwdev->hwif->db_pool); + kfree(hwdev->hwif); + hwdev->hwif = NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h new file mode 100644 index 0000000000000..ca5e2ce972e55 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_INIT_H +#define SSS_HWIF_INIT_H + +#include "sss_hwdev.h" +#include "sss_adapter.h" + +int sss_hwif_init(struct sss_pci_adapter *adapter); +void sss_hwif_deinit(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c new file mode 100644 index 0000000000000..574e9aa025153 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_hw_svc_cap.h" + +#define SSS_GET_NEED_IRQ_NUM(hwif, intr_num) \ + (SSS_GET_HWIF_MSIX_EN(hwif) ? 
(SSS_GET_HWIF_AEQ_NUM(hwif) + \ + SSS_GET_HWIF_CEQ_NUM(hwif) + (hwif)->attr.sq_num) : (intr_num)) + +#define SSS_MIN_VECTOR 2 + +static int sss_alloc_irq_info(struct sss_hwdev *hwdev) +{ + u16 total_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif); + u16 need_num = SSS_GET_NEED_IRQ_NUM(hwdev->hwif, total_num); + struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info; + struct sss_irq_info *irq_info = &mgmt_info->irq_info; + + if (total_num == 0) { + sdk_err(hwdev->dev_hdl, "Mgmt irq info: intr total_num = 0, msix_flex_en %d\n", + SSS_GET_HWIF_MSIX_EN(hwdev->hwif)); + return -EFAULT; + } + + if (need_num > total_num) { + sdk_warn(hwdev->dev_hdl, "Mgmt irq info: intr total_num %d < need_num %d, msix_flex_en %d\n", + total_num, need_num, SSS_GET_HWIF_MSIX_EN(hwdev->hwif)); + need_num = total_num; + } + + irq_info->irq = kcalloc(total_num, sizeof(*irq_info->irq), GFP_KERNEL); + if (!irq_info->irq) + return -ENOMEM; + + irq_info->max_num = need_num; + + return 0; +} + +static void sss_free_irq_info(struct sss_hwdev *hwdev) +{ + kfree(hwdev->mgmt_info->irq_info.irq); + hwdev->mgmt_info->irq_info.irq = NULL; +} + +int sss_init_irq_info(struct sss_hwdev *hwdev) +{ + u16 i = 0; + u16 irq_num; + int enable_irq_num; + int ret; + struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info; + struct sss_irq *irq = NULL; + struct msix_entry *entry = NULL; + + ret = sss_alloc_irq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq info, err: %d\n", ret); + return ret; + } + + irq_num = mgmt_info->irq_info.max_num; + entry = kcalloc(irq_num, sizeof(*entry), GFP_KERNEL); + if (!entry) { + sss_free_irq_info(hwdev); + return -ENOMEM; + } + + for (i = 0; i < irq_num; i++) + entry[i].entry = i; + + enable_irq_num = pci_enable_msix_range(hwdev->pcidev_hdl, entry, + SSS_MIN_VECTOR, irq_num); + if (enable_irq_num < 0) { + kfree(entry); + sss_free_irq_info(hwdev); + sdk_err(hwdev->dev_hdl, "Fail to alloc msix entries with threshold 2. 
enabled_irq: %d\n", + enable_irq_num); + return -ENOMEM; + } + + irq_num = (u16)enable_irq_num; + mgmt_info->irq_info.total_num = irq_num; + mgmt_info->irq_info.free_num = irq_num; + mgmt_info->svc_cap.intr_type = SSS_INTR_TYPE_MSIX; + + irq = mgmt_info->irq_info.irq; + for (i = 0; i < irq_num; i++) { + irq[i].desc.msix_id = entry[i].entry; + irq[i].desc.irq_id = entry[i].vector; + irq[i].type = SSS_SERVICE_TYPE_MAX; + irq[i].busy = SSS_CFG_FREE; + } + + mutex_init(&mgmt_info->irq_info.irq_mutex); + + sdk_info(hwdev->dev_hdl, "Success to request %u msix vector.\n", irq_num); + kfree(entry); + + return 0; +} + +void sss_deinit_irq_info(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + struct sss_irq_info *irq_info = &hwdev->mgmt_info->irq_info; + + if (irq_info->free_num != irq_info->total_num) + sdk_err(hwdev->dev_hdl, "Fail to reclaim all irq and eq, please check\n"); + + if (svc_cap->intr_type == SSS_INTR_TYPE_MSIX) + pci_disable_msix(hwdev->pcidev_hdl); + else if (svc_cap->intr_type == SSS_INTR_TYPE_MSI) + pci_disable_msi(hwdev->pcidev_hdl); + + sss_free_irq_info(hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h new file mode 100644 index 0000000000000..0918d74ebaa01 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_IRQ_H +#define SSS_HWIF_IRQ_H + +#include "sss_hwdev.h" + +int sss_init_irq_info(struct sss_hwdev *dev); +void sss_deinit_irq_info(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c new file mode 100644 index 0000000000000..4490e4378cbca --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 
2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_aeq.h" +#include "sss_csr.h" +#include "sss_common.h" + +#define SSS_MBX_INT_DST_AEQN_SHIFT 10 +#define SSS_MBX_INT_SRC_RESP_AEQN_SHIFT 12 +#define SSS_MBX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define SSS_MBX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define SSS_MBX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define SSS_MBX_INT_WB_EN_SHIFT 28 + +#define SSS_MBX_INT_DST_AEQN_MASK 0x3 +#define SSS_MBX_INT_SRC_RESP_AEQN_MASK 0x3 +#define SSS_MBX_INT_STAT_DMA_MASK 0x3F +#define SSS_MBX_INT_TX_SIZE_MASK 0x1F +#define SSS_MBX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define SSS_MBX_INT_WB_EN_MASK 0x1 + +#define SSS_SET_MBX_INT(val, field) \ + (((val) & SSS_MBX_INT_##field##_MASK) << \ + SSS_MBX_INT_##field##_SHIFT) + +enum sss_mbx_tx_status { + SSS_MBX_TX_NOT_COMPLETE = 1, +}; + +#define SSS_MBX_CTRL_TRIGGER_AEQE_SHIFT 0 + +#define SSS_MBX_CTRL_TX_STATUS_SHIFT 1 +#define SSS_MBX_CTRL_DST_FUNC_SHIFT 16 + +#define SSS_MBX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define SSS_MBX_CTRL_TX_STATUS_MASK 0x1 +#define SSS_MBX_CTRL_DST_FUNC_MASK 0x1FFF + +#define SSS_SET_MBX_CTRL(val, field) \ + (((val) & SSS_MBX_CTRL_##field##_MASK) << \ + SSS_MBX_CTRL_##field##_SHIFT) + +#define SSS_MBX_SEGLEN_MASK \ + SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define SSS_MBX_MSG_POLL_TIMEOUT_MS 8000 +#define SSS_MBX_COMPLETE_WAIT_TIME_MS 40000U + +#define SSS_SEQ_ID_START_VAL 0 + +/* mbx write back status is 16B, only first 4B is used */ +#define SSS_MBX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define SSS_MBX_WB_STATUS_MASK 0xFF +#define SSS_MBX_WB_ERRCODE_MASK 0xFF00 +#define SSS_MBX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define 
SSS_MBX_WB_STATUS_NOT_FINISHED 0x00 + +#define SSS_MBX_STATUS_FINISHED(wb) \ + (((wb) & SSS_MBX_WB_STATUS_MASK) != SSS_MBX_WB_STATUS_NOT_FINISHED) +#define SSS_MBX_STATUS_SUCCESS(wb) \ + (((wb) & SSS_MBX_WB_STATUS_MASK) == SSS_MBX_WB_STATUS_FINISHED_SUCCESS) +#define SSS_MBX_STATUS_ERRCODE(wb) \ + ((wb) & SSS_MBX_WB_ERRCODE_MASK) + +#define SSS_NO_DMA_ATTR 0 + +#define SSS_MBX_MSG_ID_MASK 0xF +#define SSS_MBX_MSG_ID(mbx) ((mbx)->send_msg_id) +#define SSS_INCREASE_MBX_MSG_ID(mbx) \ + ((mbx)->send_msg_id = ((mbx)->send_msg_id + 1) & SSS_MBX_MSG_ID_MASK) + +#define SSS_MBX_MSG_CHN_STOP(mbx) \ + ((((mbx)->lock_channel_en) && \ + test_bit((mbx)->cur_msg_channel, &(mbx)->channel_stop)) ? true : false) + +#define SSS_MBX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +#define SSS_MBX_XOR_DATA_ALIGN 4 + +#define SSS_MQ_ID_MASK(mq, id) ((id) & ((mq)->depth - 1)) +#define SSS_IS_MSG_QUEUE_FULL(mq) \ + (SSS_MQ_ID_MASK(mq, (mq)->pi + 1) == SSS_MQ_ID_MASK(mq, (mq)->ci)) + +#define SSS_MBX_TRY_LOCK_SLEPP_US 1000 + +#define SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, ack_type, type, direction, cmd) \ + (SSS_SET_MSG_HEADER((msg_len), MSG_LEN) | \ + SSS_SET_MSG_HEADER((mod), MODULE) | \ + SSS_SET_MSG_HEADER(SSS_MBX_SEG_SIZE, SEG_LEN) | \ + SSS_SET_MSG_HEADER((ack_type), NO_ACK) | \ + SSS_SET_MSG_HEADER((type), DATA_TYPE) | \ + SSS_SET_MSG_HEADER(SSS_SEQ_ID_START_VAL, SEQID) | \ + SSS_SET_MSG_HEADER(SSS_NOT_LAST_SEG, LAST) | \ + SSS_SET_MSG_HEADER((direction), DIRECTION) | \ + SSS_SET_MSG_HEADER((cmd), CMD) | \ + SSS_SET_MSG_HEADER((msg_info)->msg_id, MSG_ID) | \ + SSS_SET_MSG_HEADER((((hwdev)->poll || \ + (hwdev)->hwif->attr.aeq_num >= SSS_MGMT_RSP_MSG_AEQ) ? 
\ + SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ), AEQ_ID) | \ + SSS_SET_MSG_HEADER(SSS_MSG_SRC_MBX, SOURCE) | \ + SSS_SET_MSG_HEADER(!!(msg_info)->state, STATUS) | \ + SSS_SET_MSG_HEADER(sss_get_global_func_id(hwdev), SRC_GLB_FUNC_ID)) + +#define SSS_MBX_SEG_LEN_ALIGN 4 + +enum sss_msg_aeq_type { + SSS_ASYNC_MSG_AEQ = 0, + /* indicate dest func or mgmt cpu which aeq to response mbx message */ + SSS_MBX_RSP_MSG_AEQ = 1, + /* indicate mgmt cpu which aeq to response adm message */ + SSS_MGMT_RSP_MSG_AEQ = 2, +}; + +enum sss_mbx_order_type { + SSS_MBX_STRONG_ORDER, +}; + +enum sss_mbx_wb_type { + SSS_MBX_WB = 1, +}; + +enum sss_mbx_aeq_trig_type { + SSS_MBX_NOT_TRIG, +}; + +struct sss_mbx_dma_msg { + u32 xor; + u32 dma_addr_h; + u32 dma_addr_l; + u32 msg_len; + u64 rsvd; +}; + +static struct sss_msg_buffer *sss_get_msg_buffer_from_mgmt(struct sss_mbx *mbx) +{ + return &mbx->mgmt_msg; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_pf(struct sss_mbx *mbx, u64 src_func_id) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (src_func_id != sss_get_pf_id_of_vf(hwdev) || !mbx->func_msg) + return NULL; + + return mbx->func_msg; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_vf(struct sss_mbx *mbx, u64 src_func_id) +{ + u16 func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + func_id = (u16)(src_func_id - 1U) - sss_get_glb_pf_vf_offset(hwdev); + if (func_id >= mbx->num_func_msg) + return NULL; + + return &mbx->func_msg[func_id]; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_ppf(struct sss_mbx *mbx, u64 src_func_id) +{ + u16 func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (!mbx->support_h2h_msg) + return NULL; + + for (func_id = 0; func_id < SSS_MAX_HOST_NUM(hwdev); func_id++) { + if (src_func_id == sss_chip_get_host_ppf_id(hwdev, (u8)func_id)) + break; + } + + if (func_id == SSS_MAX_HOST_NUM(hwdev) || !mbx->host_msg) + return NULL; + + return &mbx->host_msg[func_id]; +} + +struct sss_msg_desc 
*sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_msg_buffer *msg_buffer = NULL; + + if (src_func_id == SSS_MGMT_SRC_ID) + msg_buffer = sss_get_msg_buffer_from_mgmt(mbx); + else if (SSS_IS_VF(hwdev)) + msg_buffer = sss_get_msg_buffer_from_pf(mbx, src_func_id); + else if (src_func_id > sss_get_glb_pf_vf_offset(hwdev)) + msg_buffer = sss_get_msg_buffer_from_vf(mbx, src_func_id); + else + msg_buffer = sss_get_msg_buffer_from_ppf(mbx, src_func_id); + + return (direction == SSS_DIRECT_SEND_MSG) ? + &msg_buffer->recv_msg : &msg_buffer->resp_msg; +} + +static u32 sss_mbx_dma_data_xor(u32 *data, u16 data_len) +{ + u16 i; + u16 cnt = data_len / sizeof(u32); + u32 val = SSS_MBX_DMA_MSG_INIT_XOR_VAL; + + for (i = 0; i < cnt; i++) + val ^= data[i]; + + return val; +} + +static void sss_mbx_fill_dma_msg_buf(struct sss_mbx_dma_queue *queue, + struct sss_mbx_dma_msg *dma_msg, + void *data, u16 data_len) +{ + u64 pi; + u64 dma_paddr; + void *dma_vaddr; + + pi = queue->pi * SSS_MBX_BUF_SIZE_MAX; + dma_vaddr = (u8 *)queue->dma_buff_vaddr + pi; + dma_paddr = queue->dma_buff_paddr + pi; + memcpy(dma_vaddr, data, data_len); + + dma_msg->dma_addr_h = upper_32_bits(dma_paddr); + dma_msg->dma_addr_l = lower_32_bits(dma_paddr); + dma_msg->msg_len = data_len; + dma_msg->xor = sss_mbx_dma_data_xor(dma_vaddr, + ALIGN(data_len, SSS_MBX_XOR_DATA_ALIGN)); +} + +static struct sss_mbx_dma_queue * +sss_get_mbx_dma_queue(struct sss_mbx *mbx, + enum sss_msg_ack_type ack_type) +{ + u32 val; + struct sss_mbx_dma_queue *queue = NULL; + + val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF); + if (ack_type == SSS_MSG_ACK) { + queue = &mbx->sync_msg_queue; + queue->ci = SSS_GET_MBX_MQ_CI(val, SYNC); + } else { + queue = &mbx->async_msg_queue; + queue->ci = SSS_GET_MBX_MQ_CI(val, ASYNC); + } + + if (SSS_IS_MSG_QUEUE_FULL(queue)) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Mbx sync mq is busy, pi: %u, ci: 
%u\n", + queue->pi, SSS_MQ_ID_MASK(queue, queue->ci)); + return NULL; + } + + return queue; +} + +static void sss_fill_mbx_msg_body(struct sss_mbx_dma_queue *queue, + struct sss_mbx_dma_msg *dma_msg, void *msg_body, u16 body_len) +{ + sss_mbx_fill_dma_msg_buf(queue, dma_msg, msg_body, body_len); + queue->pi = SSS_MQ_ID_MASK(queue, queue->pi + 1); +} + +static void sss_clear_mbx_status(struct sss_mbx_send *send_mbx) +{ + *send_mbx->wb_state = 0; + + /* clear mbx wb state */ + wmb(); +} + +static void sss_chip_send_mbx_msg_header(struct sss_hwdev *hwdev, + struct sss_mbx_send *send_mbx, u64 *msg_header) +{ + u32 i; + u32 *header = (u32 *)msg_header; + u32 cnt = SSS_MBX_HEADER_SIZE / sizeof(u32); + + for (i = 0; i < cnt; i++) + __raw_writel(cpu_to_be32(*(header + i)), send_mbx->data + i * sizeof(u32)); +} + +static void sss_chip_send_mbx_msg_body(struct sss_hwdev *hwdev, + struct sss_mbx_send *send_mbx, void *body, u16 body_len) +{ + u32 *msg_data = body; + u32 size = sizeof(u32); + u32 i; + u8 buf[SSS_MBX_SEG_SIZE] = {0}; + u32 cnt = ALIGN(body_len, size) / size; + + if (body_len % size != 0) { + memcpy(buf, body, body_len); + msg_data = (u32 *)buf; + } + + for (i = 0; i < cnt; i++) { + __raw_writel(cpu_to_be32(*(msg_data + i)), + send_mbx->data + SSS_MBX_HEADER_SIZE + i * size); + } +} + +static void sss_chip_write_mbx_msg_attr(struct sss_mbx *mbx, + u16 dest, u16 aeq_num, u16 seg_len) +{ + u16 size; + u16 dest_func_id; + u32 intr; + u32 ctrl; + + size = ALIGN(seg_len + SSS_MBX_HEADER_SIZE, SSS_MBX_SEG_LEN_ALIGN) >> 2; + intr = SSS_SET_MBX_INT(aeq_num, DST_AEQN) | + SSS_SET_MBX_INT(0, SRC_RESP_AEQN) | + SSS_SET_MBX_INT(SSS_NO_DMA_ATTR, STAT_DMA) | + SSS_SET_MBX_INT(size, TX_SIZE) | + SSS_SET_MBX_INT(SSS_MBX_STRONG_ORDER, STAT_DMA_SO_RO) | + SSS_SET_MBX_INT(SSS_MBX_WB, WB_EN); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, + SSS_HW_CSR_MBX_INT_OFFSET_OFF, intr); + + /* make sure write mbx intr attr reg */ + wmb(); + + dest_func_id = (SSS_IS_VF(SSS_TO_HWDEV(mbx)) && 
dest != SSS_MGMT_SRC_ID) ? 0 : dest; + ctrl = SSS_SET_MBX_CTRL(SSS_MBX_TX_NOT_COMPLETE, TX_STATUS) | + SSS_SET_MBX_CTRL(SSS_MBX_NOT_TRIG, TRIGGER_AEQE) | + SSS_SET_MBX_CTRL(dest_func_id, DST_FUNC); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, + SSS_HW_CSR_MBX_CTRL_OFF, ctrl); + + /* make sure write mbx ctrl reg */ + wmb(); +} + +static void sss_dump_mbx_reg(struct sss_hwdev *hwdev) +{ + u32 val1; + u32 val2; + + val1 = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_CTRL_OFF); + val2 = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_INT_OFFSET_OFF); + + sdk_err(hwdev->dev_hdl, "Mbx ctrl reg:0x%x, intr offset:0x%x\n", val1, val2); +} + +static u16 sss_get_mbx_status(const struct sss_mbx_send *send_mbx) +{ + u64 val = be64_to_cpu(*send_mbx->wb_state); + + /* read wb state before returning it */ + rmb(); + + return (u16)(val & SSS_MBX_WB_STATUS_ERRCODE_MASK); +} + +static enum sss_process_ret sss_check_mbx_wb_status(void *priv_data) +{ + u16 status; + struct sss_mbx *mbx = priv_data; + + if (SSS_MBX_MSG_CHN_STOP(mbx) || !SSS_TO_HWDEV(mbx)->chip_present_flag) + return SSS_PROCESS_ERR; + + status = sss_get_mbx_status(&mbx->mbx_send); + + return SSS_MBX_STATUS_FINISHED(status) ? SSS_PROCESS_OK : SSS_PROCESS_DOING; +} + +static int sss_chip_send_mbx_fragment(struct sss_mbx *mbx, u16 dest_func_id, + u64 msg_header, void *msg_body, u16 body_len) +{ + u16 aeq_type; + u16 status = 0; + u16 err_code; + u16 direction; + int ret; + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION); + aeq_type = (SSS_GET_HWIF_AEQ_NUM(hwdev->hwif) > SSS_MBX_RSP_MSG_AEQ && + direction != SSS_DIRECT_SEND_MSG) ? 
SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ; + + sss_clear_mbx_status(send_mbx); + + sss_chip_send_mbx_msg_header(hwdev, send_mbx, &msg_header); + + sss_chip_send_mbx_msg_body(hwdev, send_mbx, msg_body, body_len); + + sss_chip_write_mbx_msg_attr(mbx, dest_func_id, aeq_type, body_len); + + ret = sss_check_handler_timeout(mbx, sss_check_mbx_wb_status, + SSS_MBX_MSG_POLL_TIMEOUT_MS, USEC_PER_MSEC); + status = sss_get_mbx_status(send_mbx); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Send mbx seg timeout, wb status: 0x%x\n", status); + sss_dump_mbx_reg(hwdev); + return -ETIMEDOUT; + } + + if (!SSS_MBX_STATUS_SUCCESS(status)) { + sdk_err(hwdev->dev_hdl, "Fail to send mbx seg to func %u, wb status: 0x%x\n", + dest_func_id, status); + err_code = SSS_MBX_STATUS_ERRCODE(status); + return (err_code != 0) ? err_code : -EFAULT; + } + + return 0; +} + +static int sss_send_mbx_to_chip(struct sss_mbx *mbx, u16 dest_func_id, + u64 msg_header, u8 *msg_body, u16 body_len) +{ + int ret; + u16 seg_len = SSS_MBX_SEG_SIZE; + u32 seq_id = 0; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + while (body_len > 0) { + if (body_len <= SSS_MBX_SEG_SIZE) { + msg_header &= ~SSS_MBX_SEGLEN_MASK; + msg_header |= SSS_SET_MSG_HEADER(body_len, SEG_LEN); + msg_header |= SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST); + seg_len = body_len; + } + + ret = sss_chip_send_mbx_fragment(mbx, dest_func_id, msg_header, msg_body, seg_len); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to send mbx seg, seq_id=0x%llx\n", + SSS_GET_MSG_HEADER(msg_header, SEQID)); + return ret; + } + + seq_id++; + msg_body += seg_len; + body_len -= seg_len; + msg_header &= ~(SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEQID_MASK, SEQID)); + msg_header |= SSS_SET_MSG_HEADER(seq_id, SEQID); + } + + return 0; +} + +int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, void *msg, + u16 msg_len, u16 dest_func_id, enum sss_msg_direction_type direction, + enum sss_msg_ack_type ack_type, struct sss_mbx_msg_info *msg_info) +{ + u8 *msg_body = NULL; + 
u64 msg_header = 0; + int ret = 0; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_mbx_dma_msg msg_dma = {0}; + enum sss_data_type type = SSS_INLINE_DATA; + struct sss_mbx_dma_queue *queue = NULL; + + mutex_lock(&mbx->msg_send_lock); + + if (SSS_IS_DMA_MBX_MSG(dest_func_id) && !SSS_SUPPORT_MBX_SEGMENT(hwdev)) { + queue = sss_get_mbx_dma_queue(mbx, ack_type); + if (!queue) { + ret = -EBUSY; + goto out; + } + + sss_fill_mbx_msg_body(queue, &msg_dma, msg, msg_len); + + type = SSS_DMA_DATA; + msg = &msg_dma; + msg_len = sizeof(msg_dma); + } + + msg_body = (u8 *)msg; + msg_header = SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, + ack_type, type, direction, cmd); + + ret = sss_send_mbx_to_chip(mbx, dest_func_id, msg_header, msg_body, msg_len); + +out: + mutex_unlock(&mbx->msg_send_lock); + + return ret; +} + +static void sss_set_mbx_event_flag(struct sss_mbx *mbx, + enum sss_mbx_event_state event_flag) +{ + spin_lock(&mbx->mbx_lock); + mbx->event_flag = event_flag; + spin_unlock(&mbx->mbx_lock); +} + +static enum sss_process_ret check_mbx_msg_finish(void *priv_data) +{ + struct sss_mbx *mbx = priv_data; + + if (SSS_MBX_MSG_CHN_STOP(mbx) || SSS_TO_HWDEV(mbx)->chip_present_flag == 0) + return SSS_PROCESS_ERR; + + return (mbx->event_flag == SSS_EVENT_SUCCESS) ? SSS_PROCESS_OK : SSS_PROCESS_DOING; +} + +static int sss_wait_mbx_msg_completion(struct sss_mbx *mbx, u32 timeout) +{ + u32 wait_time; + int ret; + + wait_time = (timeout != 0) ? 
timeout : SSS_MBX_COMPLETE_WAIT_TIME_MS; + ret = sss_check_handler_timeout(mbx, check_mbx_msg_finish, + wait_time, USEC_PER_MSEC); + if (ret != 0) { + sss_set_mbx_event_flag(mbx, SSS_EVENT_TIMEOUT); + return -ETIMEDOUT; + } + + sss_set_mbx_event_flag(mbx, SSS_EVENT_END); + + return 0; +} + +static int sss_send_mbx_msg_lock(struct sss_mbx *mbx, u16 channel) +{ + if (!mbx->lock_channel_en) { + mutex_lock(&mbx->mbx_send_lock); + return 0; + } + + while (test_bit(channel, &mbx->channel_stop) == 0) { + if (mutex_trylock(&mbx->mbx_send_lock) != 0) + return 0; + + usleep_range(SSS_MBX_TRY_LOCK_SLEPP_US - 1, SSS_MBX_TRY_LOCK_SLEPP_US); + } + + return -EAGAIN; +} + +static void sss_send_mbx_msg_unlock(struct sss_mbx *mbx) +{ + mutex_unlock(&mbx->mbx_send_lock); +} + +int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd, + u16 dest_func_id, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_msg_desc *msg_desc = NULL; + struct sss_mbx_msg_info msg_info = {0}; + int ret; + + if (SSS_TO_HWDEV(mbx)->chip_present_flag == 0) + return -EPERM; + + msg_desc = sss_get_mbx_msg_desc(mbx, dest_func_id, SSS_RESP_MSG); + if (!msg_desc) + return -EFAULT; + + ret = sss_send_mbx_msg_lock(mbx, channel); + if (ret != 0) + return ret; + + mbx->cur_msg_channel = channel; + SSS_INCREASE_MBX_MSG_ID(mbx); + sss_set_mbx_event_flag(mbx, SSS_EVENT_START); + + msg_info.msg_id = SSS_MBX_MSG_ID(mbx); + ret = sss_send_mbx_msg(mbx, mod, cmd, buf_in, in_size, dest_func_id, + SSS_DIRECT_SEND_MSG, SSS_MSG_ACK, &msg_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Fail to send mbx mod %u, cmd %u, msg_id: %u, err: %d\n", + mod, cmd, msg_info.msg_id, ret); + sss_set_mbx_event_flag(mbx, SSS_EVENT_FAIL); + goto send_err; + } + + if (sss_wait_mbx_msg_completion(mbx, timeout)) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Send mbx msg timeout, msg_id: %u\n", msg_info.msg_id); + sss_dump_aeq_info(SSS_TO_HWDEV(mbx)); + ret = -ETIMEDOUT; + goto 
send_err; + } + + if (mod != msg_desc->mod || cmd != msg_desc->cmd) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid response mbx message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", + msg_desc->mod, msg_desc->cmd, mod, cmd); + ret = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_info.state) { + ret = msg_desc->msg_info.state; + goto send_err; + } + + if (buf_out && out_size) { + if (*out_size < msg_desc->msg_len) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid response mbx message length: %u for mod %d cmd %u, should less than: %u\n", + msg_desc->msg_len, mod, cmd, *out_size); + ret = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_len) + memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); + + *out_size = msg_desc->msg_len; + } + +send_err: + sss_send_mbx_msg_unlock(mbx); + + return ret; +} + +int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) +{ + struct sss_mbx_msg_info msg_info = {0}; + int ret; + + ret = sss_check_mbx_param(hwdev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + ret = sss_send_mbx_msg_lock(hwdev->mbx, channel); + if (ret != 0) + return ret; + + ret = sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size, + func_id, SSS_DIRECT_SEND_MSG, SSS_MSG_NO_ACK, &msg_info); + if (ret != 0) + sdk_err(hwdev->dev_hdl, "Fail to send mbx no ack\n"); + + sss_send_mbx_msg_unlock(hwdev->mbx); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h new file mode 100644 index 0000000000000..f3f253046f8fc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MBX_H +#define SSS_HWIF_MBX_H + +#include "sss_hw.h" +#include "sss_hwdev.h" + +#define SSS_MGMT_SRC_ID 0x1FFF + +#define SSS_IS_DMA_MBX_MSG(dest_func_id) 
((dest_func_id) == SSS_MGMT_SRC_ID) + +#define SSS_MBX_BUF_SIZE_MAX 2048U + +#define SSS_MBX_HEADER_SIZE 8 + +/* MBX size is 64B, 8B for mbx_header, 8B reserved */ +#define SSS_MBX_SEG_SIZE 48 +#define SSS_MBX_DATA_SIZE (SSS_MBX_BUF_SIZE_MAX - SSS_MBX_HEADER_SIZE) + +#define SSS_MBX_MQ_CI_OFF (SSS_CSR_CFG_FLAG + \ + SSS_HW_CSR_MBX_DATA_OFF + SSS_MBX_HEADER_SIZE + SSS_MBX_SEG_SIZE) + +#define SSS_MBX_MQ_SYNC_CI_SHIFT 0 +#define SSS_MBX_MQ_ASYNC_CI_SHIFT 8 + +#define SSS_MBX_MQ_SYNC_CI_MASK 0xFF +#define SSS_MBX_MQ_ASYNC_CI_MASK 0xFF + +#define SSS_GET_MBX_MQ_CI(val, field) \ + (((val) >> SSS_MBX_MQ_##field##_CI_SHIFT) & SSS_MBX_MQ_##field##_CI_MASK) +#define SSS_CLEAR_MBX_MQ_CI(val, field) \ + ((val) & (~(SSS_MBX_MQ_##field##_CI_MASK << SSS_MBX_MQ_##field##_CI_SHIFT))) + +/* Recv func mbx msg */ +struct sss_recv_mbx { + void *buf; + u16 buf_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_id; + enum sss_msg_ack_type ack_type; + void *resp_buf; +}; + +enum sss_mbx_cb_state { + SSS_VF_RECV_HANDLER_REG = 0, + SSS_VF_RECV_HANDLER_RUN, + SSS_PF_RECV_HANDLER_REG, + SSS_PF_RECV_HANDLER_RUN, + SSS_PPF_RECV_HANDLER_REG, + SSS_PPF_RECV_HANDLER_RUN, + SSS_PPF_TO_PF_RECV_HANDLER_REG, + SSS_PPF_TO_PF_RECV_HANDLER_RUN, +}; + +static inline int sss_check_mbx_param(struct sss_mbx *mbx, + void *buf_in, u16 in_size, u16 channel) +{ + if (!buf_in || in_size == 0) + return -EINVAL; + + if (in_size > SSS_MBX_DATA_SIZE) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Mbx msg len %u exceed limit: [1, %u]\n", + in_size, SSS_MBX_DATA_SIZE); + return -EINVAL; + } + + if (channel >= SSS_CHANNEL_MAX) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + return 0; +} + +struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction); +int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dest, enum sss_msg_direction_type direction_type, + enum sss_msg_ack_type type, struct 
sss_mbx_msg_info *msg_info); +int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd, + u16 dest_func_id, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel); +int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); +#define sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel) \ + sss_send_mbx_to_func_no_ack(hwdev, SSS_MGMT_SRC_ID, mod, cmd, \ + buf_in, in_size, channel) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c new file mode 100644 index 0000000000000..02ee99eba20a1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_export.h" + +#define SSS_WAIT_CB_COMPLETE_MIN 900 +#define SSS_WAIT_CB_COMPLETE_MAX 1000 + +int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return -EFAULT; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + mbx->pf_mbx_cb[mod] = cb; + mbx->pf_mbx_data[mod] = pri_handle; + + set_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(sss_register_pf_mbx_handler); + +int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return -EFAULT; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + mbx->vf_mbx_cb[mod] = cb; + mbx->vf_mbx_data[mod] = pri_handle; + + set_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]); + + return 0; +} 
+EXPORT_SYMBOL(sss_register_vf_mbx_handler); + +void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + + clear_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]); + + while (test_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[mod]) != 0) + usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX); + + mbx->pf_mbx_cb[mod] = NULL; + mbx->pf_mbx_data[mod] = NULL; +} +EXPORT_SYMBOL(sss_unregister_pf_mbx_handler); + +void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + + clear_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]); + + while (test_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[mod]) != 0) + usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX); + + mbx->vf_mbx_cb[mod] = NULL; + mbx->vf_mbx_data[mod] = NULL; +} +EXPORT_SYMBOL(sss_unregister_vf_mbx_handler); + +int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (!SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE(dev)); + return -EINVAL; + } + + return sss_send_mbx_to_func(dev->mbx, mod, cmd, + sss_get_pf_id_of_vf(dev), buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_mbx_send_to_pf); + +int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret = 0; + u16 dst_func_id; + + if (!hwdev) + 
return -EINVAL; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev)); + return -EINVAL; + } + + if (vf_id == 0) { + sdk_err(dev->dev_hdl, "Invalid vf_id: %u\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_id = sss_get_glb_pf_vf_offset(hwdev) + vf_id; + + return sss_send_mbx_to_func(dev->mbx, mod, cmd, + dst_func_id, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_mbx_send_to_vf); + +static int sss_send_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + struct sss_mbx *func_to_func = hwdev->mbx; + int ret; + + ret = sss_check_mbx_param(func_to_func, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (mod == SSS_MOD_TYPE_COMM && cmd == SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP) + return 0; + + return sss_send_mbx_to_func(func_to_func, mod, cmd, SSS_MGMT_SRC_ID, + buf_in, in_size, buf_out, out_size, timeout, channel); +} + +int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (sss_get_dev_present_flag(hwdev) == 0) + return -EPERM; + + return sss_send_mbx_to_mgmt(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_sync_mbx_send_msg); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c new file mode 100644 index 0000000000000..96c034c203d8d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c @@ -0,0 +1,888 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define 
pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_mbx.h" +#include "sss_csr.h" +#include "sss_common.h" +#include "sss_adapter_mgmt.h" + +#define SSS_MBX_WB_STATUS_SIZE 16UL + +#define SSS_MBX_DMA_MSG_QUEUE_DEPTH 32 + +#define SSS_MBX_WQ_NAME "sss_mbx" + +#define SSS_MBX_AREA(hwif) \ + ((hwif)->cfg_reg_base + SSS_HW_CSR_MBX_DATA_OFF) + +#define SSS_GET_MBX_BODY(header) ((u8 *)(header) + SSS_MBX_HEADER_SIZE) + +#define SSS_MBX_LAST_SEG_MAX_SIZE \ + (SSS_MBX_BUF_SIZE_MAX - SSS_MAX_SEG_ID * SSS_MBX_SEG_SIZE) + +#define SSS_MSG_PROCESS_CNT_MAX 10 + +#define SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id) \ + ((src_func_id) < SSS_MAX_PF_NUM(hwdev)) + +#define SSS_MBX_MSG_NO_DATA_SIZE 1 + +#define SSS_MBX_PF_SEND_ERR 0x1 + +#define SSS_MAX_SEG_ID 42 + +struct sss_mbx_work { + struct work_struct work; + struct sss_mbx *mbx; + struct sss_recv_mbx *recv_mbx; + struct sss_msg_buffer *msg_buffer; +}; + +static int sss_alloc_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq) +{ + u32 size; + + size = mq->depth * SSS_MBX_BUF_SIZE_MAX; + mq->dma_buff_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, + GFP_KERNEL); + if (!mq->dma_buff_vaddr) { + sdk_err(hwdev->dev_hdl, "Fail to alloc dma_buffer\n"); + return -ENOMEM; + } + + return 0; +} + +static void sss_free_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev_hdl, mq->depth * SSS_MBX_BUF_SIZE_MAX, + mq->dma_buff_vaddr, mq->dma_buff_paddr); + mq->dma_buff_vaddr = NULL; + mq->dma_buff_paddr = 0; +} + +static int sss_mbx_alloc_mq_dma_addr(struct sss_mbx *mbx) +{ + int ret; + + ret = sss_alloc_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + if (ret != 0) + return ret; + + ret = sss_alloc_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), 
&mbx->async_msg_queue); + if (ret != 0) { + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + return ret; + } + + return 0; +} + +static void sss_mbx_free_mq_dma_addr(struct sss_mbx *mbx) +{ + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->async_msg_queue); +} + +static int sss_mbx_alloc_mq_wb_addr(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + send_mbx->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE, + &send_mbx->wb_paddr, GFP_KERNEL); + if (!send_mbx->wb_vaddr) + return -ENOMEM; + + send_mbx->wb_state = send_mbx->wb_vaddr; + + return 0; +} + +static void sss_mbx_free_mq_wb_addr(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + dma_free_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE, + send_mbx->wb_vaddr, send_mbx->wb_paddr); + + send_mbx->wb_vaddr = NULL; +} + +static int sss_alloc_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer) +{ + msg_buffer->resp_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!msg_buffer->resp_msg.msg) + return -ENOMEM; + + msg_buffer->recv_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!msg_buffer->recv_msg.msg) { + kfree(msg_buffer->resp_msg.msg); + msg_buffer->resp_msg.msg = NULL; + return -ENOMEM; + } + + atomic_set(&msg_buffer->recv_msg_cnt, 0); + msg_buffer->recv_msg.seq_id = SSS_MAX_SEG_ID; + msg_buffer->resp_msg.seq_id = SSS_MAX_SEG_ID; + + return 0; +} + +static void sss_free_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer) +{ + kfree(msg_buffer->recv_msg.msg); + msg_buffer->recv_msg.msg = NULL; + kfree(msg_buffer->resp_msg.msg); + msg_buffer->resp_msg.msg = NULL; +} + +static int sss_mbx_alloc_dma_addr(struct sss_mbx *sss_mbx) +{ + int ret; + + ret = sss_mbx_alloc_mq_dma_addr(sss_mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, 
"Fail to alloc mbx dma queue\n"); + return -ENOMEM; + } + + ret = sss_mbx_alloc_mq_wb_addr(sss_mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to init mbx dma wb addr\n"); + goto alloc_dma_wb_addr_err; + } + + return 0; + +alloc_dma_wb_addr_err: + sss_mbx_free_mq_dma_addr(sss_mbx); + + return -ENOMEM; +} + +static void sss_mbx_free_dma_addr(struct sss_mbx *mbx) +{ + sss_mbx_free_mq_wb_addr(mbx); + sss_mbx_free_mq_dma_addr(mbx); +} + +static int sss_init_mbx_info(struct sss_mbx *mbx) +{ + int ret; + + mutex_init(&mbx->mbx_send_lock); + mutex_init(&mbx->msg_send_lock); + spin_lock_init(&mbx->mbx_lock); + mbx->sync_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH; + mbx->async_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH; + + mbx->workq = create_singlethread_workqueue(SSS_MBX_WQ_NAME); + if (!mbx->workq) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to create mbx workq\n"); + return -ENOMEM; + } + + ret = sss_alloc_mbx_msg_buffer(&mbx->mgmt_msg); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc mgmt message buffer\n"); + goto alloc_mbx_msg_buffer_err; + } + + ret = sss_mbx_alloc_dma_addr(mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc dma addr\n"); + goto mbx_alloc_dma_addr_err; + } + + return 0; + +mbx_alloc_dma_addr_err: + sss_free_mbx_msg_buffer(&mbx->mgmt_msg); +alloc_mbx_msg_buffer_err: + destroy_workqueue(mbx->workq); + + return -ENOMEM; +} + +static void sss_deinit_mbx_info(struct sss_mbx *mbx) +{ + if (mbx->workq) { + destroy_workqueue(mbx->workq); + mbx->workq = NULL; + } + + sss_mbx_free_dma_addr(mbx); + sss_free_mbx_msg_buffer(&mbx->mgmt_msg); +} + +static int sss_alloc_func_mbx_msg(struct sss_mbx *mbx, u16 func_num) +{ + if (mbx->func_msg) + return (mbx->num_func_msg == func_num) ? 
0 : -EFAULT; + + mbx->func_msg = kcalloc(func_num, sizeof(*mbx->func_msg), GFP_KERNEL); + if (!mbx->func_msg) + return -ENOMEM; + + return 0; +} + +static void sss_free_func_mbx_msg(struct sss_mbx *mbx) +{ + kfree(mbx->func_msg); + mbx->func_msg = NULL; +} + +int sss_init_func_mbx_msg(void *hwdev, u16 func_num) +{ + u16 i; + u16 cnt; + int ret; + struct sss_hwdev *dev = hwdev; + struct sss_mbx *mbx = dev->mbx; + + if (!hwdev || func_num == 0 || func_num > SSS_MAX_FUNC) + return -EINVAL; + + ret = sss_alloc_func_mbx_msg(mbx, func_num); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Fail to alloc func msg\n"); + return ret; + } + + for (cnt = 0; cnt < func_num; cnt++) { + ret = sss_alloc_mbx_msg_buffer(&mbx->func_msg[cnt]); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Fail to alloc func %u msg buf\n", cnt); + goto alloc_mbx_msg_buf_err; + } + } + + mbx->num_func_msg = func_num; + + return 0; + +alloc_mbx_msg_buf_err: + for (i = 0; i < cnt; i++) + sss_free_mbx_msg_buffer(&mbx->func_msg[i]); + + sss_free_func_mbx_msg(mbx); + + return -ENOMEM; +} + +static void sss_deinit_func_mbx_msg(struct sss_mbx *mbx) +{ + u16 i; + + if (!mbx->func_msg) + return; + + for (i = 0; i < mbx->num_func_msg; i++) + sss_free_mbx_msg_buffer(&mbx->func_msg[i]); + + sss_free_func_mbx_msg(mbx); +} + +static void sss_chip_reset_mbx_ci(struct sss_mbx *mbx) +{ + u32 val; + + val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF); + val = SSS_CLEAR_MBX_MQ_CI(val, SYNC); + val = SSS_CLEAR_MBX_MQ_CI(val, ASYNC); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF, val); +} + +static void sss_chip_set_mbx_wb_attr(struct sss_mbx *mbx) +{ + u32 addr_h; + u32 addr_l; + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + addr_h = upper_32_bits(send_mbx->wb_paddr); + addr_l = lower_32_bits(send_mbx->wb_paddr); + + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, addr_h); + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, 
addr_l); +} + +static void sss_chip_set_mbx_attr(struct sss_mbx *mbx) +{ + sss_chip_reset_mbx_ci(mbx); + sss_chip_set_mbx_wb_attr(mbx); +} + +static void sss_chip_reset_mbx_attr(struct sss_mbx *sss_mbx) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(sss_mbx); + + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, 0); + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, 0); +} + +static void sss_prepare_send_mbx(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + + send_mbx->data = SSS_MBX_AREA(SSS_TO_HWDEV(mbx)->hwif); +} + +static int sss_alloc_host_msg(struct sss_hwdev *hwdev) +{ + int i; + int ret; + int host_id; + u8 max_host = SSS_MAX_HOST_NUM(hwdev); + struct sss_mbx *mbx = hwdev->mbx; + + if (max_host == 0) + return 0; + + mbx->host_msg = kcalloc(max_host, sizeof(*mbx->host_msg), GFP_KERNEL); + if (!mbx->host_msg) + return -ENOMEM; + + for (host_id = 0; host_id < max_host; host_id++) { + ret = sss_alloc_mbx_msg_buffer(&mbx->host_msg[host_id]); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Fail to alloc host %d msg channel\n", host_id); + goto out; + } + } + + mbx->support_h2h_msg = true; + + return 0; + +out: + for (i = 0; i < host_id; i++) + sss_free_mbx_msg_buffer(&mbx->host_msg[i]); + + kfree(mbx->host_msg); + mbx->host_msg = NULL; + + return -ENOMEM; +} + +static void sss_free_host_msg(struct sss_mbx *mbx) +{ + int i; + + if (!mbx->host_msg) + return; + + for (i = 0; i < SSS_MAX_HOST_NUM(SSS_TO_HWDEV(mbx)); i++) + sss_free_mbx_msg_buffer(&mbx->host_msg[i]); + + kfree(mbx->host_msg); + mbx->host_msg = NULL; +} + +int sss_hwif_init_mbx(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_mbx *mbx; + + mbx = kzalloc(sizeof(*mbx), GFP_KERNEL); + if (!mbx) + return -ENOMEM; + + hwdev->mbx = mbx; + mbx->hwdev = hwdev; + + ret = sss_init_mbx_info(mbx); + if (ret != 0) + goto init_mbx_info_err; + + if (SSS_IS_VF(hwdev)) { + ret = sss_init_func_mbx_msg(hwdev, 1); + if (ret != 0) + goto init_func_mbx_msg_err; + } + + 
sss_chip_set_mbx_attr(mbx); + + sss_prepare_send_mbx(mbx); + + ret = sss_alloc_host_msg(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc host msg\n"); + goto alloc_host_msg_err; + } + + return 0; + +alloc_host_msg_err: + sss_chip_reset_mbx_attr(mbx); + sss_deinit_func_mbx_msg(mbx); + +init_func_mbx_msg_err: + sss_deinit_mbx_info(mbx); + +init_mbx_info_err: + kfree(mbx); + hwdev->mbx = NULL; + + return ret; +} + +void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev) +{ + struct sss_mbx *mbx = hwdev->mbx; + + destroy_workqueue(mbx->workq); + mbx->workq = NULL; + + sss_chip_reset_mbx_attr(mbx); + + sss_free_host_msg(mbx); + + sss_deinit_func_mbx_msg(mbx); + + sss_deinit_mbx_info(mbx); + + kfree(mbx); + hwdev->mbx = NULL; +} + +static bool sss_check_mbx_msg_header(void *dev_hdl, + struct sss_msg_desc *msg_desc, u64 mbx_header) +{ + u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID); + u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN); + u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID); + u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE); + u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD); + + if (seq_id > SSS_MAX_SEG_ID) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x\n", seq_id); + return false; + } + + if (seg_len > SSS_MBX_SEG_SIZE) { + sdk_err(dev_hdl, "Current seg info: seg_len = 0x%x\n", seg_len); + return false; + } + + if (seq_id == SSS_MAX_SEG_ID && seg_len > SSS_MBX_LAST_SEG_MAX_SIZE) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, seg_len = 0x%x\n", + seq_id, seg_len); + return false; + } + + if (seq_id == 0) + return true; + + if (seq_id != msg_desc->seq_id + 1) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, 0x%x\n", + seq_id, msg_desc->seq_id); + return false; + } + + if (msg_id != msg_desc->msg_info.msg_id) { + sdk_err(dev_hdl, "Current seg info: msg_id = 0x%x, 0x%x\n", + msg_id, msg_desc->msg_info.msg_id); + return false; + } + + if (mod != msg_desc->mod) { + sdk_err(dev_hdl, "Current seg info: mod = 0x%x, 0x%x\n", + 
mod, msg_desc->mod); + return false; + } + + if (cmd != msg_desc->cmd) { + sdk_err(dev_hdl, "Current seg info: cmd = 0x%x, 0x%x\n", + cmd, msg_desc->cmd); + return false; + } + + return true; +} + +static void sss_fill_msg_desc(struct sss_msg_desc *msg_desc, u64 *msg_header) +{ + u64 mbx_header = *msg_header; + u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID); + u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN); + u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID); + u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE); + u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD); + u32 offset = seq_id * SSS_MBX_SEG_SIZE; + void *msg_body = SSS_GET_MBX_BODY(((void *)msg_header)); + + msg_desc->seq_id = seq_id; + if (seq_id == 0) { + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } + msg_desc->msg_len = SSS_GET_MSG_HEADER(mbx_header, MSG_LEN); + msg_desc->msg_info.state = SSS_GET_MSG_HEADER(mbx_header, STATUS); + memcpy((u8 *)msg_desc->msg + offset, msg_body, seg_len); +} + +static struct sss_recv_mbx *sss_alloc_recv_mbx(void) +{ + struct sss_recv_mbx *recv_mbx = NULL; + + recv_mbx = kzalloc(sizeof(*recv_mbx), GFP_KERNEL); + if (!recv_mbx) + return NULL; + + recv_mbx->buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!recv_mbx->buf) + goto alloc_recv_mbx_buf_err; + + recv_mbx->resp_buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!recv_mbx->resp_buf) + goto alloc_recv_mbx_resp_buf_err; + + return recv_mbx; + +alloc_recv_mbx_resp_buf_err: + kfree(recv_mbx->buf); + +alloc_recv_mbx_buf_err: + kfree(recv_mbx); + + return NULL; +} + +static void sss_free_recv_mbx(struct sss_recv_mbx *recv_mbx) +{ + kfree(recv_mbx->resp_buf); + kfree(recv_mbx->buf); + kfree(recv_mbx); +} + +static int sss_recv_vf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size) +{ + int ret; + sss_vf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + 
sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %u\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->vf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[recv_mbx->mod])) { + ret = callback(mbx->vf_mbx_data[recv_mbx->mod], recv_mbx->cmd, recv_mbx->buf, + recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "VF mbx cb is unregistered\n"); + ret = -EINVAL; + } + + clear_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static int sss_recv_pf_from_ppf_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size) +{ + int ret; + sss_pf_from_ppf_mbx_handler_t callback; + enum sss_mod_type mod = recv_mbx->mod; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %d\n", mod); + return -EINVAL; + } + + set_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]); + + callback = mbx->pf_recv_ppf_mbx_cb[mod]; + if (callback && + test_bit(SSS_PPF_TO_PF_RECV_HANDLER_REG, &mbx->ppf_to_pf_mbx_cb_state[mod]) != 0) { + ret = callback(mbx->pf_recv_ppf_mbx_data[mod], recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PF recv ppf mbx cb is not registered\n"); + ret = -EINVAL; + } + + clear_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]); + + return ret; +} + +static int sss_recv_ppf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, u8 pf_id, + void *resp_buf, u16 *size) +{ + int ret; + u16 vf_id = 0; + sss_ppf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %u\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_PPF_RECV_HANDLER_RUN, 
&mbx->ppf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->ppf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_PPF_RECV_HANDLER_REG, &mbx->ppf_mbx_cb_state[recv_mbx->mod])) { + ret = callback(mbx->ppf_mbx_data[recv_mbx->mod], pf_id, vf_id, recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PPF mbx cb is unregistered, mod = %u\n", recv_mbx->mod); + ret = -EINVAL; + } + + clear_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static int sss_recv_pf_from_vf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, + u16 src_func_id, void *resp_buf, + u16 *size) +{ + int ret; + u16 vf_id = 0; + sss_pf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %u\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->pf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[recv_mbx->mod]) != 0) { + vf_id = src_func_id - sss_get_glb_pf_vf_offset(SSS_TO_HWDEV(mbx)); + ret = callback(mbx->pf_mbx_data[recv_mbx->mod], vf_id, recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PF mbx mod(0x%x) cb is unregistered\n", recv_mbx->mod); + ret = -EINVAL; + } + + clear_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static void sss_send_mbx_response(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, int ret, u16 size, u16 src_func_id) +{ + u16 data_size; + struct sss_mbx_msg_info msg_info = {0}; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + msg_info.msg_id = recv_mbx->msg_id; + if (ret != 0) + msg_info.state = SSS_MBX_PF_SEND_ERR; + + data_size = (size == 0 || ret != 0) ? 
SSS_MBX_MSG_NO_DATA_SIZE : size; + if (data_size > SSS_MBX_DATA_SIZE) { + sdk_err(hwdev->dev_hdl, "Resp msg len(%d), out of range: %d\n", + data_size, SSS_MBX_DATA_SIZE); + data_size = SSS_MBX_DATA_SIZE; + } + + sss_send_mbx_msg(mbx, recv_mbx->mod, recv_mbx->cmd, recv_mbx->resp_buf, data_size, + src_func_id, SSS_RESP_MSG, SSS_MSG_NO_ACK, &msg_info); +} + +static void sss_recv_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx) +{ + int ret = 0; + void *resp_buf = recv_mbx->resp_buf; + u16 size = SSS_MBX_DATA_SIZE; + u16 src_func_id = recv_mbx->src_func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (SSS_IS_VF(hwdev)) { + ret = sss_recv_vf_mbx_handler(mbx, recv_mbx, resp_buf, &size); + goto out; + } + + if (SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id)) { + if (SSS_IS_PPF(hwdev)) + ret = sss_recv_ppf_mbx_handler(mbx, recv_mbx, + (u8)src_func_id, + resp_buf, &size); + else + ret = sss_recv_pf_from_ppf_handler(mbx, recv_mbx, resp_buf, &size); + } else { + ret = sss_recv_pf_from_vf_mbx_handler(mbx, + recv_mbx, src_func_id, + resp_buf, &size); + } + +out: + if (recv_mbx->ack_type == SSS_MSG_ACK) + sss_send_mbx_response(mbx, recv_mbx, ret, size, src_func_id); +} + +static void sss_recv_mbx_work_handler(struct work_struct *work) +{ + struct sss_mbx_work *mbx_work = container_of(work, struct sss_mbx_work, work); + + sss_recv_mbx_handler(mbx_work->mbx, mbx_work->recv_mbx); + + atomic_dec(&mbx_work->msg_buffer->recv_msg_cnt); + + destroy_work(&mbx_work->work); + + sss_free_recv_mbx(mbx_work->recv_mbx); + + kfree(mbx_work); +} + +static void sss_init_recv_mbx_param(struct sss_recv_mbx *recv_mbx, + struct sss_msg_desc *msg_desc, u64 msg_header) +{ + recv_mbx->msg_id = msg_desc->msg_info.msg_id; + recv_mbx->mod = SSS_GET_MSG_HEADER(msg_header, MODULE); + recv_mbx->cmd = SSS_GET_MSG_HEADER(msg_header, CMD); + recv_mbx->ack_type = SSS_GET_MSG_HEADER(msg_header, NO_ACK); + recv_mbx->src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + 
recv_mbx->buf_len = msg_desc->msg_len; + memcpy(recv_mbx->buf, msg_desc->msg, msg_desc->msg_len); +} + +static int sss_init_mbx_work(struct sss_mbx *mbx, struct sss_recv_mbx *recv_mbx, + struct sss_msg_buffer *msg_buffer) +{ + struct sss_mbx_work *mbx_work = NULL; + + mbx_work = kzalloc(sizeof(*mbx_work), GFP_KERNEL); + if (!mbx_work) + return -ENOMEM; + + atomic_inc(&msg_buffer->recv_msg_cnt); + + mbx_work->msg_buffer = msg_buffer; + mbx_work->recv_mbx = recv_mbx; + mbx_work->mbx = mbx; + + INIT_WORK(&mbx_work->work, sss_recv_mbx_work_handler); + queue_work_on(WORK_CPU_UNBOUND, mbx->workq, &mbx_work->work); + + return 0; +} + +static void sss_recv_mbx_msg_handler(struct sss_mbx *mbx, + struct sss_msg_desc *msg_desc, u64 msg_header) +{ + u32 msg_cnt; + int ret; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_recv_mbx *recv_mbx = NULL; + struct sss_msg_buffer *msg_buffer = container_of(msg_desc, struct sss_msg_buffer, recv_msg); + + msg_cnt = atomic_read(&msg_buffer->recv_msg_cnt); + if (msg_cnt > SSS_MSG_PROCESS_CNT_MAX) { + u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + + sdk_warn(hwdev->dev_hdl, "This func(%llu) have %u msg wait to process\n", + src_func_id, msg_cnt); + return; + } + + recv_mbx = sss_alloc_recv_mbx(); + if (!recv_mbx) { + sdk_err(hwdev->dev_hdl, "Fail to alloc receive recv_mbx message buffer\n"); + return; + } + + sss_init_recv_mbx_param(recv_mbx, msg_desc, msg_header); + + ret = sss_init_mbx_work(mbx, recv_mbx, msg_buffer); + if (ret != 0) + sss_free_recv_mbx(recv_mbx); +} + +static void sss_resp_mbx_handler(struct sss_mbx *mbx, + const struct sss_msg_desc *msg_desc) +{ + spin_lock(&mbx->mbx_lock); + if (msg_desc->msg_info.msg_id == mbx->send_msg_id && + mbx->event_flag == SSS_EVENT_START) + mbx->event_flag = SSS_EVENT_SUCCESS; + else + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Mbx resp timeout, current send msg_id(0x%x), recv msg_id(0x%x), status(0x%x)\n", + mbx->send_msg_id, msg_desc->msg_info.msg_id, 
msg_desc->msg_info.state); + spin_unlock(&mbx->mbx_lock); +} + +static void sss_recv_mbx_aeq(struct sss_mbx *mbx, u64 *msg_header, + struct sss_msg_desc *msg_desc) +{ + u64 header = *msg_header; + + if (!sss_check_mbx_msg_header(SSS_TO_HWDEV(mbx)->dev_hdl, msg_desc, header)) { + msg_desc->seq_id = SSS_MAX_SEG_ID; + return; + } + + sss_fill_msg_desc(msg_desc, msg_header); + + if (!SSS_GET_MSG_HEADER(header, LAST)) + return; + + if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_DIRECT_SEND_MSG) { + sss_recv_mbx_msg_handler(mbx, msg_desc, header); + return; + } + + sss_resp_mbx_handler(mbx, msg_desc); +} + +void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size) +{ + u64 msg_header = *((u64 *)header); + u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + u64 direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION); + struct sss_msg_desc *msg_desc = NULL; + struct sss_hwdev *hwdev = (struct sss_hwdev *)handle; + struct sss_mbx *mbx = hwdev->mbx; + + msg_desc = sss_get_mbx_msg_desc(mbx, src_func_id, direction); + if (!msg_desc) { + sdk_err(hwdev->dev_hdl, "Invalid mbx src_func_id: %u\n", (u32)src_func_id); + return; + } + + sss_recv_mbx_aeq(mbx, (u64 *)header, msg_desc); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h new file mode 100644 index 0000000000000..ab440fea3e0a5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MBX_INIT_H +#define SSS_HWIF_MBX_INIT_H + +#include "sss_hwdev.h" + +int sss_init_func_mbx_msg(void *hwdev, u16 func_num); +int sss_hwif_init_mbx(struct sss_hwdev *hwdev); +void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev); +void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h 
b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h new file mode 100644 index 0000000000000..c6a085e5444ab --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MGMT_COMMON_H +#define SSS_HWIF_MGMT_COMMON_H + +#define SSS_ASYNC_MSG_FLAG 0x8 + +#define SSS_PF_MGMT_BUF_LEN_MAX 2048UL + +#define SSS_MSG_TO_MGMT_LEN_MAX 2016 + +#define SSS_SEG_LEN 48 + +#define SSS_MGMT_SEQ_ID_MAX \ + (ALIGN(SSS_MSG_TO_MGMT_LEN_MAX, SSS_SEG_LEN) / SSS_SEG_LEN) + +#define SSS_MGMT_LAST_SEG_LEN_MAX \ + (SSS_PF_MGMT_BUF_LEN_MAX - SSS_SEG_LEN * SSS_MGMT_SEQ_ID_MAX) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c new file mode 100644 index 0000000000000..af2e9d44d2bdb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_hwif_mgmt_common.h" +#include "sss_hwif_ctrlq_init.h" +#include "sss_hwif_adm_init.h" + +#define SSS_DEF_OUT_SIZE 1 + +struct sss_mgmt_msg_handle_work { + struct work_struct work; + struct sss_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + u8 no_ack; + u8 resvd; + + enum sss_mod_type mod; + u16 cmd; + u16 msg_id; +}; + +static void sss_send_response_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id) +{ + struct sss_mbx_msg_info info; + + info.msg_id = (u8)msg_id; + info.state = 0; + + sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size, + SSS_MGMT_SRC_ID, 
SSS_RESP_MSG, SSS_MSG_NO_ACK, &info); +} + +static void sss_mgmt_recv_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + u8 mod, u16 cmd, void *in_buf, + u16 in_size, u16 msg_id, int resp_need) +{ + u16 size; + u16 out_size = 0; + void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl; + void *out_buf = mgmt_msg->ack_buf; + + memset(out_buf, 0, SSS_PF_MGMT_BUF_LEN_MAX); + + if (mod >= SSS_MOD_TYPE_HW_MAX) { + sdk_warn(dev_hdl, "Recv illegal msg from mgmt cpu, mod = %d\n", mod); + out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + goto out; + } + + set_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + + if (!mgmt_msg->recv_handler[mod] || + !test_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod])) { + sdk_warn(dev_hdl, "Recv mgmt cb is null, mod = %d\n", mod); + clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + goto out; + } + + mgmt_msg->recv_handler[mod](mgmt_msg->recv_data[mod], + cmd, in_buf, in_size, out_buf, &out_size); + + clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + +out: + if (resp_need != 0) { + size = (out_size == 0) ? 
SSS_DEF_OUT_SIZE : out_size; + sss_send_response_mbx_to_mgmt(SSS_TO_HWDEV(mgmt_msg), mod, cmd, + out_buf, size, msg_id); + } +} + +static void sss_recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct sss_mgmt_msg_handle_work *msg_work = + container_of(work, struct sss_mgmt_msg_handle_work, work); + + sss_mgmt_recv_msg_handler(msg_work->pf_to_mgmt, msg_work->mod, + msg_work->cmd, msg_work->msg, msg_work->msg_len, msg_work->msg_id, + !msg_work->no_ack); + + destroy_work(&msg_work->work); + + kfree(msg_work->msg); + kfree(msg_work); +} + +static void sss_init_mgmt_recv_msg(struct sss_recv_msg *msg_recv, u64 msg_header) +{ + msg_recv->cmd = SSS_GET_MSG_HEADER(msg_header, CMD); + msg_recv->mod = SSS_GET_MSG_HEADER(msg_header, MODULE); + msg_recv->no_ack = SSS_GET_MSG_HEADER(msg_header, NO_ACK); + msg_recv->buf_len = SSS_GET_MSG_HEADER(msg_header, MSG_LEN); + msg_recv->msg_id = SSS_GET_MSG_HEADER(msg_header, MSG_ID); + msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX; +} + +static bool sss_check_mgmt_head_info(struct sss_recv_msg *msg_recv, u64 header) +{ + u8 seg_len = SSS_GET_MSG_HEADER(header, SEG_LEN); + u8 seg_id = SSS_GET_MSG_HEADER(header, SEQID); + u16 msg_id = SSS_GET_MSG_HEADER(header, MSG_ID); + + if (seg_id > SSS_MGMT_SEQ_ID_MAX || seg_len > SSS_SEG_LEN || + (seg_id == SSS_MGMT_SEQ_ID_MAX && seg_len > SSS_MGMT_LAST_SEG_LEN_MAX)) + return false; + + if (seg_id == 0) { + msg_recv->msg_id = msg_id; + msg_recv->seq_id = seg_id; + + return true; + } + + if (seg_id != (msg_recv->seq_id + 1) || msg_id != msg_recv->msg_id) + return false; + + msg_recv->seq_id = seg_id; + + return true; +} + +static void sss_mgmt_resp_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + struct sss_recv_msg *msg_recv) +{ + void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl; + + if ((msg_recv->msg_id & SSS_ASYNC_MSG_FLAG) != 0) + return; + + spin_lock(&mgmt_msg->sync_event_lock); + if (msg_recv->msg_id == mgmt_msg->sync_msg_id && + mgmt_msg->event_state == SSS_ADM_EVENT_START) { + 
mgmt_msg->event_state = SSS_ADM_EVENT_SUCCESS; + complete(&msg_recv->done); + spin_unlock(&mgmt_msg->sync_event_lock); + return; + } + + sdk_err(dev_hdl, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", + mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state); + sdk_err(dev_hdl, "Wait timeout, send and recv msg id(0x%x)(0x%x), event state=%d\n", + mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state); + spin_unlock(&mgmt_msg->sync_event_lock); +} + +static void sss_init_mgmt_msg_work(struct sss_msg_pf_to_mgmt *mgmt_msg, + struct sss_recv_msg *msg_recv) +{ + struct sss_mgmt_msg_handle_work *msg_work = NULL; + + msg_work = kzalloc(sizeof(*msg_work), GFP_KERNEL); + if (!msg_work) + return; + + if (msg_recv->buf_len != 0) { + msg_work->msg = kzalloc(msg_recv->buf_len, GFP_KERNEL); + if (!msg_work->msg) { + kfree(msg_work); + return; + } + } + + msg_work->pf_to_mgmt = mgmt_msg; + msg_work->msg_len = msg_recv->buf_len; + memcpy(msg_work->msg, msg_recv->buf, msg_recv->buf_len); + msg_work->msg_id = msg_recv->msg_id; + msg_work->mod = msg_recv->mod; + msg_work->cmd = msg_recv->cmd; + msg_work->no_ack = msg_recv->no_ack; + + INIT_WORK(&msg_work->work, sss_recv_mgmt_msg_work_handler); + queue_work_on(WORK_CPU_UNBOUND, mgmt_msg->workq, &msg_work->work); +} + +static void sss_recv_mgmt_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + u8 *msg_header, struct sss_recv_msg *msg_recv) +{ + u8 seq_id; + u8 seq_len; + u16 msg_id; + u32 msg_offset; + u64 dir; + u64 header = *((u64 *)msg_header); + void *msg_body; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mgmt_msg); + + dir = SSS_GET_MSG_HEADER(header, DIRECTION); + msg_id = SSS_GET_MSG_HEADER(header, MSG_ID); + if (dir == SSS_RESP_MSG && (msg_id & SSS_ASYNC_MSG_FLAG) != 0) + return; + + if (!sss_check_mgmt_head_info(msg_recv, header)) { + msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX; + sdk_err(hwdev->dev_hdl, "Fail to check Mgmt msg seq id and seq len\n"); + return; + } + + seq_len = 
SSS_GET_MSG_HEADER(header, SEG_LEN); + seq_id = SSS_GET_MSG_HEADER(header, SEQID); + msg_offset = seq_id * SSS_SEG_LEN; + msg_body = msg_header + sizeof(header); + memcpy((u8 *)msg_recv->buf + msg_offset, msg_body, seq_len); + + if (!SSS_GET_MSG_HEADER(header, LAST)) + return; + + sss_init_mgmt_recv_msg(msg_recv, header); + + if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_RESP_MSG) { + sss_mgmt_resp_msg_handler(mgmt_msg, msg_recv); + return; + } + + sss_init_mgmt_msg_work(mgmt_msg, msg_recv); +} + +static void sss_set_mbx_event_timeout(struct sss_hwdev *hwdev) +{ + struct sss_mbx *mbx = hwdev->mbx; + + spin_lock(&mbx->mbx_lock); + if (mbx->event_flag == SSS_EVENT_START) + mbx->event_flag = SSS_EVENT_TIMEOUT; + spin_unlock(&mbx->mbx_lock); +} + +void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *msg_header, u8 size) +{ + bool msg_dir; + struct sss_recv_msg *msg = NULL; + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + if (SSS_GET_MSG_HEADER(*(u64 *)msg_header, SOURCE) == SSS_MSG_SRC_MBX) { + sss_recv_mbx_aeq_handler(hwdev, msg_header, size); + return; + } + + mgmt_msg = dev->pf_to_mgmt; + if (!mgmt_msg) + return; + + msg_dir = SSS_GET_MSG_HEADER(*(u64 *)msg_header, DIRECTION) == SSS_DIRECT_SEND_MSG; + + msg = msg_dir ? 
&mgmt_msg->recv_msg : &mgmt_msg->recv_resp_msg; + + sss_recv_mgmt_msg_handler(mgmt_msg, msg_header, msg); +} + +void sss_force_complete_all(void *dev) +{ + struct sss_hwdev *hwdev = dev; + + spin_lock_bh(&hwdev->channel_lock); + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF && + test_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state)) + sss_complete_adm_event(hwdev); + + if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) + sss_set_mbx_event_timeout(hwdev); + + if (test_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state)) + sss_ctrlq_flush_sync_cmd(hwdev); + + spin_unlock_bh(&hwdev->channel_lock); +} + +void sss_flush_mgmt_workq(void *hwdev) +{ + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + flush_workqueue(dev->aeq_info->workq); + + if (sss_get_func_type(dev) != SSS_FUNC_TYPE_VF) + flush_workqueue(dev->pf_to_mgmt->workq); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h new file mode 100644 index 0000000000000..19196c2b6f9bc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MGMT_INIT_H +#define SSS_HWIF_MGMT_INIT_H + +#include "sss_hwdev.h" + +void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size); +void sss_force_complete_all(void *dev); +void sss_flush_mgmt_workq(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c new file mode 100644 index 0000000000000..c50ae2daa06a3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 3snic Technologies Co., Ltd */ + +#include +#include +#include "sss_kernel.h" +#include "sss_pci.h" + +#ifdef USE_OLD_PCI_FUNCTION +#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ + 
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) + +MODULE_IMPORT_NS(CXL); + +int pci_disable_pcie_error_reporting(struct pci_dev *dev) +{ + int rc; + + if (!pcie_aer_is_native(dev)) + return -EIO; + + rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); +} + +int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + int rc; + + if (!pcie_aer_is_native(dev)) + return -EIO; + + rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); +} + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h new file mode 100644 index 0000000000000..bebb175535013 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_H +#define SSS_PCI_H + +#ifdef USE_OLD_PCI_FUNCTION +#include + +#define pci_pool dma_pool +#define pci_pool_create(name, pdev, size, align, allocation) \ + dma_pool_create(name, &(pdev)->dev, size, align, allocation) +#define pci_pool_destroy(pool) dma_pool_destroy(pool) +#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) +#define pci_pool_zalloc(pool, flags, handle) \ + dma_pool_zalloc(pool, flags, handle) +#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) + +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_coherent_mask(&dev->dev, mask); +} + +int pci_disable_pcie_error_reporting(struct pci_dev *dev); +int pci_enable_pcie_error_reporting(struct pci_dev *dev); +#endif + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c new file mode 100644 index 0000000000000..ead8a09435c67 --- 
/dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev.h" + +static void sss_record_pcie_error(void *dev) +{ + struct sss_hwdev *hwdev = (struct sss_hwdev *)dev; + + atomic_inc(&hwdev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sdk_err(&pdev->dev, "Pci error, state: 0x%08x\n", state); + + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (adapter) + sss_record_pcie_error(adapter->hwdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h new file mode 100644 index 0000000000000..26e65d77b98e5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_ERROR_H +#define SSS_PCI_ERROR_H + +#include + +pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev, + pci_channel_state_t state); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c new file mode 100644 index 0000000000000..d73b7d2db2897 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": 
[BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" + +static bool attach_uld = true; +module_param(attach_uld, bool, 0444); +MODULE_PARM_DESC(attach_uld, "enable attach upper driver - default is true"); + +static struct sss_uld_info g_uld_info[SSS_SERVICE_TYPE_MAX]; + +static const char *g_uld_name[SSS_SERVICE_TYPE_MAX] = { + "nic", "ovs", "roce", "toe", "ioe", + "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom" +}; + +/* serializes attaching/detaching all ULDs and ULD driver register/unregister */ +struct mutex g_uld_mutex; + +void sss_init_uld_lock(void) +{ + mutex_init(&g_uld_mutex); +} + +void sss_lock_uld(void) +{ + mutex_lock(&g_uld_mutex); +} + +void sss_unlock_uld(void) +{ + mutex_unlock(&g_uld_mutex); +} + +const char **sss_get_uld_names(void) +{ + return g_uld_name; +} + +struct sss_uld_info *sss_get_uld_info(void) +{ + return g_uld_info; +} + +bool sss_attach_is_enable(void) +{ + return attach_uld; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h new file mode 100644 index 0000000000000..c703eb3ab0d28 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_GLOBAL_H +#define SSS_PCI_GLOBAL_H + +#include + +#include "sss_hw_uld_driver.h" + +struct sss_uld_info *sss_get_uld_info(void); +bool sss_attach_is_enable(void); +const char **sss_get_uld_names(void); +void sss_init_uld_lock(void); +void sss_lock_uld(void); +void sss_unlock_uld(void); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h new file mode 100644 index 0000000000000..699748a46505e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h @@
-0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_ID_TBL_H +#define SSS_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_SSSNIC 0x1F3F +#define SSS_DEV_ID_STANDARD 0x9020 +#define SSS_DEV_ID_SPN120 0x9021 +#define SSS_DEV_ID_VF 0x9001 +#define SSS_DEV_ID_VF_HV 0x9002 +#define SSS_DEV_ID_SPU 0xAC00 + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c new file mode 100644 index 0000000000000..9214d52beff08 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_init.h" +#include "sss_hwdev_api.h" +#include "sss_pci_remove.h" +#include "sss_pci_global.h" +#include "sss_tool.h" + +#define SSS_SYNC_YEAR_OFFSET 1900 +#define SSS_SYNC_MONTH_OFFSET 1 + +#define SSS_CHECK_EVENT_INFO(event) \ + ((event)->service == SSS_EVENT_SRV_COMM && \ + (event)->type == SSS_EVENT_FAULT) + +#define SSS_CHECK_FAULT_EVENT_INFO(hwdev, fault_event) \ + ((fault_event)->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && \ + (fault_event)->info.chip.func_id < sss_get_max_pf_num(hwdev)) + +#define SSS_GET_CFG_REG_BAR(pdev) (SSS_IS_VF_DEV(pdev) ? \ + SSS_VF_PCI_CFG_REG_BAR : SSS_PF_PCI_CFG_REG_BAR) + +static bool sss_get_vf_load_state(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = NULL; + struct pci_dev *dev = NULL; + + if (pci_is_root_bus(pdev->bus)) + return false; + + dev = pdev->is_virtfn ? 
pdev->physfn : pdev; + adapter = pci_get_drvdata(dev); + + if (!adapter) { + sdk_err(&pdev->dev, "Invalid adapter, is null.\n"); + return false; + } + + return true; +} + +static int sss_init_pci_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to enable pci device\n"); + goto enable_err; + } + + ret = pci_request_regions(pdev, SSS_DRV_NAME); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to request regions\n"); + goto regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + sdk_warn(&pdev->dev, "Fail to set 64-bit DMA mask\n"); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set DMA mask\n"); + goto dma_err; + } + } + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + sdk_warn(&pdev->dev, "Fail to set 64-bit coherent DMA mask\n"); + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set coherent DMA mask\n"); + goto dma_err; + } + } + + return 0; + +dma_err: + pci_clear_master(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + +regions_err: + pci_disable_device(pdev); + +enable_err: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state) +{ + mutex_lock(&adapter->uld_attach_mutex); + adapter->init_state = state; + mutex_unlock(&adapter->uld_attach_mutex); +} + +static int sss_map_pci_bar(struct pci_dev *pdev, + struct sss_pci_adapter *adapter) +{ + adapter->db_base_paddr = pci_resource_start(pdev, SSS_PCI_DB_BAR); + adapter->db_dwqe_len = pci_resource_len(pdev, SSS_PCI_DB_BAR); + adapter->db_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_DB_BAR); + if (!adapter->db_reg_bar) { + sdk_err(&pdev->dev, "Fail to map db reg bar\n"); + return -ENOMEM; + } + + if 
(!SSS_IS_VF_DEV(pdev)) { + adapter->mgmt_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_MGMT_REG_BAR); + if (!adapter->mgmt_reg_bar) { + sdk_err(&pdev->dev, "Fail to map mgmt reg bar\n"); + goto mgmt_bar_err; + } + } + + adapter->intr_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_INTR_REG_BAR); + if (!adapter->intr_reg_bar) { + sdk_err(&pdev->dev, "Fail to map intr reg bar\n"); + goto intr_bar_err; + } + + adapter->cfg_reg_bar = pci_ioremap_bar(pdev, SSS_GET_CFG_REG_BAR(pdev)); + if (!adapter->cfg_reg_bar) { + sdk_err(&pdev->dev, "Fail to map config reg bar\n"); + goto cfg_bar_err; + } + + return 0; + +cfg_bar_err: + iounmap(adapter->intr_reg_bar); + +intr_bar_err: + if (!SSS_IS_VF_DEV(pdev)) + iounmap(adapter->mgmt_reg_bar); + +mgmt_bar_err: + iounmap(adapter->db_reg_bar); + + return -ENOMEM; +} + +static void sss_send_event_to_uld(struct sss_pci_adapter *adapter, + struct sss_event_info *event_info) +{ + enum sss_service_type type; + const char **uld_name = sss_get_uld_names(); + struct sss_uld_info *uld_info = sss_get_uld_info(); + + for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) { + if (test_and_set_bit(type, &adapter->uld_run_state)) { + sdk_warn(&adapter->pcidev->dev, + "Fail to send event, svc: 0x%x, event type: 0x%x, uld_name: %s\n", + event_info->service, event_info->type, uld_name[type]); + continue; + } + + if (uld_info[type].event) + uld_info[type].event(&adapter->hal_dev, + adapter->uld_dev[type], event_info); + clear_bit(type, &adapter->uld_run_state); + } +} + +static void sss_send_event_to_dst(struct sss_pci_adapter *adapter, u16 func_id, + struct sss_event_info *event_info) +{ + struct sss_pci_adapter *dest_adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) { + if (adapter->init_state == SSS_IN_REMOVE) + continue; + if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (sss_get_global_func_id(dest_adapter->hwdev) == func_id) { + 
sss_send_event_to_uld(dest_adapter, event_info); + break; + } + } + sss_put_chip_node(); +} + +static void sss_send_event_to_all_pf(struct sss_pci_adapter *adapter, + struct sss_event_info *event_info) +{ + struct sss_pci_adapter *dest_adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) { + if (adapter->init_state == SSS_IN_REMOVE) + continue; + + if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + sss_send_event_to_uld(dest_adapter, event_info); + } + sss_put_chip_node(); +} + +static void sss_process_event(void *data, struct sss_event_info *event_info) +{ + u16 id; + struct sss_pci_adapter *pci_adapter = data; + struct sss_fault_event *fault_event = (void *)event_info->event_data; + + if (SSS_CHECK_EVENT_INFO(event_info) && + SSS_CHECK_FAULT_EVENT_INFO(pci_adapter->hwdev, fault_event)) { + id = fault_event->info.chip.func_id; + return sss_send_event_to_dst(pci_adapter, id, event_info); + } + + if (event_info->type == SSS_EVENT_MGMT_WATCHDOG) + sss_send_event_to_all_pf(pci_adapter, event_info); + else + sss_send_event_to_uld(pci_adapter, event_info); +} + +static void sss_sync_time_to_chip(struct sss_pci_adapter *adapter) +{ + int ret; + u64 mstime; + struct timeval val = {0}; + struct rtc_time r_time = {0}; + + do_gettimeofday(&val); + + mstime = (u64)(val.tv_sec * MSEC_PER_SEC + val.tv_usec / USEC_PER_MSEC); + ret = sss_chip_sync_time(adapter->hwdev, mstime); + if (ret != 0) { + sdk_err(&adapter->pcidev->dev, "Fail to sync UTC time to fw, ret:%d.\n", ret); + } else { + rtc_time_to_tm((unsigned long)(val.tv_sec), &r_time); + sdk_info(&adapter->pcidev->dev, + "Success to sync UTC time to fw. 
UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + r_time.tm_year + SSS_SYNC_YEAR_OFFSET, + r_time.tm_mon + SSS_SYNC_MONTH_OFFSET, + r_time.tm_mday, r_time.tm_hour, r_time.tm_min, r_time.tm_sec); + } +} + +int sss_attach_uld_driver(struct sss_pci_adapter *adapter, + enum sss_service_type type, const struct sss_uld_info *uld_info) +{ + int ret = 0; + void *uld = NULL; + const char **name = sss_get_uld_names(); + struct pci_dev *pdev = adapter->pcidev; + + mutex_lock(&adapter->uld_attach_mutex); + + if (adapter->uld_dev[type]) { + sdk_err(&pdev->dev, "Fail to attach pci dev, driver %s\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); + return 0; + } + + ret = uld_info->probe(&adapter->hal_dev, &uld, adapter->uld_dev_name[type]); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to probe for driver %s\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); + return ret; + } + + adapter->uld_dev[type] = uld; + set_bit(type, &adapter->uld_attach_state); + mutex_unlock(&adapter->uld_attach_mutex); + + sdk_info(&pdev->dev, "Success to attach %s driver\n", name[type]); + + return 0; +} + +static bool sss_get_vf_service_load(struct pci_dev *pdev, + enum sss_service_type service_type) +{ + struct sss_pci_adapter *adapter = NULL; + struct pci_dev *dev = NULL; + + if (!pdev) { + pr_err("Invalid pdev, is null.\n"); + return false; + } + + dev = (pdev->is_virtfn != 0) ? 
pdev->physfn : pdev; + + adapter = pci_get_drvdata(dev); + if (!adapter) { + sdk_err(&pdev->dev, "Invalid pci adapter, is null.\n"); + return false; + } + + return true; +} + +static void sss_attach_all_uld_driver(struct sss_pci_adapter *adapter) +{ + enum sss_service_type type; + struct pci_dev *pdev = adapter->pcidev; + struct sss_uld_info *info = sss_get_uld_info(); + + sss_hold_chip_node(); + sss_lock_uld(); + for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) { + if (!info[type].probe) + continue; + if (pdev->is_virtfn && + !sss_get_vf_service_load(pdev, type)) { + sdk_info(&pdev->dev, + "VF dev disable service_type = %d load in host\n", type); + continue; + } + sss_attach_uld_driver(adapter, type, &info[type]); + } + sss_unlock_uld(); + sss_put_chip_node(); +} + +static int sss_attach_uld_dev(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + adapter->hal_dev.pdev = pdev; + adapter->hal_dev.hwdev = adapter->hwdev; + + if (!sss_attach_is_enable()) + return 0; + + sss_attach_all_uld_driver(adapter); + + return 0; +} + +int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info) +{ + struct sss_pci_adapter *adapter = NULL; + struct sss_card_node *card_node = NULL; + struct list_head *list = NULL; + struct sss_uld_info *info = sss_get_uld_info(); + const char **uld_name = sss_get_uld_names(); + + if (type >= SSS_SERVICE_TYPE_MAX) { + pr_err("Unknown type %d of uld to register\n", type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid info of %s driver to register\n", uld_name[type]); + return -EINVAL; + } + + sss_hold_chip_node(); + sss_lock_uld(); + + if (info[type].probe) { + sss_unlock_uld(); + sss_put_chip_node(); + pr_err("Driver %s already register\n", uld_name[type]); + return -EINVAL; + } + + list = sss_get_chip_list(); + memcpy(&info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(card_node, list, node) { + 
list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_attach_uld_driver(adapter, type, uld_info) != 0) { + sdk_err(&adapter->pcidev->dev, + "Fail to attach %s driver to pci dev\n", uld_name[type]); + continue; + } + } + } + + sss_unlock_uld(); + sss_put_chip_node(); + + pr_info("Success to register %s driver\n", uld_name[type]); + return 0; +} +EXPORT_SYMBOL(sss_register_uld); + +static int sss_notify_ok_to_chip(struct sss_pci_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + ret = sss_chip_set_pci_bdf_num(adapter->hwdev, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set BDF info to chip\n"); + return ret; + } + + return 0; +} + +static int sss_init_function(struct pci_dev *pdev, struct sss_pci_adapter *adapter) +{ + int ret; + + ret = sss_init_hwdev(adapter); + if (ret != 0) { + adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Fail to init hardware device\n"); + return -EFAULT; + } + + sss_register_dev_event(adapter->hwdev, adapter, sss_process_event); + + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + set_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state); + sss_sync_time_to_chip(adapter); + } + + sss_chip_node_lock(); + ret = sss_tool_init(adapter->hwdev, adapter->chip_node); + if (ret) { + sss_chip_node_unlock(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + goto nictool_init_err; + } + sss_chip_node_unlock(); + + sss_add_func_list(adapter); + + ret = sss_attach_uld_dev(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to attach uld dev\n"); + goto attach_uld_err; + } + + return 0; + +attach_uld_err: + sss_del_func_list(adapter); + + sss_chip_node_lock(); + sss_tool_uninit(adapter->hwdev, adapter->chip_node); + sss_chip_node_unlock(); +nictool_init_err: + sss_unregister_dev_event(adapter->hwdev); + + sss_deinit_hwdev(adapter->hwdev); + + 
return ret; +} + +static int sss_init_adapter(struct sss_pci_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + + if (pdev->is_virtfn != 0 && (!sss_get_vf_load_state(pdev))) { + sdk_info(&pdev->dev, "Vf dev disable load in host\n"); + return 0; + } + + sss_set_adapter_probe_state(adapter, SSS_PROBE_START); + + ret = sss_map_pci_bar(pdev, adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to map bar\n"); + goto map_bar_fail; + } + + /* if chip information for this PCIe function exists, add the function to that chip node */ + ret = sss_alloc_chip_node(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + + ret = sss_init_function(pdev, adapter); + if (ret != 0) + goto func_init_err; + + ret = sss_notify_ok_to_chip(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to notify ok\n"); + goto notify_err; + } + + sss_set_adapter_probe_state(adapter, SSS_PROBE_OK); + + return 0; + +notify_err: + sss_deinit_function(pdev); + +func_init_err: + sss_free_chip_node(adapter); + +alloc_chip_node_fail: + sss_unmap_pci_bar(adapter); + +map_bar_fail: + sdk_err(&pdev->dev, "Fail to init adapter\n"); + return ret; +} + +static void sss_init_adapter_param(struct sss_pci_adapter *adapter, + struct pci_dev *pdev) +{ + adapter->pcidev = pdev; + adapter->init_state = SSS_NO_PROBE; + spin_lock_init(&adapter->dettach_uld_lock); + mutex_init(&adapter->uld_attach_mutex); + pci_set_drvdata(pdev, adapter); +} + +int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct sss_pci_adapter *adapter = NULL; + + sdk_info(&pdev->dev, "Pci probe begin\n"); + + if (!pdev) + return -EINVAL; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + ret = -ENOMEM; + goto init_pci_err; + } + + sss_init_adapter_param(adapter, pdev); + + ret = sss_init_pci_dev(pdev); + if (ret != 0) { + kfree(adapter); + sdk_err(&pdev->dev, "Fail to init pci device\n"); + goto 
init_pci_err; + } + + ret = sss_init_adapter(adapter); + if (ret != 0) + goto init_adapter_err; + + sdk_info(&pdev->dev, "Success to probe pci\n"); + return 0; + +init_adapter_err: + sss_deinit_pci_dev(pdev); + +init_pci_err: + sdk_err(&pdev->dev, "Fail to pci probe\n"); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h new file mode 100644 index 0000000000000..64cb4ab6a6e1c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_PROBE_H +#define SSS_PCI_PROBE_H + +#include + +#include "sss_adapter.h" + +int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c new file mode 100644 index 0000000000000..8282d0ba73cba --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_init.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_mgmt_init.h" +#include "sss_pci_global.h" +#include "sss_tool.h" + +#define SSS_WAIT_SRIOV_CFG_TIMEOUT 15000 +#define SSS_EVENT_PROCESS_TIMEOUT 10000 + +#define SSS_SRIOV_MIN_USLEEP 9900 +#define SSS_SRIOV_MAX_USLEEP 10000 + +#define SSS_EVENT_MIN_USLEEP 900 +#define SSS_EVENT_MAX_USLEEP 1000 + +static void sss_set_adapter_remove_state(struct sss_pci_adapter *adapter) +{ + struct pci_dev 
*pdev = adapter->pcidev; + + mutex_lock(&adapter->uld_attach_mutex); + if (adapter->init_state != SSS_PROBE_OK) { + sdk_warn(&pdev->dev, "Current function don not need remove\n"); + mutex_unlock(&adapter->uld_attach_mutex); + } + adapter->init_state = SSS_IN_REMOVE; + mutex_unlock(&adapter->uld_attach_mutex); +} + +static void sss_wait_sriov_cfg_complete(struct sss_pci_adapter *adapter) +{ + unsigned long end_time; + struct sss_sriov_info *info = &adapter->sriov_info; + + clear_bit(SSS_SRIOV_PRESENT, &info->state); + usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP); + + end_time = jiffies + msecs_to_jiffies(SSS_WAIT_SRIOV_CFG_TIMEOUT); + do { + if (!test_bit(SSS_SRIOV_ENABLE, &info->state) && + !test_bit(SSS_SRIOV_DISABLE, &info->state)) + return; + + usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP); + } while (time_before(jiffies, end_time)); +} + +static bool sss_wait_uld_dev_timeout(struct sss_pci_adapter *adapter, + enum sss_service_type type) +{ + unsigned long end_time; + + end_time = jiffies + msecs_to_jiffies(SSS_EVENT_PROCESS_TIMEOUT); + do { + if (!test_and_set_bit(type, &adapter->uld_run_state)) + return false; + + usleep_range(SSS_EVENT_MIN_USLEEP, SSS_EVENT_MAX_USLEEP); + } while (time_before(jiffies, end_time)); + + if (!test_and_set_bit(type, &adapter->uld_run_state)) + return false; + + return true; +} + +void sss_detach_uld_driver(struct sss_pci_adapter *adapter, + enum sss_service_type type) +{ + bool timeout; + struct sss_uld_info *info = sss_get_uld_info(); + const char **name = sss_get_uld_names(); + + mutex_lock(&adapter->uld_attach_mutex); + if (!adapter->uld_dev[type]) { + mutex_unlock(&adapter->uld_attach_mutex); + return; + } + + timeout = sss_wait_uld_dev_timeout(adapter, type); + + spin_lock_bh(&adapter->dettach_uld_lock); + clear_bit(type, &adapter->uld_attach_state); + spin_unlock_bh(&adapter->dettach_uld_lock); + + info[type].remove(&adapter->hal_dev, adapter->uld_dev[type]); + adapter->uld_dev[type] = NULL; + + if 
(!timeout) + clear_bit(type, &adapter->uld_run_state); + + sdk_info(&adapter->pcidev->dev, + "Success to detach %s driver from pci device\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); +} + +void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter) +{ + struct sss_uld_info *info = sss_get_uld_info(); + enum sss_service_type type; + + sss_hold_chip_node(); + sss_lock_uld(); + for (type = SSS_SERVICE_TYPE_MAX - 1; type > SSS_SERVICE_TYPE_NIC; type--) { + if (info[type].probe) + sss_detach_uld_driver(adapter, type); + } + + if (info[SSS_SERVICE_TYPE_NIC].probe) + sss_detach_uld_driver(adapter, SSS_SERVICE_TYPE_NIC); + sss_unlock_uld(); + sss_put_chip_node(); +} + +void sss_dettach_uld_dev(struct sss_pci_adapter *adapter) +{ + sss_detach_all_uld_driver(adapter); +} + +void sss_unregister_uld(enum sss_service_type type) +{ + struct sss_pci_adapter *adapter = NULL; + struct sss_card_node *card_node = NULL; + struct list_head *card_list = NULL; + struct sss_uld_info *info = sss_get_uld_info(); + + if (type >= SSS_SERVICE_TYPE_MAX) { + pr_err("Unknown type %d of uld to unregister\n", type); + return; + } + + sss_hold_chip_node(); + sss_lock_uld(); + card_list = sss_get_chip_list(); + list_for_each_entry(card_node, card_list, node) { + /* detach vf first */ + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + sss_detach_uld_driver(adapter, type); + + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PF) + sss_detach_uld_driver(adapter, type); + + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PPF) + sss_detach_uld_driver(adapter, type); + } + + memset(&info[type], 0, sizeof(*info)); + sss_unlock_uld(); + sss_put_chip_node(); +} +EXPORT_SYMBOL(sss_unregister_uld); + +void sss_deinit_function(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = 
sss_get_adapter_by_pcidev(pdev); + + sss_chip_disable_mgmt_channel(adapter->hwdev); + + sss_flush_mgmt_workq(adapter->hwdev); + + sss_del_func_list(adapter); + + sss_chip_node_lock(); + sss_tool_uninit(adapter->hwdev, adapter->chip_node); + sss_chip_node_unlock(); + + sss_dettach_uld_dev(adapter); + + sss_unregister_dev_event(adapter->hwdev); + + sss_deinit_hwdev(adapter->hwdev); +} + +void sss_unmap_pci_bar(struct sss_pci_adapter *adapter) +{ + iounmap(adapter->cfg_reg_bar); + iounmap(adapter->intr_reg_bar); + + if (!SSS_IS_VF_DEV(adapter->pcidev)) + iounmap(adapter->mgmt_reg_bar); + + iounmap(adapter->db_reg_bar); +} + +int sss_deinit_adapter(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + sss_set_adapter_remove_state(adapter); + + sss_hwdev_detach(adapter->hwdev); + + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + sss_wait_sriov_cfg_complete(adapter); + sss_pci_disable_sriov(adapter); + } + + sss_deinit_function(pdev); + + sss_free_chip_node(adapter); + + sss_unmap_pci_bar(adapter); + + sss_set_adapter_probe_state(adapter, SSS_NO_PROBE); + + sdk_info(&pdev->dev, "Pcie device removed function\n"); + + return 0; +} + +void sss_deinit_pci_dev(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(adapter); +} + +void sss_pci_remove(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + if (!adapter) + return; + + sdk_info(&pdev->dev, "Begin pcie device remove\n"); + + sss_deinit_adapter(adapter); + + sss_deinit_pci_dev(pdev); + + sdk_info(&pdev->dev, "Success to remove pcie device\n"); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h new file mode 100644 index 0000000000000..ddd760ee53dff --- 
/dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_REMOVE_H +#define SSS_PCI_REMOVE_H + +#include + +#include "sss_hw_svc_cap.h" +#include "sss_adapter.h" + +void sss_detach_uld_driver(struct sss_pci_adapter *adapter, enum sss_service_type type); +void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter); +void sss_dettach_uld_dev(struct sss_pci_adapter *adapter); +void sss_deinit_function(struct pci_dev *pdev); +void sss_unmap_pci_bar(struct sss_pci_adapter *adapter); +int sss_deinit_adapter(struct sss_pci_adapter *adapter); +void sss_deinit_pci_dev(struct pci_dev *pdev); + +void sss_pci_remove(struct pci_dev *pdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c new file mode 100644 index 0000000000000..54337fd447a95 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_init.h" + +void sss_pci_shutdown(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sdk_info(&pdev->dev, "Shutdown device\n"); + + if (adapter) + sss_hwdev_shutdown(adapter->hwdev); + + pci_disable_device(pdev); + + if (adapter) + sss_hwdev_stop(adapter->hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h new file mode 100644 index 
0000000000000..7c9e92edda6ec --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_SHUTDOWN_H +#define SSS_PCI_SHUTDOWN_H + +#include + +void sss_pci_shutdown(struct pci_dev *pdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c new file mode 100644 index 0000000000000..88fead9f65cb5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_pci_sriov.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_api.h" + +#ifdef CONFIG_PCI_IOV +static int sss_init_vf_hw(void *hwdev, u16 vf_num) +{ + int ret; + u16 i; + u16 id; + + /* mbx msg channel resources will be freed during remove process */ + ret = sss_init_func_mbx_msg(hwdev, sss_get_max_vf_num(hwdev)); + if (ret != 0) + return ret; + + /* vf use 256K as default wq page size, and can't change it */ + for (i = 1; i <= vf_num; i++) { + id = sss_get_glb_pf_vf_offset(hwdev) + i; + ret = sss_chip_set_wq_page_size(hwdev, id, SSS_DEFAULT_WQ_PAGE_SIZE); + if (ret != 0) + return ret; + } + + return 0; +} + +static void sss_deinit_vf_hw(void *hwdev, u16 vf_num) +{ + u16 i; + u16 id; + + for (i = 1; i <= vf_num; i++) { + id = sss_get_glb_pf_vf_offset(hwdev) + i; + sss_chip_set_wq_page_size(hwdev, id, SSS_HW_WQ_PAGE_SIZE); + } +} + +static void sss_notify_sriov_state_change(void *hwdev, u16 vf_num) +{ + struct sss_event_info event = {0}; + + event.service = SSS_EVENT_SRV_COMM; + event.type = SSS_EVENT_SRIOV_STATE_CHANGE; + + if (vf_num > 0) { + ((struct 
sss_sriov_state_info *)(void *)event.event_data)->enable = 1; + ((struct sss_sriov_state_info *)(void *)event.event_data)->vf_num = vf_num; + } + + sss_do_event_callback(hwdev, &event); +} +#endif + +int sss_pci_disable_sriov(struct sss_pci_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + void *hwdev = adapter->hwdev; + struct pci_dev *pdev = adapter->pcidev; + struct sss_sriov_info *info = &adapter->sriov_info; + + if (!info->enabled) + return 0; + + if (test_and_set_bit(SSS_SRIOV_DISABLE, &info->state)) { + sdk_err(&pdev->dev, "SR-IOV disable in process."); + return -EPERM; + } + + if (pci_vfs_assigned(pdev) != 0) { + clear_bit(SSS_SRIOV_DISABLE, &info->state); + sdk_warn(&pdev->dev, "VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + sss_notify_sriov_state_change(hwdev, 0); + + info->enabled = false; + + pci_disable_sriov(pdev); + + sss_deinit_vf_hw(hwdev, (u16)info->vf_num); + info->vf_num = 0; + + clear_bit(SSS_SRIOV_DISABLE, &info->state); + +#endif + + return 0; +} + +#ifdef CONFIG_PCI_IOV +static int sss_check_existing_vf(struct sss_pci_adapter *adapter, u16 vf_num) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + int existing_vf = pci_num_vf(pdev); + struct sss_sriov_info *info = &adapter->sriov_info; + + if (existing_vf != 0 && existing_vf != vf_num) { + ret = sss_pci_disable_sriov(adapter); + if (ret != 0) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + } else if (existing_vf == vf_num) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return vf_num; + } + + return 0; +} +#endif + +static int sss_pci_enable_sriov(struct sss_pci_adapter *adapter, u16 vf_num) +{ +#ifdef CONFIG_PCI_IOV + int ret = 0; + void *hwdev = adapter->hwdev; + struct pci_dev *pdev = adapter->pcidev; + struct sss_sriov_info *info = &adapter->sriov_info; + + if (test_and_set_bit(SSS_SRIOV_ENABLE, &info->state)) { + sdk_err(&pdev->dev, "SR-IOV disable, vf_num %d\n", vf_num); + return -EPERM; + } + + if (vf_num > 
pci_sriov_get_totalvfs(pdev)) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return -ERANGE; + } + + ret = sss_check_existing_vf(adapter, vf_num); + if (ret != 0) + return ret; + + ret = sss_init_vf_hw(hwdev, vf_num); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to init vf in hw, ret: %d\n", ret); + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + + ret = pci_enable_sriov(pdev, vf_num); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to enable SR-IOV, ret: %d\n", ret); + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + + info->enabled = true; + info->vf_num = vf_num; + + sss_notify_sriov_state_change(hwdev, vf_num); + + clear_bit(SSS_SRIOV_ENABLE, &info->state); + + return vf_num; +#else + + return 0; +#endif +} + +int sss_pci_configure_sriov(struct pci_dev *pdev, int vf_num) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + if (!adapter) + return -EFAULT; + + if (!test_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state)) + return -EFAULT; + + return (vf_num == 0) ? 
sss_pci_disable_sriov(adapter) : + sss_pci_enable_sriov(adapter, (u16)vf_num); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h new file mode 100644 index 0000000000000..3146e8eb9f8f7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_SRIOV_H +#define SSS_PCI_SRIOV_H + +#include +#include + +#include "sss_sriov_info.h" +#include "sss_adapter.h" + +int sss_pci_disable_sriov(struct sss_pci_adapter *adapter); + +int sss_pci_configure_sriov(struct pci_dev *pdev, int num_vfs); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c new file mode 100644 index 0000000000000..96d57922821b6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hw_wq.h" + +#define SSS_WQ_MIN_DEPTH 64 +#define SSS_WQ_MAX_DEPTH 65536 +#define SSS_WQ_MAX_PAGE_NUM (PAGE_SIZE / sizeof(u64)) + +static int sss_init_wq_block(struct sss_wq *wq) +{ + int i; + + if (SSS_WQ_IS_0_LEVEL_CLA(wq)) { + wq->block_paddr = wq->page[0].align_paddr; + wq->block_vaddr = wq->page[0].align_vaddr; + return 0; + } + + if (wq->page_num > SSS_WQ_MAX_PAGE_NUM) { + sdk_err(wq->dev_hdl, "Wq page num: 0x%x out of range: %lu\n", + wq->page_num, SSS_WQ_MAX_PAGE_NUM); + return -EFAULT; + } + + wq->block_vaddr = dma_zalloc_coherent(wq->dev_hdl, PAGE_SIZE, + &wq->block_paddr, GFP_KERNEL); + if (!wq->block_vaddr) { + sdk_err(wq->dev_hdl, "Fail to alloc wq block vaddr\n"); + return -ENOMEM; + } + + for (i = 0; i 
< wq->page_num; i++) + wq->block_vaddr[i] = cpu_to_be64(wq->page[i].align_paddr); + + return 0; +} + +static void sss_deinit_wq_block(struct sss_wq *wq) +{ + if (!SSS_WQ_IS_0_LEVEL_CLA(wq)) + dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->block_vaddr, + wq->block_paddr); +} + +static int sss_alloc_wq_page(struct sss_wq *wq) +{ + int i; + int ret; + int id; + + wq->page = kcalloc(wq->page_num, sizeof(*wq->page), GFP_KERNEL); + if (!wq->page) + return -ENOMEM; + + for (id = 0; id < wq->page_num; id++) { + ret = sss_dma_zalloc_coherent_align(wq->dev_hdl, wq->page_size, + wq->page_size, GFP_KERNEL, &wq->page[id]); + if (ret != 0) { + sdk_err(wq->dev_hdl, "Fail to alloc wq dma page\n"); + goto dma_page_err; + } + } + + ret = sss_init_wq_block(wq); + if (ret != 0) + goto block_err; + + return 0; + +block_err: +dma_page_err: + for (i = 0; i < id; i++) + sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]); + + kfree(wq->page); + wq->page = NULL; + + return -ENOMEM; +} + +static void sss_free_wq_page(struct sss_wq *wq) +{ + int i; + + sss_deinit_wq_block(wq); + + for (i = 0; i < wq->page_num; i++) + sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]); + + kfree(wq->page); + wq->page = NULL; +} + +static void sss_init_wq_param(struct sss_hwdev *hwdev, struct sss_wq *wq, + u32 q_depth, u16 block_size) +{ + u32 page_size = ALIGN(hwdev->wq_page_size, PAGE_SIZE); + + wq->ci = 0; + wq->pi = 0; + wq->dev_hdl = hwdev->dev_hdl; + wq->q_depth = q_depth; + wq->id_mask = (u16)(q_depth - 1); + wq->elem_size = block_size; + wq->elem_size_shift = (u16)ilog2(wq->elem_size); + wq->page_size = page_size; + wq->elem_per_page = min(page_size / block_size, q_depth); + wq->elem_per_page_shift = (u16)ilog2(wq->elem_per_page); + wq->elem_per_page_mask = (u16)(wq->elem_per_page - 1); + wq->page_num = + (u16)(ALIGN(((u32)q_depth * block_size), page_size) / page_size); +} + +int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size) +{ + if (!wq || !hwdev) { + pr_err("Invalid 
wq or dev_hdl\n"); + return -EINVAL; + } + + if (q_depth < SSS_WQ_MIN_DEPTH || q_depth > SSS_WQ_MAX_DEPTH || + (q_depth & (q_depth - 1)) != 0) { + sdk_err(SSS_TO_DEV(hwdev), "Invalid q_depth(%u)\n", q_depth); + return -EINVAL; + } + + if (block_size == 0 || (block_size & (block_size - 1)) != 0) { + sdk_err(SSS_TO_DEV(hwdev), "Invalid block_size(%u)\n", block_size); + return -EINVAL; + } + + sss_init_wq_param(hwdev, wq, q_depth, block_size); + + return sss_alloc_wq_page(wq); +} +EXPORT_SYMBOL(sss_create_wq); + +void sss_destroy_wq(struct sss_wq *wq) +{ + if (!wq) + return; + + sss_free_wq_page(wq); +} +EXPORT_SYMBOL(sss_destroy_wq); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h new file mode 100644 index 0000000000000..073f44213a0c5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSSNIC_NICTOOL_H +#define SSSNIC_NICTOOL_H + +#include "sss_tool_chip.h" +#include "sss_tool_sdk.h" +#include "sss_tool_sm.h" +#include "sss_tool_comm.h" + +#ifndef _LLT_TEST_ +#define SSS_TOOL_PAGE_ORDER (10) +#else +#define SSS_TOOL_PAGE_ORDER (1) +#endif + +#define SSS_TOOL_MEM_MAP_SIZE (PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER)) + +#define SSS_TOOL_CARD_MAX (64) + +int sss_tool_init(void *hwdev, void *chip_node); +void sss_tool_uninit(void *hwdev, void *chip_node); + +extern u64 g_card_pa[SSS_TOOL_CARD_MAX]; +extern void *g_card_va[SSS_TOOL_CARD_MAX]; +extern int g_card_id; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c new file mode 100644 index 0000000000000..21833df254b52 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) 
KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_common.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_mgmt_common.h" +#include "sss_hwif_ctrlq.h" +#include "sss_hwif_api.h" +#include "sss_hw_common.h" +#include "sss_mgmt_channel.h" +#include "sss_linux_kernel.h" +#include "sss_csr.h" +#include "sss_hw.h" +#include "sss_adapter.h" +#include "sss_tool.h" + +#define SSS_TOOL_DW_WIDTH 4 + +/* completion timeout interval, unit is millisecond */ +#define SSS_TOOL_UPDATE_MSG_TIMEOUT 50000U + +#define SSS_TOOL_CLP_REG_GAP 0x20 +#define SSS_TOOL_CLP_INPUT_BUF_LEN 4096UL +#define SSS_TOOL_CLP_DATA_UNIT 4UL +#define SSS_TOOL_CLP_MAX_DATA_SIZE (SSS_TOOL_CLP_INPUT_BUF_LEN / SSS_TOOL_CLP_DATA_UNIT) + +#define SSS_TOOL_CLP_REQ_SIZE_OFFSET 0 +#define SSS_TOOL_CLP_RSP_SIZE_OFFSET 16 +#define SSS_TOOL_CLP_BASE_OFFSET 0 +#define SSS_TOOL_CLP_LEN_OFFSET 0 +#define SSS_TOOL_CLP_START_OFFSET 31 +#define SSS_TOOL_CLP_READY_OFFSET 31 +#define SSS_TOOL_CLP_OFFSET(member) (SSS_TOOL_CLP_##member##_OFFSET) + +#define SSS_TOOL_CLP_SIZE_MASK 0x7ffUL +#define SSS_TOOL_CLP_BASE_MASK 0x7ffffffUL +#define SSS_TOOL_CLP_LEN_MASK 0x7ffUL +#define SSS_TOOL_CLP_START_MASK 0x1UL +#define SSS_TOOL_CLP_READY_MASK 0x1UL +#define SSS_TOOL_CLP_MASK(member) (SSS_TOOL_CLP_##member##_MASK) + +#define SSS_TOOL_CLP_DELAY_CNT_MAX 200UL +#define SSS_TOOL_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define SSS_TOOL_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define SSS_TOOL_CLP_LEN_REG_MAX 0x3ff +#define SSS_TOOL_CLP_START_OR_READY_REG_MAX 0x1 + +#define SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header) \ + (((in_size) + (u16)sizeof(header) + \ + (((in_size) % SSS_TOOL_CLP_DATA_UNIT) ? 
SSS_TOOL_CLP_DATA_UNIT : 0)) / \ + SSS_TOOL_CLP_DATA_UNIT) + +#define SSS_TOOL_CLP_REG_VALUE(value, offset, mask) \ + (((value) >> SSS_TOOL_CLP_OFFSET(offset)) & SSS_TOOL_CLP_MASK(mask)) + +enum sss_tool_clp_data_type { + SSS_TOOL_CLP_REQ = 0, + SSS_TOOL_CLP_RSP = 1 +}; + +enum sss_tool_clp_reg_type { + SSS_TOOL_CLP_BASE = 0, + SSS_TOOL_CLP_SIZE = 1, + SSS_TOOL_CLP_LEN = 2, + SSS_TOOL_CLP_START_REQ = 3, + SSS_TOOL_CLP_READY_RSP = 4 +}; + +enum SSS_TOOL_ADM_CSR_DATA_OPERATION { + SSS_TOOL_ADM_CSR_WRITE = 0x1E, + SSS_TOOL_ADM_CSR_READ = 0x1F +}; + +enum SSS_TOOL_ADM_CSR_NEED_RESP_DATA { + SSS_TOOL_ADM_CSR_NO_RESP_DATA = 0, + SSS_TOOL_ADM_CSR_NEED_RESP_DATA = 1 +}; + +enum SSS_TOOL_ADM_CSR_DATA_SIZE { + SSS_TOOL_ADM_CSR_DATA_SZ_32 = 0, + SSS_TOOL_ADM_CSR_DATA_SZ_64 = 1 +}; + +struct sss_tool_csr_request_adm_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. 
*/ + u32 csr_write_data_l; +}; + +struct sss_tool_csr_read { + u32 rd_len; + u32 addr; +}; + +struct sss_tool_csr_write { + u32 rd_len; + u32 addr; + u8 *data; +}; + +static u32 sss_tool_get_timeout_val(enum sss_mod_type mod, u16 cmd) +{ + if (mod == SSS_MOD_TYPE_COMM && + (cmd == SSS_COMM_MGMT_CMD_UPDATE_FW || + cmd == SSS_COMM_MGMT_CMD_UPDATE_BIOS || + cmd == SSS_COMM_MGMT_CMD_ACTIVE_FW || + cmd == SSS_COMM_MGMT_CMD_SWITCH_CFG || + cmd == SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW)) + return SSS_TOOL_UPDATE_MSG_TIMEOUT; + + return 0; /* use default mbox/adm timeout time */ +} + +static int sss_tool_get_clp_reg(void *hwdev, enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type type, u32 *addr) +{ + switch (type) { + case SSS_TOOL_CLP_BASE: + *addr = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_REG(REQBASE) : SSS_CLP_REG(RSPBASE); + break; + + case SSS_TOOL_CLP_SIZE: + *addr = SSS_CLP_REG(SIZE); + break; + + case SSS_TOOL_CLP_LEN: + *addr = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_REG(REQ) : SSS_CLP_REG(RSP); + break; + + case SSS_TOOL_CLP_START_REQ: + *addr = SSS_CLP_REG(REQ); + break; + + case SSS_TOOL_CLP_READY_RSP: + *addr = SSS_CLP_REG(RSP); + break; + + default: + *addr = 0; + break; + } + + return (*addr == 0) ? 
-EINVAL : 0; +} + +static inline int sss_tool_clp_param_valid(enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type) +{ + if (data_type == SSS_TOOL_CLP_REQ && reg_type == SSS_TOOL_CLP_READY_RSP) + return -EINVAL; + + if (data_type == SSS_TOOL_CLP_RSP && reg_type == SSS_TOOL_CLP_START_REQ) + return -EINVAL; + + return 0; +} + +static u32 sss_tool_get_clp_reg_value(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 reg_addr) +{ + u32 value; + + value = sss_chip_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SSS_TOOL_CLP_BASE: + value = SSS_TOOL_CLP_REG_VALUE(value, BASE, BASE); + break; + + case SSS_TOOL_CLP_SIZE: + if (data_type == SSS_TOOL_CLP_REQ) + value = SSS_TOOL_CLP_REG_VALUE(value, REQ_SIZE, SIZE); + else + value = SSS_TOOL_CLP_REG_VALUE(value, RSP_SIZE, SIZE); + break; + + case SSS_TOOL_CLP_LEN: + value = SSS_TOOL_CLP_REG_VALUE(value, LEN, LEN); + break; + + case SSS_TOOL_CLP_START_REQ: + value = SSS_TOOL_CLP_REG_VALUE(value, START, START); + break; + + case SSS_TOOL_CLP_READY_RSP: + value = SSS_TOOL_CLP_REG_VALUE(value, READY, READY); + break; + + default: + break; + } + + return value; +} + +static int sss_tool_read_clp_reg(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int ret; + + ret = sss_tool_clp_param_valid(data_type, reg_type); + if (ret) + return ret; + + ret = sss_tool_get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (ret) + return ret; + + *read_value = sss_tool_get_clp_reg_value(hwdev, data_type, reg_type, reg_addr); + + return 0; +} + +static int sss_tool_check_reg_value(enum sss_tool_clp_reg_type reg_type, u32 value) +{ + if (reg_type == SSS_TOOL_CLP_BASE && + value > SSS_TOOL_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == SSS_TOOL_CLP_SIZE && + value > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == 
SSS_TOOL_CLP_LEN && + value > SSS_TOOL_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == SSS_TOOL_CLP_START_REQ || + reg_type == SSS_TOOL_CLP_READY_RSP) && + value > SSS_TOOL_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static int sss_tool_check_clp_init_status(struct sss_hwdev *hwdev) +{ + int ret; + u32 reg_value = 0; + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_BASE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read clp reg: 0x%x\n", reg_value); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_BASE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read rsp ba value: 0x%x\n", reg_value); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_SIZE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read req size\n"); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_SIZE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void sss_tool_write_clp_reg(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (sss_tool_clp_param_valid(data_type, reg_type)) + return; + + if (sss_tool_check_reg_value(reg_type, value)) + return; + + if (sss_tool_get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = sss_chip_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SSS_TOOL_CLP_LEN: + reg_value &= (~(SSS_TOOL_CLP_MASK(LEN) << SSS_TOOL_CLP_OFFSET(LEN))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(LEN)); + break; + + case SSS_TOOL_CLP_START_REQ: + reg_value &= (~(SSS_TOOL_CLP_MASK(START) << SSS_TOOL_CLP_OFFSET(START))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(START)); + break; + + case SSS_TOOL_CLP_READY_RSP: + reg_value &= (~(SSS_TOOL_CLP_MASK(READY) << 
SSS_TOOL_CLP_OFFSET(READY))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(READY)); + break; + + default: + return; + } + + sss_chip_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int sss_tool_read_clp_data(struct sss_hwdev *hwdev, void *buf_out, u16 *out_size) +{ + int err; + u32 reg = SSS_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, &ready); + if (err || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) { + tool_err("Fail to read clp delay rsp, timeout delay_cnt: %u\n", + delay_cnt); + return -EINVAL; + } + } + + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_LEN, &temp_out_size); + if (err) + return err; + + if (temp_out_size > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + tool_err("Invalid temp out size: %u\n", temp_out_size); + return -EINVAL; + } + + *out_size = (u16)temp_out_size; + for (; temp_out_size > 0; temp_out_size--) { + *ptr = sss_chip_read_reg(hwdev->hwif, reg); + ptr++; + /* read 4 bytes every time */ + reg = reg + 4; + } + + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, (u32)0x0); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, SSS_TOOL_CLP_LEN, (u32)0x0); + + return 0; +} + +static int sss_tool_write_clp_data(struct sss_hwdev *hwdev, void *buf_in, u16 in_size) +{ + int ret; + u32 reg = SSS_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + u16 size_in = in_size; + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_START_REQ, &start); + if (ret != 0) + return ret; + + while (start == 1) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + ret = 
sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_START_REQ, &start); + if (ret || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_LEN, size_in); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_START_REQ, (u32)0x1); + + for (; size_in > 0; size_in--) { + sss_chip_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + sizeof(u32); + } + + return 0; +} + +static void sss_tool_clear_clp_data(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type) +{ + u32 reg = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_DATA(REQ) : SSS_CLP_DATA(RSP); + u32 count = SSS_TOOL_CLP_MAX_DATA_SIZE; + + for (; count > 0; count--) { + sss_chip_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + sizeof(u32); + } +} + +static void sss_tool_clp_prepare_header(struct sss_hwdev *hwdev, u64 *header, + u16 msg_len, u8 mod, enum sss_mgmt_cmd cmd) +{ + struct sss_hwif *hwif = hwdev->hwif; + + *header = SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | + SSS_SET_MSG_HEADER(mod, MODULE) | + SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | + SSS_SET_MSG_HEADER(0, NO_ACK) | + SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) | + SSS_SET_MSG_HEADER(0, SEQID) | + SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | + SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | + SSS_SET_MSG_HEADER(0, DIRECTION) | + SSS_SET_MSG_HEADER(cmd, CMD) | + SSS_SET_MSG_HEADER(hwif->attr.func_id, SRC_GLB_FUNC_ID) | + SSS_SET_MSG_HEADER(0, MSG_ID); +} + +int sss_tool_send_clp_msg(struct sss_hwdev *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + struct sss_clp_pf_to_mgmt *clp_msg; + u64 header; + u16 size; + u8 *msg_buf; + int ret; + + if (!hwdev || SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) + return -EINVAL; + + if (!hwdev->chip_present_flag || !SSS_SUPPORT_CLP(hwdev)) + return -EPERM; + + clp_msg = hwdev->clp_pf_to_mgmt; + if (!clp_msg) + return -EPERM; + + msg_buf = clp_msg->clp_msg_buf; + + /* 4 
bytes alignment */ + size = SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header); + + if (size > SSS_TOOL_CLP_MAX_DATA_SIZE) { + tool_err("Invalid data size: %u\n", size); + return -EINVAL; + } + down(&clp_msg->clp_msg_lock); + + ret = sss_tool_check_clp_init_status(hwdev); + if (ret) { + tool_err("Fail to check clp init status\n"); + up(&clp_msg->clp_msg_lock); + return ret; + } + + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, 0x0); + + /* Send request */ + memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN); + sss_tool_clp_prepare_header(hwdev, &header, in_size, mod, cmd); + + memcpy(msg_buf, &header, sizeof(header)); + msg_buf += sizeof(header); + memcpy(msg_buf, buf_in, in_size); + + msg_buf = clp_msg->clp_msg_buf; + + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_REQ); + ret = sss_tool_write_clp_data(hwdev, clp_msg->clp_msg_buf, size); + if (ret) { + tool_err("Fail to send clp request\n"); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + /* Get response */ + msg_buf = clp_msg->clp_msg_buf; + memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN); + ret = sss_tool_read_clp_data(hwdev, msg_buf, &size); + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP); + if (ret) { + tool_err("Fail to read clp response\n"); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + size = (u16)((size * SSS_TOOL_CLP_DATA_UNIT) & 0xffff); + if (size <= sizeof(header) || size > SSS_TOOL_CLP_INPUT_BUF_LEN) { + tool_err("Invalid response size: %u", size); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + if (size != *out_size + sizeof(header)) { + tool_err("Invalid size:%u, out_size: %u\n", size, *out_size); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (msg_buf + sizeof(header)), size); + up(&clp_msg->clp_msg_lock); + + return 0; +} + +int sss_tool_adm_csr_rd32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 *val) +{ + int ret; + u32 csr_val = 0; + struct sss_tool_csr_request_adm_data 
adm_data = {0}; + + if (!hwdev || !val) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + adm_data.dw0 = 0; + adm_data.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_READ; + adm_data.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NEED_RESP_DATA; + adm_data.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32; + adm_data.dw1.val32 = cpu_to_be32(adm_data.dw1.val32); + adm_data.dw2.bits.csr_addr = addr; + adm_data.dw2.val32 = cpu_to_be32(adm_data.dw2.val32); + + ret = sss_adm_msg_read_ack(hwdev, dest, (u8 *)(&adm_data), + sizeof(adm_data), &csr_val, 0x4); + if (ret) { + tool_err("Fail to read 32 bit csr, dest %u addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +int sss_tool_adm_csr_wr32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 val) +{ + int ret; + struct sss_tool_csr_request_adm_data adm_data = {0}; + + if (!hwdev) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + adm_data.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_WRITE; + adm_data.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NO_RESP_DATA; + adm_data.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32; + adm_data.dw1.val32 = cpu_to_be32(adm_data.dw1.val32); + adm_data.dw2.bits.csr_addr = addr; + adm_data.dw2.val32 = cpu_to_be32(adm_data.dw2.val32); + adm_data.csr_write_data_h = 0xffffffff; + adm_data.csr_write_data_l = val; + + ret = sss_adm_msg_write_nack(hwdev, dest, (u8 *)(&adm_data), sizeof(adm_data)); + if (ret) { + tool_err("Fail to write 32 bit csr! 
dest %u addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + +static int sss_tool_adm_csr_read(void *hwdev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u32 cnt = 0; + u32 offset = 0; + u32 i; + struct sss_tool_csr_read *rd_msg = (struct sss_tool_csr_read *)buf_in; + u8 node_id = (u8)tool_msg->mpu_cmd.mod; + u32 rd_len = rd_msg->rd_len; + u32 rd_addr = rd_msg->addr; + + if (!buf_in || !buf_out || in_size != sizeof(*rd_msg) || + *out_size != rd_len || rd_len % SSS_TOOL_DW_WIDTH != 0) + return -EINVAL; + + cnt = rd_len / SSS_TOOL_DW_WIDTH; + for (i = 0; i < cnt; i++) { + ret = sss_tool_adm_csr_rd32(hwdev, node_id, rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + tool_err("Fail to read csr, err: %d, node_id: %u, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += SSS_TOOL_DW_WIDTH; + } + *out_size = rd_len; + + return ret; +} + +static int sss_tool_adm_csr_write(void *hwdev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u32 cnt = 0; + u32 offset = 0; + u32 i; + struct sss_tool_csr_write *wr_msg = (struct sss_tool_csr_write *)buf_in; + u8 node_id = (u8)tool_msg->mpu_cmd.mod; + u32 rd_len = wr_msg->rd_len; + u32 rd_addr = wr_msg->addr; + u8 *data = NULL; + + if (!buf_in || in_size != sizeof(*wr_msg) || + wr_msg->rd_len % SSS_TOOL_DW_WIDTH != 0) + return -EINVAL; + + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) + return -EFAULT; + + if (copy_from_user(data, (void *)wr_msg->data, rd_len)) { + tool_err("Fail to copy information from user\n"); + kfree(data); + return -EFAULT; + } + + cnt = rd_len / SSS_TOOL_DW_WIDTH; + for (i = 0; i < cnt; i++) { + ret = sss_tool_adm_csr_wr32(hwdev, node_id, rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + tool_err("Fail to write csr, ret: %d, node_id: %u, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + 
kfree(data); + return ret; + } + offset += SSS_TOOL_DW_WIDTH; + } + + *out_size = 0; + kfree(data); + return ret; +} + +int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u16 cmd = tool_msg->mpu_cmd.cmd; + enum sss_mod_type mod = (enum sss_mod_type)tool_msg->mpu_cmd.mod; + u32 timeout = sss_tool_get_timeout_val(mod, cmd); + void *hwdev = hal_dev->hwdev; + + if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX || + tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_CLP) { + if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX) { + ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + SSS_CHANNEL_DEFAULT); + } else { + ret = sss_tool_send_clp_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + } + + if (ret) { + tool_err("Fail to send msg to mgmt cpu, mod: %d, cmd: %u\n", mod, cmd); + return ret; + } + + } else if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_ADM_MSG_BYPASS) { + if (tool_msg->mpu_cmd.cmd == SSS_TOOL_ADM_MSG_WRITE) + return sss_tool_adm_csr_write(hwdev, tool_msg, buf_in, in_size, + buf_out, out_size); + + ret = sss_tool_adm_csr_read(hwdev, tool_msg, buf_in, in_size, buf_out, out_size); + } else if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU) { + if (SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)) != SSS_SPU_HOST_ID) + ret = sss_sync_send_adm_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout); + else + ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + SSS_CHANNEL_DEFAULT); + + if (ret) { + tool_err("Fail to send adm msg to mgmt cpu, mod: %d, cmd: %u\n", + mod, cmd); + return ret; + } + + } else { + tool_err("Invalid channel %d\n", tool_msg->mpu_cmd.channel); + return -EINVAL; + } + + return ret; +} + +int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct 
sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u8 cmd = tool_msg->npu_cmd.cmd; + enum sss_mod_type mod = (enum sss_mod_type)tool_msg->npu_cmd.mod; + + if (tool_msg->npu_cmd.direct_resp) { + ret = sss_ctrlq_direct_reply(hal_dev->hwdev, mod, cmd, buf_in, + buf_out, 0, SSS_CHANNEL_DEFAULT); + if (ret) + tool_err("Fail to send direct ctrlq, ret: %d\n", ret); + } else { + ret = sss_ctrlq_sync_cmd_detail_reply(hal_dev->hwdev, mod, cmd, buf_in, buf_out, + NULL, 0, SSS_CHANNEL_DEFAULT); + if (ret) + tool_err("Fail to send detail ctrlq, ret: %d\n", ret); + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h new file mode 100644 index 0000000000000..4dbaed192f85d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_CHIP_H +#define SSS_TOOL_CHIP_H +#include "sss_hw.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" + +int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); +int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h new file mode 100644 index 0000000000000..b951026a7c9c5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_HW_H +#define SSS_TOOL_HW_H + +#define SSS_TOOL_CMD_TYPE (0x18) + +#define SSS_TOOL_PF_DEV_MAX 32 +/* Indicates the maximum number of interrupts that can be recorded. 
+ * Subsequent interrupts are not recorded in FFM. + */ +#define SSS_TOOL_FFM_RECORD_MAX 64 + +#define SSS_TOOL_PF_INFO_MAX (16) +#define SSS_TOOL_BUSINFO_LEN (32) + +#define SSS_TOOL_CHIP_FAULT_SIZE (110 * 1024) +#define SSS_TOOL_DRV_BUF_SIZE_MAX 4096 + +/* dbgtool command type */ +/* You can add commands as required. The dbgtool command can be + * used to invoke all interfaces of the kernel-mode x86 driver. + */ +enum sss_tool_dbg_cmd { + SSS_TOOL_DBG_CMD_API_RD = 0, + SSS_TOOL_DBG_CMD_API_WR, + SSS_TOOL_DBG_CMD_FFM_RD, + SSS_TOOL_DBG_CMD_FFM_CLR, + SSS_TOOL_DBG_CMD_PF_DEV_INFO_GET, + SSS_TOOL_DBG_CMD_MSG_2_UP, + SSS_TOOL_DBG_CMD_FREE_MEM, + SSS_TOOL_DBG_CMD_NUM +}; + +enum module_name { + SSS_TOOL_MSG_TO_NPU = 1, + SSS_TOOL_MSG_TO_MPU, + SSS_TOOL_MSG_TO_SM, + SSS_TOOL_MSG_TO_HW_DRIVER, +#define SSS_TOOL_MSG_TO_SRV_DRV_BASE (SSS_TOOL_MSG_TO_HW_DRIVER + 1) + SSS_TOOL_MSG_TO_NIC_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE, + SSS_TOOL_MSG_TO_OVS_DRIVER, + SSS_TOOL_MSG_TO_ROCE_DRIVER, + SSS_TOOL_MSG_TO_TOE_DRIVER, + SSS_TOOL_MSG_TO_IOE_DRIVER, + SSS_TOOL_MSG_TO_FC_DRIVER, + SSS_TOOL_MSG_TO_VBS_DRIVER, + SSS_TOOL_MSG_TO_IPSEC_DRIVER, + SSS_TOOL_MSG_TO_VIRTIO_DRIVER, + SSS_TOOL_MSG_TO_MIGRATE_DRIVER, + SSS_TOOL_MSG_TO_PPA_DRIVER, + SSS_TOOL_MSG_TO_CUSTOM_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 11, + SSS_TOOL_MSG_TO_DRIVER_MAX = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 15, /* reserved */ +}; + +enum sss_tool_adm_msg_type { + SSS_TOOL_ADM_MSG_READ, + SSS_TOOL_ADM_MSG_WRITE +}; + +enum sss_tool_sm_cmd_type { + SSS_TOOL_SM_CMD_RD16 = 1, + SSS_TOOL_SM_CMD_RD32, + SSS_TOOL_SM_CMD_RD64_PAIR, + SSS_TOOL_SM_CMD_RD64, + SSS_TOOL_SM_CMD_RD32_CLEAR, + SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, + SSS_TOOL_SM_CMD_RD64_CLEAR +}; + +enum sss_tool_channel_type { + SSS_TOOL_CHANNEL_MBOX = 1, + SSS_TOOL_CHANNEL_ADM_MSG_BYPASS, + SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU, + SSS_TOOL_CHANNEL_CLP, +}; + +struct sss_tool_api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct 
sss_tool_api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct sss_tool_pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +struct sss_tool_ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +struct sss_tool_ffm_intr_tm_info { + struct sss_tool_ffm_intr_info intr_info; + u8 times; + u8 sec; + u8 min; + u8 hour; + u8 mday; + u8 mon; + u16 year; +}; + +struct sss_tool_ffm_record_info { + u32 ffm_num; + u32 last_err_csr_addr; + u32 last_err_csr_value; + struct sss_tool_ffm_intr_tm_info ffm[SSS_TOOL_FFM_RECORD_MAX]; +}; + +struct sss_tool_knl_dbg_info { + struct semaphore dbgtool_sem; + struct sss_tool_ffm_record_info *ffm; +}; + +struct sss_tool_msg_to_up { + u8 pf_id; + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct sss_tool_dbg_param { + union { + struct sss_tool_api_cmd_rd api_rd; + struct sss_tool_api_cmd_wr api_wr; + struct sss_tool_pf_dev_info *dev_info; + struct sss_tool_ffm_record_info *ffm_rd; + struct sss_tool_msg_to_up msg2up; + } param; + char chip_name[16]; +}; + +struct sss_tool_pf { + char name[IFNAMSIZ]; + char bus_info[SSS_TOOL_BUSINFO_LEN]; + u32 pf_type; +}; + +struct sss_tool_card_info { + struct sss_tool_pf pf[SSS_TOOL_PF_INFO_MAX]; + u32 pf_num; +}; + +struct sss_tool_pf_info { + u32 valid; + u32 pf_id; +}; + +struct sss_tool_cmd_chip_fault_stats { + u32 offset; + u8 chip_fault_stats[SSS_TOOL_DRV_BUF_SIZE_MAX]; +}; + +struct sss_tool_npu_msg { + u32 mod : 8; + u32 cmd : 8; + u32 ack_type : 3; + u32 direct_resp : 1; + u32 len : 12; +}; + +struct sss_tool_mpu_msg { + u32 channel : 8; + u32 mod : 8; + u32 cmd : 16; +}; + +struct sss_tool_msg { + char device_name[IFNAMSIZ]; + u32 module; + union { + u32 msg_formate; /* for driver */ + struct sss_tool_npu_msg npu_cmd; + struct sss_tool_mpu_msg 
mpu_cmd; + }; + u32 timeout; /* for mpu/npu cmd */ + u32 func_id; + u32 buf_in_size; + u32 buf_out_size; + void *in_buf; + void *out_buf; + int bus_num; + u8 port_id; + u8 rsvd1[3]; + u32 rsvd2[4]; +}; + +#endif /* SSS_TOOL_HW_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c new file mode 100644 index 0000000000000..d09d1cbc0b236 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_adapter_mgmt.h" +#include "sss_linux_kernel.h" +#include "sss_hw.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" +#include "sss_tool.h" + +#define SSS_TOOL_DEV_PATH "/dev/sssnic_nictool_dev" +#define SSS_TOOL_DEV_CLASS "sssnic_nictool_class" +#define SSS_TOOL_DEV_NAME "sssnic_nictool_dev" + +#define SSS_TOOL_CTRLQ_BUF_SIZE_MAX 2048U +#define SSS_TOOL_MSG_IN_SIZE_MAX (2048 * 1024) +#define SSS_TOOL_MSG_OUT_SIZE_MAX (2048 * 1024) +#define SSS_TOOL_BUF_SIZE_MAX (2048 * 1024) + +typedef int (*sss_tool_deal_handler_fun)(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len); + +struct sss_tool_deal_handler { + enum module_name msg_name; + sss_tool_deal_handler_fun func; +}; + +static int g_nictool_ref_cnt; + +static dev_t g_dev_id = {0}; + +static struct class *g_nictool_class; +static struct cdev g_nictool_cdev; + +static void *g_card_node_array[SSS_TOOL_CARD_MAX] = {0}; +void *g_card_va[SSS_TOOL_CARD_MAX] = {0}; +u64 g_card_pa[SSS_TOOL_CARD_MAX] = {0}; +int g_card_id; + +static int sss_tool_msg_to_nic(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int ret = -EINVAL; + void *uld_dev = NULL; + enum 
sss_service_type service_type; + struct sss_uld_info *uld_info = sss_get_uld_info(); + + service_type = tool_msg->module - SSS_TOOL_MSG_TO_SRV_DRV_BASE; + if (service_type >= SSS_SERVICE_TYPE_MAX) { + tool_err("Invalid input module id: %u\n", tool_msg->module); + return -EINVAL; + } + + uld_dev = sss_get_uld_dev(hal_dev, service_type); + if (!uld_dev) { + if (tool_msg->msg_formate == SSS_TOOL_GET_DRV_VERSION) + return 0; + + tool_err("Fail to get uld device\n"); + return -EINVAL; + } + + if (uld_info[service_type].ioctl) + ret = uld_info[service_type].ioctl(uld_dev, tool_msg->msg_formate, + in_buf, in_len, out_buf, out_len); + sss_uld_dev_put(hal_dev, service_type); + + return ret; +} + +void sss_tool_free_in_buf(void *hwdev, const struct sss_tool_msg *tool_msg, void *in_buf) +{ + if (!in_buf) + return; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) + sss_free_ctrlq_msg_buf(hwdev, in_buf); + else + kfree(in_buf); +} + +void sss_tool_free_out_buf(void *hwdev, struct sss_tool_msg *tool_msg, + void *out_buf) +{ + if (!out_buf) + return; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && + !tool_msg->npu_cmd.direct_resp) + sss_free_ctrlq_msg_buf(hwdev, out_buf); + else + kfree(out_buf); +} + +int sss_tool_alloc_in_buf(void *hwdev, struct sss_tool_msg *tool_msg, + u32 in_len, void **in_buf) +{ + void *msg_buf = NULL; + + if (!in_len) + return 0; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) { + struct sss_ctrl_msg_buf *cmd_buf = NULL; + + if (in_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) { + tool_err("Invalid ctrlq in len(%u) more than %u\n", + in_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX); + return -ENOMEM; + } + + cmd_buf = sss_alloc_ctrlq_msg_buf(hwdev); + if (!cmd_buf) { + tool_err("Fail to alloc ctrlq msg buf\n"); + return -ENOMEM; + } + *in_buf = (void *)cmd_buf; + cmd_buf->size = (u16)in_len; + } else { + if (in_len > SSS_TOOL_MSG_IN_SIZE_MAX) { + tool_err("Invalid in len(%u) more than %u\n", + in_len, SSS_TOOL_MSG_IN_SIZE_MAX); + return -ENOMEM; + } + msg_buf = 
kzalloc(in_len, GFP_KERNEL); + *in_buf = msg_buf; + } + + if (!(*in_buf)) { + tool_err("Fail to alloc in buf\n"); + return -ENOMEM; + } + + return 0; +} + +int sss_tool_alloc_out_buf(void *hwdev, struct sss_tool_msg *tool_msg, + u32 out_len, void **out_buf) +{ + if (!out_len) { + tool_info("out len is 0, need not alloc buf\n"); + return 0; + } + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && + !tool_msg->npu_cmd.direct_resp) { + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (out_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) { + tool_err("Invalid ctrlq out len(%u) more than %u\n", + out_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX); + return -ENOMEM; + } + + msg_buf = sss_alloc_ctrlq_msg_buf(hwdev); + *out_buf = (void *)msg_buf; + } else { + if (out_len > SSS_TOOL_MSG_OUT_SIZE_MAX) { + tool_err("Invalid out len(%u) more than %u\n", + out_len, SSS_TOOL_MSG_OUT_SIZE_MAX); + return -ENOMEM; + } + *out_buf = kzalloc(out_len, GFP_KERNEL); + } + if (!(*out_buf)) { + tool_err("Fail to alloc out buf\n"); + return -ENOMEM; + } + + return 0; +} + +int sss_tool_copy_to_user(struct sss_tool_msg *tool_msg, + u32 out_len, void *out_buf) +{ + void *out_msg = NULL; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && !tool_msg->npu_cmd.direct_resp) { + out_msg = ((struct sss_ctrl_msg_buf *)out_buf)->buf; + if (copy_to_user(tool_msg->out_buf, out_msg, out_len)) + return -EFAULT; + return 0; + } + + if (copy_to_user(tool_msg->out_buf, out_buf, out_len)) + return -EFAULT; + + return 0; +} + +static int sss_tool_alloc_buf(void *hwdev, struct sss_tool_msg *tool_msg, u32 in_len, + void **in_buf, u32 out_len, void **out_buf) +{ + int ret; + + ret = sss_tool_alloc_in_buf(hwdev, tool_msg, in_len, in_buf); + if (ret) { + tool_err("Fail to alloc tool msg in buf\n"); + return ret; + } + + if (copy_from_user(*in_buf, tool_msg->in_buf, in_len)) { + tool_err("Fail to copy tool_msg to in buf\n"); + sss_tool_free_in_buf(hwdev, tool_msg, *in_buf); + return -EFAULT; + } + + ret = sss_tool_alloc_out_buf(hwdev, tool_msg, 
out_len, out_buf); + if (ret) { + tool_err("Fail to alloc tool msg out buf\n"); + goto alloc_out_buf_err; + } + + return 0; + +alloc_out_buf_err: + sss_tool_free_in_buf(hwdev, tool_msg, *in_buf); + + return ret; +} + +static void sss_tool_free_buf(void *hwdev, struct sss_tool_msg *tool_msg, + void *in_buf, void *out_buf) +{ + sss_tool_free_out_buf(hwdev, tool_msg, out_buf); + sss_tool_free_in_buf(hwdev, tool_msg, in_buf); +} + +const struct sss_tool_deal_handler g_deal_msg_handle[] = { + {SSS_TOOL_MSG_TO_NPU, sss_tool_msg_to_npu}, + {SSS_TOOL_MSG_TO_MPU, sss_tool_msg_to_mpu}, + {SSS_TOOL_MSG_TO_SM, sss_tool_msg_to_sm}, + {SSS_TOOL_MSG_TO_HW_DRIVER, sss_tool_msg_to_hw}, + {SSS_TOOL_MSG_TO_NIC_DRIVER, sss_tool_msg_to_nic} +}; + +static int sss_tool_deal_cmd(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int ret = 0; + int index; + int msg_num = ARRAY_LEN(g_deal_msg_handle); + + for (index = 0; index < msg_num; index++) { + if (tool_msg->module != g_deal_msg_handle[index].msg_name) + continue; + + ret = g_deal_msg_handle[index].func(hal_dev, tool_msg, + in_buf, in_len, out_buf, out_len); + break; + } + + if (index == msg_num) + ret = sss_tool_msg_to_nic(hal_dev, tool_msg, + in_buf, in_len, out_buf, out_len); + + return ret; +} + +static struct sss_hal_dev *sss_tool_get_hal_dev_by_msg(struct sss_tool_msg *tool_msg) +{ + struct sss_hal_dev *hal_dev = NULL; + + if (tool_msg->module >= SSS_TOOL_MSG_TO_SRV_DRV_BASE && + tool_msg->module < SSS_TOOL_MSG_TO_DRIVER_MAX && + tool_msg->msg_formate != SSS_TOOL_GET_DRV_VERSION) { + hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name, + tool_msg->module - + SSS_TOOL_MSG_TO_SRV_DRV_BASE); + } else { + hal_dev = sss_get_lld_dev_by_chip_name(tool_msg->device_name); + if (!hal_dev) + hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name, + SSS_SERVICE_TYPE_MAX); + } + + if (tool_msg->module == SSS_TOOL_MSG_TO_NIC_DRIVER && + (tool_msg->msg_formate 
== SSS_TOOL_GET_XSFP_INFO || + tool_msg->msg_formate == SSS_TOOL_GET_XSFP_PRESENT)) + hal_dev = sss_get_lld_dev_by_chip_and_port(tool_msg->device_name, + tool_msg->port_id); + + return hal_dev; +} + +static int sss_tool_check_msg_valid(struct sss_tool_msg *tool_msg) +{ + if (tool_msg->buf_out_size > SSS_TOOL_BUF_SIZE_MAX || + tool_msg->buf_in_size > SSS_TOOL_BUF_SIZE_MAX) { + tool_err("Invalid in buf len: %u or out buf len: %u\n", + tool_msg->buf_in_size, tool_msg->buf_out_size); + return -EFAULT; + } + + return 0; +} + +static long sss_tool_msg_ioctl(unsigned long arg) +{ + int ret = 0; + u32 in_len = 0; + u32 expect_out_len = 0; + u32 out_len = 0; + void *in_buf = NULL; + void *out_buf = NULL; + struct sss_hal_dev *hal_dev = NULL; + struct sss_tool_msg tool_msg = {0}; + + if (copy_from_user(&tool_msg, (void *)arg, sizeof(tool_msg))) { + tool_err("Fail to copy msg from user space\n"); + return -EFAULT; + } + + if (sss_tool_check_msg_valid(&tool_msg)) { + tool_err("Fail to check msg valid\n"); + return -EFAULT; + } + + tool_msg.device_name[IFNAMSIZ - 1] = '\0'; + expect_out_len = tool_msg.buf_out_size; + in_len = tool_msg.buf_in_size; + + hal_dev = sss_tool_get_hal_dev_by_msg(&tool_msg); + if (!hal_dev) { + if (tool_msg.msg_formate != SSS_TOOL_DEV_NAME_TEST) + tool_err("Fail to find device %s for module %d\n", + tool_msg.device_name, tool_msg.module); + return -ENODEV; + } + + if (tool_msg.msg_formate == SSS_TOOL_DEV_NAME_TEST) + return 0; + + ret = sss_tool_alloc_buf(hal_dev->hwdev, &tool_msg, + in_len, &in_buf, expect_out_len, &out_buf); + if (ret) { + tool_err("Fail to alloc cmd buf\n"); + goto out_free_lock; + } + + out_len = expect_out_len; + + ret = sss_tool_deal_cmd(hal_dev, &tool_msg, in_buf, in_len, out_buf, &out_len); + if (ret) { + tool_err("Fail to execute cmd, module: %u, ret: %d.\n", tool_msg.module, ret); + goto out_free_buf; + } + + if (out_len > expect_out_len) { + ret = -EFAULT; + tool_err("Fail to execute cmd, expected out len from user: %u, out 
len: %u\n", + expect_out_len, out_len); + goto out_free_buf; + } + + ret = sss_tool_copy_to_user(&tool_msg, out_len, out_buf); + if (ret) + tool_err("Fail to copy return information to user space\n"); + +out_free_buf: + sss_tool_free_buf(hal_dev->hwdev, &tool_msg, in_buf, out_buf); + +out_free_lock: + lld_dev_put(hal_dev); + return (long)ret; +} + +static long sss_tool_knl_ffm_info_rd(struct sss_tool_dbg_param *dbg_param, + struct sss_tool_knl_dbg_info *dbg_info) +{ + if (copy_to_user(dbg_param->param.ffm_rd, dbg_info->ffm, + (unsigned int)sizeof(*dbg_param->param.ffm_rd))) { + tool_err("Fail to copy ffm_info to user space\n"); + return -EFAULT; + } + + return 0; +} + +static struct sss_card_node *sss_tool_find_card_node(char *chip_name) +{ + int i; + struct sss_card_node *card_node = NULL; + + for (i = 0; i < SSS_TOOL_CARD_MAX; i++) { + card_node = (struct sss_card_node *)g_card_node_array[i]; + if (!card_node) + continue; + if (!strncmp(chip_name, card_node->chip_name, IFNAMSIZ)) + break; + } + if (i == SSS_TOOL_CARD_MAX || !card_node) + return NULL; + + g_card_id = i; + + return card_node; +} + +static long sss_tool_dbg_ioctl(unsigned int cmd_type, unsigned long arg) +{ + struct sss_tool_knl_dbg_info *dbg_info = NULL; + struct sss_card_node *card_node = NULL; + struct sss_tool_dbg_param param = {0}; + long ret; + + if (copy_from_user(¶m, (void *)arg, sizeof(param))) { + tool_err("Fail to copy msg param from user\n"); + return -EFAULT; + } + + sss_hold_chip_node(); + + card_node = sss_tool_find_card_node(param.chip_name); + if (!card_node) { + sss_put_chip_node(); + tool_err("Fail to find card node %s\n", param.chip_name); + return -EFAULT; + } + + dbg_info = (struct sss_tool_knl_dbg_info *)card_node->dbgtool_info; + + down(&dbg_info->dbgtool_sem); + + if (cmd_type == SSS_TOOL_DBG_CMD_FFM_RD) { + ret = sss_tool_knl_ffm_info_rd(¶m, dbg_info); + } else if (cmd_type == SSS_TOOL_DBG_CMD_MSG_2_UP) { + tool_info("cmd(0x%x) not suppose.\n", cmd_type); + ret = 0; + } 
else { + tool_err("Fail to execute cmd(0x%x) ,it is not support\n", cmd_type); + ret = -EFAULT; + } + + up(&dbg_info->dbgtool_sem); + + sss_put_chip_node(); + + return ret; +} + +static int sss_tool_release(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static int sss_tool_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t sss_tool_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t sss_tool_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static long sss_tool_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + unsigned int cmd_type = _IOC_NR(cmd); + + if (cmd_type == SSS_TOOL_CMD_TYPE) + return sss_tool_msg_ioctl(arg); + + return sss_tool_dbg_ioctl(cmd_type, arg); +} + +static int sss_tool_mem_mmap(struct file *filp, struct vm_area_struct *mem_area) +{ + unsigned long mem_size = mem_area->vm_end - mem_area->vm_start; + phys_addr_t offset = (phys_addr_t)mem_area->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (mem_size > SSS_TOOL_MEM_MAP_SIZE) { + tool_err("Fail to map mem, mem_size :%ld, alloc size: %ld\n", + mem_size, SSS_TOOL_MEM_MAP_SIZE); + return -EAGAIN; + } + + phy_addr = offset ? 
offset : g_card_pa[g_card_id]; + if (!phy_addr) { + tool_err("Fail to map mem, card_id = %d phy_addr is 0\n", g_card_id); + return -EAGAIN; + } + + mem_area->vm_page_prot = pgprot_noncached(mem_area->vm_page_prot); + if (remap_pfn_range(mem_area, mem_area->vm_start, (phy_addr >> PAGE_SHIFT), + mem_size, mem_area->vm_page_prot)) { + tool_err("Fail to remap pfn range.\n"); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations sss_tool_file_ops = { + .owner = THIS_MODULE, + .release = sss_tool_release, + .open = sss_tool_open, + .read = sss_tool_read, + .write = sss_tool_write, + .unlocked_ioctl = sss_tool_unlocked_ioctl, + .mmap = sss_tool_mem_mmap, +}; + +static struct sss_tool_knl_dbg_info *sss_tool_alloc_dbg_info(void *hwdev) +{ + struct sss_tool_knl_dbg_info *dbg_info = NULL; + + dbg_info = (struct sss_tool_knl_dbg_info *) + kzalloc(sizeof(struct sss_tool_knl_dbg_info), GFP_KERNEL); + if (!dbg_info) + return NULL; + + dbg_info->ffm = (struct sss_tool_ffm_record_info *) + kzalloc(sizeof(*dbg_info->ffm), GFP_KERNEL); + if (!dbg_info->ffm) { + tool_err("Fail to alloc ffm_record_info\n"); + kfree(dbg_info); + return NULL; + } + + return dbg_info; +} + +static void sss_tool_free_dbg_info(struct sss_tool_knl_dbg_info *dbg_info) +{ + kfree(dbg_info->ffm); + kfree(dbg_info); +} + +static int sss_tool_get_node_id(struct sss_card_node *card_node, int *node_id) +{ + int ret; + + ret = sscanf(card_node->chip_name, SSS_CHIP_NAME "%d", node_id); + if (ret < 0) { + tool_err("Fail to get card id\n"); + return -ENOMEM; + } + + return 0; +} + +static int sss_tool_add_func_to_card_node(void *hwdev, struct sss_card_node *card_node) +{ + int func_id = sss_get_func_id(hwdev); + struct sss_tool_knl_dbg_info *dbg_info = NULL; + int ret; + int node_id; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = hwdev; + + if (card_node->func_num++) + return 0; + + dbg_info = sss_tool_alloc_dbg_info(hwdev); + if (!dbg_info) { + 
ret = -ENOMEM; + tool_err("Fail to alloc dbg_info\n"); + goto alloc_dbg_info_err; + } + card_node->dbgtool_info = dbg_info; + sema_init(&dbg_info->dbgtool_sem, 1); + + ret = sss_tool_get_node_id(card_node, &node_id); + if (ret) { + tool_err("Fail to add node to global array\n"); + goto get_node_id_err; + } + g_card_node_array[node_id] = card_node; + + return 0; + +get_node_id_err: + sss_tool_free_dbg_info(dbg_info); + card_node->dbgtool_info = NULL; + +alloc_dbg_info_err: + card_node->func_num--; + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = NULL; + + return ret; +} + +static void sss_tool_del_func_in_card_node(void *hwdev, struct sss_card_node *card_node) +{ + struct sss_tool_knl_dbg_info *dbg_info = card_node->dbgtool_info; + int func_id = sss_get_func_id(hwdev); + int node_id; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = NULL; + + if (--card_node->func_num) + return; + + sss_tool_get_node_id(card_node, &node_id); + if (node_id < SSS_TOOL_CARD_MAX) + g_card_node_array[node_id] = NULL; + + sss_tool_free_dbg_info(dbg_info); + card_node->dbgtool_info = NULL; + + if (node_id < SSS_TOOL_CARD_MAX) + (void)sss_tool_free_card_mem(node_id); +} + +static int sss_tool_create_dev(void) +{ + int ret; + struct device *pdevice = NULL; + + ret = alloc_chrdev_region(&g_dev_id, 0, 1, SSS_TOOL_DEV_NAME); + if (ret) { + tool_err("Fail to alloc sssnic_nictool_dev region(0x%x)\n", ret); + return ret; + } + +#ifdef CLASS_CREATE_WITH_ONE_PARAM + g_nictool_class = class_create(SSS_TOOL_DEV_CLASS); +#else + g_nictool_class = class_create(THIS_MODULE, SSS_TOOL_DEV_CLASS); +#endif + if (IS_ERR(g_nictool_class)) { + tool_err("Fail to create sssnic_nictool_class\n"); + ret = -EFAULT; + goto create_class_err; + } + + cdev_init(&g_nictool_cdev, &sss_tool_file_ops); + + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + tool_err("Fail to add sssnic_nictool_dev to operating system 
(0x%x)\n", ret); + goto add_cdev_err; + } + + pdevice = device_create(g_nictool_class, NULL, g_dev_id, NULL, SSS_TOOL_DEV_NAME); + if (IS_ERR(pdevice)) { + tool_err("Fail to create sssnic_nictool_dev on operating system\n"); + ret = -EFAULT; + goto create_device_err; + } + + tool_info("Success to register sssnic_nictool_dev to system\n"); + + return 0; + +create_device_err: + cdev_del(&g_nictool_cdev); + +add_cdev_err: + class_destroy(g_nictool_class); + +create_class_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +static void sss_tool_destroy_dev(void) +{ + device_destroy(g_nictool_class, g_dev_id); + cdev_del(&g_nictool_cdev); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + tool_info("Success to unregister sssnic_nictool_dev to system\n"); +} + +int sss_tool_init(void *hwdev, void *chip_node) +{ + struct sss_card_node *card_node = (struct sss_card_node *)chip_node; + int ret; + + ret = sss_tool_add_func_to_card_node(hwdev, card_node); + if (ret) { + tool_err("Fail to add func to card node\n"); + return ret; + } + + if (g_nictool_ref_cnt++) { + tool_info("sssnic_nictool_dev has already create\n"); + return 0; + } + + ret = sss_tool_create_dev(); + if (ret) { + tool_err("Fail to create sssnic_nictool_dev\n"); + goto out; + } + + return 0; + +out: + g_nictool_ref_cnt--; + sss_tool_del_func_in_card_node(hwdev, card_node); + + return ret; +} + +void sss_tool_uninit(void *hwdev, void *chip_node) +{ + struct sss_card_node *chip_info = (struct sss_card_node *)chip_node; + + sss_tool_del_func_in_card_node(hwdev, chip_info); + + if (g_nictool_ref_cnt == 0) + return; + + if (--g_nictool_ref_cnt) + return; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) { + tool_err("Fail to uninit sssnictool, tool class is NULL.\n"); + return; + } + + sss_tool_destroy_dev(); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c 
b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c new file mode 100644 index 0000000000000..8f8fb6d364d49 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_linux_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_tool.h" +#include "sss_csr.h" +#include "sss_adapter_mgmt.h" +#include "sss_mgmt_info.h" +#include "sss_pci_global.h" +#include "sss_hwif_api.h" + +typedef int (*sss_tool_hw_cmd_func)(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct sss_tool_hw_cmd_handle { + enum sss_tool_driver_cmd_type cmd_type; + sss_tool_hw_cmd_func func; +}; + +int sss_tool_get_func_type(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(u16) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16)); + return -EFAULT; + } + + *(u16 *)buf_out = (u16)sss_get_func_type(SSS_TO_HWDEV(hal_dev)); + + return 0; +} + +int sss_tool_get_func_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(u16) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16)); + return -EFAULT; + } + + *(u16 *)buf_out = (u16)sss_get_func_id(SSS_TO_HWDEV(hal_dev)); + + return 0; +} + +int sss_tool_get_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = hal_dev->hwdev; + struct sss_card_node *node = hwdev->chip_node; + struct sss_hw_stats *stats = buf_out; + struct sss_hw_stats *tmp = stats; + + if (!hwdev) + return -EINVAL; + + if (*out_size != sizeof(struct 
sss_hw_stats) || !stats) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sss_hw_stats)); + return -EFAULT; + } + + memcpy(stats, &hwdev->hw_stats, sizeof(struct sss_hw_stats)); + + atomic_set(&tmp->nic_ucode_event_stats[SSS_CHN_BUSY], + atomic_read(&node->channel_timeout_cnt)); + + return 0; +} + +static int sss_tool_clear_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = hal_dev->hwdev; + struct sss_card_node *node = hwdev->chip_node; + + memset((void *)&hwdev->hw_stats, 0, sizeof(struct sss_hw_stats)); + memset((void *)hwdev->chip_fault_stats, 0, SSS_TOOL_CHIP_FAULT_SIZE); + + if (SSS_SUPPORT_CHANNEL_DETECT(hwdev) && atomic_read(&node->channel_timeout_cnt)) { + atomic_set(&node->channel_timeout_cnt, 0); + hwdev->aeq_busy_cnt = 0; +#if !defined(__UEFI__) && !defined(VMWARE) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSSNIC_CHANNEL_DETECT_PERIOD)); +#endif + } + + if (*out_size != sizeof(struct sss_hw_stats)) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sss_hw_stats)); + return -EFAULT; + } + + return 0; +} + +static int sss_tool_get_self_test_result(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 val; + + if (*out_size != sizeof(u32) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + + val = sss_chip_read_reg(SSS_TO_HWIF(hal_dev->hwdev), SSS_MGMT_HEALTH_STATUS_ADDR); + *(u32 *)buf_out = val; + + return 0; +} + +static void sss_tool_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset) +{ + u32 size; + + if (offset >= SSS_TOOL_CHIP_FAULT_SIZE) { + tool_err("Invalid chip offset value: %d\n", offset); + return; + } + + size = min(SSS_TOOL_DRV_BUF_SIZE_MAX, SSS_TOOL_CHIP_FAULT_SIZE - (int)offset); + 
memcpy(chip_fault_stats, ((struct sss_hwdev *)hwdev)->chip_fault_stats + + offset, size); +} + +static int sss_tool_get_chip_faults_stats(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 offset = 0; + struct sss_tool_cmd_chip_fault_stats *info = NULL; + + if (!buf_in || !buf_out || *out_size != sizeof(*info) || + in_size != sizeof(*info)) { + tool_err("Invalid out_size from user: %d, expect: %lu\n", *out_size, sizeof(*info)); + return -EFAULT; + } + info = (struct sss_tool_cmd_chip_fault_stats *)buf_in; + offset = info->offset; + + info = (struct sss_tool_cmd_chip_fault_stats *)buf_out; + sss_tool_get_chip_fault_stats(hal_dev->hwdev, + info->chip_fault_stats, offset); + + return 0; +} + +static int sss_tool_get_single_card_info(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (!buf_out || *out_size != sizeof(struct sss_tool_card_info)) { + tool_err("Invalid buf out is NULL, or out_size != %lu\n", + sizeof(struct sss_tool_card_info)); + return -EINVAL; + } + + sss_get_card_info(hal_dev->hwdev, buf_out); + + return 0; +} + +static int sss_tool_is_driver_in_vm(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_out || (*out_size != sizeof(u8))) { + tool_err("Invalid parameter, buf_out is NULL or out_size != %lu\n", sizeof(u8)); + return -EINVAL; + } + + *((u8 *)buf_out) = sss_is_in_host() ? 
0 : 1; + + return 0; +} + +static int sss_tool_get_all_chip_id_cmd(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(struct sss_card_id) || !buf_out) { + tool_err("Invalid parameter: out_size %u, expect %lu\n", + *out_size, sizeof(struct sss_card_id)); + return -EFAULT; + } + + sss_get_all_chip_id(buf_out); + + return 0; +} + +static int sss_tool_get_card_id(char *dev_name, int *id) +{ + int ret; + + ret = sscanf(dev_name, SSS_CHIP_NAME "%d", id); + if (ret != 1) { + tool_err("Fail to get card id\n"); + return -EINVAL; + } + + if (*id >= SSS_TOOL_CARD_MAX || *id < 0) { + tool_err("Invalid chip id %d, out of range: [0-%d]\n", *id, SSS_TOOL_CARD_MAX - 1); + return -EINVAL; + } + + return 0; +} + +static void sss_tool_get_pf_dev_info_param(struct sss_tool_pf_dev_info *dev_info, int card_id, + void **func_array) +{ + u32 func_id; + void *hwdev = NULL; + struct pci_dev *pdev = NULL; + + for (func_id = 0; func_id < SSS_TOOL_PF_DEV_MAX; func_id++) { + hwdev = (void *)func_array[func_id]; + + dev_info[func_id].phy_addr = g_card_pa[card_id]; + + if (!hwdev) { + dev_info[func_id].bar0_size = 0; + dev_info[func_id].bus = 0; + dev_info[func_id].slot = 0; + dev_info[func_id].func = 0; + } else { + pdev = (struct pci_dev *)sss_get_pcidev_hdl(hwdev); + dev_info[func_id].bar0_size = pci_resource_len(pdev, 0); + dev_info[func_id].bus = pdev->bus->number; + dev_info[func_id].slot = PCI_SLOT(pdev->devfn); + dev_info[func_id].func = PCI_FUNC(pdev->devfn); + } + } +} + +static int sss_tool_get_card_adm_mem(int card_id) +{ + int i; + unsigned char *card_va = NULL; + + g_card_id = card_id; + if (!g_card_va[card_id]) { + g_card_va[card_id] = + (void *)__get_free_pages(GFP_KERNEL, SSS_TOOL_PAGE_ORDER); + if (!g_card_va[card_id]) { + tool_err("Fail to alloc adm memory for card %d!\n", card_id); + return -EFAULT; + } + + memset(g_card_va[card_id], 0, PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER)); + + g_card_pa[card_id] = 
virt_to_phys(g_card_va[card_id]); + if (!g_card_pa[card_id]) { + tool_err("Invalid phy addr for card %d is 0\n", card_id); + free_pages((unsigned long)g_card_va[card_id], SSS_TOOL_PAGE_ORDER); + g_card_va[card_id] = NULL; + return -EFAULT; + } + + card_va = g_card_va[card_id]; + for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(card_va)); + card_va += PAGE_SIZE; + } + } + + return 0; +} + +static int sss_tool_get_pf_dev_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int id; + int ret; + struct sss_tool_pf_dev_info *info = buf_out; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + if (!buf_out || *out_size != sizeof(struct sss_tool_pf_dev_info) * SSS_TOOL_PF_DEV_MAX) { + tool_err("Invalid param: out_size %u, expect %lu\n", + *out_size, sizeof(info) * SSS_TOOL_PF_DEV_MAX); + return -EFAULT; + } + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_tool_get_pf_dev_info_param(info, id, node->func_handle_array); + + ret = sss_tool_get_card_adm_mem(id); + if (ret) { + tool_err("Fail to get adm memory for userspace %s\n", node->chip_name); + return -EFAULT; + } + + return 0; +} + +long sss_tool_free_card_mem(int id) +{ + unsigned char *va = NULL; + int i; + + if (!g_card_va[id]) + return 0; + + va = g_card_va[id]; + for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(va)); + va += PAGE_SIZE; + } + + free_pages((unsigned long)g_card_va[id], SSS_TOOL_PAGE_ORDER); + g_card_va[id] = NULL; + g_card_pa[id] = 0; + + return 0; +} + +static int sss_tool_free_all_card_mem(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int id; + int ret; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_tool_free_card_mem(id); + + return 0; +} + +static int 
sss_tool_check_card_info_param(char *dev_name, const void *buf_out, u32 out_size) +{ + int ret; + + if (!buf_out || out_size != sizeof(struct sss_card_func_info)) { + tool_err("Invalid out_size %u, expect %lu\n", + out_size, sizeof(struct sss_card_func_info)); + return -EINVAL; + } + + ret = memcmp(dev_name, SSS_CHIP_NAME, strlen(SSS_CHIP_NAME)); + if (ret) { + tool_err("Invalid chip name %s\n", dev_name); + return ret; + } + + return 0; +} + +static int sss_tool_get_card_func_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int ret; + int id = 0; + struct sss_card_func_info *info = buf_out; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + ret = sss_tool_check_card_info_param(node->chip_name, buf_out, *out_size); + if (ret) + return ret; + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_get_card_func_info(node->chip_name, info); + + if (!info->pf_num) { + tool_err("Fail to get card func info, chip name %s\n", node->chip_name); + return -EFAULT; + } + + ret = sss_tool_get_card_adm_mem(id); + if (ret) { + tool_err("Fail to get adm memory for userspace %s\n", node->chip_name); + return -EFAULT; + } + + info->usr_adm_pa = g_card_pa[id]; + + return 0; +} + +static int sss_tool_get_pf_cap_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = NULL; + struct sss_card_node *node = sss_get_card_node(hal_dev); + struct sss_svc_cap_info *in_info = (struct sss_svc_cap_info *)buf_in; + struct sss_svc_cap_info *out_info = (struct sss_svc_cap_info *)buf_out; + + if (*out_size != sizeof(struct sss_svc_cap_info) || + in_size != sizeof(struct sss_svc_cap_info) || + !buf_in || !buf_out) { + tool_err("Invalid out_size %u, in_size: %u, expect %lu\n", + *out_size, in_size, sizeof(struct sss_svc_cap_info)); + return -EINVAL; + } + + if (in_info->func_id >= SSS_MAX_FUNC) { + tool_err("Invalid func id: %u, 
max_num: %u\n", + in_info->func_id, SSS_MAX_FUNC); + return -EINVAL; + } + + sss_hold_chip_node(); + hwdev = (struct sss_hwdev *)(node->func_handle_array)[in_info->func_id]; + if (!hwdev) { + sss_put_chip_node(); + return -EINVAL; + } + + memcpy(&out_info->cap, SSS_TO_SVC_CAP(hwdev), sizeof(struct sss_service_cap)); + sss_put_chip_node(); + + return 0; +} + +static int sss_tool_get_hw_drv_version(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int ret; + struct sss_tool_drv_version_info *info = buf_out; + + if (!buf_out || *out_size != sizeof(*info)) { + tool_err("Invalid param, buf_out is NULL or out_size:%u, expect: %lu\n", + *out_size, sizeof(*info)); + return -EINVAL; + } + + ret = snprintf(info->ver, sizeof(info->ver), "%s %s", SSS_DRV_VERSION, + __TIME_STR__); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static int sss_tool_get_pf_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_tool_pf_info *info = NULL; + struct sss_card_node *node = sss_get_card_node(hal_dev); + u32 port_id; + int ret; + + if (!node) + return -ENODEV; + + if (!buf_out || (*out_size != sizeof(*info)) || !buf_in || in_size != sizeof(port_id)) { + tool_err("Invalid out_size from user: %u, expect: %lu, in_size:%u\n", + *out_size, sizeof(*info), in_size); + return -EINVAL; + } + + port_id = *((u32 *)buf_in); + info = (struct sss_tool_pf_info *)buf_out; + + ret = sss_get_pf_id(node, port_id, &info->pf_id, &info->valid); + if (ret != 0) + return ret; + + *out_size = sizeof(*info); + + return 0; +} + +struct sss_tool_hw_cmd_handle g_hw_cmd_handle[] = { + {SSS_TOOL_FUNC_TYPE, sss_tool_get_func_type}, + {SSS_TOOL_GET_FUNC_IDX, sss_tool_get_func_id}, + {SSS_TOOL_GET_CHIP_INFO, sss_tool_get_card_func_info}, + {SSS_TOOL_GET_DRV_VERSION, sss_tool_get_hw_drv_version}, + {SSS_TOOL_GET_PF_ID, sss_tool_get_pf_id}, + {SSS_TOOL_GET_FUNC_CAP, sss_tool_get_pf_cap_info}, + 
{SSS_TOOL_GET_SELF_TEST_RES, sss_tool_get_self_test_result}, + {SSS_TOOL_GET_CHIP_ID, sss_tool_get_all_chip_id_cmd}, + {SSS_TOOL_GET_PF_DEV_INFO, sss_tool_get_pf_dev_info}, + {SSS_TOOL_IS_DRV_IN_VM, sss_tool_is_driver_in_vm}, + {SSS_TOOL_CMD_FREE_MEM, sss_tool_free_all_card_mem}, + {SSS_TOOL_GET_CHIP_FAULT_STATS, (sss_tool_hw_cmd_func)sss_tool_get_chip_faults_stats}, + {SSS_TOOL_GET_SINGLE_CARD_INFO, (sss_tool_hw_cmd_func)sss_tool_get_single_card_info}, + {SSS_TOOL_GET_HW_STATS, (sss_tool_hw_cmd_func)sss_tool_get_hw_driver_stats}, + {SSS_TOOL_CLEAR_HW_STATS, sss_tool_clear_hw_driver_stats}, +}; + +int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int id; + int ret = 0; + int cmd_num = ARRAY_LEN(g_hw_cmd_handle); + enum sss_tool_driver_cmd_type cmd = + (enum sss_tool_driver_cmd_type)(tool_msg->msg_formate); + + for (id = 0; id < cmd_num; id++) { + if (cmd == g_hw_cmd_handle[id].cmd_type) { + ret = g_hw_cmd_handle[id].func + (hal_dev, buf_in, in_size, buf_out, out_size); + break; + } + } + + if (id == cmd_num) { + tool_err("Fail to send msg to hw, cmd: %d out of range\n", cmd); + return -EINVAL; + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h new file mode 100644 index 0000000000000..d02af2fe52c1c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_SDK_H +#define SSS_TOOL_SDK_H + +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" +#include "sss_hw.h" + +long sss_tool_free_card_mem(int id); + +int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c 
b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c new file mode 100644 index 0000000000000..549eb928f5c40 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_hwdev.h" +#include "sss_hwif_adm.h" +#include "sss_tool_comm.h" +#include "sss_tool_sm.h" + +#define SSS_TOOL_CHIP_ACK 1 +#define SSS_TOOL_CHIP_NOACK 0 + +#define SSS_TOOL_SM_CHIP_OP_READ 0x2 +#define SSS_TOOL_SM_CHIP_OP_READ_CLEAR 0x6 + +#define SSS_TOOL_BIT_32 32 + +struct sss_tool_sm_in { + int node; + int id; + int instance; +}; + +struct sss_tool_sm_out { + u64 val1; + u64 val2; +}; + +union sss_tool_sm_chip_request_head { + struct { + u32 pad:15; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct sss_tool_sm_chip_request { + u32 extra; + union sss_tool_sm_chip_request_head head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +/* counter read response union */ +union sss_tool_chip_rd_response { + struct { + u32 value1:16; + u32 pad0:16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1:20; + u32 pad0:12; + u32 value2:12; + u32 pad1:20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; +}; + +typedef int (*sss_tool_sm_handler_func)(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf); + +struct sss_tool_sm_handler { + enum sss_tool_sm_cmd_type msg_name; + sss_tool_sm_handler_func sm_func; +}; + +static void sss_tool_sm_read_msg_create(struct sss_tool_sm_chip_request *request, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + request->head.value = 0; + request->head.bs.op_id = 
op_id; + request->head.bs.ack = ack; + request->head.bs.instance = instance_id; + request->head.value = HTONL(request->head.value); + + request->initial = init_val; + request->ctr_id = ctr_id; + request->ctr_id = HTONL(request->ctr_id); +} + +static void sss_tool_sm_node_htonl(u32 *node, u32 len) +{ + u32 *new_node = node; + u32 i; + + for (i = 0; i < len; i++) { + *new_node = HTONL(*new_node); + new_node++; + } +} + +static int sss_tool_sm_adm_msg_rd(void *hwdev, u32 id, u8 instance, + u8 node, union sss_tool_chip_rd_response *rsp, u8 opcode) +{ + struct sss_tool_sm_chip_request req = {0}; + int ret; + + if (!hwdev) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) { + tool_err("Fail to read sm data, device not support adm msg\n"); + return -EPERM; + } + + sss_tool_sm_read_msg_create(&req, instance, opcode, + SSS_TOOL_CHIP_ACK, id, 0); + + ret = sss_adm_msg_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)rsp, + (unsigned short)sizeof(*rsp)); + if (ret) { + tool_err("Fail to read sm data from adm msg, err(%d)\n", ret); + return ret; + } + + sss_tool_sm_node_htonl((u32 *)rsp, sizeof(*rsp) / sizeof(u32)); + + return 0; +} + +static int sss_tool_sm_msg_rd16(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u16 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 16 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss16_rsp.value1; +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u32 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 
= rsp.bs_ss32_rsp.value1; +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd32_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u32 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss32_rsp.value1; + +out: + out_buf->val1 = val1; + return ret; +} + +static int sss_tool_sm_msg_rd128(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + if ((id & 0x1) != 0) { + tool_err("Invalid id(%u), It is odd number\n", id); + val1 = ~0; + val2 = ~0; + ret = -EINVAL; + goto out; + } + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 128 bits\n"); + val1 = ~0; + val2 = ~0; + goto out; + } + + sss_tool_sm_node_htonl((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l; + val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l; + +out: + out_buf->val1 = val1; + out_buf->val2 = val2; + + return ret; +} + +static int sss_tool_sm_msg_rd128_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + if ((id & 0x1) != 0) { + tool_err("Invalid id(%u), It is odd number\n", id); + val1 = ~0; + val2 = ~0; + ret = -EINVAL; + goto out; + } + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 128 bits\n"); + val1 = ~0; + val2 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | 
rsp.bs_bp64_rsp.val1_l; + val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l; + +out: + out_buf->val1 = val1; + out_buf->val2 = val2; + + return ret; +} + +static int sss_tool_sm_msg_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 64 bits\n"); + val1 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2; + +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd64_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 64 bits\n"); + val1 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2; + +out: + out_buf->val1 = val1; + + return ret; +} + +const struct sss_tool_sm_handler g_sm_cmd_handle[] = { + {SSS_TOOL_SM_CMD_RD16, sss_tool_sm_msg_rd16}, + {SSS_TOOL_SM_CMD_RD32, sss_tool_sm_msg_rd32}, + {SSS_TOOL_SM_CMD_RD32_CLEAR, sss_tool_sm_msg_rd32_clear}, + {SSS_TOOL_SM_CMD_RD64, sss_tool_sm_msg_rd64}, + {SSS_TOOL_SM_CMD_RD64_CLEAR, sss_tool_sm_msg_rd64_clear}, + {SSS_TOOL_SM_CMD_RD64_PAIR, sss_tool_sm_msg_rd128}, + {SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, sss_tool_sm_msg_rd128_clear} +}; + +int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int index; + int ret = 0; + int cmd_num = ARRAY_LEN(g_sm_cmd_handle); + u32 msg_formate = msg->msg_formate; + struct sss_tool_sm_in *sm_in = in_buf; + struct sss_tool_sm_out *sm_out = out_buf; + + if (!in_buf || 
!out_buf || !out_len) { + tool_err("Invalid in_buf or out buf param\n"); + return -EINVAL; + } + + if (in_len != sizeof(*sm_in) || *out_len != sizeof(*sm_out)) { + tool_err("Invalid out buf size :%u, in buf size: %u\n", + *out_len, in_len); + return -EINVAL; + } + + for (index = 0; index < cmd_num; index++) { + if (msg_formate != g_sm_cmd_handle[index].msg_name) + continue; + + ret = g_sm_cmd_handle[index].sm_func(hal_dev->hwdev, (u32)sm_in->id, + (u8)sm_in->instance, (u8)sm_in->node, sm_out); + break; + } + + if (index == cmd_num) { + tool_err("Fail to execute msg %d,could not find callback\n", msg_formate); + return -EINVAL; + } + + if (ret != 0) + tool_err("Fail to get sm information, id:%u, instance:%u, node:%u, msg:%d\n", + sm_in->id, sm_in->instance, sm_in->node, msg_formate); + + *out_len = sizeof(*sm_out); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h new file mode 100644 index 0000000000000..7c32ebdf2f4d3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_SM_H +#define SSS_TOOL_SM_H +#include "sss_pci_global.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) | \ + (((x) & 0x0000ff00) << 8) | \ + (((x) & 0x00ff0000) >> 8) | \ + (((x) & 0xff000000) >> 24)) +#endif + +int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h new file mode 100644 index 0000000000000..4a9dd7eee1ad6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* 
Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_AEQ_H +#define SSS_HW_AEQ_H + +enum sss_aeq_hw_event { + SSS_HW_FROM_INT = 0, + SSS_MBX_FROM_FUNC = 1, + SSS_MSG_FROM_MGMT = 2, + SSS_ADM_RSP = 3, + SSS_ADM_MSG_STS = 4, + SSS_MBX_SEND_RSLT = 5, + SSS_AEQ_EVENT_MAX +}; + +enum sss_aeq_sw_event { + SSS_STL_EVENT = 0, + SSS_STF_EVENT = 1, + SSS_AEQ_SW_EVENT_MAX +}; + +enum sss_ucode_event_type { + SSS_INTERN_ERR = 0x0, + SSS_CHN_BUSY = 0x7, + SSS_ERR_MAX = 0x8, +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h new file mode 100644 index 0000000000000..7626ec44b968e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_CEQ_H +#define SSS_HW_CEQ_H + +enum sss_ceq_event { + SSS_NIC_CTRLQ = 0x3, + SSS_NIC_SQ, + SSS_NIC_RQ, + SSS_CEQ_EVENT_MAX, +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h new file mode 100644 index 0000000000000..aef21aa49b288 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_COMMON_H +#define SSS_HW_COMMON_H + +#include + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN + +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +enum sss_func_type { + SSS_FUNC_TYPE_PF, + SSS_FUNC_TYPE_VF, + SSS_FUNC_TYPE_PPF, + SSS_FUNC_TYPE_UNKNOWN, +}; + +struct sss_dma_addr_align { + u32 real_size; + + void *origin_vaddr; + 
dma_addr_t origin_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +enum sss_process_ret { + SSS_PROCESS_OK = 0, + SSS_PROCESS_DOING = 1, + SSS_PROCESS_ERR = 2, +}; + +struct sss_sge { + u32 high_addr; + u32 low_addr; + u32 len; +}; + +typedef enum sss_process_ret(*sss_wait_handler_t)(void *priv_data); + +/* * + * sssnic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +static inline void sss_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/* * + * sss_cpu_to_be32 - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + */ +static inline void sss_be32_to_cpu(void *data, int len) +{ + int i; + int data_len; + u32 *array = data; + + if (!data) + return; + + data_len = len / sizeof(u32); + + for (i = 0; i < data_len; i++) { + *array = be32_to_cpu(*array); + array++; + } +} + +/* * + * sss_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + */ +static inline void sss_set_sge(struct sss_sge *sge, dma_addr_t addr, int len) +{ + sge->high_addr = upper_32_bits(addr); + sge->low_addr = lower_32_bits(addr); + sge->len = len; +} + +#define sss_hw_be32(val) (val) +#define sss_hw_cpu32(val) (val) +#define sss_hw_cpu16(val) (val) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h new file mode 100644 index 0000000000000..71921daa24526 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies 
Co., Ltd */ + +#ifndef SSS_HW_CTRLQ_H +#define SSS_HW_CTRLQ_H + +#include +#include + +struct sss_ctrl_msg_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; + + /* Usage count, USERS DO NOT USE */ + atomic_t ref_cnt; +}; + +/** + * @brief sss_alloc_ctrlq_msg_buf - alloc ctrlq msg buffer + * @param hwdev: device pointer to hwdev + * @retval non-zero: success + * @retval null: failure + **/ +struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev); + +/** + * @brief sss_free_ctrlq_msg_buf - free ctrlq msg buffer + * @param hwdev: device pointer to hwdev + * @param msg_buf: buffer to free + **/ +void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf); + +/** + * @brief sss_ctrlq_direct_reply - ctrlq direct message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param in_buf: message buffer in + * @param out_param: message out + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief sss_ctrlq_detail_reply - ctrlq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param in_buf: message buffer in + * @param out_buf: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h new file mode 100644 index 0000000000000..362ba20656ce3 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_EVENT_H +#define SSS_HW_EVENT_H + +#include + +#include "sss_hw_svc_cap.h" + +enum sss_fault_source_type { + /* same as SSS_FAULT_TYPE_CHIP */ + SSS_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as SSS_FAULT_TYPE_NPU */ + SSS_FAULT_SRC_HW_MGMT_NPU, + /* same as SSS_FAULT_TYPE_MEM_RD_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as SSS_FAULT_TYPE_MEM_WR_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as SSS_FAULT_TYPE_REG_RD_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as SSS_FAULT_TYPE_REG_WR_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + SSS_FAULT_SRC_SW_MGMT_NPU, + SSS_FAULT_SRC_MGMT_WATCHDOG, + SSS_FAULT_SRC_MGMT_RESET = 8, + SSS_FAULT_SRC_HW_PHY_FAULT, + SSS_FAULT_SRC_TX_PAUSE_EXCP, + SSS_FAULT_SRC_PCIE_LINK_DOWN = 20, + SSS_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, + SSS_FAULT_SRC_TX_TIMEOUT, + SSS_FAULT_SRC_TYPE_MAX, +}; + +enum sss_comm_event_type { + SSS_EVENT_PCIE_LINK_DOWN, + SSS_EVENT_HEART_LOST, + SSS_EVENT_FAULT, + SSS_EVENT_SRIOV_STATE_CHANGE, + SSS_EVENT_CARD_REMOVE, + SSS_EVENT_MGMT_WATCHDOG, + SSS_EVENT_MAX +}; + +enum sss_event_service_type { + SSS_EVENT_SRV_COMM, + SSS_SERVICE_EVENT_BASE, + SSS_EVENT_SRV_NIC = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_NIC, + SSS_EVENT_SRV_MIGRATE = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_MIGRATE, +}; + +enum sss_fault_err_level { + SSS_FAULT_LEVEL_FATAL, + SSS_FAULT_LEVEL_SERIOUS_RESET, + SSS_FAULT_LEVEL_HOST, + SSS_FAULT_LEVEL_SERIOUS_FLR, + SSS_FAULT_LEVEL_GENERAL, + SSS_FAULT_LEVEL_SUGGESTION, + SSS_FAULT_LEVEL_MAX, +}; + +enum sss_fault_type { + SSS_FAULT_TYPE_CHIP, + SSS_FAULT_TYPE_NPU, + SSS_FAULT_TYPE_MEM_RD_TIMEOUT, + SSS_FAULT_TYPE_MEM_WR_TIMEOUT, + SSS_FAULT_TYPE_REG_RD_TIMEOUT, + SSS_FAULT_TYPE_REG_WR_TIMEOUT, + SSS_FAULT_TYPE_PHY_FAULT, + SSS_FAULT_TYPE_TSENSOR_FAULT, + 
SSS_FAULT_TYPE_MAX, +}; + +#define SSS_SRV_EVENT_TYPE(svc, type) ((((u32)(svc)) << 16) | (type)) + +#define SSS_MGMT_CMD_UNSUPPORTED 0xFF + +union sss_fault_hw_mgmt { + u32 val[4]; + /* valid only type == SSS_FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum sss_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only if err_level == SSS_FAULT_LEVEL_SERIOUS_FLR */ + u8 rsvd1; + u8 host_id; + u16 func_id; + } chip; + + /* valid only if type == SSS_FAULT_TYPE_NPU */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only if type == SSS_FAULT_TYPE_MEM_RD_TIMEOUT || + * SSS_FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_id; + } mem_timeout; + + /* valid only if type == SSS_FAULT_TYPE_REG_RD_TIMEOUT || + * SSS_FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct sss_fault_event { + u8 type; /* enum sss_fault_type */ + u8 fault_level; /* sdk write fault level for uld event */ + u8 rsvd[2]; + union sss_fault_hw_mgmt info; +}; + +struct sss_cmd_fault_event { + u8 status; + u8 ver; + u8 rsvd[6]; + struct sss_fault_event fault_event; +}; + +struct sss_event_info { + u16 service; /* enum sss_event_service_type */ + u16 type; /* enum sss_comm_event_type */ + u8 event_data[104]; +}; + +typedef void (*sss_event_handler_t)(void *handle, struct sss_event_info *event); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h new file mode 100644 index 0000000000000..b14290fb2f272 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_EXPORT_H +#define SSS_HW_EXPORT_H + +#include + +#include "sss_hw_irq.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_event.h" + +int sss_chip_set_msix_attr(void *hwdev, + struct sss_irq_cfg intr_cfg, u16 channel); + +/* * + * @brief sss_chip_clear_msix_resend_bit - clear msix resend bit + * @param hwdev: device pointer to hwdev + * @param msix_id: msix id + * @param clear_en: 1-clear + */ +void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en); + +/** + * @brief sss_chip_reset_function - reset func + * @param hwdev: device pointer to hwdev + * @param func_id: global function index + * @param flag: reset flag + * @param channel: channel id + */ +int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel); + +/** + * @brief sss_chip_set_root_ctx - set root context + * @param hwdev: device pointer to hwdev + * @param rq_depth: rq depth + * @param sq_depth: sq depth + * @param rx_size: rx buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_chip_set_root_ctx(void *hwdev, + u32 rq_depth, u32 sq_depth, int rx_size, u16 channel); + +/** + * @brief sss_chip_clean_root_ctx - clean root context + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_chip_clean_root_ctx(void *hwdev, u16 channel); + +/* * + * @brief sss_get_mgmt_version - get management cpu version + * @param hwdev: device pointer to hwdev + * @param buf: output management version + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel); + +/** + * @brief sss_chip_set_func_used_state - set function service used state + * @param 
hwdev: device pointer to hwdev + * @param service_type: service type + * @param state: function used state + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_chip_set_func_used_state(void *hwdev, + u16 service_type, bool state, u16 channel); + +bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability); + +/* * + * @brief sss_support_nic - function support nic + * @param hwdev: device pointer to hwdev + * @param cap: nic service capability + * @retval true: function support nic + * @retval false: function not support nic + */ +bool sss_support_nic(void *hwdev); + +bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap); + +/* * + * @brief sss_get_max_sq_num - get max queue number + * @param hwdev: device pointer to hwdev + * @retval non-zero: max queue number + * @retval zero: failure + */ +u16 sss_get_max_sq_num(void *hwdev); + +/* * + * @brief sss_get_phy_port_id - get physical port id + * @param hwdev: device pointer to hwdev + * @retval physical port id + */ +u8 sss_get_phy_port_id(void *hwdev); /* Obtain sss_service_cap.port_id */ + +/* * + * @brief sss_get_max_vf_num - get vf number + * @param hwdev: device pointer to hwdev + * @retval non-zero: vf number + * @retval zero: failure + */ +u16 sss_get_max_vf_num(void *hwdev); /* Obtain sss_service_cap.max_vf */ + +/* * + * @brief sss_get_cos_valid_bitmap - get cos valid bitmap + * @param hwdev: device pointer to hwdev + * @retval non-zero: valid cos bit map + * @retval zero: failure + */ +int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap); + +/* * + * @brief sss_alloc_irq - alloc irq + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param alloc_array: alloc irq info + * @param alloc_num: alloc number + * @retval zero: failure + * @retval non-zero: success + */ +u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type, + struct sss_irq_desc *alloc_array, 
u16 alloc_num); + +/* * + * @brief sss_free_irq - free irq + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param irq_id: irq id + */ +void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id); + +/* * + * @brief sss_register_dev_event - register hardware event + * @param hwdev: device pointer to hwdev + * @param data: private data will be used by the callback + * @param callback: callback function + */ +void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback); + +/* * + * @brief sss_unregister_dev_event - unregister hardware event + * @param dev: device pointer to hwdev + */ +void sss_unregister_dev_event(void *dev); + +/* * + * @brief sss_get_dev_present_flag - get chip present flag + * @param hwdev: device pointer to hwdev + * @retval 1: chip is present + * @retval 0: chip is absent + */ +int sss_get_dev_present_flag(const void *hwdev); + +/* * + * @brief sss_get_max_pf_num - get global max pf number + */ +u8 sss_get_max_pf_num(void *hwdev); + +u16 sss_nic_intr_num(void *hwdev); + +/* * + * @brief sss_get_chip_present_state - get card present state + * @param hwdev: device pointer to hwdev + * @param present_state: return card present state + * @retval zero: success + * @retval non-zero: failure + */ +int sss_get_chip_present_state(void *hwdev, bool *present_state); + +/** + * @brief sss_fault_event_report - report fault event + * @param hwdev: device pointer to hwdev + * @param src: fault event source, reference to enum sss_fault_source_type + * @param level: fault level, reference to enum sss_fault_err_level + */ +void sss_fault_event_report(void *hwdev, u16 src, u16 level); + +/** + * @brief sss_register_service_adapter - register service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param service_adapter: service adapter + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_service_adapter(void *hwdev, 
enum sss_service_type service_type, + void *service_adapter); + +/** + * @brief sss_unregister_service_adapter - unregister service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + **/ +void sss_unregister_service_adapter(void *hwdev, + enum sss_service_type service_type); + +/** + * @brief sss_get_service_adapter - get service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @retval non-zero: success + * @retval null: failure + **/ +void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type); + +/** + * @brief sss_do_event_callback - event callback to notify service driver + * @param hwdev: device pointer to hwdev + * @param event: event info to service driver + */ +void sss_do_event_callback(void *hwdev, struct sss_event_info *event); + +/** + * @brief sss_update_link_stats - link event stats + * @param hwdev: device pointer to hwdev + * @param link_state: link status + */ +void sss_update_link_stats(void *hwdev, bool link_state); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h new file mode 100644 index 0000000000000..60354bcf0efac --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_IRQ_H +#define SSS_HW_IRQ_H + +#include + +enum sss_msix_auto_mask { + SSS_CLR_MSIX_AUTO_MASK, + SSS_SET_MSIX_AUTO_MASK, +}; + +enum sss_msix_state { + SSS_MSIX_ENABLE, + SSS_MSIX_DISABLE, +}; + +struct sss_irq_desc { + u16 msix_id; /* PCIe MSIX id */ + u16 rsvd; + u32 irq_id; /* OS IRQ id */ +}; + +struct sss_irq_cfg { + u32 lli_set; + u32 coalesc_intr_set; + u16 msix_id; + u8 lli_credit; + u8 lli_timer; + u8 pending; + u8 coalesc_timer; + u8 resend_timer; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h 
b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h new file mode 100644 index 0000000000000..33b5338a3ed79 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MBX_H +#define SSS_HW_MBX_H + +#include + +/* between Driver to MPU */ +enum sss_mgmt_cmd { + /* flr */ + SSS_COMM_MGMT_CMD_FUNC_RESET = 0, + SSS_COMM_MGMT_CMD_FEATURE_NEGO, + SSS_COMM_MGMT_CMD_FLUSH_DOORBELL, + SSS_COMM_MGMT_CMD_START_FLUSH, + SSS_COMM_MGMT_CMD_SET_FUNC_FLR, + SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR, + SSS_COMM_MGMT_CMD_SET_PPF_FLR_TYPE, + SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + + /* msi-x */ + SSS_COMM_MGMT_CMD_CFG_MSIX_NUM = 10, + + /* init cfg */ + SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT = 20, + SSS_COMM_MGMT_CMD_SET_VAT, + SSS_COMM_MGMT_CMD_CFG_PAGESIZE, + SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + SSS_COMM_MGMT_CMD_SET_DMA_ATTR, + + /* infra */ + SSS_COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, + SSS_COMM_MGMT_CMD_SET_MQM_CFG_INFO, + SSS_COMM_MGMT_CMD_SET_MQM_SRCH_GPA, + SSS_COMM_MGMT_CMD_SET_PPF_TMR, + SSS_COMM_MGMT_CMD_SET_PPF_HT_GPA, + SSS_COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + SSS_COMM_MGMT_CMD_SET_MBX_CRDT, + SSS_COMM_MGMT_CMD_CFG_TEMPLATE, + SSS_COMM_MGMT_CMD_SET_MQM_LIMIT, + + /* get chip info */ + SSS_COMM_MGMT_CMD_GET_FW_VERSION = 60, + SSS_COMM_MGMT_CMD_GET_BOARD_INFO, + SSS_COMM_MGMT_CMD_SYNC_TIME, + SSS_COMM_MGMT_CMD_GET_HW_PF_INFOS, + SSS_COMM_MGMT_CMD_SEND_BDF_INFO, + SSS_COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, + SSS_COMM_MGMT_CMD_GET_SML_TABLE_INFO, + + /* update firmware */ + SSS_COMM_MGMT_CMD_UPDATE_FW = 80, + SSS_COMM_MGMT_CMD_ACTIVE_FW, + SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW, + SSS_COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, + SSS_COMM_MGMT_CMD_SWITCH_CFG, + SSS_COMM_MGMT_CMD_CHECK_FLASH, + SSS_COMM_MGMT_CMD_CHECK_FLASH_RW, + SSS_COMM_MGMT_CMD_RESOURCE_CFG, + SSS_COMM_MGMT_CMD_UPDATE_BIOS, /* merge to 
SSS_COMM_MGMT_CMD_UPDATE_FW */ + SSS_COMM_MGMT_CMD_MPU_GIT_CODE, + + /* chip reset */ + SSS_COMM_MGMT_CMD_FAULT_REPORT = 100, + SSS_COMM_MGMT_CMD_WATCHDOG_INFO, + SSS_COMM_MGMT_CMD_MGMT_RESET, + SSS_COMM_MGMT_CMD_FFM_SET, + + /* chip info/log */ + SSS_COMM_MGMT_CMD_GET_LOG = 120, + SSS_COMM_MGMT_CMD_TEMP_OP, + SSS_COMM_MGMT_CMD_EN_AUTO_RST_CHIP, + SSS_COMM_MGMT_CMD_CFG_REG, + SSS_COMM_MGMT_CMD_GET_CHIP_ID, + SSS_COMM_MGMT_CMD_SYSINFO_DFX, + SSS_COMM_MGMT_CMD_PCIE_DFX_NTC, + SSS_COMM_MGMT_CMD_DICT_LOG_STATUS, /* LOG STATUS 127 */ + SSS_COMM_MGMT_CMD_MSIX_INFO, + SSS_COMM_MGMT_CMD_CHANNEL_DETECT, + + /* DFT mode */ + SSS_COMM_MGMT_CMD_GET_DIE_ID = 200, + SSS_COMM_MGMT_CMD_GET_EFUSE_TEST, + SSS_COMM_MGMT_CMD_EFUSE_INFO_CFG, + SSS_COMM_MGMT_CMD_GPIO_CTL, + SSS_COMM_MGMT_CMD_HI30_SERLOOP_START, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_SERLOOP_STOP, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_MBIST_SET_FLAG, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_MBIST_GET_RESULT, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_ECC_TEST, + SSS_COMM_MGMT_CMD_FUNC_BIST_TEST, + SSS_COMM_MGMT_CMD_VPD_SET, + SSS_COMM_MGMT_CMD_VPD_GET, + + SSS_COMM_MGMT_CMD_ERASE_FLASH, + SSS_COMM_MGMT_CMD_QUERY_FW_INFO, + SSS_COMM_MGMT_CMD_GET_CFG_INFO, + SSS_COMM_MGMT_CMD_GET_UART_LOG, + SSS_COMM_MGMT_CMD_SET_UART_CMD, + SSS_COMM_MGMT_CMD_SPI_TEST, + + /* ALL reg read/write merge to SSS_COMM_MGMT_CMD_CFG_REG */ + SSS_COMM_MGMT_CMD_UP_REG_GET, + SSS_COMM_MGMT_CMD_UP_REG_SET, + SSS_COMM_MGMT_CMD_REG_READ, + SSS_COMM_MGMT_CMD_REG_WRITE, + SSS_COMM_MGMT_CMD_MAG_REG_WRITE, + SSS_COMM_MGMT_CMD_ANLT_REG_WRITE, + + SSS_COMM_MGMT_CMD_HEART_EVENT, + SSS_COMM_MGMT_CMD_NCSI_OEM_GET_DRV_INFO, + SSS_COMM_MGMT_CMD_LASTWORD_GET, /* merge to SSS_COMM_MGMT_CMD_GET_LOG */ + SSS_COMM_MGMT_CMD_READ_BIN_DATA, + SSS_COMM_MGMT_CMD_WWPN_GET, + SSS_COMM_MGMT_CMD_WWPN_SET, + + SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP, + + SSS_COMM_MGMT_CMD_SET_MAC, + + /* MPU patch cmd */ + SSS_COMM_MGMT_CMD_LOAD_PATCH, + 
SSS_COMM_MGMT_CMD_REMOVE_PATCH, + SSS_COMM_MGMT_CMD_PATCH_ACTIVE, + SSS_COMM_MGMT_CMD_PATCH_DEACTIVE, + SSS_COMM_MGMT_CMD_PATCH_SRAM_OPTIMIZE, + /* container host process */ + SSS_COMM_MGMT_CMD_CONTAINER_HOST_PROC, + /* nsci counter */ + SSS_COMM_MGMT_CMD_NCSI_COUNTER_PROC, +}; + +enum sss_channel_type { + SSS_CHANNEL_DEFAULT, + SSS_CHANNEL_COMM, + SSS_CHANNEL_NIC, + SSS_CHANNEL_ROCE, + SSS_CHANNEL_TOE, + SSS_CHANNEL_FC, + SSS_CHANNEL_OVS, + SSS_CHANNEL_DSW, + SSS_CHANNEL_MIG, + SSS_CHANNEL_CRYPT, + SSS_CHANNEL_MAX = 32, +}; + +enum sss_mbx_errcode { + SSS_MBX_ERRCODE_NO_ERRORS = 0, + /* VF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, + /* PPF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, + /* PF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, + /* The mbx data size is set to all zero */ + SSS_MBX_ERRCODE_ZERO_DATA_SIZE = 0x400, + /* The sender function attribute has not been learned by hardware */ + SSS_MBX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, + /* The receiver function attr has not been learned by hardware */ + SSS_MBX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, +}; + +/* CTRLQ MODULE_TYPE */ +enum sss_mod_type { + SSS_MOD_TYPE_COMM = 0, /* HW communication module */ + SSS_MOD_TYPE_L2NIC = 1, /* L2NIC module */ + SSS_MOD_TYPE_ROCE = 2, + SSS_MOD_TYPE_PLOG = 3, + SSS_MOD_TYPE_TOE = 4, + SSS_MOD_TYPE_FLR = 5, + SSS_MOD_TYPE_RSVD1 = 6, + SSS_MOD_TYPE_CFGM = 7, /* Configuration module */ + SSS_MOD_TYPE_QMM = 8, + SSS_MOD_TYPE_RSVD2 = 9, + COMM_MOD_FC = 10, + SSS_MOD_TYPE_OVS = 11, + SSS_MOD_TYPE_DSW = 12, + SSS_MOD_TYPE_MIGRATE = 13, + SSS_MOD_TYPE_SSSLINK = 14, + SSS_MOD_TYPE_CRYPT = 15, /* secure crypto module */ + SSS_MOD_TYPE_VIO = 16, + SSS_MOD_TYPE_IMU = 17, + SSS_MOD_TYPE_DFT = 18, /* DFT */ + SSS_MOD_TYPE_HW_MAX = 19, /* hardware max module id */ + /* Software module id, for PF/VF and multi-host */ + 
SSS_MOD_TYPE_SW_FUNC = 20, + SSS_MOD_TYPE_MAX, +}; + +/* func reset flag */ +enum sss_func_reset_flag { + SSS_RESET_TYPE_FLUSH_BIT = 0, + SSS_RESET_TYPE_MQM, + SSS_RESET_TYPE_SMF, + SSS_RESET_TYPE_PF_BW_CFG, + + SSS_RESET_TYPE_COMM = 10, + SSS_RESET_TYPE_COMM_MGMT_CH, + SSS_RESET_TYPE_COMM_CMD_CH, + SSS_RESET_TYPE_NIC, + SSS_RESET_TYPE_OVS, + SSS_RESET_TYPE_VBS, + SSS_RESET_TYPE_ROCE, + SSS_RESET_TYPE_FC, + SSS_RESET_TYPE_TOE, + SSS_RESET_TYPE_IPSEC, + SSS_RESET_TYPE_MAX, +}; + +#define SSS_NIC_RESET BIT(SSS_RESET_TYPE_NIC) +#define SSS_OVS_RESET BIT(SSS_RESET_TYPE_OVS) +#define SSS_VBS_RESET BIT(SSS_RESET_TYPE_VBS) +#define SSS_ROCE_RESET BIT(SSS_RESET_TYPE_ROCE) +#define SSS_FC_RESET BIT(SSS_RESET_TYPE_FC) +#define SSS_TOE_RESET BIT(SSS_RESET_TYPE_TOE) +#define SSS_IPSEC_RESET BIT(SSS_RESET_TYPE_IPSEC) + +typedef int (*sss_vf_mbx_handler_t)(void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +typedef int (*sss_pf_mbx_handler_t)(void *pri_handle, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +typedef int (*sss_ppf_mbx_handler_t)(void *pri_handle, u16 pf_id, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +typedef int (*sss_pf_from_ppf_mbx_handler_t)(void *pri_handle, + u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief sss_register_pf_mbx_handler - pf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb); + +/** + * @brief sss_register_vf_mbx_handler - vf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param 
callback: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb); + +/** + * @brief sss_unregister_pf_mbx_handler - pf unregister mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod); + +/** + * @brief sss_unregister_vf_mbx_handler - vf unregister mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod); + +/** + * @brief sss_sync_mbx_send_msg - msg to management cpu + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +#define sss_sync_send_msg_ch(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel) \ + sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_COMM, cmd, \ + buf_in, in_size, buf_out, out_size, 0, channel) + +#define sss_sync_send_msg(hwdev, cmd, buf_in, in_size, buf_out, out_size) \ + sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_COMM, cmd, \ + buf_in, in_size, buf_out, out_size, 0, SSS_CHANNEL_COMM) + +#define SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_ptr) \ + ((ret) != 0 || (out_len) == 0 || (cmd_ptr)->head.state != SSS_MGMT_CMD_SUCCESS) + +/** + * @brief sss_mbx_send_to_pf - vf mbx message to pf + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer 
size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +/** + * @brief sss_mbx_send_to_vf - mbx message to vf + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h new file mode 100644 index 0000000000000..2280b234e0603 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MBX_MSG_H +#define SSS_HW_MBX_MSG_H + +#include + +#define SSS_MGMT_MSG_SET_CMD 1 +#define SSS_MGMT_MSG_GET_CMD 0 + +#define SSS_MGMT_CMD_SUCCESS 0 + +struct sss_mgmt_msg_head { + u8 state; + u8 version; + u8 rsvd0[6]; +}; + +struct sss_cmd_func_reset { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; + u64 reset_flag; +}; + +enum { + SSS_COMM_F_ADM = 1U << 0, + SSS_COMM_F_CLP = 1U << 1, + SSS_COMM_F_CHANNEL_DETECT = 1U << 2, + SSS_COMM_F_MBX_SEGMENT = 1U << 3, + SSS_COMM_F_CTRLQ_NUM = 1U << 4, + SSS_COMM_F_VIRTIO_VQ_SIZE = 1U << 5, +}; + +#define SSS_MAX_FEATURE_QWORD 4 +struct sss_cmd_feature_nego { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 
feature[SSS_MAX_FEATURE_QWORD]; +}; + +struct sss_cmd_clear_doorbell { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; +}; + +struct sss_cmd_clear_resource { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; +}; + +struct sss_comm_global_attr { + u8 max_host_num; + u8 max_pf_num; + u16 vf_id_start; + + u8 mgmt_host_node_id; /* for adm msg to mgmt cpu */ + u8 ctrlq_num; + u8 rsvd1[2]; + u32 rsvd2[8]; +}; + +struct sss_cmd_channel_detect { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1[3]; + u32 rsvd2[2]; +}; + +enum sss_svc_type { + SSS_SVC_TYPE_COM = 0, + SSS_SVC_TYPE_NIC, + SSS_SVC_TYPE_OVS, + SSS_SVC_TYPE_ROCE, + SSS_SVC_TYPE_TOE, + SSS_SVC_TYPE_IOE, + SSS_SVC_TYPE_FC, + SSS_SVC_TYPE_VBS, + SSS_SVC_TYPE_IPSEC, + SSS_SVC_TYPE_VIRTIO, + SSS_SVC_TYPE_MIGRATE, + SSS_SVC_TYPE_PPA, + SSS_SVC_TYPE_MAX, +}; + +struct sss_cmd_func_svc_used_state { + struct sss_mgmt_msg_head head; + u16 func_id; + u16 svc_type; + u8 used_state; + u8 rsvd[35]; +}; + +struct sss_cmd_get_glb_attr { + struct sss_mgmt_msg_head head; + + struct sss_comm_global_attr attr; +}; + +enum sss_fw_ver_type { + SSS_FW_VER_TYPE_BOOT, + SSS_FW_VER_TYPE_MPU, + SSS_FW_VER_TYPE_NPU, + SSS_FW_VER_TYPE_SMU_L0, + SSS_FW_VER_TYPE_SMU_L1, + SSS_FW_VER_TYPE_CFG, +}; + +#define SSS_FW_VERSION_LEN 16 +#define SSS_FW_COMPILE_TIME_LEN 20 +struct sss_cmd_get_fw_version { + struct sss_mgmt_msg_head head; + + u16 fw_type; + u16 rsvd; + u8 ver[SSS_FW_VERSION_LEN]; + u8 time[SSS_FW_COMPILE_TIME_LEN]; +}; + +/* hardware define: ctrlq context */ +struct sss_ctrlq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct sss_cmd_ctrlq_ctxt { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 ctrlq_id; + u8 rsvd[5]; + + struct sss_ctrlq_ctxt_info ctxt; +}; + +struct sss_cmd_root_ctxt { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 set_ctrlq_depth; + u8 ctrlq_depth; + u16 rx_buf_sz; + u8 lro_en; + u8 rsvd1; + u16 sq_depth; + u16 rq_depth; + u64 rsvd2; +}; + 
+struct sss_cmd_wq_page_size { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 page_size; + + u32 rsvd; +}; + +struct sss_cmd_msix_config { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 resend_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 rsvd2[5]; +}; + +struct sss_cmd_dma_attr_config { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 entry_id; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv; +}; + +struct sss_cmd_ceq_ctrl_reg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 qid; + u32 ctrl0; + u32 ctrl1; + u32 rsvd1; +}; + +struct sss_board_info { + u8 board_type; + u8 port_num; + u8 port_speed; + u8 pcie_width; + u8 host_num; + u8 pf_num; + u16 vf_total_num; + u8 tile_num; + u8 qcm_num; + u8 core_num; + u8 work_mode; + u8 service_mode; + u8 pcie_mode; + u8 boot_sel; + u8 board_id; + u32 cfg_addr; + u32 service_en_bitmap; + u8 scenes_id; + u8 cfg_tmpl_id; + u8 hw_id; + u8 rsvd; + u16 pf_vendor_id; + u8 tile_bitmap; + u8 sm_bitmap; +}; + +struct sss_cmd_board_info { + struct sss_mgmt_msg_head head; + + struct sss_board_info info; + u32 rsvd[22]; +}; + +struct sss_cmd_sync_time { + struct sss_mgmt_msg_head head; + + u64 mstime; + u64 rsvd; +}; + +struct sss_cmd_bdf_info { + struct sss_mgmt_msg_head head; + + u16 function_id; + u8 rsvd1[2]; + u8 bus; + u8 device; + u8 function; + u8 rsvd2[5]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h new file mode 100644 index 0000000000000..61ed2206cd3ba --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MGMT_H +#define SSS_HW_MGMT_H + +enum sss_hwdev_init_state { + SSS_HW_NOT_INIT_OK = 0, + SSS_HW_ADM_INIT_OK, + 
SSS_HW_MBX_INIT_OK, + SSS_HW_CTRLQ_INIT_OK, +}; + +typedef void (*sss_mgmt_msg_handler_t)(void *data, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size); + +int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data, + sss_mgmt_msg_handler_t handler); + +void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h new file mode 100644 index 0000000000000..41f053608b353 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_SRIOV_H +#define SSS_HW_SRIOV_H + +#include + +struct sss_sriov_state_info { + u8 enable; + u16 vf_num; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h new file mode 100644 index 0000000000000..0dbb4b6963ea7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_STATISTICS_H +#define SSS_HW_STATISTICS_H + +#include +#include + +#include "sss_hw_event.h" +#include "sss_hw_aeq.h" + +struct sss_qmm_stats { + atomic_t qmm_rsv_cnt[134]; +}; + +struct sss_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct sss_fault_event_stats { + atomic_t chip_fault_stats[22][SSS_FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[SSS_FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct sss_hw_stats { + atomic_t heart_lost_stats; + struct sss_qmm_stats qmm_stats; + struct sss_link_event_stats link_event_stats; + struct sss_fault_event_stats fault_event_stats; + atomic_t nic_ucode_event_stats[SSS_ERR_MAX]; +}; + +#define SSS_CHIP_FAULT_SIZE (110 * 
1024) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h new file mode 100644 index 0000000000000..158ba77fe6635 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_SVC_CAP_H +#define SSS_HW_SVC_CAP_H + +#include + +enum sss_service_type { + SSS_SERVICE_TYPE_NIC = 0, + SSS_SERVICE_TYPE_OVS, + SSS_SERVICE_TYPE_ROCE, + SSS_SERVICE_TYPE_TOE, + SSS_SERVICE_TYPE_IOE, + SSS_SERVICE_TYPE_FC, + SSS_SERVICE_TYPE_VBS, + SSS_SERVICE_TYPE_IPSEC, + SSS_SERVICE_TYPE_VIRTIO, + SSS_SERVICE_TYPE_MIGRATE, + SSS_SERVICE_TYPE_PPA, + SSS_SERVICE_TYPE_CUSTOM, + SSS_SERVICE_TYPE_VROCE, + SSS_SERVICE_TYPE_MAX, + + SSS_SERVICE_TYPE_INTF = (1 << 15), + SSS_SERVICE_TYPE_QMM = (1 << 16), +}; + +/* RDMA service capability */ +enum { + SSS_RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + SSS_RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + SSS_RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + SSS_RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + SSS_RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + SSS_RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), + + SSS_RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + SSS_RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + SSS_RDMA_DEV_CAP_FLAG_APM = (1 << 9), +}; + +struct sss_ppa_service_cap { + u16 qpc_pseudo_vf_start; + u16 qpc_pseudo_vf_num; + u32 qpc_pseudo_vf_ctx_num; + u32 pctx_size; /* 512B */ + u32 bloomfilter_len; + u8 bloomfilter_en; + u8 rsvd0; + u16 rsvd1; +}; + +struct sss_vbs_service_cap { + u16 vbs_max_volq; + u16 rsvd1; +}; + +/* PF/VF ToE service resource */ +struct sss_dev_toe_svc_cap { + u32 max_pctx; /* Parent Context: max specifications 1M */ + u32 max_cctxt; + u32 max_cq; + u16 max_srq; + u32 srq_id_start; + u32 max_mpt; +}; + +/* ToE services */ +struct sss_toe_service_cap { + struct sss_dev_toe_svc_cap 
dev_toe_cap; + + u8 alloc_flag; + u8 rsvd[3]; + u32 pctx_size; /* 1KB */ + u32 scqc_size; /* 64B */ +}; + +/* PF FC service resource */ +struct sss_dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048 */ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ */ + u32 srq_num; /* Number of SRQ is 2 */ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services */ +struct sss_fc_service_cap { + struct sss_dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode) */ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B */ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of SRQ Context (64B) */ + u32 srqe_size; /* 32B */ +}; + +struct sss_dev_roce_svc_own_cap { + u32 max_qp; + u32 max_cq; + u32 max_srq; + u32 max_mpt; + u32 max_drc_qp; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_size; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_size; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_size; + + u32 qpc_entry_size; + u32 max_wqe; + u32 max_rq_sg; + u32 max_sq_inline_data_size; + u32 max_rq_desc_size; + + u32 rdmarc_entry_size; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqe; + u32 reserved_srq; + u32 max_srq_sge; + u32 srqc_entry_size; + + u32 max_msg_size; /* Message size 2GB */ +}; + +/* RDMA service capability */ +struct sss_dev_rdma_svc_cap { + struct sss_dev_roce_svc_own_cap roce_own_cap; +}; + +struct sss_nic_service_cap { + u16 max_sq; + u16 max_rq; + u16 def_queue_num; +}; + +/* RDMA services */ +struct sss_rdma_service_cap { + struct sss_dev_rdma_svc_cap dev_rdma_cap; + + /* 1. the number of MTT PA must be integer power of 2 + * 2. represented by logarithm. 
Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + u8 log_mtt; + + /* Number of MTT table (4M), is actually MTT seg number */ + u32 mtt_num; + + u32 log_mtt_seg; + u32 mtt_entry_size; /* MTT table size 8B, including 1 PA(64bits) */ + u32 mpt_entry_size; /* MPT table size (64B) */ + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_size; + + /* 1. the number of RDMArc PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + u8 log_rdmarc; + + u32 reserved_qp; /* Number of reserved QP */ + u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ + + /* WQE maximum size of SQ(1024B), inline maximum + * size if 960B(944B aligned to the 960B), + * 960B=>wqebb alignment=>1024B + */ + u32 max_sq_desc_size; + + /* Currently, the supports 64B and 128B, + * defined as 64Bytes + */ + u32 wqebb_size; + + u32 max_cqe; /* Size of the depth of the CQ (64K-1) */ + u32 reserved_cq; /* Number of reserved CQ */ + u32 cqc_entry_size; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of CQE (32B) */ + + u32 reserved_mrw; /* Number of reserved MR/MR Window */ + + /* max MAP of FMR, + * (1 << (32-ilog2(num_mpt)))-1; + */ + u32 max_fmr_map; + + u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ + + /* Timeout time. 
Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */ + u32 local_ca_ack_delay; + u32 port_num; /* Physical port number */ + + u32 db_page_size; /* Size of the DB (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 pd_num; /* Maximum number of PD (128K) */ + u32 reserved_pd; /* Number of reserved PD */ + u32 max_xrcd; /* Maximum number of xrcd (64K) */ + u32 reserved_xrcd; /* Number of reserved xrcd */ + + u32 max_gid_per_port; /* gid number (16) of each port */ + + /* RoCE v2 GID table is 32B, + * compatible RoCE v1 expansion + */ + u32 gid_entry_size; + + u32 reserved_lkey; /* local_dma_lkey */ + u32 comp_vector_num; /* Number of complete vector (32) */ + u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size */ + + u32 flag; /* RDMA some identity */ + u32 max_frpl_len; /* Maximum number of pages frmr registration */ + u32 max_pkey; /* Number of supported pkey group */ +}; + +/* PF OVS service resource */ +struct sss_dev_ovs_svc_cap { + u32 max_pctx; /* Parent Context: max specifications 1M */ + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_num; + u16 pseudo_vf_start_id; + u8 dynamic_qp_en; +}; + +/* OVS services */ +struct sss_ovs_service_cap { + struct sss_dev_ovs_svc_cap dev_ovs_cap; + + u32 pctx_size; /* 512B */ +}; + +/* PF IPsec service resource */ +struct sss_dev_ipsec_svc_cap { + u32 max_sactx; /* max IPsec SA context num */ + u16 max_cq; /* max IPsec SCQC num */ + u16 rsvd0; +}; + +/* IPsec services */ +struct sss_ipsec_service_cap { + struct sss_dev_ipsec_svc_cap dev_ipsec_cap; + u32 sactx_size; /* 512B */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h new file mode 100644 index 0000000000000..677008109e18f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef 
SSS_HW_ULD_DRIVER_H +#define SSS_HW_ULD_DRIVER_H + +#include "sss_hw_event.h" +#include "sss_hw_svc_cap.h" + +struct sss_hal_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +struct sss_uld_info { + /* When it is unnecessary to initialize the uld dev, + * @probe needs to return 0 and uld_dev is set to NULL; + * if uld_dev is NULL, @remove will not be called when uninstalling + */ + int (*probe)(struct sss_hal_dev *hal_dev, void **uld_dev, char *uld_dev_name); + void (*remove)(struct sss_hal_dev *hal_dev, void *uld_dev); + int (*suspend)(struct sss_hal_dev *hal_dev, void *uld_dev, pm_message_t state); + int (*resume)(struct sss_hal_dev *hal_dev, void *uld_dev); + void (*event)(struct sss_hal_dev *hal_dev, void *uld_dev, + struct sss_event_info *event); + int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); +}; + +/** + * sss_register_uld - register an upper driver + * @type: uld service type + * @uld_info: uld callback + * + * Registers an upper-layer driver. + * Traverse existing devices and call @probe to initialize the uld device. + */ +int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info); + +/** + * sss_unregister_uld - unregister an upper driver + * @type: uld service type + * + * Traverse existing devices and call @remove to uninstall the uld device. + * Unregisters an existing upper-layer driver. 
+ */ +void sss_unregister_uld(enum sss_service_type type); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h new file mode 100644 index 0000000000000..dd9dd0695a15b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_WQ_H +#define SSS_HW_WQ_H +#include + +#include "sss_hw_common.h" + +struct sss_wq { + u16 ci; + u16 pi; + + u32 q_depth; + u16 id_mask; + u16 elem_size_shift; + u16 page_num; + u16 rsvd1; + u32 elem_per_page; + u16 elem_per_page_shift; + u16 elem_per_page_mask; + + struct sss_dma_addr_align *page; + + dma_addr_t block_paddr; + u64 *block_vaddr; + + void *dev_hdl; + u32 page_size; + u16 elem_size; + u16 rsvd2; +} ____cacheline_aligned; + +#define SSS_WQ_MASK_ID(wq, id) ((id) & (wq)->id_mask) +#define SSS_WQ_MASK_PAGE(wq, pg_id) \ + ((pg_id) < (wq)->page_num ? 
(pg_id) : 0) +#define SSS_WQ_PAGE_ID(wq, id) ((id) >> (wq)->elem_per_page_shift) +#define SSS_WQ_OFFSET_IN_PAGE(wq, id) ((id) & (wq)->elem_per_page_mask) +#define SSS_WQ_GET_WQEBB_ADDR(wq, pg_id, id_in_pg) \ + ((u8 *)(wq)->page[pg_id].align_vaddr + \ + ((id_in_pg) << (wq)->elem_size_shift)) +#define SSS_WQ_IS_0_LEVEL_CLA(wq) ((wq)->page_num == 1) + +static inline u16 sss_wq_free_wqebb(struct sss_wq *wq) +{ + return wq->q_depth - ((wq->q_depth + wq->pi - wq->ci) & wq->id_mask) - 1; +} + +static inline bool sss_wq_is_empty(struct sss_wq *wq) +{ + return SSS_WQ_MASK_ID(wq, wq->pi) == SSS_WQ_MASK_ID(wq, wq->ci); +} + +static inline void *sss_wq_get_one_wqebb(struct sss_wq *wq, u16 *pi) +{ + *pi = SSS_WQ_MASK_ID(wq, wq->pi); + wq->pi++; + + return SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_PAGE_ID(wq, *pi), + SSS_WQ_OFFSET_IN_PAGE(wq, *pi)); +} + +static inline void *sss_wq_get_multi_wqebb(struct sss_wq *wq, + u16 num_wqebbs, u16 *pi, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + u32 pg_id; + u32 off_in_page; + + *pi = SSS_WQ_MASK_ID(wq, wq->pi); + wq->pi += num_wqebbs; + + pg_id = SSS_WQ_PAGE_ID(wq, *pi); + off_in_page = SSS_WQ_OFFSET_IN_PAGE(wq, *pi); + + if (off_in_page + num_wqebbs > wq->elem_per_page) { + /* wqe across wq page boundary */ + *second_part_wqebbs_addr = + SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_MASK_PAGE(wq, pg_id + 1), 0); + *first_part_wqebbs_num = wq->elem_per_page - off_in_page; + } else { + *second_part_wqebbs_addr = NULL; + *first_part_wqebbs_num = num_wqebbs; + } + + return SSS_WQ_GET_WQEBB_ADDR(wq, pg_id, off_in_page); +} + +static inline void sss_update_wq_ci(struct sss_wq *wq, u16 num_wqebbs) +{ + wq->ci += num_wqebbs; +} + +static inline void *sss_wq_wqebb_addr(struct sss_wq *wq, u16 id) +{ + return SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_PAGE_ID(wq, id), + SSS_WQ_OFFSET_IN_PAGE(wq, id)); +} + +static inline void *sss_wq_read_one_wqebb(struct sss_wq *wq, u16 *ci) +{ + *ci = SSS_WQ_MASK_ID(wq, wq->ci); + + return sss_wq_wqebb_addr(wq, *ci); 
+} + +static inline u64 sss_wq_get_first_wqe_page_addr(struct sss_wq *wq) +{ + return wq->page[0].align_paddr; +} + +static inline void sss_wq_reset(struct sss_wq *wq) +{ + u16 pg_id; + + wq->ci = 0; + wq->pi = 0; + + for (pg_id = 0; pg_id < wq->page_num; pg_id++) + memset(wq->page[pg_id].align_vaddr, 0, wq->page_size); +} + +int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size); +void sss_destroy_wq(struct sss_wq *wq); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h new file mode 100644 index 0000000000000..e83810dde176c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_EXPORT_H +#define SSS_HWIF_EXPORT_H + +#include + +#include "sss_hw_common.h" +#include "sss_hw_irq.h" + +/** + * @brief sss_alloc_db_addr - alloc doorbell + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_alloc_db_addr(void *hwdev, void __iomem **db_base); + +/** + * @brief sss_free_db_addr - free doorbell + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + **/ +void sss_free_db_addr(void *hwdev, const void __iomem *db_base); + +/* * + * @brief sss_nic_set_msix_auto_mask - set msix auto mask function + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix auto_mask flag, 1-enable, 2-clear + */ +void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id, + enum sss_msix_auto_mask flag); + +/* * + * @brief sss_chip_set_msix_state - set msix state + * @param hwdev: device pointer to hwdev + * @param msix_id: msix id + * @param flag: msix state flag, 0-enable, 1-disable + */ +void sss_chip_set_msix_state(void 
*hwdev, u16 msix_id, + enum sss_msix_state flag); + +/* * + * @brief sss_get_global_func_id - get global function id + * @param hwdev: device pointer to hwdev + * @retval global function id + */ +u16 sss_get_global_func_id(void *hwdev); + +/* * + * @brief sss_get_pf_id_of_vf - get pf id of vf + * @param hwdev: device pointer to hwdev + * @retval pf id + */ +u8 sss_get_pf_id_of_vf(void *hwdev); + +/* * + * @brief sss_get_pcie_itf_id - get pcie port id + * @param hwdev: device pointer to hwdev + * @retval pcie port id + */ +u8 sss_get_pcie_itf_id(void *hwdev); + +/* * + * @brief sss_get_func_type - get function type + * @param hwdev: device pointer to hwdev + * @retval function type + */ +enum sss_func_type sss_get_func_type(void *hwdev); + +enum sss_func_type sss_get_func_id(void *hwdev); + +/* * + * @brief sss_get_glb_pf_vf_offset - get vf offset id of pf + * @param hwdev: device pointer to hwdev + * @retval vf offset id + */ +u16 sss_get_glb_pf_vf_offset(void *hwdev); + +/* * + * @brief sss_get_ppf_id - get ppf id + * @param hwdev: device pointer to hwdev + * @retval ppf id + */ +u8 sss_get_ppf_id(void *hwdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h b/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h new file mode 100644 index 0000000000000..e8c123f7189ba --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_LINUX_KERNEL_H_ +#define SSS_LINUX_KERNEL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +#include +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + 
+#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +/* ************************************************************************ */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV + +/* ************************************************************************ */ +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif +#define HAVE_INET6_IFADDR_LIST + +/* ************************************************************************ */ +#define HAVE_NDO_GET_STATS64 + +/* ************************************************************************ */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif + +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY + +/* ************************************************************************ */ +#define HAVE_ETHTOOL_SET_PHYS_ID + +/* ************************************************************************ */ +#define HAVE_NETDEV_WANTED_FEAUTES + +/* ************************************************************************ */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif + +/* ************************************************************************ */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif + +/* ************************************************************************ */ +#define _kc_kmap_atomic(page) kmap_local_page(page) +#define _kc_kunmap_atomic(addr) kunmap_local(addr) + +/* ************************************************************************ */ +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO + +/* ************************************************************************ */ +#define HAVE_NAPI_GRO_FLUSH_OLD + +/* 
************************************************************************ */ +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +/* ************************************************************************ */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define HAVE_SKB_INNER_NETWORK_HEADER + +/* ************************************************************************ */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES + +/* ************************************************************************ */ +#define HAVE_VXLAN_CHECKS +#define HAVE_NDO_SELECT_QUEUE_ACCEL + +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + +/* ************************************************************************ */ +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK + +/* ************************************************************************ */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#define HAVE_VLAN_FIND_DEV_DEEP_RCU + +/* ************************************************************************ */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#define HAVE_ETH_GET_HEADLEN_FUNC + +/* ************************************************************************ */ +#define HAVE_RXFH_HASHFUNC + +/****************************************************************/ +#define HAVE_NDO_SET_VF_TRUST + +/* ************************************************************** */ +#include + +/* ************************************************************** */ +#define HAVE_IO_MAP_WC_SIZE + +/* ************************************************************************ */ +#define HAVE_NETDEVICE_MIN_MAX_MTU + +/* ************************************************************************ */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA + +/* ************************************************************************ */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_ADM_INDEX 
+#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK + +/* ************************************************************************ */ +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_XDP_SUPPORT + +/* ************************************************************************ */ +#define HAVE_NDO_BPF_NETDEV_BPF +#define HAVE_TIMER_SETUP +#define HAVE_XDP_DATA_META + +/* ************************************************************************ */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV + +/*****************************************************************************/ +#define dev_open(x) dev_open(x, NULL) +#define HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY + +#ifndef get_ds +#define get_ds() (KERNEL_DS) +#endif + +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _sss_nic_dma_zalloc_coherent(d, s, h, f) +static inline void *_sss_nic_dma_zalloc_coherent(struct device *dev, + size_t size, dma_addr_t *dma_handle, gfp_t gfp) +{ + /* Above kernel 5.0, fixed up all remaining architectures + * to zero the memory in dma_alloc_coherent, and made + * dma_zalloc_coherent a no-op wrapper around dma_alloc_coherent, + * which fixes all of the above issues. 
+ */ + return dma_alloc_coherent(dev, size, dma_handle, gfp); +} +#endif + +struct timeval { + __kernel_old_time_t tv_sec; /* seconds */ + __kernel_suseconds_t tv_usec; /* microseconds */ +}; + +#ifndef do_gettimeofday +#define do_gettimeofday(time) _kc_do_gettimeofday(time) +static inline void _kc_do_gettimeofday(struct timeval *tv) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} +#endif + +/*****************************************************************************/ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY +#define ETH_GET_HEADLEN_NEED_DEV + +/*****************************************************************************/ +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f)) +#endif + +/*****************************************************************************/ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS + +/*****************************************************************************/ +#ifndef rtc_time_to_tm +#define rtc_time_to_tm rtc_time64_to_tm +#endif +#define HAVE_NDO_TX_TIMEOUT_TXQ + +/*****************************************************************************/ +#define SUPPORTED_COALESCE_PARAMS + +#ifndef pci_cleanup_aer_uncorrect_error_status +#define pci_cleanup_aer_uncorrect_error_status pci_aer_clear_nonfatal_status +#endif + +/* ************************************************************************ */ +#define HAVE_XDP_FRAME_SZ + +/* ************************************************************************ */ +#define HAVE_ENCAPSULATION_TSO +#define HAVE_ENCAPSULATION_CSUM + +/* ************************************************************************ */ +#define HAVE_BFP_WARN_NETDEV_PARAM +#define USE_OLD_PCI_FUNCTION +#define CLASS_CREATE_WITH_ONE_PARAM +#define NEED_NETIF_NAPI_ADD_NO_WEIGHT +#define HAS_DEVLINK_ALLOC_SETS_DEV +#define NO_DEVLINK_REGISTER_SETS_DEV +#define DEVLINK_REGISTER_RETURN_VOID +#define devlink_params_publish(x) do {} 
while (0) +#define devlink_params_unpublish(x) do {} while (0) + +#ifndef eth_zero_addr +static inline void __kc_eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, ETH_ALEN); +} + +#define eth_zero_addr(_addr) __kc_eth_zero_addr(_addr) +#endif + +#ifndef netdev_hw_addr_list_for_each +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) +#endif + +#define spin_lock_deinit(lock) + +#define destroy_work(work) + +#ifndef HAVE_TIMER_SETUP +void initialize_timer(const void *adapter_hdl, struct timer_list *timer); +#endif + +#define nicif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) + +#define destroy_completion(completion) +#define sema_deinit(lock) +#define mutex_deinit(lock) +#define rwlock_deinit(lock) + +#define tasklet_state(tasklet) ((tasklet)->state) + +#ifndef hash_init +#define HASH_SIZE(name) (ARRAY_SIZE(name)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) +#endif + +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF sizeof_field +#endif + +#ifndef HAVE_TX_TIMEOUT_TXQUEUE +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif + +#define HAS_ETHTOOL_SUPPORTED_COALESCE_PARAMS +#define SSSNIC_SUPPORTED_COALESCE_PARAMS \ + (ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS | \ +ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | \ +ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ +ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ +ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \ +ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH) + +#ifndef DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS +#define DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS +#endif + +#if IS_BUILTIN(CONFIG_NET_DEVLINK) +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#endif +#endif + +#endif +/* ************************************************************************ */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h b/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h new file mode 100644 index 0000000000000..9a2bf99f0b3c3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_H +#define SSS_HW_H + +#include "sss_hw_aeq.h" +#include "sss_hw_ceq.h" +#include "sss_hw_ctrlq.h" +#include "sss_hw_common.h" +#include 
"sss_hw_event.h" +#include "sss_hw_export.h" +#include "sss_hw_irq.h" +#include "sss_hw_mbx.h" +#include "sss_hw_mbx_msg.h" +#include "sss_hw_mgmt.h" +#include "sss_hw_sriov.h" +#include "sss_hw_statistics.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_wq.h" +#include "sss_hwif_export.h" + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h b/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h new file mode 100644 index 0000000000000..19b2aa3b7fa3e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_KERNEL_H +#define SSS_KERNEL_H + +#include "sss_linux_kernel.h" + +#define sdk_err(dev, format, ...) dev_err(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) dev_warn(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) dev_notice(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) dev_info(dev, "[BASE]" format, ##__VA_ARGS__) + +#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) 
dev_info(dev, "[NIC]" format, ##__VA_ARGS__) + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN +#define USEC_PER_MSEC 1000L +#define MSEC_PER_SEC 1000L + +#endif /* OSSL_KNL_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h b/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h new file mode 100644 index 0000000000000..48a2937b3bbd2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_COMM_H +#define SSS_TOOL_COMM_H + +#define tool_err(format, ...) pr_err(format, ##__VA_ARGS__) +#define tool_warn(format, ...) pr_warn(format, ##__VA_ARGS__) +#define tool_info(format, ...) pr_info(format, ##__VA_ARGS__) + +#define SSS_TOOL_SHOW_ITEM_LEN 32 + +#define SSS_TOOL_VERSION_INFO_LEN 128 + +#define SSS_TOOL_EPERM 1 /* Operation not permitted */ +#define SSS_TOOL_EIO 2 /* I/O error */ +#define SSS_TOOL_EINVAL 3 /* Invalid argument */ +#define SSS_TOOL_EBUSY 4 /* Device or resource busy */ +#define SSS_TOOL_EOPNOTSUPP 0xFF /* Operation not supported */ + +enum sss_tool_driver_cmd_type { + SSS_TOOL_GET_TX_INFO = 1, + SSS_TOOL_GET_Q_NUM, + SSS_TOOL_GET_TX_WQE_INFO, + SSS_TOOL_TX_MAPPING, + SSS_TOOL_GET_RX_INFO, + SSS_TOOL_GET_RX_WQE_INFO, + SSS_TOOL_GET_RX_CQE_INFO, + SSS_TOOL_UPRINT_FUNC_EN, + SSS_TOOL_UPRINT_FUNC_RESET, + SSS_TOOL_UPRINT_SET_PATH, + SSS_TOOL_UPRINT_GET_STATISTICS, + SSS_TOOL_FUNC_TYPE, + SSS_TOOL_GET_FUNC_IDX, + SSS_TOOL_GET_INTER_NUM, + SSS_TOOL_CLOSE_TX_STREAM, + SSS_TOOL_GET_DRV_VERSION, + SSS_TOOL_CLEAR_FUNC_STATS, + SSS_TOOL_GET_HW_STATS, + SSS_TOOL_CLEAR_HW_STATS, + SSS_TOOL_GET_SELF_TEST_RES, + SSS_TOOL_GET_CHIP_FAULT_STATS, + SSS_TOOL_NIC_RSVD1, + SSS_TOOL_NIC_RSVD2, + SSS_TOOL_NIC_RSVD3, + 
SSS_TOOL_GET_CHIP_ID, + SSS_TOOL_GET_SINGLE_CARD_INFO, + SSS_TOOL_GET_FIRMWARE_ACTIVE_STATUS, + SSS_TOOL_ROCE_DFX_FUNC, + SSS_TOOL_GET_DEVICE_ID, + SSS_TOOL_GET_PF_DEV_INFO, + SSS_TOOL_CMD_FREE_MEM, + SSS_TOOL_GET_LOOPBACK_MODE = 32, + SSS_TOOL_SET_LOOPBACK_MODE, + SSS_TOOL_SET_LINK_MODE, + SSS_TOOL_SET_PF_BW_LIMIT, + SSS_TOOL_GET_PF_BW_LIMIT, + SSS_TOOL_ROCE_CMD, + SSS_TOOL_GET_POLL_WEIGHT, + SSS_TOOL_SET_POLL_WEIGHT, + SSS_TOOL_GET_HOMOLOGUE, + SSS_TOOL_SET_HOMOLOGUE, + SSS_TOOL_GET_SSET_COUNT, + SSS_TOOL_GET_SSET_ITEMS, + SSS_TOOL_IS_DRV_IN_VM, + SSS_TOOL_LRO_ADPT_MGMT, + SSS_TOOL_SET_INTER_COAL_PARAM, + SSS_TOOL_GET_INTER_COAL_PARAM, + SSS_TOOL_GET_CHIP_INFO, + SSS_TOOL_GET_NIC_STATS_LEN, + SSS_TOOL_GET_NIC_STATS_STRING, + SSS_TOOL_GET_NIC_STATS_INFO, + SSS_TOOL_GET_PF_ID, + SSS_TOOL_NIC_RSVD4, + SSS_TOOL_NIC_RSVD5, + SSS_TOOL_DCB_QOS_INFO, + SSS_TOOL_DCB_PFC_STATE, + SSS_TOOL_DCB_ETS_STATE, + SSS_TOOL_DCB_STATE, + SSS_TOOL_QOS_DEV, + SSS_TOOL_GET_QOS_COS, + SSS_TOOL_GET_ULD_DEV_NAME, + SSS_TOOL_GET_TX_TIMEOUT, + SSS_TOOL_SET_TX_TIMEOUT, + + SSS_TOOL_RSS_CFG = 0x40, + SSS_TOOL_RSS_INDIR, + SSS_TOOL_PORT_ID, + + SSS_TOOL_GET_FUNC_CAP = 0x50, + SSS_TOOL_GET_XSFP_PRESENT = 0x51, + SSS_TOOL_GET_XSFP_INFO = 0x52, + SSS_TOOL_DEV_NAME_TEST = 0x53, + + SSS_TOOL_GET_WIN_STAT = 0x60, + SSS_TOOL_WIN_CSR_READ = 0x61, + SSS_TOOL_WIN_CSR_WRITE = 0x62, + SSS_TOOL_WIN_API_CMD_RD = 0x63, + + SSS_TOOL_VM_COMPAT_TEST = 0xFF +}; + +struct sss_tool_show_item { + char name[SSS_TOOL_SHOW_ITEM_LEN]; + u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ + u8 rsvd[7]; + u64 value; +}; + +struct sss_tool_drv_version_info { + char ver[SSS_TOOL_VERSION_INFO_LEN]; +}; + +#endif /* _SSS_NIC_MT_H_ */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_version.h b/drivers/net/ethernet/3snic/sssnic/include/sss_version.h new file mode 100644 index 0000000000000..6b6edef780d97 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_version.h @@ -0,0 +1,9 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_VERSION_H +#define SSS_VERSION_H + +#define SSS_VERSION_STR "1.1.0.0" + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/Makefile b/drivers/net/ethernet/3snic/sssnic/nic/Makefile new file mode 100644 index 0000000000000..453333021a2f2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/Makefile @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023 3SNIC +# + +SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S) +ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\" + +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic/tool +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/tool +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/include + +ccflags-y += -Werror + +obj-$(CONFIG_SSSNIC) += sssnic.o +sssnic-y := sss_nic_main.o \ + sss_nic_tx.o \ + sss_nic_tx_init.o \ + sss_nic_rx.o \ + sss_nic_rx_init.o \ + sss_nic_rx_reset.o \ + sss_nic_rss.o \ + sss_nic_ntuple.o \ + sss_nic_dcb.o \ + sss_nic_ethtool.o \ + sss_nic_ethtool_api.o \ + sss_nic_ethtool_stats.o \ + sss_nic_ethtool_stats_api.o \ + sss_nic_irq.o \ + sss_nic_filter.o \ + sss_nic_netdev_ops.o \ + sss_nic_cfg.o \ + sss_nic_mag_cfg.o \ + sss_nic_vf_cfg.o \ + sss_nic_rss_cfg.o \ + sss_nic_event.o \ + sss_nic_io.o \ + 
sss_nic_netdev_ops_api.o \ + ./tool/sss_tool_nic_func.o \ + ./tool/sss_tool_nic_dcb.o \ + ./tool/sss_tool_nic_phy_attr.o \ + ./tool/sss_tool_nic_qp_info.o \ + ./tool/sss_tool_nic_stats.o \ + ../hw/sss_hw_main.o \ + ../hw/sss_pci.o \ + ../hw/sss_pci_probe.o \ + ../hw/sss_pci_remove.o \ + ../hw/sss_pci_shutdown.o \ + ../hw/sss_pci_error.o \ + ../hw/sss_pci_sriov.o \ + ../hw/sss_pci_global.o \ + ../hw/sss_hwdev_api.o \ + ../hw/sss_hwdev_cap.o \ + ../hw/sss_hwdev_export.o \ + ../hw/sss_hwdev_link.o \ + ../hw/sss_hwdev_init.o \ + ../hw/sss_hwdev_mgmt_info.o \ + ../hw/sss_hwdev_mgmt_channel.o \ + ../hw/sss_hwdev_io_flush.o \ + ../hw/sss_hwif_ctrlq.o \ + ../hw/sss_hwif_ctrlq_init.o \ + ../hw/sss_hwif_ctrlq_export.o \ + ../hw/sss_hwif_mbx.o \ + ../hw/sss_hwif_mbx_init.o \ + ../hw/sss_hwif_mbx_export.o \ + ../hw/sss_hwif_adm.o \ + ../hw/sss_hwif_adm_init.o \ + ../hw/sss_hwif_init.o \ + ../hw/sss_hwif_api.o \ + ../hw/sss_hwif_export.o \ + ../hw/sss_hwif_eq.o \ + ../hw/sss_hwif_mgmt_init.o \ + ../hw/sss_hwif_irq.o \ + ../hw/sss_hwif_aeq.o \ + ../hw/sss_common.o \ + ../hw/sss_wq.o \ + ../hw/sss_hwif_ceq.o \ + ../hw/sss_adapter_mgmt.o \ + ../hw/tool/sss_tool_main.o \ + ../hw/tool/sss_tool_chip.o \ + ../hw/tool/sss_tool_sdk.o \ + ../hw/tool/sss_tool_sm.o diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h new file mode 100644 index 0000000000000..21b4612f06860 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_DEFINE_H +#define SSS_NIC_CFG_DEFINE_H + +#include "sss_hw_mbx_msg.h" +#include "sss_nic_cfg_mag_define.h" +#include "sss_nic_cfg_vf_define.h" +#include "sss_nic_cfg_rss_define.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_tcam_define.h" + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif + +#define 
SSSNIC_MBX_OPCODE_SET 1 +#define SSSNIC_MBX_OPCODE_GET 0 + +#define SSSNIC_MBX_OPCODE_ADD 1 +#define SSSNIC_MBX_OPCODE_DEL 0 + +#ifndef BIT +#define BIT(n) (1UL << (n)) +#endif + +#define SSSNIC_MIN_MTU_SIZE 256 + +#define SSSNIC_MAX_JUMBO_FRAME_SIZE 9600 + +#define SSSNIC_PF_SET_VF_ALREADY 0x4 + +#define SSSNIC_LOWEST_LATENCY 1 + +#define SSSNIC_MAX_FEATURE_QWORD 4 + +#define SSSNIC_MBX_OPCODE_GET_DCB_STATE 0 +#define SSSNIC_MBX_OPCODE_SET_DCB_STATE 1 +#define SSSNIC_DCB_STATE_DISABLE 0 +#define SSSNIC_DCB_STATE_ENABLE 1 + +#define SSSNIC_STD_SFP_INFO_MAX_SIZE 640 + +#define SSSNIC_BIOS_SIGNATURE 0x1923E518 +#define SSSNIC_BIOS_FUN_VALID 1 +#define SSSNIC_BIOS_FUN_INVALID 0 + +enum sss_nic_func_tbl_cfg_type { + SSSNIC_FUNC_CFG_TYPE_INIT, + SSSNIC_FUNC_CFG_TYPE_RX_BUF_SIZE, + SSSNIC_FUNC_CFG_TYPE_MTU, +}; + +enum sss_nic_feature_cap { + SSSNIC_F_CSUM = BIT(0), + SSSNIC_F_SCTP_CRC = BIT(1), + SSSNIC_F_TSO = BIT(2), + SSSNIC_F_LRO = BIT(3), + SSSNIC_F_UFO = BIT(4), + SSSNIC_F_RSS = BIT(5), + SSSNIC_F_RX_VLAN_FILTER = BIT(6), + SSSNIC_F_RX_VLAN_STRIP = BIT(7), + SSSNIC_F_TX_VLAN_INSERT = BIT(8), + SSSNIC_F_VXLAN_OFFLOAD = BIT(9), + SSSNIC_F_IPSEC_OFFLOAD = BIT(10), + SSSNIC_F_FDIR = BIT(11), + SSSNIC_F_PROMISC = BIT(12), + SSSNIC_F_ALLMULTI = BIT(13), + SSSNIC_F_XSFP_REPORT = BIT(14), + SSSNIC_F_VF_MAC = BIT(15), + SSSNIC_F_RATE_LIMIT = BIT(16), + SSSNIC_F_RXQ_RECOVERY = BIT(17), +}; + +/* BIOS CONF */ +enum { + SSSNIC_NVM_PF_SPEED_LIMIT = BIT(6), +}; + +/* Commands between NIC to MPU */ +enum sss_nic_mbx_opcode { + SSSNIC_MBX_OPCODE_VF_REGISTER = 0, /* only for PFD and VFD */ + + /* FUNC CFG */ + SSSNIC_MBX_OPCODE_SET_FUNC_TBL = 5, + SSSNIC_MBX_OPCODE_SET_VPORT_ENABLE, + SSSNIC_MBX_OPCODE_SET_RX_MODE, + SSSNIC_MBX_OPCODE_SQ_CI_ATTR_SET, + SSSNIC_MBX_OPCODE_GET_VPORT_STAT, + SSSNIC_MBX_OPCODE_CLEAN_VPORT_STAT, + SSSNIC_MBX_OPCODE_CLEAR_QP_RESOURCE, + SSSNIC_MBX_OPCODE_CFG_FLEX_QUEUE, + /* LRO CFG */ + SSSNIC_MBX_OPCODE_CFG_RX_LRO, + SSSNIC_MBX_OPCODE_CFG_LRO_TIMER, + 
SSSNIC_MBX_OPCODE_FEATURE_NEGO, + SSSNIC_MBX_OPCODE_CFG_LOCAL_LRO_STATE, + + SSSNIC_MBX_OPCODE_CACHE_OUT_QP_RES, + /* MAC & VLAN CFG */ + SSSNIC_MBX_OPCODE_GET_MAC = 20, + SSSNIC_MBX_OPCODE_SET_MAC, + SSSNIC_MBX_OPCODE_DEL_MAC, + SSSNIC_MBX_OPCODE_UPDATE_MAC, + SSSNIC_MBX_OPCODE_GET_ALL_DEFAULT_MAC, + + SSSNIC_MBX_OPCODE_CFG_FUNC_VLAN, + SSSNIC_MBX_OPCODE_SET_VLAN_FILTER_EN, + SSSNIC_MBX_OPCODE_SET_RX_VLAN_OFFLOAD, + SSSNIC_MBX_OPCODE_SMAC_CHECK_STATE, + + /* SR-IOV */ + SSSNIC_MBX_OPCODE_CFG_VF_VLAN = 40, + SSSNIC_MBX_OPCODE_SET_SPOOPCHK_STATE, + /* RATE LIMIT */ + SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + + /* RSS CFG */ + SSSNIC_MBX_OPCODE_RSS_CFG = 60, + SSSNIC_MBX_OPCODE_RSS_TEMP_MGR, + SSSNIC_MBX_OPCODE_GET_RSS_CTX_TBL, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_KEY, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_ENGINE, + SSSNIC_MBX_OPCODE_SET_RSS_CTX_TBL_INTO_FUNC, + + /* IP checksum error packets, enable rss quadruple hash */ + SSSNIC_MBX_OPCODE_IPCS_ERR_RSS_ENABLE_OP = 66, + + /* PPA/FDIR */ + SSSNIC_MBX_OPCODE_ADD_TC_FLOW = 80, + SSSNIC_MBX_OPCODE_DEL_TC_FLOW, + SSSNIC_MBX_OPCODE_GET_TC_FLOW, + SSSNIC_MBX_OPCODE_FLUSH_TCAM, + SSSNIC_MBX_OPCODE_CFG_TCAM_BLOCK, + SSSNIC_MBX_OPCODE_ENABLE_TCAM, + SSSNIC_MBX_OPCODE_GET_TCAM_BLOCK, + SSSNIC_MBX_OPCODE_CFG_PPA_TABLE_ID, + SSSNIC_MBX_OPCODE_SET_PPA_EN = 88, + SSSNIC_MBX_OPCODE_CFG_PPA_MODE, + SSSNIC_MBX_OPCODE_CFG_PPA_FLUSH, + SSSNIC_MBX_OPCODE_SET_FDIR_STATUS, + SSSNIC_MBX_OPCODE_GET_PPA_COUNTER, + + /* PORT CFG */ + SSSNIC_MBX_OPCODE_SET_PORT_ENABLE = 100, + SSSNIC_MBX_OPCODE_CFG_PAUSE_INFO, + + SSSNIC_MBX_OPCODE_SET_PORT_CAR, + SSSNIC_MBX_OPCODE_SET_ER_DROP_PKT, + + SSSNIC_MBX_OPCODE_GET_VF_COS, + SSSNIC_MBX_OPCODE_SETUP_COS_MAPPING, + SSSNIC_MBX_OPCODE_SET_ETS, + SSSNIC_MBX_OPCODE_SET_PFC, + SSSNIC_MBX_OPCODE_QOS_ETS, + SSSNIC_MBX_OPCODE_QOS_PFC, + SSSNIC_MBX_OPCODE_QOS_DCB_STATE, + SSSNIC_MBX_OPCODE_QOS_PORT_CFG, + SSSNIC_MBX_OPCODE_QOS_MAP_CFG, + SSSNIC_MBX_OPCODE_FORCE_PKT_DROP, + SSSNIC_MBX_OPCODE_TX_PAUSE_EXCP_NOTICE = 118, + 
SSSNIC_MBX_OPCODE_INQUIRT_PAUSE_CFG = 119, + + /* MISC */ + SSSNIC_MBX_OPCODE_BIOS_CFG = 120, + SSSNIC_MBX_OPCODE_SET_FIRMWARE_CUSTOM_PACKETS_MSG, + + /* BOND */ + SSSNIC_MBX_OPCODE_BOND_DEV_CREATE = 134, + SSSNIC_MBX_OPCODE_BOND_DEV_DELETE, + SSSNIC_MBX_OPCODE_BOND_DEV_OPEN_CLOSE, + SSSNIC_MBX_OPCODE_BOND_INFO_GET, + SSSNIC_MBX_OPCODE_BOND_ACTIVE_INFO_GET, + SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE, + + /* DFX */ + SSSNIC_MBX_OPCODE_GET_SM_TABLE = 140, + SSSNIC_MBX_OPCODE_RD_LINE_TBL, + + SSSNIC_MBX_OPCODE_SET_UCAPTURE_OPT = 160, + SSSNIC_MBX_OPCODE_SET_VHD_CFG, + + /* move to SSSLINK */ + SSSNIC_MBX_OPCODE_GET_PORT_STAT = 200, + SSSNIC_MBX_OPCODE_CLEAN_PORT_STAT, + SSSNIC_MBX_OPCODE_CFG_LOOPBACK_MODE, + SSSNIC_MBX_OPCODE_GET_SFP_QSFP_INFO, + SSSNIC_MBX_OPCODE_SET_SFP_STATUS, + SSSNIC_MBX_OPCODE_GET_LIGHT_MODULE_ABS, + SSSNIC_MBX_OPCODE_GET_LINK_INFO, + SSSNIC_MBX_OPCODE_CFG_AN_TYPE, + SSSNIC_MBX_OPCODE_GET_PORT_INFO, + SSSNIC_MBX_OPCODE_SET_LINK_SETTINGS, + SSSNIC_MBX_OPCODE_ACTIVATE_BIOS_LINK_CFG, + SSSNIC_MBX_OPCODE_RESTORE_LINK_CFG, + SSSNIC_MBX_OPCODE_SET_LINK_FOLLOW, + SSSNIC_MBX_OPCODE_GET_LINK_STATE, + SSSNIC_MBX_OPCODE_LINK_STATUS_REPORT, + SSSNIC_MBX_OPCODE_CABLE_PLUG_EVENT, + SSSNIC_MBX_OPCODE_LINK_ERR_EVENT, + SSSNIC_MBX_OPCODE_SET_LED_STATUS, + + SSSNIC_MBX_OPCODE_MAX = 256, +}; + +/* NIC CTRLQ MODE */ +enum sss_nic_ctrlq_opcode { + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX = 0, + SSSNIC_CTRLQ_OPCODE_CLEAN_QUEUE_CONTEXT, + SSSNIC_CTRLQ_OPCODE_ARM_SQ, + SSSNIC_CTRLQ_OPCODE_ARM_RQ, + SSSNIC_CTRLQ_OPCODE_SET_RSS_INDIR_TABLE, + SSSNIC_CTRLQ_OPCODE_SET_RSS_CONTEXT_TABLE, + SSSNIC_CTRLQ_OPCODE_GET_RSS_INDIR_TABLE, + SSSNIC_CTRLQ_OPCODE_GET_RSS_CONTEXT_TABLE, + SSSNIC_CTRLQ_OPCODE_SET_IQ_ENABLE, + SSSNIC_CTRLQ_OPCODE_SET_RQ_FLUSH = 10, + SSSNIC_CTRLQ_OPCODE_MODIFY_VLAN_CTX, + SSSNIC_CTRLQ_OPCODE_PPA_HASH_TABLE, + SSSNIC_CTRLQ_OPCODE_RXQ_INFO_GET = 13, +}; + +struct sss_nic_rq_pc_info { + u16 hw_pi; + u16 hw_ci; +}; + +struct sss_nic_rq_hw_info { + u32 func_id; + u32 
num_queues; + u32 rsvd[14]; +}; + +struct sss_nic_mbx_feature_nego { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 feature[SSSNIC_MAX_FEATURE_QWORD]; +}; + +struct sss_nic_mbx_mac_addr { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct sss_nic_mbx_mac_update { + struct sss_nic_mbx_mac_addr old_mac; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct sss_nic_mbx_vport_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u8 state; /* 0--disable, 1--enable */ + u8 rsvd2[3]; +}; + +struct sss_nic_mbx_clear_qp_resource { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_mbx_invalid_qp_cache { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_mbx_port_stats_info { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_port_stats { + u64 tx_unicast_pkts; + u64 tx_unicast_bytes; + u64 tx_multicast_pkts; + u64 tx_multicast_bytes; + u64 tx_broadcast_pkts; + u64 tx_broadcast_bytes; + + u64 rx_unicast_pkts; + u64 rx_unicast_bytes; + u64 rx_multicast_pkts; + u64 rx_multicast_bytes; + u64 rx_broadcast_pkts; + u64 rx_broadcast_bytes; + + u64 tx_discard; + u64 rx_discard; + u64 tx_err; + u64 rx_err; +}; + +struct sss_nic_mbx_port_stats { + struct sss_mgmt_msg_head head; + + u32 stats_size; + u32 rsvd1; + struct sss_nic_port_stats stats; + u64 rsvd2[6]; +}; + +struct sss_nic_func_table_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct sss_nic_mbx_set_func_table { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + + u32 cfg_bitmap; + struct sss_nic_func_table_cfg tbl_cfg; +}; + +struct sss_nic_mbx_intr_attr { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_id; + u32 l2nic_sqn; + u32 rsvd; + u64 ci_addr; +}; + +struct 
sss_nic_mbx_offload_vlan { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 vlan_offload; + u8 rsvd1[5]; +}; + +struct sss_nic_mbx_lro_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_pkt_len; /* unit is 1K */ + u8 resv2[13]; +}; + +struct sss_nic_mbx_lro_timer { + struct sss_mgmt_msg_head head; + + u8 opcode; /* 1: set timer value, 0: get timer value */ + u8 rsvd1; + u16 rsvd2; + u32 timer; +}; + +struct sss_nic_mbx_vf_vlan_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u8 qos; + u8 rsvd2[5]; +}; + +struct sss_nic_mbx_set_spoofchk { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 state; + u8 rsvd1; +}; + +struct sss_nic_mbx_tx_rate_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 min_rate; + u32 max_rate; + u8 rsvd2[8]; +}; + +struct sss_nic_mbx_attach_vf { + struct sss_mgmt_msg_head head; + + u8 op_register; /* 0 - unregister, 1 - register */ + u8 rsvd1[3]; + u32 extra_feature; + u8 rsvd2[32]; +}; + +struct sss_nic_mbx_vlan_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u16 rsvd2; +}; + +/* set vlan filter */ +struct sss_nic_mbx_vlan_filter_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 resvd[2]; + u32 vlan_filter_ctrl; /* bit0:vlan filter en; bit1:broadcast_filter_en */ +}; + +struct sss_nic_mbx_force_drop_pkt { + struct sss_mgmt_msg_head head; + + u8 port; + u8 rsvd1[3]; +}; + +struct sss_nic_mbx_set_rx_mode { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct sss_nic_mbx_rss_ctx { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 context; +}; + +struct sss_nic_mbx_rss_engine_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct sss_nic_mbx_rss_key_cfg { + struct sss_mgmt_msg_head head; + + u16 
func_id; + u8 opcode; + u8 rsvd1; + u8 key[SSSNIC_RSS_KEY_SIZE]; +}; + +struct sss_nic_mbx_rss_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 rss_en; + u8 rq_priority_number; + u8 prio_tc[SSSNIC_DCB_COS_MAX]; + u16 qp_num; + u16 rsvd1; +}; + +struct sss_nic_mbx_vf_dcb_cfg { + struct sss_mgmt_msg_head head; + + struct sss_nic_dcb_info dcb_info; +}; + +struct sss_nic_mbx_dcb_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 op_code; /* 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 0 - disable, 1 - enable dcb */ + u8 port_state; /* 0 - disable, 1 - enable dcb */ + u8 rsvd[7]; +}; + +struct sss_nic_mbx_pause_cfg { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 opcode; + u16 rsvd1; + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; + u8 rsvd2[5]; +}; + +/* pfc/pause tx abnormal */ +struct sss_nic_msg_tx_pause_info { + struct sss_mgmt_msg_head head; + + u32 tx_pause_except; /* 1: 异常,0: 正常 */ + u32 except_level; + u32 rsvd; +}; + +struct sss_nic_mbx_set_tcam_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 tcam_enable; + u8 rsvd1; + u32 rsvd2; +}; + +/* alloc tcam block output struct */ +struct sss_nic_mbx_tcam_block_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; /* func_id */ + u8 alloc_en; + u8 tcam_type; /* 0: 16 size tcam block, 1: 0 size tcam block */ + u16 tcam_block_index; + u16 mpu_alloc_block_size; +}; + +struct sss_nic_mbx_flush_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; /* func_id */ + u16 rsvd; +}; + +struct sss_nic_mbx_add_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 type; + u8 rsvd; + struct sss_nic_tcam_rule_cfg rule; +}; + +struct sss_nic_mbx_del_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 type; + u8 rsvd; + u32 index_start; + u32 index_num; +}; + +/* note:must 4 byte align */ +struct sss_nic_bios_cfg { + u32 signature; /* check flash data valid */ + u8 pxe_en; /* PXE enable: 0 - disable 1 - enable */ + u8 extend_mode; + u8 rsvd0[2]; 
+ u8 pxe_vlan_en; /* PXE VLAN enable: 0 - disable 1 - enable */ + u8 pxe_vlan_pri; /* PXE VLAN priority: 0-7 */ + u16 pxe_vlan_id; /* PXE VLAN ID 1-4094 */ + u32 service_mode; /* refer to CHIPIF_SERVICE_MODE_x macro */ + u32 pf_bw; /* PF rate,percent 0-100 */ + u8 speed; /* enum of port speed */ + u8 auto_neg; /* 0 - invalid 1 - open 2 - close */ + u8 lanes; /* lane num */ + u8 fec; /* FEC mode, refer to enum mag_cmd_port_fec */ + u8 auto_adapt; /* 0 - invalid 1 - open 2 - close */ + u8 func_valid; /* 0 - func_id is invalid,other - func_id is valid */ + u8 func_id; + u8 sriov_en; /* SRIOV-EN: 0 - invalid, 1 - open, 2 - close */ +}; + +struct sss_nic_mbx_bios_cfg { + struct sss_mgmt_msg_head head; + u32 op_code; /* Operation Code: Bit0[0: read 1:write, BIT1-6: cfg_mask */ + struct sss_nic_bios_cfg bios_cfg; +}; + +/* lacp status update */ +struct sss_nic_msg_bond_active_info { + struct sss_mgmt_msg_head head; + u32 bond_id; + u32 bon_mmi_status; /* bond link state */ + u32 active_bitmap; /* slave port state */ + + u8 rsvd[16]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h new file mode 100644 index 0000000000000..73bbeb34f6429 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h @@ -0,0 +1,460 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_MAG_DEFINE_H +#define SSS_NIC_CFG_MAG_DEFINE_H + +#include +#include +#include + +#include "sss_hw_mbx_msg.h" + +/* * + * Definition of the NIC receiving mode + */ +#define SSSNIC_RX_MODE_UC 0x01 +#define SSSNIC_RX_MODE_MC 0x02 +#define SSSNIC_RX_MODE_BC 0x04 +#define SSSNIC_RX_MODE_MC_ALL 0x08 +#define SSSNIC_RX_MODE_PROMISC 0x10 + +#define SSSNIC_RX_RATE_LOW 200000 +#define SSSNIC_RX_COAL_TIME_LOW 25 +#define SSSNIC_RX_PENDING_LIMIT_LOW 2 + +#define SSSNIC_RX_RATE_HIGH 700000 +#define 
SSSNIC_RX_COAL_TIME_HIGH 225 +#define SSSNIC_RX_PENDING_LIMIT_HIGH 8 + +#define SSSNIC_RX_RATE_THRESH 50000 +#define SSSNIC_TX_RATE_THRESH 50000 +#define SSSNIC_RX_RATE_LOW_VM 100000 +#define SSSNIC_RX_PENDING_LIMIT_HIGH_VM 87 + +#define SSSNIC_MAX_LIMIT_BW 100 + +#define SSSNIC_MAG_OPCODE_PORT_DISABLE 0x0 +#define SSSNIC_MAG_OPCODE_TX_ENABLE 0x1 +#define SSSNIC_MAG_OPCODE_RX_ENABLE 0x2 + +#define SSSNIC_XSFP_INFO_MAX_SIZE 640 + +#define SSNSIC_PORT_PRESENT 0 +#define SSNSIC_PORT_ABSENT 1 + +enum sss_nic_valid_link_settings { + SSSNIC_LINK_SET_SPEED = 0x1, + SSSNIC_LINK_SET_AUTONEG = 0x2, + SSSNIC_LINK_SET_FEC = 0x4, +}; + +enum sss_nic_link_follow_status { + SSSNIC_LINK_FOLLOW_DEFAULT, + SSSNIC_LINK_FOLLOW_PORT, + SSSNIC_LINK_FOLLOW_SEPARATE, + SSSNIC_LINK_FOLLOW_STATUS_MAX, +}; + +/* serdes/mag message cmd define */ +enum sss_nic_mag_opcode { + SSSNIC_MAG_OPCODE_SERDES_PROCESS = 0, + + /* port configure, 0-29 */ + SSSNIC_MAG_OPCODE_SET_PORT_CFG = 1, + SSSNIC_MAG_OPCODE_SET_PORT_ADAPT = 2, + SSSNIC_MAG_OPCODE_CFG_LOOPBACK_MODE = 3, + + SSSNIC_MAG_OPCODE_GET_PORT_ENABLE = 5, + SSSNIC_MAG_OPCODE_SET_PORT_ENABLE = 6, + SSSNIC_MAG_OPCODE_LINK_STATUS = 7, + SSSNIC_MAG_OPCODE_SET_LINK_FOLLOW = 8, + SSSNIC_MAG_OPCODE_SET_PMA_ENABLE = 9, + SSSNIC_MAG_OPCODE_CFG_FEC_MODE = 10, + + SSSNIC_MAG_OPCODE_CFG_AN_TYPE = 12, /* reserved for future use */ + SSSNIC_MAG_OPCODE_CFG_LINK_TIME = 13, + + /* bios link, 30-49 */ + SSSNIC_MAG_OPCODE_CFG_BIOS_LINK_CFG = 31, + SSSNIC_MAG_OPCODE_RESTORE_LINK_CFG = 32, + SSSNIC_MAG_OPCODE_ACTIVATE_BIOS_LINK_CFG = 33, + + /* LED */ + SSSNIC_MAG_OPCODE_SET_LED_CFG = 50, + + /* PHY */ + SSSNIC_MAG_OPCODE_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ + + /* sfp */ + SSSNIC_MAG_OPCODE_GET_XSFP_INFO = 60, + SSSNIC_MAG_OPCODE_SET_XSFP_ENABLE = 61, + SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT = 62, + /* sfp/qsfp single byte read/write, for equipment test */ + SSSNIC_MAG_OPCODE_SET_XSFP_RW = 63, + SSSNIC_MAG_OPCODE_CFG_XSFP_TEMPERATURE = 64, + + /* 
event 100-149 */ + SSSNIC_MAG_OPCODE_WIRE_EVENT = 100, + SSSNIC_MAG_OPCODE_LINK_ERR_EVENT = 101, + + /* DFX、Counter */ + SSSNIC_MAG_OPCODE_EVENT_PORT_INFO = 150, + SSSNIC_MAG_OPCODE_GET_PORT_STAT = 151, + SSSNIC_MAG_OPCODE_CLR_PORT_STAT = 152, + SSSNIC_MAG_OPCODE_GET_PORT_INFO = 153, + SSSNIC_MAG_OPCODE_GET_PCS_ERR_CNT = 154, + SSSNIC_MAG_OPCODE_GET_MAG_CNT = 155, + SSSNIC_MAG_OPCODE_DUMP_ANTRAIN_INFO = 156, + + SSSNIC_MAG_OPCODE_MAX = 0xFF +}; + +enum sss_nic_mag_opcode_port_speed { + SSSNIC_PORT_SPEED_NOT_SET = 0, + SSSNIC_PORT_SPEED_10MB = 1, + SSSNIC_PORT_SPEED_100MB = 2, + SSSNIC_PORT_SPEED_1GB = 3, + SSSNIC_PORT_SPEED_10GB = 4, + SSSNIC_PORT_SPEED_25GB = 5, + SSSNIC_PORT_SPEED_40GB = 6, + SSSNIC_PORT_SPEED_50GB = 7, + SSSNIC_PORT_SPEED_100GB = 8, + SSSNIC_PORT_SPEED_200GB = 9, + SSSNIC_PORT_SPEED_UNKNOWN +}; + +enum sss_nic_mag_opcode_port_an { + SSSNIC_PORT_AN_NOT_SET = 0, + SSSNIC_PORT_CFG_AN_ON = 1, + SSSNIC_PORT_CFG_AN_OFF = 2 +}; + +/* mag supported/advertised link mode bitmap */ +enum mag_cmd_link_mode { + SSSNIC_LINK_MODE_GE = 0, + SSSNIC_LINK_MODE_10GE_BASE_R = 1, + SSSNIC_LINK_MODE_25GE_BASE_R = 2, + SSSNIC_LINK_MODE_40GE_BASE_R4 = 3, + SSSNIC_LINK_MODE_50GE_BASE_R = 4, + SSSNIC_LINK_MODE_50GE_BASE_R2 = 5, + SSSNIC_LINK_MODE_100GE_BASE_R = 6, + SSSNIC_LINK_MODE_100GE_BASE_R2 = 7, + SSSNIC_LINK_MODE_100GE_BASE_R4 = 8, + SSSNIC_LINK_MODE_200GE_BASE_R2 = 9, + SSSNIC_LINK_MODE_200GE_BASE_R4 = 10, + SSSNIC_LINK_MODE_MAX_NUMBERS, + + SSSNIC_LINK_MODE_UNKNOWN = 0xFFFF +}; + +/* led type */ +enum sss_nic_mag_led_type { + SSSNIC_MAG_LED_TYPE_ALARM = 0x0, + SSSNIC_MAG_LED_TYPE_LOW_SPEED = 0x1, + SSSNIC_MAG_LED_TYPE_HIGH_SPEED = 0x2 +}; + +/* led mode */ +enum sss_nic_mag_led_mode { + SSSNIC_MAG_LED_DEFAULT = 0x0, + SSSNIC_MAG_LED_FORCE_ON = 0x1, + SSSNIC_MAG_LED_FORCE_OFF = 0x2, + SSSNIC_MAG_LED_FORCE_BLINK_1HZ = 0x3, + SSSNIC_MAG_LED_FORCE_BLINK_2HZ = 0x4, + SSSNIC_MAG_LED_FORCE_BLINK_4HZ = 0x5, + SSSNIC_MAG_LED_1HZ = 0x6, + SSSNIC_MAG_LED_2HZ = 0x7, + 
SSSNIC_MAG_LED_4HZ = 0x8 +}; + +/* xsfp wire type, refer to cmis protocol definition */ +enum sss_nic_mag_wire_type { + SSSNIC_MAG_WIRE_TYPE_UNKNOWN = 0x0, + SSSNIC_MAG_WIRE_TYPE_MM = 0x1, + SSSNIC_MAG_WIRE_TYPE_SM = 0x2, + SSSNIC_MAG_WIRE_TYPE_COPPER = 0x3, + SSSNIC_MAG_WIRE_TYPE_ACC = 0x4, + SSSNIC_MAG_WIRE_TYPE_BASET = 0x5, + SSSNIC_MAG_WIRE_TYPE_AOC = 0x40, + SSSNIC_MAG_WIRE_TYPE_ELECTRIC = 0x41, + SSSNIC_MAG_WIRE_TYPE_BACKPLANE = 0x42 +}; + +enum sss_nic_link_status { + SSSNIC_LINK_DOWN = 0, + SSSNIC_LINK_UP +}; + +struct sss_nic_link_ksettings { + u32 valid_bitmap; + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +struct sss_nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u32 supported_mode; + u32 advertised_mode; +}; + +struct sss_nic_pause_cfg { + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; +}; + +struct sss_nic_mbx_mag_set_port_cfg { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u32 config_bitmap; + u8 speed; + u8 autoneg; + u8 fec; + u8 lanes; + u8 rsvd1[20]; +}; + +struct sss_nic_mbx_get_port_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u8 wire_type; + u8 an_support; + u8 an_en; + u8 duplex; + + u8 speed; + u8 fec; + u8 lanes; + u8 rsvd1; + + u32 supported_mode; + u32 advertised_mode; + u8 rsvd2[8]; +}; + +struct sss_nic_mbx_loopback_mode { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get loopback mode 1:set loopback mode */ + u8 mode; + u8 en; /* 0:disable 1:enable */ + + u32 rsvd0[2]; +}; + +struct sss_nic_mbx_set_port_mag_state { + struct sss_mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd1[3]; +}; + +/* the physical port disable link follow only when all pf of the port are set to follow disable */ +struct 
sss_nic_mbx_set_link_follow { + struct sss_mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 follow; + u8 rsvd1[3]; +}; + +/* firmware also use this cmd report link event to driver */ +struct sss_nic_mbx_get_link_state { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:link down 1:link up */ + u8 rsvd0[2]; +}; + +/* the led is report alarm when any pf of the port is alram */ +struct sss_nic_mbx_set_led_cfg { + struct sss_mgmt_msg_head head; + + u16 function_id; + u8 type; + u8 mode; +}; + +struct sss_nic_mbx_get_xsfp_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 wire_type; + u16 out_len; + u32 rsvd; + u8 sfp_info[SSSNIC_XSFP_INFO_MAX_SIZE]; +}; + +struct sss_nic_mbx_get_xsfp_present { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +struct sss_nic_cache_port_sfp { + u8 mpu_send_sfp_info; + u8 mpu_send_sfp_abs; + u8 rsvd[2]; + struct sss_nic_mbx_get_xsfp_info std_sfp_info; + struct sss_nic_mbx_get_xsfp_present abs; +}; + +/* xsfp plug event */ +struct sss_nic_mag_wire_event { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +struct sss_nic_mag_port_stats { + u64 tx_fragment_pkts; + u64 tx_undersize_pkts; + u64 tx_undermin_pkts; + u64 tx_64_oct_pkts; + u64 tx_65_127_oct_pkts; + u64 tx_128_255_oct_pkts; + u64 tx_256_511_oct_pkts; + u64 tx_512_1023_oct_pkts; + u64 tx_1024_1518_oct_pkts; + u64 tx_1519_2047_oct_pkts; + u64 tx_2048_4095_oct_pkts; + u64 tx_4096_8191_oct_pkts; + u64 tx_8192_9216_oct_pkts; + u64 tx_9217_12287_oct_pkts; + u64 tx_12288_16383_oct_pkts; + u64 tx_1519_max_bad_pkts; + u64 tx_1519_max_good_pkts; + u64 tx_oversize_pkts; + u64 tx_jabber_pkts; + u64 tx_bad_pkts; + u64 tx_bad_octs; + u64 tx_good_pkts; + u64 tx_good_octs; + u64 tx_total_pkts; + u64 tx_total_octs; + u64 tx_uni_pkts; + u64 tx_multi_pkts; + u64 tx_broad_pkts; + 
u64 tx_pauses; + u64 tx_pfc_pkts; + u64 tx_pfc_pri0_pkts; + u64 tx_pfc_pri1_pkts; + u64 tx_pfc_pri2_pkts; + u64 tx_pfc_pri3_pkts; + u64 tx_pfc_pri4_pkts; + u64 tx_pfc_pri5_pkts; + u64 tx_pfc_pri6_pkts; + u64 tx_pfc_pri7_pkts; + u64 tx_control_pkts; + u64 tx_err_all_pkts; + u64 tx_from_app_good_pkts; + u64 tx_from_app_bad_pkts; + + u64 rx_fragment_pkts; + u64 rx_undersize_pkts; + u64 rx_undermin_pkts; + u64 rx_64_oct_pkts; + u64 rx_65_127_oct_pkts; + u64 rx_128_255_oct_pkts; + u64 rx_256_511_oct_pkts; + u64 rx_512_1023_oct_pkts; + u64 rx_1024_1518_oct_pkts; + u64 rx_1519_2047_oct_pkts; + u64 rx_2048_4095_oct_pkts; + u64 rx_4096_8191_oct_pkts; + u64 rx_8192_9216_oct_pkts; + u64 rx_9217_12287_oct_pkts; + u64 rx_12288_16383_oct_pkts; + u64 rx_1519_max_bad_pkts; + u64 rx_1519_max_good_pkts; + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_bad_pkts; + u64 rx_bad_octs; + u64 rx_good_pkts; + u64 rx_good_octs; + u64 rx_total_pkts; + u64 rx_total_octs; + u64 rx_uni_pkts; + u64 rx_multi_pkts; + u64 rx_broad_pkts; + u64 rx_pauses; + u64 rx_pfc_pkts; + u64 rx_pfc_pri0_pkts; + u64 rx_pfc_pri1_pkts; + u64 rx_pfc_pri2_pkts; + u64 rx_pfc_pri3_pkts; + u64 rx_pfc_pri4_pkts; + u64 rx_pfc_pri5_pkts; + u64 rx_pfc_pri6_pkts; + u64 rx_pfc_pri7_pkts; + u64 rx_control_pkts; + u64 rx_sym_err_pkts; + u64 rx_fcs_err_pkts; + u64 rx_send_app_good_pkts; + u64 rx_send_app_bad_pkts; + u64 rx_unfilter_pkts; +}; + +struct sss_nic_mbx_mag_port_stats_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct sss_nic_mbx_mag_port_stats { + struct sss_mgmt_msg_head head; + + struct sss_nic_mag_port_stats counter; + u64 rsvd1[15]; +}; + +struct sss_nic_mag_cfg { + struct semaphore cfg_lock; + + /* Valid when pfc is disable */ + u8 pause_set; + u8 rsvd1[3]; + struct sss_nic_pause_cfg nic_pause; + + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd2[2]; + + struct sss_nic_port_info port_info; + + /* percentage of pf link bandwidth */ + u32 pf_bw_limit; + + struct sss_nic_cache_port_sfp 
rt_cmd; + struct mutex sfp_mutex; /* mutex used for copy sfp info */ +}; + +#define SSSNIC_PF_LIMIT_BW_MAX 100 + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h new file mode 100644 index 0000000000000..adfb3eae33961 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_RSS_DEFINE_H +#define SSS_NIC_CFG_RSS_DEFINE_H + +#include + +/* rss */ +#define SSSNIC_RSS_TYPE_VALID_SHIFT 23 +#define SSSNIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define SSSNIC_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define SSSNIC_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define SSSNIC_RSS_TYPE_IPV6_SHIFT 27 +#define SSSNIC_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define SSSNIC_RSS_TYPE_IPV4_SHIFT 29 +#define SSSNIC_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define SSSNIC_RSS_TYPE_UDP_IPV4_SHIFT 31 + +#define SSSNIC_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << SSSNIC_RSS_TYPE_##member##_SHIFT) +#define SSSNIC_RSS_TYPE_GET(val, member) (((u32)(val) >> SSSNIC_RSS_TYPE_##member##_SHIFT) & 0x1) + +#define SSSNIC_RSS_KEY_RSV_NUM 2 + +#define SSSNIC_RSS_INDIR_SIZE 256 +#define SSSNIC_RSS_KEY_SIZE 40 + +enum sss_nic_rss_hash_engine_type { + SSSNIC_RSS_ENGINE_XOR = 0, + SSSNIC_RSS_ENGINE_TOEP, + SSSNIC_RSS_ENGINE_MAX, +}; + +struct sss_nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +/* rss */ +struct sss_nic_rss_indirect_table { + u32 rsvd[4]; /* Make sure that 16B beyond entry[] */ + u16 entry[SSSNIC_RSS_INDIR_SIZE]; +}; + +struct sss_nic_rss_ctx_table { + u32 rsvd[4]; + u32 ctx; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h new file mode 100644 index 
0000000000000..b9aaa38104a00 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_VF_DEFINE_H +#define SSS_NIC_CFG_VF_DEFINE_H + +#include + +#define SSSNIC_OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define SSSNIC_HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define SSSNIC_VLAN_PRIORITY_SHIFT 13 + +#define SSSNIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xFFFF + +#define SSSNIC_GET_VLAN_PRIO(vlan, qos) \ + ((u16)((vlan) | ((qos) << SSSNIC_VLAN_PRIORITY_SHIFT))) + +struct sss_nic_vlan_ctx { + u32 func_id; + u32 qid; /* if qid = 0xFFFF, config current function all queue */ + u32 tag; + u32 mode; + u32 sel; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h new file mode 100644 index 0000000000000..3924d9f9b8eea --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_COMMON_H +#define SSS_NIC_COMMON_H + +#include + +#include "sss_kernel.h" +#include "sss_version.h" + +#define SSSNIC_DRV_NAME "sssnic" +#define SSSNIC_DRV_VERSION SSS_VERSION_STR + +#define SSSNIC_FUNC_IS_VF(hwdev) (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + +#define SSSNIC_MODERATONE_DELAY HZ + +#define SSSNIC_LP_PKT_CNT 64 + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h new file mode 100644 index 0000000000000..946928c7199de --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DCB_DEFINE_H +#define SSS_NIC_DCB_DEFINE_H + 
+#include + +#define SSSNIC_PCP_UP_MAX 8 +#define SSSNIC_DSCP_MAC_UP 64 + +/* IEEE 802.1Qaz std */ +#define SSSNIC_DCB_COS_MAX 0x8 +#define SSSNIC_DCB_UP_MAX 0x8 +#define SSSNIC_DCB_TC_MAX 0x8 +#define SSSNIC_DCB_PG_MAX 0x8 +#define SSSNIC_DCB_TSA_SP 0x0 +#define SSSNIC_DCB_TSA_CBS 0x1 +#define SSSNIC_DCB_TSA_ETS 0x2 +#define SSSNIC_DCB_DSCP_NUM 0x8 +#define SSSNIC_DCB_IP_PRI_MAX 0x40 + +#define SSSNIC_DCB_PRIO_DWRR 0x0 +#define SSSNIC_DCB_PRIO_STRICT 0x1 + +#define SSSNIC_DCB_MAX_PFC_NUM 0x4 + +struct sss_nic_dcb_config { + u8 trust; /* pcp, dscp */ + u8 default_cos; + u8 pcp_user_cos_num; + u8 pcp_valid_cos_map; + u8 dscp_user_cos_num; + u8 dscp_valid_cos_map; + u8 pcp2cos[SSSNIC_PCP_UP_MAX]; + u8 dscp2cos[SSSNIC_DSCP_MAC_UP]; + + u8 cos_qp_offset[SSSNIC_DCB_COS_MAX]; + u8 cos_qp_num[SSSNIC_DCB_COS_MAX]; +}; + +struct sss_nic_dcb_info { + u8 dcb_on; + u8 default_cos; + u8 trust; + u8 rsvd1; + u8 pcp2cos[SSSNIC_DCB_UP_MAX]; + u8 dscp2cos[64]; + u32 rsvd2[7]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h new file mode 100644 index 0000000000000..adf6b92b96168 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DEV_DEFINE_H +#define SSS_NIC_DEV_DEFINE_H + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_nic_common.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_tx_define.h" +#include "sss_nic_rx_define.h" +#include "sss_nic_irq_define.h" +#include "sss_nic_tcam_define.h" + +enum sss_nic_flags { + SSSNIC_INTF_UP, + SSSNIC_MAC_FILTER_CHANGED, + SSSNIC_LP_TEST, + SSSNIC_RSS_ENABLE, + SSSNIC_DCB_ENABLE, + 
SSSNIC_SAME_RXTX, + SSSNIC_INTR_ADAPT, + SSSNIC_UPDATE_MAC_FILTER, + SSSNIC_CHANGE_RES_INVALID, + SSSNIC_RSS_DEFAULT_INDIR, + SSSNIC_FORCE_LINK_UP, + SSSNIC_BONDING_MASTER, + SSSNIC_AUTONEG_RESET, + SSSNIC_RXQ_RECOVERY, +}; + +enum sss_nic_event_flags { + SSSNIC_EVENT_TX_TIMEOUT, +}; + +struct sss_nic_tx_stats { + u64 tx_timeout; + + /* Subdivision statistics show in private tool */ + u64 tx_drop; + u64 tx_invalid_qid; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_qp_resource { + u16 qp_num; + u8 cos_num; + u8 rsvd1; + u32 sq_depth; + u32 rq_depth; + + struct sss_nic_sq_resource *sq_res_group; + struct sss_nic_rq_resource *rq_res_group; + struct sss_nic_irq_cfg *irq_cfg; +}; + +struct sss_nic_rx_rule { + struct list_head rule_list; + int rule_cnt; +}; + +struct sss_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + struct sss_hal_dev *uld_dev; + void *hwdev; + void *dev_hdl; + struct sss_nic_io *nic_io; + + int poll_budget; + + u32 msg_enable; + + unsigned long flags; + unsigned long event_flag; + unsigned long dcb_flags; + unsigned long rx_mode; + + u32 rx_poll_wqe; + + u32 rx_dma_buff_size; + u16 rx_buff_len; + + u16 max_qp_num; + + u32 page_order; + + /* Rss related varibles */ + u8 rss_hash_engine; + u8 rsvd1[3]; + u8 *rss_key; + u32 *rss_key_big; /* hkey in big endian */ + u32 *rss_indir_tbl; + struct sss_nic_rss_type rss_type; + + u8 max_cos_num; + u8 dft_func_cos_bitmap; + u16 dft_port_cos_bitmap; + + int disable_port_cnt; + + unsigned long last_jiffies; + + u32 use_adaptive_rx_coalesce; + u32 rsvd2; + + struct sss_nic_intr_coal_info *coal_info; + struct workqueue_struct *workq; + + int netdev_uc_cnt; + int netdev_mc_cnt; + + int loop_test_rx_cnt; + int loop_pkt_len; + u8 *loop_test_rx_buf; + + struct sss_irq_desc *irq_desc_group; + u16 irq_desc_num; + + u8 link_status; + + u8 rsvd3; + + u32 get_rq_fail_cnt; + + struct 
sss_nic_tx_stats tx_stats; + + struct sss_nic_sq_desc *sq_desc_group; + struct sss_nic_rq_desc *rq_desc_group; + + struct sss_nic_qp_resource qp_res; + + struct delayed_work routine_work; + struct delayed_work rq_watchdog_work; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + + unsigned long *vlan_bitmap; +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; +#endif + + /* lock for qp_res,qp_info access */ + struct mutex qp_mutex; + struct semaphore port_sem; + + struct work_struct rx_mode_work; + + struct delayed_work moderation_task; + + struct sss_nic_dcb_config hw_dcb_cfg; + struct sss_nic_dcb_config backup_dcb_cfg; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + + struct sss_nic_tcam_info tcam_info; + struct sss_nic_rx_rule rx_rule; + + struct sss_nic_service_cap nic_svc_cap; + +}; + +#define SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, flag) \ + test_bit(flag, &(nic_dev)->flags) +#define SSSNIC_SET_NIC_DEV_FLAG(nic_dev, flag) \ + set_bit(flag, &(nic_dev)->flags) +#define SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, flag) \ + clear_bit(flag, &(nic_dev)->flags) +#define SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, flag) \ + test_and_clear_bit(flag, &(nic_dev)->flags) +#define SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, flag) \ + test_and_set_bit(flag, &(nic_dev)->flags) + +#ifdef HAVE_XDP_SUPPORT +#define SSSNIC_IS_XDP_ENABLE(nic_dev) (!!(nic_dev)->xdp_prog) +#endif + +#define SSS_CHANNEL_RES_VALID(nic_dev) \ + (test_bit(SSSNIC_INTF_UP, &(nic_dev)->flags) && \ + !test_bit(SSSNIC_CHANGE_RES_INVALID, &(nic_dev)->flags)) + +#define SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) +#define SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev) (SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) +#define SSSNIC_VLAN_NUM_BITMAP(nic_dev) (VLAN_N_VID / \ + SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev)) +#define SSSNIC_VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev)) +#define SSSNIC_VID_LINE(nic_dev, vid) ((vid) / 
SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev)) +#define SSSNIC_VID_COL(nic_dev, vid) ((vid) & (SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev) - 1)) +#define SSSNIC_TEST_VLAN_BIT(nic_dev, vid) \ + ((nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)] & \ + (1UL << SSSNIC_VID_COL(nic_dev, vid))) + +#define SSSNIC_SET_VLAN_BITMAP(nic_dev, vid) \ + set_bit(SSSNIC_VID_COL(nic_dev, vid), \ + &(nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)]) +#define SSSNIC_CLEAR_VLAN_BITMAP(nic_dev, vid) \ + clear_bit(SSSNIC_VID_COL(nic_dev, vid), \ + &(nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)]) + +#define SSSNIC_SET_NIC_EVENT_FLAG(nic_dev, flag) \ + set_bit(flag, &(nic_dev)->event_flag) + +#define SSSNIC_TEST_CLEAR_NIC_EVENT_FLAG(nic_dev, flag) \ + test_and_clear_bit(flag, &(nic_dev)->event_flag) + +#define SSSNIC_STATS_TX_TIMEOUT_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_timeout++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define SSSNIC_STATS_TX_DROP_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_drop++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define SSSNIC_STATS_TX_INVALID_QID_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_invalid_qid++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define sssnic_msg(level, nic_dev, msglvl, format, arg...) \ +do { \ + if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ + format, ## arg); \ + else \ + nic_##level(&(nic_dev)->pdev->dev, \ + format, ## arg); \ +} while (0) + +#define sss_nic_info(nic_dev, msglvl, format, arg...) 
\ + sssnic_msg(info, nic_dev, msglvl, format, ## arg) + +#define sss_nic_warn(nic_dev, msglvl, format, arg...) \ + sssnic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define sss_nic_err(nic_dev, msglvl, format, arg...) \ + sssnic_msg(err, nic_dev, msglvl, format, ## arg) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h new file mode 100644 index 0000000000000..32eccbe831b1a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IO_DEFINE_H +#define SSS_NIC_IO_DEFINE_H + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_wq.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_cfg_mag_define.h" + +struct sss_nic_vf_info { + u8 user_mac[ETH_ALEN]; + u8 drv_mac[ETH_ALEN]; + u16 qp_num; + u16 pf_vlan; + + u8 pf_qos; + u8 rsvd0[3]; + u32 extra_feature; + + u32 min_rate; + u32 max_rate; + + u8 specified_mac; + u8 attach; + u8 trust; + u8 spoofchk; + u8 link_forced; + u8 link_up; /* only valid if VF link is forced */ + u8 rsvd1[2]; +}; + +struct sss_nic_io_queue { + struct sss_wq wq; + union { + u8 wqe_type; /* for rq */ + u8 owner; /* for sq */ + }; + u8 rsvd1; + u16 rsvd2; + + u16 qid; + u16 msix_id; + + u8 __iomem *db_addr; + + union { + struct { + void *ci_addr; + } tx; + + struct { + u16 *pi_vaddr; + dma_addr_t pi_daddr; + } rx; + }; +} ____cacheline_aligned; + +struct sss_nic_io { + void *hwdev; + void *pcidev_hdl; + void *dev_hdl; + void *nic_dev; + + struct sss_nic_io_queue *sq_group; + struct sss_nic_io_queue *rq_group; + + u16 active_qp_num; + u16 max_qp_num; + + u8 link_status; + u8 rsvd1[3]; + + void *ci_base_vaddr; + dma_addr_t ci_base_daddr; + + u8 __iomem *sq_db_addr; + u8 __iomem *rq_db_addr; + + u16 rx_buff_len; + u16 max_vf_num; + + struct sss_nic_vf_info 
*vf_info_group; + + u64 feature_cap; + + struct sss_nic_dcb_info dcb_info; + + struct sss_nic_mag_cfg mag_cfg; +}; + +struct sss_nic_qp_info { + u16 qp_num; + u8 resvd[6]; + + u32 sq_depth; + u32 rq_depth; + + struct sss_nic_io_queue *sq_group; + struct sss_nic_io_queue *rq_group; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h new file mode 100644 index 0000000000000..b6c44d40a22d2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IRQ_DEFINE_H +#define SSS_NIC_IRQ_DEFINE_H + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" + +struct sss_nic_irq_cfg { + struct net_device *netdev; + u16 msix_id; /* PCIe MSIX id */ + u16 rsvd1; + u32 irq_id; /* OS IRQ id */ + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + void *sq; + void *rq; +}; + +struct sss_nic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer; + u8 resend_timer; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; + + u8 user_set_intr_coal_flag; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h new file mode 100644 index 0000000000000..9da431372bbf3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_QP_DEFINE_H +#define SSS_NIC_QP_DEFINE_H + +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" + +struct sss_nic_cqe { + u32 state; + u32 vlan_len; + + u32 offload_type; + u32 hash; + u32 xid; + u32 
decrypt_desc; + u32 rsvd6; + u32 pkt_desc; +}; + +struct sss_nic_normal_rqe { + u32 bd_hi_addr; + u32 bd_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct sss_nic_sge_section { + struct sss_sge sge; + u32 rsvd; +}; + +struct sss_nic_extend_rqe { + struct sss_nic_sge_section bd_sect; + struct sss_nic_sge_section cqe_sect; +}; + +struct sss_nic_rqe { + union { + struct sss_nic_normal_rqe normal_rqe; + struct sss_nic_extend_rqe extend_rqe; + }; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h new file mode 100644 index 0000000000000..1ecd5d6409c9d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_DEFINE_H +#define SSS_NIC_RX_DEFINE_H + +#include + +#include "sss_kernel.h" +#include "sss_nic_qp_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_irq_define.h" + +struct sss_nic_rq_stats { + u64 rx_packets; + u64 rx_bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 rx_dropped; + u64 xdp_dropped; + u64 rx_buf_errors; + + u64 alloc_rx_dma_err; + u64 alloc_skb_err; + u64 reset_drop_sge; + u64 large_xdp_pkts; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_rx_desc { + dma_addr_t buf_daddr; + dma_addr_t cqe_daddr; + struct sss_nic_rqe *rqe; + struct sss_nic_cqe *cqe; + struct page *page; + u32 page_offset; +}; + +struct sss_nic_rq_desc { + struct net_device *netdev; + struct device *dev; /* device for DMA mapping */ + + u32 irq_id; + u16 msix_id; + + u16 qid; + u32 qid_mask; + u32 q_depth; + + u32 buff_size_shift; + u32 dma_buff_size; + u16 buf_len; + u16 rsvd; + + u16 backup_pi; + u16 pi; + u32 last_sw_pi; + u32 last_sw_ci; + u32 last_hw_ci; + u32 ci; + 
u16 reset_pi; + u16 reset_wqe_num; + u32 delta; + + u64 last_rx_bytes; + u64 last_rx_pkts; + u64 rx_pkts; + + unsigned long status; + + u8 last_pending_limt; + u8 last_coal_timer; + + u8 print_err_cnt; + u8 check_err_cnt; + + struct sss_nic_irq_cfg *irq_cfg; + + struct sss_nic_rq_stats stats; + + struct sss_nic_rx_desc *rx_desc_group; + struct sss_nic_io_queue *rq; + +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; +#endif + + void *cqe_vaddr; + dma_addr_t cqe_paddr; +} ____cacheline_aligned; + +struct sss_nic_rq_resource { + u16 page_num; + u8 rsvd[6]; + struct sss_nic_rx_desc *rx_desc_group; + void *cqe_vaddr; + dma_addr_t cqe_paddr; +}; + +#define SSSNIC_RQ_STATS_INC(rq_desc, field) \ +do { \ + u64_stats_update_begin(&(rq_desc)->stats.stats_sync); \ + (rq_desc)->stats.field++; \ + u64_stats_update_end(&(rq_desc)->stats.stats_sync); \ +} while (0) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h new file mode 100644 index 0000000000000..0a6dec9e9dc07 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TCAM_DEFINE_H +#define SSS_NIC_TCAM_DEFINE_H + +#include +#include + +#include "sss_kernel.h" + +#define SSSNIC_TCAM_BLOCK_SIZE 16 +#define SSSNIC_TCAM_FILTERS_MAX 512 + +#define SSSNIC_PKT_TCAM_INDEX_START(block_index) \ + (SSSNIC_TCAM_BLOCK_SIZE * (block_index)) + +#define SSSNIC_TCAM_FLOW_KEY_SIZE (44) + +#define SSSNIC_TCAM_RULE_FDIR_TYPE 0 +#define SSSNIC_TCAM_RULE_PPA_TYPE 1 + +#define SSSNIC_TCAM_BLOCK_ENABLE 1 +#define SSSNIC_TCAM_BLOCK_DISABLE 0 +#define SSSNIC_TCAM_RULES_NUM_MAX 4096 + +/* tcam block type, according to tcam block size */ +enum { + SSSNIC_TCAM_BLOCK_TYPE_LARGE = 0, /* block_size: 16 */ + SSSNIC_TCAM_BLOCK_TYPE_SMALL, /* block_size: 0 */ + 
SSSNIC_TCAM_BLOCK_TYPE_MAX +}; + +struct sss_nic_ipv4_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + u32 sipv4_h : 16; + u32 ip_type : 1; + u32 func_id : 15; + u32 dipv4_h : 16; + u32 sipv4_l : 16; + u32 rsvd2 : 16; + u32 dipv4_l : 16; + u32 rsvd3; + u32 dport : 16; + u32 rsvd4 : 16; + u32 rsvd5 : 16; + u32 sport : 16; + u32 outer_sipv4_h : 16; + u32 rsvd6 : 16; + u32 outer_dipv4_h : 16; + u32 outer_sipv4_l : 16; + u32 vni_h : 16; + u32 outer_dipv4_l : 16; + u32 rsvd7 : 16; + u32 vni_l : 16; +}; + +struct sss_nic_ipv6_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + u32 sipv6_key0 : 16; + u32 ip_type : 1; + u32 func_id : 15; + u32 sipv6_key2 : 16; + u32 sipv6_key1 : 16; + u32 sipv6_key4 : 16; + u32 sipv6_key3 : 16; + u32 sipv6_key6 : 16; + u32 sipv6_key5 : 16; + u32 dport : 16; + u32 sipv6_key7 : 16; + u32 dipv6_key0 : 16; + u32 sport : 16; + u32 dipv6_key2 : 16; + u32 dipv6_key1 : 16; + u32 dipv6_key4 : 16; + u32 dipv6_key3 : 16; + u32 dipv6_key6 : 16; + u32 dipv6_key5 : 16; + u32 rsvd2 : 16; + u32 dipv6_key7 : 16; +}; + +struct sss_nic_vxlan_ipv6_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + + u32 dipv6_key0 : 16; + u32 ip_type : 1; + u32 func_id : 15; + + u32 dipv6_key2 : 16; + u32 dipv6_key1 : 16; + + u32 dipv6_key4 : 16; + u32 dipv6_key3 : 16; + + u32 dipv6_key6 : 16; + u32 dipv6_key5 : 16; + + u32 dport : 16; + u32 dipv6_key7 : 16; + + u32 rsvd2 : 16; + u32 sport : 16; + + u32 outer_sipv4_h : 16; + u32 rsvd3 : 16; + + u32 outer_dipv4_h : 16; + u32 outer_sipv4_l : 16; + + u32 vni_h : 16; + u32 outer_dipv4_l : 16; + + u32 rsvd4 : 16; + u32 vni_l : 16; +}; + +struct sss_nic_tcam_key_tag { + union { + struct sss_nic_ipv4_tcam_key key_info_ipv4; + struct sss_nic_ipv6_tcam_key key_info_ipv6; + struct sss_nic_vxlan_ipv6_tcam_key key_info_vxlan_ipv6; + }; + + union { + struct sss_nic_ipv4_tcam_key key_mask_ipv4; + struct 
sss_nic_ipv6_tcam_key key_mask_ipv6; + struct sss_nic_vxlan_ipv6_tcam_key key_mask_vxlan_ipv6; + }; +}; + +struct sss_nic_tcam_node { + struct list_head block_list; + u16 block_id; + u16 index_cnt; + u8 index_used[SSSNIC_TCAM_BLOCK_SIZE]; +}; + +struct sss_nic_tcam_node_list { + struct list_head tcam_node_list; + u16 block_cnt; +}; + +struct sss_nic_tcam_filter { + struct list_head tcam_filter_list; + u16 block_id; + u16 index; + struct sss_nic_tcam_key_tag tcam_key; + u16 qid; +}; + +/* function level struct info */ +struct sss_nic_tcam_info { + u16 tcam_rule_num; + struct list_head tcam_list; + struct sss_nic_tcam_node_list tcam_node_info; +}; + +struct sss_nic_tcam_result { + u32 qid; + u32 rsvd; +}; + +struct sss_nic_tcam_key { + u8 key_x[SSSNIC_TCAM_FLOW_KEY_SIZE]; + u8 key_y[SSSNIC_TCAM_FLOW_KEY_SIZE]; +}; + +struct sss_nic_tcam_rule_cfg { + u32 index; + struct sss_nic_tcam_result data; + struct sss_nic_tcam_key key; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h new file mode 100644 index 0000000000000..b6076c87121aa --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_DEFINE_H +#define SSS_NIC_TX_DEFINE_H + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_nic_io_define.h" + +struct sss_nic_dma_info { + dma_addr_t dma; + u32 len; +}; + +struct sss_nic_tx_desc { + struct sk_buff *skb; + + u16 wqebb_cnt; + u16 nr_frags; + + int sge_num; + u16 nr_pkt_cnt; + u16 rsvd1; + u32 rsvd2; + + u64 bytes; + struct sss_nic_dma_info *dma_group; + u64 rsvd3; +}; + +struct sss_nic_sq_stats { + u64 tx_packets; + u64 tx_bytes; + u64 tx_busy; + u64 wake; + u64 tx_dropped; + + /* Subdivision statistics show in private tool */ + u64 skb_pad_err; + u64 offload_err; + u64 dma_map_err; 
+ u64 unknown_tunnel_proto; + u64 frag_size_zero; + u64 frag_len_overflow; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_sq_desc { + struct net_device *netdev; + struct device *dev; + + struct sss_nic_sq_stats stats; + + u8 cos; + u8 rsvd1; + u16 qid; + u32 qid_mask; + u32 q_depth; + u32 rsvd2; + + struct sss_nic_tx_desc *tx_desc_group; + struct sss_nic_io_queue *sq; + + u64 last_tx_pkts; + u64 last_tx_bytes; + u64 rsvd3; +} ____cacheline_aligned; + +struct sss_nic_sq_resource { + struct sss_nic_tx_desc *tx_desc_group; + struct sss_nic_dma_info *dma_group; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c new file mode 100644 index 0000000000000..663403ff6da45 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c @@ -0,0 +1,1140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_event.h" + +#define SSSNIC_DEFAULT_MAX_MTU 0x3FFF +#define SSSNIC_VLAN_ID_MASK 0x7FFF + +#define SSSNIC_INIT_FUNC_MASK \ + (BIT(SSSNIC_FUNC_CFG_TYPE_INIT) | \ + BIT(SSSNIC_FUNC_CFG_TYPE_MTU) | \ + BIT(SSSNIC_FUNC_CFG_TYPE_RX_BUF_SIZE)) + +#define SSSNIC_MGMT_STATUS_EXIST 0x6 + +#define SSSNIC_CHECK_IPSU_15BIT 0x8000 + +#define SSSNIC_DCB_PCP 0 +#define SSSNIC_DCB_DSCP 1 + +#define SSSNIC_F_ALL_MASK 0x3FFFF /* enable all feature */ +#define SSSNIC_DRV_DEFAULT_FEATURE 
SSSNIC_F_ALL_MASK + +#define SSSNIC_UNSUPPORT_SET_PAUSE 0x10 + +#define SSSNIC_VF_SET_MAC_ALREADY(func_type, status) \ + ((func_type) == SSS_FUNC_TYPE_VF && (status) == SSSNIC_PF_SET_VF_ALREADY) + +static int sss_nic_check_mac_set_status(u32 func_type, u8 status, u16 vlan_id) +{ + if (status != 0 && status != SSSNIC_MGMT_STATUS_EXIST) { + if (!SSSNIC_VF_SET_MAC_ALREADY(func_type, status)) + return -EINVAL; + } + + if ((vlan_id & SSSNIC_CHECK_IPSU_15BIT) != 0 && status == SSSNIC_MGMT_STATUS_EXIST) { + if (!SSSNIC_VF_SET_MAC_ALREADY(func_type, status)) + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + u32 func_type; + int ret; + + if (!nic_dev || !mac_addr) + return -EINVAL; + + if ((vlan_id & SSSNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_dev->dev_hdl, "Invalid VLAN ID: %d\n", (vlan_id & SSSNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + cmd_mac.vlan_id = vlan_id; + cmd_mac.func_id = func_id; + ether_addr_copy(cmd_mac.mac, mac_addr); + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_MAC, + &cmd_mac, sizeof(cmd_mac), + &cmd_mac, &out_len, channel); + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set MAC, ret: %d, out_len: 0x%x, channel: 0x%x\n", + ret, out_len, channel); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (sss_nic_check_mac_set_status(func_type, cmd_mac.head.state, cmd_mac.vlan_id) != 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set MAC, state: 0x%x, channel: 0x%x\n", + cmd_mac.head.state, channel); + return -EIO; + } + + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, + "PF has already set VF mac, ignore it\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac.head.state == SSSNIC_MGMT_STATUS_EXIST) { + nic_warn(nic_dev->dev_hdl, "Repeat 
mac, ignore it\n"); + return 0; + } + + return 0; +} + +int sss_nic_del_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + u32 func_type; + int ret; + + if (!nic_dev || !mac_addr) + return -EINVAL; + + if ((vlan_id & SSSNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_dev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & SSSNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + cmd_mac.func_id = func_id; + cmd_mac.vlan_id = vlan_id; + ether_addr_copy(cmd_mac.mac, mac_addr); + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_DEL_MAC, + &cmd_mac, sizeof(cmd_mac), &cmd_mac, + &out_len, channel); + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to del MAC, ret: %d, out_len: 0x%x, channel: 0x%x\n", + ret, out_len, channel); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, "PF has already set VF mac\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac.head.state != 0) { + nic_err(nic_dev->dev_hdl, + "Fail to delete MAC, ret: %d, state: 0x%x, channel: 0x%x\n", + ret, cmd_mac.head.state, channel); + return -EIO; + } + + return 0; +} + +int sss_nic_update_mac(struct sss_nic_dev *nic_dev, u8 *new_mac) +{ + int ret; + u32 func_type; + struct sss_nic_mbx_mac_update cmd_mac_update = {0}; + u16 out_len = sizeof(cmd_mac_update); + + ether_addr_copy(cmd_mac_update.new_mac, new_mac); + ether_addr_copy(cmd_mac_update.old_mac.mac, nic_dev->netdev->dev_addr); + cmd_mac_update.old_mac.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_UPDATE_MAC, + &cmd_mac_update, sizeof(cmd_mac_update), + &cmd_mac_update, &out_len); + + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to update MAC, ret: %d, out_len: 
0x%x\n", ret, out_len); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (sss_nic_check_mac_set_status(func_type, cmd_mac_update.old_mac.head.state, + cmd_mac_update.old_mac.vlan_id)) { + nic_err(nic_dev->dev_hdl, + "Fail to update MAC, state: 0x%x", cmd_mac_update.old_mac.head.state); + return -EIO; + } + + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac_update.old_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, + "PF has already set VF MAC. Ignore update\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac_update.old_mac.head.state == SSSNIC_MGMT_STATUS_EXIST) + nic_warn(nic_dev->dev_hdl, + "MAC is existed. Ignore update\n"); + + return 0; +} + +int sss_nic_get_default_mac(struct sss_nic_dev *nic_dev, u8 *mac_addr) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + int ret; + + cmd_mac.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_MAC, + &cmd_mac, sizeof(cmd_mac), &cmd_mac, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_mac)) { + nic_err(nic_dev->hwdev, + "Fail to get mac, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_mac.head.state, out_len); + return -EINVAL; + } + + ether_addr_copy(mac_addr, cmd_mac.mac); + + return 0; +} + +int sss_nic_config_vlan(struct sss_nic_dev *nic_dev, u8 opcode, u16 vlan_id) +{ + struct sss_nic_mbx_vlan_cfg cmd_config_vlan = {0}; + u16 out_len = sizeof(cmd_config_vlan); + int ret; + + cmd_config_vlan.func_id = + sss_get_global_func_id(nic_dev->hwdev); + cmd_config_vlan.opcode = opcode; + cmd_config_vlan.vlan_id = vlan_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_FUNC_VLAN, + &cmd_config_vlan, sizeof(cmd_config_vlan), + &cmd_config_vlan, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_config_vlan)) { + nic_err(nic_dev->dev_hdl, + "Fail to %s vlan, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == 
SSSNIC_MBX_OPCODE_ADD ? "add" : "delete", + ret, cmd_config_vlan.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_hw_vport_state(struct sss_nic_dev *nic_dev, + u16 func_id, bool enable, u16 channel) +{ + struct sss_nic_mbx_vport_state cmd_set_vport_state = {0}; + u16 out_len = sizeof(cmd_set_vport_state); + int ret; + + cmd_set_vport_state.func_id = func_id; + cmd_set_vport_state.state = enable ? 1 : 0; + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_VPORT_ENABLE, + &cmd_set_vport_state, sizeof(cmd_set_vport_state), + &cmd_set_vport_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_vport_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set vport state, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_set_vport_state.head.state, out_len, channel); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sss_nic_set_hw_vport_state); + +int sss_nic_set_dcb_info(struct sss_nic_io *nic_io, + struct sss_nic_dcb_info *dcb_info) +{ + if (memcmp(&nic_io->dcb_info, dcb_info, sizeof(*dcb_info)) == 0) + return 0; + + memcpy(&nic_io->dcb_info, dcb_info, sizeof(*dcb_info)); + + /* notify stateful in pf, than notify all vf */ + sss_nic_notify_dcb_state_event(nic_io->hwdev, dcb_info); + + return 0; +} + +static int sss_nic_cfg_hw_pause(struct sss_nic_dev *nic_dev, + u8 opcode, struct sss_nic_pause_cfg *pause_cfg) +{ + struct sss_nic_mbx_pause_cfg cmd_pause_cfg = {0}; + u16 out_len = sizeof(cmd_pause_cfg); + int ret; + + cmd_pause_cfg.port_id = sss_get_phy_port_id(nic_dev->hwdev); + cmd_pause_cfg.opcode = opcode; + if (opcode == SSSNIC_MBX_OPCODE_SET) { + cmd_pause_cfg.auto_neg = pause_cfg->auto_neg; + cmd_pause_cfg.rx_pause = pause_cfg->rx_pause; + cmd_pause_cfg.tx_pause = pause_cfg->tx_pause; + } + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_PAUSE_INFO, + &cmd_pause_cfg, sizeof(cmd_pause_cfg), + &cmd_pause_cfg, &out_len); + if 
(SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_pause_cfg)) { + if (cmd_pause_cfg.head.state == SSSNIC_UNSUPPORT_SET_PAUSE) { + ret = -EOPNOTSUPP; + nic_err(nic_dev->dev_hdl, "Fail to set pause when pfc is enable\n"); + } else { + ret = -EFAULT; + nic_err(nic_dev->dev_hdl, + "Fail to %s pause info, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSSNIC_MBX_OPCODE_SET ? "set" : "get", + ret, cmd_pause_cfg.head.state, out_len); + } + return ret; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) { + pause_cfg->auto_neg = cmd_pause_cfg.auto_neg; + pause_cfg->rx_pause = cmd_pause_cfg.rx_pause; + pause_cfg->tx_pause = cmd_pause_cfg.tx_pause; + } + + return 0; +} + +int sss_nic_set_hw_pause_info(struct sss_nic_dev *nic_dev, + struct sss_nic_pause_cfg pause_cfg) +{ + struct sss_nic_mag_cfg *mag_cfg = NULL; + int ret; + + mag_cfg = &nic_dev->nic_io->mag_cfg; + + down(&mag_cfg->cfg_lock); + + ret = sss_nic_cfg_hw_pause(nic_dev, SSSNIC_MBX_OPCODE_SET, &pause_cfg); + if (ret != 0) { + up(&mag_cfg->cfg_lock); + return ret; + } + + mag_cfg->pfc_en = 0; + mag_cfg->pfc_bitmap = 0; + mag_cfg->pause_set = true; + mag_cfg->nic_pause.auto_neg = pause_cfg.auto_neg; + mag_cfg->nic_pause.rx_pause = pause_cfg.rx_pause; + mag_cfg->nic_pause.tx_pause = pause_cfg.tx_pause; + + up(&mag_cfg->cfg_lock); + + return 0; +} + +int sss_nic_get_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg *pause_cfg) +{ + struct sss_nic_mag_cfg *mag_cfg = NULL; + int ret = 0; + + ret = sss_nic_cfg_hw_pause(nic_dev, SSSNIC_MBX_OPCODE_GET, pause_cfg); + if (ret != 0) + return ret; + + mag_cfg = &nic_dev->nic_io->mag_cfg; + if (mag_cfg->pause_set || pause_cfg->auto_neg == SSSNIC_PORT_AN_NOT_SET) { + pause_cfg->rx_pause = mag_cfg->nic_pause.rx_pause; + pause_cfg->tx_pause = mag_cfg->nic_pause.tx_pause; + } + + return 0; +} + +int sss_nic_set_hw_dcb_state(struct sss_nic_dev *nic_dev, u8 op_code, u8 state) +{ + struct sss_nic_mbx_dcb_state cmd_dcb_state = {0}; + u16 out_len = sizeof(cmd_dcb_state); + 
int ret; + + cmd_dcb_state.state = state; + cmd_dcb_state.op_code = op_code; + cmd_dcb_state.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_QOS_DCB_STATE, + &cmd_dcb_state, sizeof(cmd_dcb_state), + &cmd_dcb_state, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_dcb_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set dcb state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_dcb_state.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +int sss_nic_clear_hw_qp_resource(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_clear_qp_resource qp_res = {0}; + u16 out_len = sizeof(qp_res); + int ret; + + if (!nic_dev) + return -EINVAL; + + qp_res.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CLEAR_QP_RESOURCE, + &qp_res, sizeof(qp_res), &qp_res, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &qp_res)) { + nic_err(nic_dev->dev_hdl, + "Fail to clear qp resource, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, qp_res.head.state, out_len); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(sss_nic_clear_hw_qp_resource); + +int sss_nic_cache_out_qp_resource(struct sss_nic_io *nic_io) +{ + struct sss_nic_mbx_invalid_qp_cache cmd_qp_res = {0}; + u16 out_len = sizeof(cmd_qp_res); + int ret; + + cmd_qp_res.func_id = sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CACHE_OUT_QP_RES, + &cmd_qp_res, sizeof(cmd_qp_res), + &cmd_qp_res, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_qp_res)) { + nic_err(nic_io->dev_hdl, + "Fail to cache out qp resources, ret: %d, state: 0x%x, out len: 0x%x\n", + ret, cmd_qp_res.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_vport_stats(struct sss_nic_dev *nic_dev, u16 func_id, + struct sss_nic_port_stats *stats) +{ + struct 
sss_nic_mbx_port_stats_info cmd_port_stats = {0}; + struct sss_nic_mbx_port_stats vport_stats = {0}; + u16 out_len = sizeof(vport_stats); + int ret; + + if (!nic_dev || !stats) + return -EINVAL; + + cmd_port_stats.func_id = func_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_VPORT_STAT, + &cmd_port_stats, sizeof(cmd_port_stats), + &vport_stats, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &vport_stats)) { + nic_err(nic_dev->dev_hdl, + "Fail to get vport statistics, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, vport_stats.head.state, out_len); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} + +static int sss_nic_set_func_table(struct sss_nic_io *nic_io, + u32 cfg_mask, const struct sss_nic_func_table_cfg *cfg) +{ + struct sss_nic_mbx_set_func_table cmd_func_tbl = {0}; + u16 out_len = sizeof(cmd_func_tbl); + int ret; + + cmd_func_tbl.tbl_cfg = *cfg; + cmd_func_tbl.cfg_bitmap = cfg_mask; + cmd_func_tbl.func_id = sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, + SSSNIC_MBX_OPCODE_SET_FUNC_TBL, + &cmd_func_tbl, sizeof(cmd_func_tbl), + &cmd_func_tbl, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_func_tbl)) { + nic_err(nic_io->dev_hdl, + "Fail to set func table, bitmap: 0x%x, ret: %d, state: 0x%x, out_len: 0x%x\n", + cfg_mask, ret, cmd_func_tbl.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_init_func_table(struct sss_nic_io *nic_io) +{ + struct sss_nic_func_table_cfg tbl_cfg = {0}; + + tbl_cfg.mtu = SSSNIC_DEFAULT_MAX_MTU; + tbl_cfg.rx_wqe_buf_size = nic_io->rx_buff_len; + + return sss_nic_set_func_table(nic_io, SSSNIC_INIT_FUNC_MASK, &tbl_cfg); +} + +int sss_nic_set_dev_mtu(struct sss_nic_dev *nic_dev, u16 new_mtu) +{ + struct sss_nic_func_table_cfg func_tbl_cfg = {0}; + + if (new_mtu < SSSNIC_MIN_MTU_SIZE || new_mtu > SSSNIC_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_dev->dev_hdl, 
+ "Invalid mtu size: %ubytes, mtu range %ubytes - %ubytes.\n", + new_mtu, SSSNIC_MIN_MTU_SIZE, SSSNIC_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + func_tbl_cfg.mtu = new_mtu; + + return sss_nic_set_func_table(nic_dev->nic_io, + BIT(SSSNIC_FUNC_CFG_TYPE_MTU), &func_tbl_cfg); +} + +static int sss_nic_feature_nego(struct sss_nic_io *nic_io, u8 opcode, u64 *feature) +{ + struct sss_nic_mbx_feature_nego cmd_feature_nego = {0}; + u16 out_len = sizeof(cmd_feature_nego); + int ret; + + cmd_feature_nego.opcode = opcode; + cmd_feature_nego.func_id = sss_get_global_func_id(nic_io->hwdev); + if (opcode == SSSNIC_MBX_OPCODE_SET) + memcpy(cmd_feature_nego.feature, feature, sizeof(u64)); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_FEATURE_NEGO, + &cmd_feature_nego, sizeof(cmd_feature_nego), + &cmd_feature_nego, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_feature_nego)) { + nic_err(nic_io->dev_hdl, + "Fail to negotiate nic feature, ret:%d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_feature_nego.head.state, out_len); + return -EIO; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) + memcpy(feature, cmd_feature_nego.feature, sizeof(u64)); + + return 0; +} + +static int sss_nic_get_bios_pf_bandwidth(struct sss_nic_io *nic_io) +{ + struct sss_nic_mbx_bios_cfg cmd_bios_cfg = {0}; + u16 out_len = sizeof(cmd_bios_cfg); + int ret; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF || + !SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + return 0; + + cmd_bios_cfg.op_code = SSSNIC_NVM_PF_SPEED_LIMIT; + cmd_bios_cfg.bios_cfg.func_valid = SSSNIC_BIOS_FUN_VALID; + cmd_bios_cfg.bios_cfg.func_id = (u8)sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_BIOS_CFG, + &cmd_bios_cfg, sizeof(cmd_bios_cfg), + &cmd_bios_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bios_cfg)) { + nic_err(nic_io->dev_hdl, + "Fail to get bios pf bandwidth limit, ret: %d, status: 0x%x, out_len: 
0x%x\n", + ret, cmd_bios_cfg.head.state, out_len); + return -EIO; + } + + if (cmd_bios_cfg.bios_cfg.pf_bw > SSSNIC_MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", + cmd_bios_cfg.bios_cfg.pf_bw); + return -EINVAL; + } + + if (cmd_bios_cfg.bios_cfg.signature != SSSNIC_BIOS_SIGNATURE) + nic_warn(nic_io->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n", + cmd_bios_cfg.bios_cfg.signature); + + nic_io->mag_cfg.pf_bw_limit = cmd_bios_cfg.bios_cfg.pf_bw; + + return 0; +} + +static int sss_nic_get_feature_from_hw(struct sss_nic_io *nic_io) +{ + return sss_nic_feature_nego(nic_io, SSSNIC_MBX_OPCODE_GET, &nic_io->feature_cap); +} + +int sss_nic_set_feature_to_hw(struct sss_nic_io *nic_io) +{ + return sss_nic_feature_nego(nic_io, SSSNIC_MBX_OPCODE_SET, &nic_io->feature_cap); +} + +void sss_nic_update_nic_feature(struct sss_nic_dev *nic_dev, u64 feature) +{ + struct sss_nic_io *nic_io = nic_dev->nic_io; + + nic_io->feature_cap = feature; + + nic_info(nic_io->dev_hdl, "Update nic feature to 0x%llx\n", nic_io->feature_cap); +} + +int sss_nic_io_init(struct sss_nic_dev *nic_dev) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct sss_nic_io *nic_io = NULL; + int ret; + + nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL); + if (!nic_io) + return -ENOMEM; + + nic_io->hwdev = nic_dev->hwdev; + nic_io->pcidev_hdl = pdev; + nic_io->dev_hdl = &pdev->dev; + nic_io->nic_dev = nic_dev; + mutex_init(&nic_io->mag_cfg.sfp_mutex); + sema_init(&nic_io->mag_cfg.cfg_lock, 1); + nic_io->rx_buff_len = nic_dev->rx_buff_len; + nic_dev->nic_io = nic_io; + + ret = sss_register_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to register service adapter\n"); + goto register_adapter_err; + } + + ret = sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + true, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to set function svc used state\n"); + goto set_state_err; + } + 
+ ret = sss_nic_init_func_table(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init function table\n"); + goto init_func_table_err; + } + + ret = sss_nic_get_feature_from_hw(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to get nic features\n"); + goto get_feature_from_hw_err; + } + + ret = sss_nic_get_bios_pf_bandwidth(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to get pf bandwidth limit\n"); + goto get_bios_pf_bandwidth_err; + } + + ret = sss_nic_init_pf_vf_info(nic_io); + if (ret != 0) + goto init_pf_vf_info_err; + + ret = sss_nic_register_io_callback(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init vf info\n"); + goto register_io_callback_err; + } + + nic_io->feature_cap &= SSSNIC_DRV_DEFAULT_FEATURE; + + return 0; + +register_io_callback_err: + sss_nic_deinit_pf_vf_info(nic_io); + +init_pf_vf_info_err: +get_bios_pf_bandwidth_err: +get_feature_from_hw_err: +init_func_table_err: + sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + false, SSS_CHANNEL_NIC); + +set_state_err: + sss_unregister_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC); + +register_adapter_err: + nic_dev->nic_io = NULL; + kfree(nic_io); + + return ret; +} +EXPORT_SYMBOL(sss_nic_io_init); + +void sss_nic_io_deinit(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_io *nic_io = nic_dev->nic_io; + + sss_nic_unregister_io_callback(nic_io); + + if (nic_io->vf_info_group) { + sss_nic_clear_all_vf_info(nic_io); + sss_nic_deinit_pf_vf_info(nic_io); + } + + sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + false, SSS_CHANNEL_NIC); + + sss_unregister_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC); + + nic_dev->nic_io = NULL; + kfree(nic_io); +} +EXPORT_SYMBOL(sss_nic_io_deinit); + +int sss_nic_force_drop_tx_pkt(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_force_drop_pkt cmd_force_drop_pkt = {0}; + u16 out_len = sizeof(cmd_force_drop_pkt); + int ret; + + cmd_force_drop_pkt.port = 
sss_get_phy_port_id(nic_dev->hwdev); + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_FORCE_PKT_DROP, + &cmd_force_drop_pkt, sizeof(cmd_force_drop_pkt), + &cmd_force_drop_pkt, &out_len); + if ((cmd_force_drop_pkt.head.state != SSS_MGMT_CMD_UNSUPPORTED && + cmd_force_drop_pkt.head.state) || ret || !out_len) { + nic_err(nic_dev->dev_hdl, + "Fail to force drop tx packet, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_force_drop_pkt.head.state, out_len); + return -EFAULT; + } + + return cmd_force_drop_pkt.head.state; +} + +int sss_nic_set_rx_mode(struct sss_nic_dev *nic_dev, u32 rx_mode) +{ + struct sss_nic_mbx_set_rx_mode cmd_set_rx_mode = {0}; + u16 out_len = sizeof(cmd_set_rx_mode); + int ret; + + cmd_set_rx_mode.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_rx_mode.rx_mode = rx_mode; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_RX_MODE, + &cmd_set_rx_mode, sizeof(cmd_set_rx_mode), + &cmd_set_rx_mode, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_rx_mode)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rx mode, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_set_rx_mode.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rx_vlan_offload(struct sss_nic_dev *nic_dev, bool en) +{ + struct sss_nic_mbx_offload_vlan cmd_vlan_offload = {0}; + u16 out_len = sizeof(cmd_vlan_offload); + int ret; + + cmd_vlan_offload.vlan_offload = (u8)en; + cmd_vlan_offload.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_RX_VLAN_OFFLOAD, + &cmd_vlan_offload, sizeof(cmd_vlan_offload), + &cmd_vlan_offload, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_vlan_offload)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rx vlan offload, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_vlan_offload.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int 
sss_nic_update_mac_vlan(struct sss_nic_dev *nic_dev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct sss_nic_vf_info *vf_info = NULL; + struct sss_nic_io *nic_io = nic_dev->nic_io; + u16 func_id; + int ret; + + if (old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + if (!nic_io->vf_info_group || is_zero_ether_addr(vf_info->drv_mac)) + return 0; + + func_id = sss_get_glb_pf_vf_offset(nic_dev->hwdev) + (u16)vf_id; + + ret = sss_nic_del_mac(nic_dev, vf_info->drv_mac, + func_id, old_vlan, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to delete VF %d MAC %pM vlan %u\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac, old_vlan); + return ret; + } + + ret = sss_nic_set_mac(nic_dev, vf_info->drv_mac, + func_id, new_vlan, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to add VF %d MAC %pM vlan %u\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac, new_vlan); + sss_nic_set_mac(nic_dev, vf_info->drv_mac, + func_id, old_vlan, SSS_CHANNEL_NIC); + return ret; + } + + return 0; +} + +static int sss_nic_set_rx_lro(struct sss_nic_dev *nic_dev, + bool lro_en, u8 lro_max_pkt_len) +{ + struct sss_nic_mbx_lro_cfg cmd_lro_cfg = {0}; + u16 out_len = sizeof(cmd_lro_cfg); + int ret; + + cmd_lro_cfg.lro_ipv4_en = (u8)lro_en; + cmd_lro_cfg.lro_ipv6_en = (u8)lro_en; + cmd_lro_cfg.lro_max_pkt_len = lro_max_pkt_len; + cmd_lro_cfg.opcode = SSSNIC_MBX_OPCODE_SET; + cmd_lro_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_RX_LRO, + &cmd_lro_cfg, sizeof(cmd_lro_cfg), + &cmd_lro_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_lro_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set lro offload, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_lro_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +static int sss_nic_set_rx_lro_timer(struct 
sss_nic_dev *nic_dev, u32 value) +{ + struct sss_nic_mbx_lro_timer cmd_lro_timer = {0}; + u16 out_len = sizeof(cmd_lro_timer); + int ret; + + cmd_lro_timer.timer = value; + cmd_lro_timer.opcode = SSSNIC_MBX_OPCODE_SET; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_LRO_TIMER, + &cmd_lro_timer, sizeof(cmd_lro_timer), + &cmd_lro_timer, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_lro_timer)) { + nic_err(nic_dev->dev_hdl, + "Fail to set lro timer, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_lro_timer.head.state, out_len); + + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rx_lro_state(struct sss_nic_dev *nic_dev, bool en, u32 timer, u32 max_pkt_len) +{ + int ret; + + nic_info(nic_dev->dev_hdl, "Set LRO max coalesce packet size to %uK\n", + max_pkt_len); + ret = sss_nic_set_rx_lro(nic_dev, en, (u8)max_pkt_len); + if (ret != 0) + return ret; + + /* we don't set LRO timer for VF */ + if (sss_get_func_type(nic_dev->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + nic_info(nic_dev->dev_hdl, "Success to set LRO timer to %u\n", timer); + + return sss_nic_set_rx_lro_timer(nic_dev, timer); +} + +int sss_nic_set_vlan_fliter(struct sss_nic_dev *nic_dev, bool en) +{ + struct sss_nic_mbx_vlan_filter_cfg cmd_set_filter = {0}; + u16 out_len = sizeof(cmd_set_filter); + int ret; + + cmd_set_filter.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_filter.vlan_filter_ctrl = (u32)en; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_VLAN_FILTER_EN, + &cmd_set_filter, sizeof(cmd_set_filter), + &cmd_set_filter, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_filter)) { + nic_err(nic_dev->dev_hdl, + "Fail to set vlan filter, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_set_filter.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_add_tcam_rule(struct sss_nic_dev *nic_dev, struct sss_nic_tcam_rule_cfg *tcam_rule) +{ + struct 
sss_nic_mbx_add_tcam_rule cmd_add_tcam_rule = {0}; + u16 out_len = sizeof(cmd_add_tcam_rule); + int ret; + + if (!nic_dev || !tcam_rule) + return -EINVAL; + + if (tcam_rule->index >= SSSNIC_TCAM_RULES_NUM_MAX) { + nic_err(nic_dev->dev_hdl, "Invalid tcam rules num :%u to add\n", + tcam_rule->index); + return -EINVAL; + } + + memcpy((void *)&cmd_add_tcam_rule.rule, (void *)tcam_rule, + sizeof(struct sss_nic_tcam_rule_cfg)); + cmd_add_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_add_tcam_rule.type = SSSNIC_TCAM_RULE_FDIR_TYPE; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_ADD_TC_FLOW, + &cmd_add_tcam_rule, sizeof(cmd_add_tcam_rule), + &cmd_add_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_add_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to add tcam rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_add_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_del_tcam_rule(struct sss_nic_dev *nic_dev, u32 index) +{ + struct sss_nic_mbx_del_tcam_rule cmd_del_tcam_rule = {0}; + u16 out_len = sizeof(cmd_del_tcam_rule); + int ret; + + if (!nic_dev) + return -EINVAL; + + if (index >= SSSNIC_TCAM_RULES_NUM_MAX) { + nic_err(nic_dev->dev_hdl, "Invalid tcam rule num :%u to del\n", index); + return -EINVAL; + } + + cmd_del_tcam_rule.index_start = index; + cmd_del_tcam_rule.index_num = 1; + cmd_del_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_del_tcam_rule.type = SSSNIC_TCAM_RULE_FDIR_TYPE; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_DEL_TC_FLOW, + &cmd_del_tcam_rule, sizeof(cmd_del_tcam_rule), + &cmd_del_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_del_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to delete tcam rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_del_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +static int sss_nic_mgmt_tcam_block(struct 
sss_nic_dev *nic_dev, u8 alloc_en, u16 *index) +{ + struct sss_nic_mbx_tcam_block_cfg cmd_mgmt_tcam_block = {0}; + u16 out_len = sizeof(cmd_mgmt_tcam_block); + int ret; + + if (!nic_dev || !index) + return -EINVAL; + + cmd_mgmt_tcam_block.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_mgmt_tcam_block.alloc_en = alloc_en; + cmd_mgmt_tcam_block.tcam_type = SSSNIC_TCAM_BLOCK_TYPE_LARGE; + cmd_mgmt_tcam_block.tcam_block_index = *index; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_TCAM_BLOCK, + &cmd_mgmt_tcam_block, sizeof(cmd_mgmt_tcam_block), + &cmd_mgmt_tcam_block, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_mgmt_tcam_block)) { + nic_err(nic_dev->dev_hdl, + "Fail to set tcam block, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_mgmt_tcam_block.head.state, out_len); + return -EIO; + } + + if (alloc_en) + *index = cmd_mgmt_tcam_block.tcam_block_index; + + return 0; +} + +int sss_nic_alloc_tcam_block(struct sss_nic_dev *nic_dev, u16 *index) +{ + return sss_nic_mgmt_tcam_block(nic_dev, SSSNIC_TCAM_BLOCK_ENABLE, index); +} + +int sss_nic_free_tcam_block(struct sss_nic_dev *nic_dev, u16 *index) +{ + return sss_nic_mgmt_tcam_block(nic_dev, SSSNIC_TCAM_BLOCK_DISABLE, index); +} + +int sss_nic_set_fdir_tcam_rule_filter(struct sss_nic_dev *nic_dev, bool enable) +{ + struct sss_nic_mbx_set_tcam_state cmd_set_tcam_enable = {0}; + u16 out_len = sizeof(cmd_set_tcam_enable); + int ret; + + cmd_set_tcam_enable.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_tcam_enable.tcam_enable = (u8)enable; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_ENABLE_TCAM, + &cmd_set_tcam_enable, sizeof(cmd_set_tcam_enable), + &cmd_set_tcam_enable, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_tcam_enable)) { + nic_err(nic_dev->dev_hdl, + "Fail to set fdir tcam filter, ret: %d, state: 0x%x, out_len: 0x%x, enable: 0x%x\n", + ret, cmd_set_tcam_enable.head.state, out_len, + 
enable); + return -EIO; + } + + return 0; +} + +int sss_nic_flush_tcam_rule(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_flush_tcam_rule cmd_flush_tcam_rule = {0}; + u16 out_len = sizeof(cmd_flush_tcam_rule); + int ret; + + cmd_flush_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_FLUSH_TCAM, + &cmd_flush_tcam_rule, + sizeof(cmd_flush_tcam_rule), + &cmd_flush_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_flush_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to flush tcam fdir rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_flush_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_rq_hw_pc_info(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_pc_info *out_info, u16 qp_num, u16 wqe_type) +{ + int ret; + u16 i; + struct sss_nic_rq_pc_info *rq_pc_info = NULL; + struct sss_nic_rq_hw_info *rq_hw = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to alloc cmd_buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(*rq_hw); + + rq_hw = msg_buf->buf; + rq_hw->num_queues = qp_num; + rq_hw->func_id = sss_get_global_func_id(nic_dev->hwdev); + sss_cpu_to_be32(rq_hw, sizeof(*rq_hw)); + + ret = sss_ctrlq_detail_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_RXQ_INFO_GET, + msg_buf, msg_buf, NULL, 0, SSS_CHANNEL_NIC); + if (ret) + goto get_rq_info_error; + + rq_pc_info = msg_buf->buf; + for (i = 0; i < qp_num; i++) { + out_info[i].hw_ci = rq_pc_info[i].hw_ci >> wqe_type; + out_info[i].hw_pi = rq_pc_info[i].hw_pi >> wqe_type; + } + +get_rq_info_error: + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return ret; +} + +int sss_nic_set_pf_rate(struct sss_nic_dev *nic_dev, u8 speed) +{ + int ret; + u32 pf_rate; + u32 speed_convert[SSSNIC_PORT_SPEED_UNKNOWN] = { + 0, 10, 100, 1000, 10000, 25000, 
40000, 50000, 100000, 200000 + }; + struct sss_nic_io *nic_io = nic_dev->nic_io; + struct sss_nic_mbx_tx_rate_cfg rate_cfg = {0}; + u16 out_len = sizeof(rate_cfg); + + if (speed >= SSSNIC_PORT_SPEED_UNKNOWN) { + nic_err(nic_io->dev_hdl, "Invalid speed level: %u\n", speed); + return -EINVAL; + } + + if (nic_io->mag_cfg.pf_bw_limit == SSSNIC_PF_LIMIT_BW_MAX) { + pf_rate = 0; + } else { + pf_rate = (speed_convert[speed] / 100) * nic_io->mag_cfg.pf_bw_limit; + if (pf_rate == 0 && speed != SSSNIC_PORT_SPEED_NOT_SET) + pf_rate = 1; + } + + rate_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + rate_cfg.max_rate = pf_rate; + rate_cfg.min_rate = 0; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &rate_cfg)) { + nic_err(nic_dev->dev_hdl, "Fail to set rate:%u, ret: %d, state: 0x%x, out len: 0x%x\n", + pf_rate, ret, rate_cfg.head.state, out_len); + return rate_cfg.head.state ? 
rate_cfg.head.state : -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h new file mode 100644 index 0000000000000..20c0cf5991bee --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_H +#define SSS_NIC_CFG_H + +#include +#include + +#include "sss_nic_cfg_define.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_SUPPORT_FEATURE(nic_io, feature) \ + ((nic_io)->feature_cap & SSSNIC_F_##feature) +#define SSSNIC_SUPPORT_CSUM(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, CSUM) +#define SSSNIC_SUPPORT_SCTP_CRC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, SCTP_CRC) +#define SSSNIC_SUPPORT_TSO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, TSO) +#define SSSNIC_SUPPORT_UFO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, UFO) +#define SSSNIC_SUPPORT_LRO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, LRO) +#define SSSNIC_SUPPORT_RSS(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RSS) +#define SSSNIC_SUPPORT_RXVLAN_FILTER(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, RX_VLAN_FILTER) +#define SSSNIC_SUPPORT_VLAN_OFFLOAD(nic_io) \ + (SSSNIC_SUPPORT_FEATURE(nic_io, RX_VLAN_STRIP) && \ + SSSNIC_SUPPORT_FEATURE(nic_io, TX_VLAN_INSERT)) +#define SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, VXLAN_OFFLOAD) +#define SSSNIC_SUPPORT_IPSEC_OFFLOAD(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, IPSEC_OFFLOAD) +#define SSSNIC_SUPPORT_FDIR(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, FDIR) +#define SSSNIC_SUPPORT_PROMISC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, PROMISC) +#define SSSNIC_SUPPORT_ALLMULTI(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, ALLMULTI) +#define SSSNIC_SUPPORT_VF_MAC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, VF_MAC) +#define SSSNIC_SUPPORT_RATE_LIMIT(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RATE_LIMIT) +#define SSSNIC_SUPPORT_RXQ_RECOVERY(nic_io) 
SSSNIC_SUPPORT_FEATURE(nic_io, RXQ_RECOVERY) + +int sss_nic_set_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel); + +int sss_nic_del_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel); + +int sss_nic_add_tcam_rule(struct sss_nic_dev *nic_dev, struct sss_nic_tcam_rule_cfg *tcam_rule); +int sss_nic_del_tcam_rule(struct sss_nic_dev *nic_dev, u32 index); + +int sss_nic_alloc_tcam_block(struct sss_nic_dev *nic_dev, u16 *index); +int sss_nic_free_tcam_block(struct sss_nic_dev *nic_dev, u16 *index); + +int sss_nic_set_fdir_tcam_rule_filter(struct sss_nic_dev *nic_dev, bool enable); + +int sss_nic_flush_tcam_rule(struct sss_nic_dev *nic_dev); + +int sss_nic_update_mac(struct sss_nic_dev *nic_dev, u8 *new_mac); + +int sss_nic_get_default_mac(struct sss_nic_dev *nic_dev, u8 *mac_addr); + +int sss_nic_set_dev_mtu(struct sss_nic_dev *nic_dev, u16 new_mtu); + +int sss_nic_get_vport_stats(struct sss_nic_dev *nic_dev, + u16 func_id, struct sss_nic_port_stats *stats); + +int sss_nic_force_drop_tx_pkt(struct sss_nic_dev *nic_dev); + +int sss_nic_set_rx_mode(struct sss_nic_dev *nic_dev, u32 rx_mode); + +int sss_nic_set_rx_vlan_offload(struct sss_nic_dev *nic_dev, bool en); + +int sss_nic_set_rx_lro_state(struct sss_nic_dev *nic_dev, bool en, u32 timer, u32 max_pkt_len); + +int sss_nic_config_vlan(struct sss_nic_dev *nic_dev, u8 opcode, u16 vlan_id); + +int sss_nic_set_hw_vport_state(struct sss_nic_dev *nic_dev, + u16 func_id, bool enable, u16 channel); + +int sss_nic_set_dcb_info(struct sss_nic_io *nic_io, struct sss_nic_dcb_info *dcb_info); + +int sss_nic_set_hw_dcb_state(struct sss_nic_dev *nic_dev, u8 op_code, u8 state); + +int sss_nic_clear_hw_qp_resource(struct sss_nic_dev *nic_dev); + +int sss_nic_get_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg *pause_config); + +int sss_nic_set_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg pause_config); + +int 
sss_nic_set_vlan_fliter(struct sss_nic_dev *nic_dev, bool en); + +int sss_nic_update_mac_vlan(struct sss_nic_dev *nic_dev, + u16 old_vlan, u16 new_vlan, int vf_id); + +int sss_nic_cache_out_qp_resource(struct sss_nic_io *nic_io); + +int sss_nic_set_feature_to_hw(struct sss_nic_io *nic_io); + +void sss_nic_update_nic_feature(struct sss_nic_dev *nic_dev, u64 feature); + +int sss_nic_io_init(struct sss_nic_dev *nic_dev); + +void sss_nic_io_deinit(struct sss_nic_dev *nic_dev); + +int sss_nic_rq_hw_pc_info(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_pc_info *out_info, u16 num_qps, u16 wqe_type); +int sss_nic_set_pf_rate(struct sss_nic_dev *nic_dev, u8 speed); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c new file mode 100644 index 0000000000000..573cf72f3b396 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_dcb.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" + +u8 sss_nic_get_user_cos_num(struct sss_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_user_cos_num; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_user_cos_num; + return 0; +} + +u8 sss_nic_get_valid_cos_map(struct sss_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_valid_cos_map; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_valid_cos_map; + return 0; +} + +void sss_nic_update_qp_cos_map(struct sss_nic_dev *nic_dev, u8 cos_num) 
+{ + u8 cur_cos_num = 0; + u8 cos_per_qp_num; + u8 qp_num; + u8 qp_offset; + u8 i; + u8 remain; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + u8 valid_cos_map; + + if (cos_num == 0) + return; + + cos_per_qp_num = (u8)(nic_dev->qp_res.qp_num / cos_num); + if (cos_per_qp_num == 0) + return; + + remain = nic_dev->qp_res.qp_num % cos_per_qp_num; + valid_cos_map = sss_nic_get_valid_cos_map(nic_dev); + + memset(dcb_config->cos_qp_num, 0, sizeof(dcb_config->cos_qp_num)); + memset(dcb_config->cos_qp_offset, 0, sizeof(dcb_config->cos_qp_offset)); + + for (i = 0; i < SSSNIC_PCP_UP_MAX; i++) { + if (BIT(i) & valid_cos_map) { + qp_num = cos_per_qp_num; + qp_offset = (u8)(cur_cos_num * cos_per_qp_num); + + if (cur_cos_num < remain) { + qp_offset += cur_cos_num; + qp_num++; + } else { + qp_offset += remain; + } + + valid_cos_map -= (u8)BIT(i); + cur_cos_num++; + + dcb_config->cos_qp_num[i] = qp_num; + dcb_config->cos_qp_offset[i] = qp_offset; + sss_nic_info(nic_dev, drv, "Qp info: cos %u, qp_offset=%u qp_num=%u\n", + i, qp_offset, qp_num); + } + } + + memcpy(nic_dev->backup_dcb_cfg.cos_qp_num, dcb_config->cos_qp_num, + sizeof(dcb_config->cos_qp_num)); + memcpy(nic_dev->backup_dcb_cfg.cos_qp_offset, dcb_config->cos_qp_offset, + sizeof(dcb_config->cos_qp_offset)); +} + +static void sss_nic_set_sq_cos(struct sss_nic_dev *nic_dev, + u16 qid_start, u16 qid_end, u8 cos) +{ + u16 qid; + + for (qid = qid_start; qid < qid_end; qid++) + nic_dev->sq_desc_group[qid].cos = cos; +} + +void sss_nic_update_sq_cos(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + u8 i; + u16 q_num; + u16 qid_start; + u16 qid_end; + + sss_nic_set_sq_cos(nic_dev, 0, nic_dev->qp_res.qp_num, + nic_dev->hw_dcb_cfg.default_cos); + + if (dcb_en == 0) + return; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) { + q_num = (u16)nic_dev->hw_dcb_cfg.cos_qp_num[i]; + if (q_num == 0) + continue; + + qid_start = (u16)nic_dev->hw_dcb_cfg.cos_qp_offset[i]; + qid_end = qid_start + q_num; + sss_nic_set_sq_cos(nic_dev, 
qid_start, qid_end, i); + sss_nic_info(nic_dev, drv, "Update tx db cos, qid_start=%u, qid_end=%u cos=%u\n", + qid_start, qid_end, i); + } +} + +static int sss_nic_init_tx_cos_info(struct sss_nic_dev *nic_dev) +{ + int ret; + struct sss_nic_dcb_info dcb_info = {0}; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + + dcb_info.default_cos = dcb_config->default_cos; + dcb_info.trust = dcb_config->trust; + memset(dcb_info.dscp2cos, dcb_config->default_cos, sizeof(dcb_info.dscp2cos)); + memset(dcb_info.pcp2cos, dcb_config->default_cos, sizeof(dcb_info.pcp2cos)); + + ret = sss_nic_set_dcb_info(nic_dev->nic_io, &dcb_info); + if (ret != 0) + sss_nic_err(nic_dev, drv, "Fail to set dcb state, ret: %d\n", ret); + + return ret; +} + +static u8 sss_nic_get_cos_num(u8 cos_bitmap) +{ + u8 i; + u8 cos_count = 0; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) + if (cos_bitmap & BIT(i)) + cos_count++; + + return cos_count; +} + +void sss_nic_sync_dcb_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_config) +{ + struct sss_nic_dcb_config *hw_config = &nic_dev->hw_dcb_cfg; + + memcpy(hw_config, dcb_config, sizeof(*dcb_config)); +} + +static int sss_nic_init_dcb_cfg(struct sss_nic_dev *nic_dev, + struct sss_nic_dcb_config *dcb_config) +{ + u8 func_cos_bitmap; + u8 port_cos_bitmap; + int ret; + u8 i; + u8 j; + + ret = sss_get_cos_valid_bitmap(nic_dev->hwdev, &func_cos_bitmap, &port_cos_bitmap); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to get cos valid bitmap, ret: %d\n", ret); + return -EFAULT; + } + + nic_dev->max_cos_num = sss_nic_get_cos_num(func_cos_bitmap); + nic_dev->dft_port_cos_bitmap = port_cos_bitmap; + nic_dev->dft_func_cos_bitmap = func_cos_bitmap; + + dcb_config->dscp_user_cos_num = nic_dev->max_cos_num; + dcb_config->pcp_user_cos_num = nic_dev->max_cos_num; + dcb_config->dscp_valid_cos_map = func_cos_bitmap; + dcb_config->pcp_valid_cos_map = func_cos_bitmap; + dcb_config->trust = DCB_PCP; + dcb_config->default_cos = 
(u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) { + dcb_config->pcp2cos[i] = func_cos_bitmap & BIT(i) ? i : dcb_config->default_cos; + for (j = 0; j < SSSNIC_DCB_COS_MAX; j++) + dcb_config->dscp2cos[i * SSSNIC_DCB_DSCP_NUM + j] = dcb_config->pcp2cos[i]; + } + + return 0; +} + +static void sss_nic_reset_dcb_config(struct sss_nic_dev *nic_dev) +{ + memset(&nic_dev->hw_dcb_cfg, 0, sizeof(nic_dev->hw_dcb_cfg)); + sss_nic_init_dcb_cfg(nic_dev, &nic_dev->hw_dcb_cfg); + sss_nic_info(nic_dev, drv, "Success to reset bcb confg\n"); +} + +int sss_nic_update_dcb_cfg(struct sss_nic_dev *nic_dev) +{ + int ret; + + ret = sss_nic_set_hw_dcb_state(nic_dev, SSSNIC_MBX_OPCODE_SET_DCB_STATE, + !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to set dcb state, ret: %d\n", ret); + return ret; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) + sss_nic_sync_dcb_cfg(nic_dev, &nic_dev->backup_dcb_cfg); + else + sss_nic_reset_dcb_config(nic_dev); + + return 0; +} + +int sss_nic_dcb_init(struct sss_nic_dev *nic_dev) +{ + int ret; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + dcb_config->default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + return 0; + } + + ret = sss_nic_init_dcb_cfg(nic_dev, dcb_config); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to init dcb, ret: %d\n", ret); + return ret; + } + sss_nic_info(nic_dev, drv, "Support num cos %u, default cos %u\n", + nic_dev->max_cos_num, dcb_config->default_cos); + + memcpy(&nic_dev->backup_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(nic_dev->hw_dcb_cfg)); + + ret = sss_nic_init_tx_cos_info(nic_dev); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to set tx cos info, ret: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h new file mode 
100644 index 0000000000000..00a649598f286 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DCB_H +#define SSS_NIC_DCB_H + +#include "sss_kernel.h" +#include "sss_nic_dcb_define.h" + +enum SSSNIC_DCB_FLAGS { + SSSNIC_DCB_UP_COS_SETTING, + SSSNIC_DCB_TRAFFIC_STOPPED, +}; + +enum sss_nic_dcb_trust { + DCB_PCP, + DCB_DSCP, +}; + +u8 sss_nic_get_user_cos_num(struct sss_nic_dev *nic_dev); +u8 sss_nic_get_valid_cos_map(struct sss_nic_dev *nic_dev); +int sss_nic_dcb_init(struct sss_nic_dev *nic_dev); +int sss_nic_update_dcb_cfg(struct sss_nic_dev *nic_dev); +void sss_nic_update_sq_cos(struct sss_nic_dev *nic_dev, u8 dcb_en); +void sss_nic_update_qp_cos_map(struct sss_nic_dev *nic_dev, u8 cos_num); +void sss_nic_sync_dcb_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_config); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c new file mode 100644 index 0000000000000..2f056c4fa4caf --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_rss.h" +#include "sss_nic_ethtool_api.h" +#include "sss_nic_ethtool_stats.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_netdev_ops_api.h" + +#define SSSNIC_MGMT_VERSION_MAX_LEN 32 + +#define SSSNIC_AUTONEG_RESET_TIMEOUT 100 +#define SSSNIC_AUTONEG_FINISH_TIMEOUT 200 + +static void 
sss_nic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u8 mgmt_ver[SSSNIC_MGMT_VERSION_MAX_LEN] = {0}; + int ret; + + strscpy(drvinfo->driver, SSSNIC_DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, SSSNIC_DRV_VERSION, sizeof(drvinfo->version)); + strscpy(drvinfo->bus_info, pci_name(pdev), sizeof(drvinfo->bus_info)); + + ret = sss_get_mgmt_version(nic_dev->hwdev, mgmt_ver, + SSSNIC_MGMT_VERSION_MAX_LEN, + SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to get fw version, ret: %d\n", ret); + return; + } + + ret = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%s", mgmt_ver); + if (ret < 0) + nicif_err(nic_dev, drv, netdev, "Fail to snprintf fw version\n"); +} + +static u32 sss_nic_get_msglevel(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void sss_nic_set_msglevel(struct net_device *netdev, u32 msg_enable) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + nicif_info(nic_dev, drv, netdev, "Success to change msg_enable from 0x%x to 0x%x\n", + nic_dev->msg_enable, msg_enable); + + nic_dev->msg_enable = msg_enable; +} + +static int sss_nic_nway_reset(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + int ret; + + while (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_AUTONEG_RESET)) + msleep(SSSNIC_AUTONEG_RESET_TIMEOUT); + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to get port info\n"); + ret = -EFAULT; + goto reset_err; + } + + if (port_info.autoneg_state != SSSNIC_PORT_CFG_AN_ON) { + nicif_err(nic_dev, drv, netdev, "Autonegotiation is not on, don't support to restart it\n"); + ret = -EOPNOTSUPP; + goto reset_err; + } + + ret = 
sss_nic_set_autoneg(nic_dev, false); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to set autonegotiation off\n"); + ret = -EFAULT; + goto reset_err; + } + + msleep(SSSNIC_AUTONEG_FINISH_TIMEOUT); + + ret = sss_nic_set_autoneg(nic_dev, true); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to set autonegotiation on\n"); + ret = -EFAULT; + goto reset_err; + } + + msleep(SSSNIC_AUTONEG_FINISH_TIMEOUT); + nicif_info(nic_dev, drv, netdev, "Success to restart autonegotiation\n"); + +reset_err: + clear_bit(SSSNIC_AUTONEG_RESET, &nic_dev->flags); + return ret; +} + +static void sss_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam __maybe_unused *param, + struct netlink_ext_ack __maybe_unused *extack) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + ringparam->tx_pending = nic_dev->sq_desc_group[0].q_depth; + ringparam->rx_pending = nic_dev->rq_desc_group[0].q_depth; + ringparam->tx_max_pending = SSSNIC_MAX_TX_QUEUE_DEPTH; + ringparam->rx_max_pending = SSSNIC_MAX_RX_QUEUE_DEPTH; +} + +static int sss_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam __maybe_unused *param, + struct netlink_ext_ack __maybe_unused *extack) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_qp_resource qp_res = {0}; + u32 sq_depth; + u32 rq_depth; + int ret; + + ret = sss_nic_check_ringparam_valid(netdev, ringparam); + if (ret != 0) + return ret; + + sq_depth = (u32)(1U << (u16)ilog2(ringparam->tx_pending)); + rq_depth = (u32)(1U << (u16)ilog2(ringparam->rx_pending)); + if (sq_depth == nic_dev->qp_res.sq_depth && + rq_depth == nic_dev->qp_res.rq_depth) + return 0; /* nothing to do */ + + nicif_info(nic_dev, drv, netdev, + "Change Tx/Rx ring depth from %u/%u to %u/%u\n", + nic_dev->qp_res.sq_depth, nic_dev->qp_res.rq_depth, + sq_depth, rq_depth); + + if (netif_running(netdev) == 0) { + 
sss_nic_update_qp_depth(nic_dev, sq_depth, rq_depth); + return 0; + } + + qp_res = nic_dev->qp_res; + qp_res.sq_depth = sq_depth; + qp_res.rq_depth = rq_depth; + qp_res.sq_res_group = NULL; + qp_res.rq_res_group = NULL; + qp_res.irq_cfg = NULL; + + nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); + ret = sss_nic_update_channel_setting(nic_dev, &qp_res, + NULL, NULL); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update channel settings\n"); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +{ + return sss_nic_ethtool_get_coalesce(netdev, coal, SSSNIC_COALESCE_ALL_QUEUE); +} + +static int sss_nic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce __maybe_unused *kec, + struct netlink_ext_ack __maybe_unused *extack) +{ + return sss_nic_ethtool_set_coalesce(netdev, coal, SSSNIC_COALESCE_ALL_QUEUE); +} + +static int sss_nic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_get_coalesce(netdev, coal, (u16)queue); +} + +static int sss_nic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_set_coalesce(netdev, coal, (u16)queue); +} + +static int sss_nic_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + enum sss_nic_mag_led_type led_type = SSSNIC_MAG_LED_TYPE_ALARM; + enum sss_nic_mag_led_mode led_mode; + int ret; + + if (state == ETHTOOL_ID_ACTIVE) { + led_mode = SSSNIC_MAG_LED_FORCE_BLINK_2HZ; + } else if (state == ETHTOOL_ID_INACTIVE) { + led_mode = SSSNIC_MAG_LED_DEFAULT; + } else { + nicif_err(nic_dev, drv, netdev, "Not support to set phys id, state:%d\n", state); + return -EOPNOTSUPP; + } 
+ + ret = sss_nic_set_hw_led_state(nic_dev, led_type, led_mode); + if (ret != 0) + nicif_err(nic_dev, drv, netdev, "Fail to set led status, ret:%d, type:%d, mode:%d\n", + ret, led_type, led_mode); + + return ret; +} + +static void sss_nic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + int ret; + struct sss_nic_pause_cfg pause_config = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + ret = sss_nic_get_hw_pause_info(nic_dev, &pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to get pauseparam\n"); + } else { + pauseparam->autoneg = pause_config.auto_neg == SSSNIC_PORT_CFG_AN_ON ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + pauseparam->rx_pause = pause_config.rx_pause; + pauseparam->tx_pause = pause_config.tx_pause; + } +} + +static int sss_nic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + struct sss_nic_pause_cfg pause_config = {0}; + u32 auto_neg; + int ret; + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to get auto-negotiation state\n"); + return -EFAULT; + } + + auto_neg = port_info.autoneg_state == SSSNIC_PORT_CFG_AN_ON ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + if (pauseparam->autoneg != auto_neg) { + nicif_err(nic_dev, drv, netdev, + "Use: ethtool -s autoneg to change autoneg\n"); + return -EOPNOTSUPP; + } + pause_config.auto_neg = pauseparam->autoneg == AUTONEG_ENABLE ? 
+			SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF;
+	pause_config.rx_pause = (u8)pauseparam->rx_pause;
+	pause_config.tx_pause = (u8)pauseparam->tx_pause;
+
+	ret = sss_nic_set_hw_pause_info(nic_dev, pause_config);
+	if (ret != 0) {
+		nicif_err(nic_dev, drv, netdev, "Fail to set pauseparam\n");
+		return ret;
+	}
+
+	nicif_info(nic_dev, drv, netdev, "Success to set pauseparam option, rx: %s, tx: %s\n",
+		   pauseparam->rx_pause ? "on" : "off", pauseparam->tx_pause ? "on" : "off");
+
+	return 0;
+}
+
+/* Report the plugged module's SFF type and EEPROM length to ethtool.
+ * The module identifier read from firmware selects one of the
+ * sss_nic_module_type_* helpers, which fill in @modinfo.
+ * Return 0 on success, negative errno on failure or unknown module.
+ */
+static int sss_nic_get_module_info(struct net_device *netdev,
+				   struct ethtool_modinfo *modinfo)
+{
+	struct sss_nic_dev *nic_dev = netdev_priv(netdev);
+	sss_nic_get_module_info_t handler[SSSNIC_MODULE_TYPE_MAX] = {NULL};
+	u8 sfp_type = 0;
+	u8 sfp_type_ext = 0;
+	int ret;
+
+	handler[SSSNIC_MODULE_TYPE_SFP] = sss_nic_module_type_sfp;
+	handler[SSSNIC_MODULE_TYPE_QSFP] = sss_nic_module_type_qsfp;
+	handler[SSSNIC_MODULE_TYPE_QSFP_PLUS] = sss_nic_module_type_qsfp_plus;
+	handler[SSSNIC_MODULE_TYPE_QSFP28] = sss_nic_module_type_qsfp28;
+
+	ret = sss_nic_get_sfp_type(nic_dev, &sfp_type, &sfp_type_ext);
+	if (ret != 0)
+		return ret;
+
+	/* Reject identifiers outside the table and gaps inside it alike */
+	if (sfp_type >= SSSNIC_MODULE_TYPE_MAX || !handler[sfp_type]) {
+		nicif_warn(nic_dev, drv, netdev,
+			   "Unknown optical module type: 0x%x\n", sfp_type);
+		return -EINVAL;
+	}
+
+	handler[sfp_type](modinfo, sfp_type_ext);
+
+	return 0;
+}
+
+/* Copy @ee->len bytes of module EEPROM starting at @ee->offset into @data.
+ * The firmware read has no offset argument and fills sfp_data from the
+ * start of the EEPROM, so it must fetch offset + len bytes — fetching only
+ * len bytes (as before) made the memcpy below copy bytes that were never
+ * read whenever ee->offset was nonzero. The bounds check guarantees
+ * offset + len fits in sfp_data.
+ */
+static int sss_nic_get_module_eeprom(struct net_device *netdev,
+				     struct ethtool_eeprom *ee, u8 *data)
+{
+	struct sss_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 sfp_data[SSSNIC_STD_SFP_INFO_MAX_SIZE];
+	u32 offset = ee->len + ee->offset;
+	u32 len = ee->len;
+	int ret;
+
+	if (len == 0 || offset > SSSNIC_STD_SFP_INFO_MAX_SIZE)
+		return -EINVAL;
+
+	memset(data, 0, len);
+
+	ret = sss_nic_get_sfp_eeprom(nic_dev, (u8 *)sfp_data, offset);
+	if (ret != 0)
+		return ret;
+
+	memcpy(data, sfp_data +
ee->offset, len); + + return 0; +} + +static u32 sss_nic_get_priv_flags(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 ret_flag = 0; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) + ret_flag |= SSSNIC_PRIV_FLAG_SYMM_RSS; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + ret_flag |= SSSNIC_PRIV_FLAG_LINK_UP; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + ret_flag |= SSSNIC_PRIV_FLAG_RQ_RECOVERY; + + return ret_flag; +} + +static int sss_nic_set_priv_flags(struct net_device *netdev, u32 flags) +{ + int ret; + + ret = sss_nic_set_symm_rss_flag(netdev, flags); + if (ret) + return ret; + + ret = sss_nic_set_rq_recovery_flag(netdev, flags); + if (ret) + return ret; + + return sss_nic_set_force_link_flag(netdev, flags); +} + +static void sss_nic_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, SSSNIC_LP_TEST_TYPE_MAX * sizeof(u64)); + sss_nic_loop_test(netdev, eth_test, data); +} + +static const struct ethtool_ops g_nic_ethtool_ops = { + .supported_coalesce_params = SSSNIC_SUPPORTED_COALESCE_PARAMS, + + .get_link_ksettings = sss_nic_get_link_ksettings, + .set_link_ksettings = sss_nic_set_link_ksettings, + + .get_drvinfo = sss_nic_get_drvinfo, + .get_msglevel = sss_nic_get_msglevel, + .set_msglevel = sss_nic_set_msglevel, + .nway_reset = sss_nic_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = sss_nic_get_ringparam, + .set_ringparam = sss_nic_set_ringparam, + .get_pauseparam = sss_nic_get_pauseparam, + .set_pauseparam = sss_nic_set_pauseparam, + .get_sset_count = sss_nic_get_sset_count, + .get_ethtool_stats = sss_nic_get_ethtool_stats, + .get_strings = sss_nic_get_strings, + + .self_test = sss_nic_self_test, + + .set_phys_id = sss_nic_set_phys_id, + + .get_coalesce = sss_nic_get_coalesce, + .set_coalesce = sss_nic_set_coalesce, + + .get_per_queue_coalesce = sss_nic_get_per_queue_coalesce, + .set_per_queue_coalesce 
= sss_nic_set_per_queue_coalesce, + + .get_rxnfc = sss_nic_get_rxnfc, + .set_rxnfc = sss_nic_set_rxnfc, + .get_priv_flags = sss_nic_get_priv_flags, + .set_priv_flags = sss_nic_set_priv_flags, + + .get_channels = sss_nic_get_channels, + .set_channels = sss_nic_set_channels, + + .get_module_info = sss_nic_get_module_info, + .get_module_eeprom = sss_nic_get_module_eeprom, + + .get_rxfh_indir_size = sss_nic_get_rxfh_indir_size, + + .get_rxfh_key_size = sss_nic_get_rxfh_key_size, + .get_rxfh = sss_nic_get_rxfh, + .set_rxfh = sss_nic_set_rxfh, +}; + +static const struct ethtool_ops g_nicvf_ethtool_ops = { + .supported_coalesce_params = SSSNIC_SUPPORTED_COALESCE_PARAMS, + + .get_link_ksettings = sss_nic_get_link_ksettings, + + .get_drvinfo = sss_nic_get_drvinfo, + .get_msglevel = sss_nic_get_msglevel, + .set_msglevel = sss_nic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = sss_nic_get_ringparam, + + .set_ringparam = sss_nic_set_ringparam, + .get_sset_count = sss_nic_get_sset_count, + .get_ethtool_stats = sss_nic_get_ethtool_stats, + .get_strings = sss_nic_get_strings, + + .get_coalesce = sss_nic_get_coalesce, + .set_coalesce = sss_nic_set_coalesce, + + .get_per_queue_coalesce = sss_nic_get_per_queue_coalesce, + .set_per_queue_coalesce = sss_nic_set_per_queue_coalesce, + + .get_rxnfc = sss_nic_get_rxnfc, + .set_rxnfc = sss_nic_set_rxnfc, + .get_priv_flags = sss_nic_get_priv_flags, + .set_priv_flags = sss_nic_set_priv_flags, + + .get_channels = sss_nic_get_channels, + .set_channels = sss_nic_set_channels, + + .get_rxfh_indir_size = sss_nic_get_rxfh_indir_size, + + .get_rxfh_key_size = sss_nic_get_rxfh_key_size, + .get_rxfh = sss_nic_get_rxfh, + .set_rxfh = sss_nic_set_rxfh, + +}; + +void sss_nic_set_ethtool_ops(struct sss_nic_dev *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (!SSSNIC_FUNC_IS_VF(adapter->hwdev)) + netdev->ethtool_ops = &g_nic_ethtool_ops; + else + netdev->ethtool_ops = &g_nicvf_ethtool_ops; +} diff --git 
a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h new file mode 100644 index 0000000000000..d27145371df19 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_H +#define SSS_NIC_ETHTOOL_H + +#include + +void sss_nic_set_ethtool_ops(struct sss_nic_dev *adapter); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c new file mode 100644 index 0000000000000..59588e06214fa --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c @@ -0,0 +1,810 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_rss.h" +#include "sss_nic_ethtool_stats.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_ethtool_api.h" + +#define SSSNIC_COALESCE_PENDING_LIMIT_UNIT 8 +#define SSSNIC_COALESCE_TIMER_CFG_UNIT 5 +#define SSSNIC_COALESCE_MAX_PENDING_LIMIT (255 * SSSNIC_COALESCE_PENDING_LIMIT_UNIT) +#define SSSNIC_COALESCE_MAX_TIMER_CFG (255 * SSSNIC_COALESCE_TIMER_CFG_UNIT) +#define SSSNIC_WAIT_PKTS_TO_RX_BUFFER 200 +#define SSSNIC_WAIT_CLEAR_LP_TEST 100 + +#define SSSNIC_CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if ((coal)->item % (unit)) \ + nicif_warn(nic_dev, drv, netdev, \ + "%s in %d units, change to %u\n", \ + #item, (unit), ((coal)->item - \ + (coal)->item % (unit))); \ +} while (0) + +#define SSSNIC_CHECK_COALESCE_CHANGED(coal, item, unit, 
ori_val, obj_str) \ +do { \ + if (((coal)->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %u %s\n", \ + #item, (ori_val) * (unit), \ + ((coal)->item - (coal)->item % (unit)), \ + (obj_str)); \ +} while (0) + +#define SSSNIC_CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if ((coal)->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), (coal)->item, (obj_str)); \ +} while (0) + +#define SSSNIC_PORT_DOWN_ERR_ID 0 +#define SSSNIC_LP_DEF_TIME 5 /* seconds */ + +#define SSSNIC_TEST_TIME_MULTIPLE 5 + +#define SSSNIC_INTERNAL_LP_MODE 5 + +#define SSSNIC_WAIT_LOOP_TEST_FINISH_TIMEOUT 5000 + +void sss_nic_update_qp_depth(struct sss_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth) +{ + u16 i; + + nic_dev->qp_res.sq_depth = sq_depth; + nic_dev->qp_res.rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qp_num; i++) { + nic_dev->sq_desc_group[i].q_depth = sq_depth; + nic_dev->rq_desc_group[i].q_depth = rq_depth; + nic_dev->sq_desc_group[i].qid_mask = sq_depth - 1; + nic_dev->rq_desc_group[i].qid_mask = rq_depth - 1; + } +} + +int sss_nic_check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ringparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (ringparam->rx_mini_pending != 0 || ringparam->rx_jumbo_pending != 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupport rx_mini_pending: %u, rx_jumbo_pending: %u\n", + ringparam->rx_mini_pending, ringparam->rx_jumbo_pending); + return -EINVAL; + } + + if (ringparam->tx_pending < SSSNIC_MIN_QUEUE_DEPTH || + ringparam->tx_pending > SSSNIC_MAX_TX_QUEUE_DEPTH || + ringparam->rx_pending < SSSNIC_MIN_QUEUE_DEPTH || + ringparam->rx_pending > SSSNIC_MAX_RX_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, + "Queue depth out of range tx[%d-%d] rx[%d-%d]\n", + ringparam->tx_pending, ringparam->tx_pending, + ringparam->rx_pending, ringparam->rx_pending); + return -EINVAL; + } + + 
return 0;
+}
+
+/* Translate a hardware interrupt-coalescing config into ethtool units.
+ * Timer fields are stored in steps of SSSNIC_COALESCE_TIMER_CFG_UNIT
+ * microseconds and pending limits in steps of
+ * SSSNIC_COALESCE_PENDING_LIMIT_UNIT frames; the tx fields mirror the
+ * rx fields (a single config drives both directions here).
+ */
+void sss_nic_intr_coal_to_ethtool_coal(struct ethtool_coalesce *ethtool_coal,
+				       struct sss_nic_intr_coal_info *nic_coal)
+{
+	ethtool_coal->rx_coalesce_usecs = nic_coal->coalesce_timer *
+					  SSSNIC_COALESCE_TIMER_CFG_UNIT;
+	ethtool_coal->tx_coalesce_usecs = ethtool_coal->rx_coalesce_usecs;
+	ethtool_coal->rx_coalesce_usecs_low = nic_coal->rx_usecs_low *
+					      SSSNIC_COALESCE_TIMER_CFG_UNIT;
+	ethtool_coal->rx_coalesce_usecs_high = nic_coal->rx_usecs_high *
+					       SSSNIC_COALESCE_TIMER_CFG_UNIT;
+
+	ethtool_coal->rx_max_coalesced_frames = nic_coal->pending_limt *
+						SSSNIC_COALESCE_PENDING_LIMIT_UNIT;
+	ethtool_coal->tx_max_coalesced_frames =
+		ethtool_coal->rx_max_coalesced_frames;
+	ethtool_coal->rx_max_coalesced_frames_low =
+		nic_coal->rx_pending_limt_low *
+		SSSNIC_COALESCE_PENDING_LIMIT_UNIT;
+	ethtool_coal->rx_max_coalesced_frames_high =
+		nic_coal->rx_pending_limt_high *
+		SSSNIC_COALESCE_PENDING_LIMIT_UNIT;
+
+	ethtool_coal->pkt_rate_low = (u32)nic_coal->pkt_rate_low;
+	ethtool_coal->pkt_rate_high = (u32)nic_coal->pkt_rate_high;
+}
+
+/* Fill @ethtool_coal from the cached config of @queue.
+ * @queue may be SSSNIC_COALESCE_ALL_QUEUE (0xFFFF), in which case
+ * queue 0's config is reported as the device-wide default.
+ * Return 0 on success, -EINVAL for a queue id beyond qp_num.
+ */
+int sss_nic_ethtool_get_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *ethtool_coal, u16 queue)
+{
+	struct sss_nic_dev *nic_dev = netdev_priv(netdev);
+	struct sss_nic_intr_coal_info *intr_coal_info = NULL;
+
+	if (queue == SSSNIC_COALESCE_ALL_QUEUE) {
+		/* get tx/rx irq0 as default parameters */
+		intr_coal_info = &nic_dev->coal_info[0];
+	} else {
+		if (queue >= nic_dev->qp_res.qp_num) {
+			nicif_err(nic_dev, drv, netdev,
+				  "Invalid queue_id: %u\n", queue);
+			return -EINVAL;
+		}
+		intr_coal_info = &nic_dev->coal_info[queue];
+	}
+
+	sss_nic_intr_coal_to_ethtool_coal(ethtool_coal, intr_coal_info);
+	ethtool_coal->use_adaptive_rx_coalesce =
+		nic_dev->use_adaptive_rx_coalesce;
+
+	return 0;
+}
+
+/* Cache @coal for queue @qid and, when the interface is up and adaptive
+ * rx coalescing is off, push the new timer/pending-limit to the chip's
+ * MSI-X entry for that queue.
+ */
+int sss_nic_set_hw_intr_coal(struct sss_nic_dev *nic_dev,
+			     u16 qid, struct sss_nic_intr_coal_info *coal)
+{
+	struct sss_nic_intr_coal_info *intr_coal_info = NULL;
+	struct sss_irq_cfg irq_cfg = {0};
+	struct
net_device *netdev = nic_dev->netdev; + int ret; + + intr_coal_info = &nic_dev->coal_info[qid]; + if (intr_coal_info->coalesce_timer != coal->coalesce_timer || + intr_coal_info->pending_limt != coal->pending_limt) + intr_coal_info->user_set_intr_coal_flag = 1; + + intr_coal_info->coalesce_timer = coal->coalesce_timer; + intr_coal_info->pending_limt = coal->pending_limt; + intr_coal_info->rx_pending_limt_low = coal->rx_pending_limt_low; + intr_coal_info->rx_pending_limt_high = coal->rx_pending_limt_high; + intr_coal_info->pkt_rate_low = coal->pkt_rate_low; + intr_coal_info->pkt_rate_high = coal->pkt_rate_high; + intr_coal_info->rx_usecs_low = coal->rx_usecs_low; + intr_coal_info->rx_usecs_high = coal->rx_usecs_high; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP) || + qid >= nic_dev->qp_res.qp_num || + nic_dev->use_adaptive_rx_coalesce != 0) + return 0; + + irq_cfg.msix_id = nic_dev->qp_res.irq_cfg[qid].msix_id; + irq_cfg.lli_set = 0; + irq_cfg.coalesc_intr_set = 1; + irq_cfg.coalesc_timer = intr_coal_info->coalesce_timer; + irq_cfg.resend_timer = intr_coal_info->resend_timer; + irq_cfg.pending = intr_coal_info->pending_limt; + nic_dev->rq_desc_group[qid].last_coal_timer = + intr_coal_info->coalesce_timer; + nic_dev->rq_desc_group[qid].last_pending_limt = intr_coal_info->pending_limt; + ret = sss_chip_set_msix_attr(nic_dev->hwdev, irq_cfg, + SSS_CHANNEL_NIC); + if (ret != 0) + nicif_warn(nic_dev, drv, netdev, + "Fail to set queue%u coalesce", qid); + + return ret; +} + +int sss_nic_check_coal_param_support(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->cmd & ~SSSNIC_SUPPORTED_COALESCE_PARAMS) { + nicif_err(nic_dev, drv, netdev, + "Only support to change rx/tx-usecs and rx/tx-frames\n"); + + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_check_coal_param_valid(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = 
netdev_priv(netdev); + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "Coal param: tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "Coal param: tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + if (coal->rx_coalesce_usecs > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > + SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > + SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_check_coal_param_range(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if 
(coal->rx_coalesce_usecs_low / SSSNIC_COALESCE_TIMER_CFG_UNIT >=
+	    coal->rx_coalesce_usecs_high / SSSNIC_COALESCE_TIMER_CFG_UNIT) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Coal param: coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u)\n",
+			  coal->rx_coalesce_usecs_high,
+			  coal->rx_coalesce_usecs_low);
+		return -EOPNOTSUPP;
+	}
+
+	/* Compared in hardware units, so values that only differ within one
+	 * rounding step also count as "not greater".
+	 */
+	if (coal->rx_max_coalesced_frames_low / SSSNIC_COALESCE_PENDING_LIMIT_UNIT >=
+	    coal->rx_max_coalesced_frames_high / SSSNIC_COALESCE_PENDING_LIMIT_UNIT) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Coal param: coalesced_frames_high(%u) must more than coalesced_frames_low(%u)\n",
+			  coal->rx_max_coalesced_frames_high,
+			  coal->rx_max_coalesced_frames_low);
+		return -EOPNOTSUPP;
+	}
+
+	if (coal->pkt_rate_low >= coal->pkt_rate_high) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Coal param: pkt_rate_high(%u) must more than pkt_rate_low(%u)\n",
+			  coal->pkt_rate_high,
+			  coal->pkt_rate_low);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/* Run all coalesce-parameter checks (supported fields, absolute limits,
+ * low/high ordering) in one place. Return 0 if the request is acceptable.
+ */
+int sss_nic_coalesce_check(struct net_device *netdev,
+			   const struct ethtool_coalesce *coal)
+{
+	int ret;
+
+	ret = sss_nic_check_coal_param_support(netdev, coal);
+	if (ret != 0)
+		return ret;
+
+	ret = sss_nic_check_coal_param_valid(netdev, coal);
+	if (ret != 0)
+		return ret;
+
+	ret = sss_nic_check_coal_param_range(netdev, coal);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
+/* Apply @intr_coal_info to a single queue, or to every queue when
+ * @queue is SSSNIC_COALESCE_ALL_QUEUE.
+ * NOTE(review): the return value of sss_nic_set_hw_intr_coal() is
+ * discarded on both paths, so a hardware set failure still reports
+ * success to ethtool (the failure is only logged inside the helper) —
+ * confirm this best-effort behavior is intended.
+ */
+int sss_nic_set_coal_param_to_hw(struct sss_nic_dev *nic_dev,
+				 struct sss_nic_intr_coal_info *intr_coal_info, u16 queue)
+{
+	u16 i;
+
+	if (queue < nic_dev->qp_res.qp_num) {
+		sss_nic_set_hw_intr_coal(nic_dev, queue, intr_coal_info);
+		return 0;
+	} else if (queue != SSSNIC_COALESCE_ALL_QUEUE) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Invalid queue_id: %u\n", queue);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nic_dev->max_qp_num; i++)
+		sss_nic_set_hw_intr_coal(nic_dev, i, intr_coal_info);
+
+	return 0;
+}
+
+/* Warn (log only) when any requested value is not a multiple of the
+ * hardware step size; the value will be rounded down when programmed.
+ */
+void sss_nic_coalesce_align_check(struct net_device *netdev,
+				  struct ethtool_coalesce *coal)
+{
+	struct sss_nic_dev
*nic_dev = netdev_priv(netdev); + + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); +} + +void sss_nic_coalesce_change_check(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + char obj_str[32] = {0}; + + if (queue == SSSNIC_COALESCE_ALL_QUEUE) { + intr_coal_info = &nic_dev->coal_info[0]; + snprintf(obj_str, sizeof(obj_str), "for netdev"); + } else { + intr_coal_info = &nic_dev->coal_info[queue]; + snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); + } + + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->coalesce_timer, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->rx_usecs_low, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->rx_usecs_high, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->pending_limt, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->rx_pending_limt_low, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->rx_pending_limt_high, 
obj_str); + SSSNIC_CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, + intr_coal_info->pkt_rate_low, obj_str); + SSSNIC_CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, + intr_coal_info->pkt_rate_high, obj_str); +} + +void sss_nic_ethtool_coalesce_to_intr_coal_info(struct sss_nic_intr_coal_info *nic_coal, + struct ethtool_coalesce *ethtool_coal) +{ + nic_coal->coalesce_timer = + (u8)(ethtool_coal->rx_coalesce_usecs / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->pending_limt = (u8)(ethtool_coal->rx_max_coalesced_frames / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + nic_coal->pkt_rate_low = ethtool_coal->pkt_rate_low; + nic_coal->pkt_rate_high = ethtool_coal->pkt_rate_high; + nic_coal->rx_usecs_low = + (u8)(ethtool_coal->rx_coalesce_usecs_low / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->rx_usecs_high = + (u8)(ethtool_coal->rx_coalesce_usecs_high / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->rx_pending_limt_low = + (u8)(ethtool_coal->rx_max_coalesced_frames_low / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + nic_coal->rx_pending_limt_high = + (u8)(ethtool_coal->rx_max_coalesced_frames_high / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); +} + +int sss_nic_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info intr_coal_info = {0}; + u32 last_adaptive_rx; + int ret = 0; + + ret = sss_nic_coalesce_check(netdev, coal); + if (ret != 0) + return ret; + + sss_nic_coalesce_align_check(netdev, coal); + sss_nic_coalesce_change_check(netdev, coal, queue); + + sss_nic_ethtool_coalesce_to_intr_coal_info(&intr_coal_info, coal); + + last_adaptive_rx = nic_dev->use_adaptive_rx_coalesce; + nic_dev->use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + if (nic_dev->use_adaptive_rx_coalesce == 0 && + (intr_coal_info.coalesce_timer == 0 || + intr_coal_info.pending_limt == 0)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + if (SSS_CHANNEL_RES_VALID(nic_dev) 
!= 0) { + if (nic_dev->use_adaptive_rx_coalesce == 0) + cancel_delayed_work_sync(&nic_dev->moderation_task); + else if (last_adaptive_rx == 0) + queue_delayed_work(nic_dev->workq, + &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + } + + return sss_nic_set_coal_param_to_hw(nic_dev, &intr_coal_info, queue); +} + +void sss_nic_module_type_sfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; +} + +void sss_nic_module_type_qsfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; +} + +void sss_nic_module_type_qsfp_plus(struct ethtool_modinfo *modinfo, u8 sfp_type_ext) +{ + if (sfp_type_ext < SSSNIC_SFP_TYPE_EXT_FLAG) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } +} + +void sss_nic_module_type_qsfp28(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; +} + +int sss_nic_set_rq_recovery_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag & SSSNIC_PRIV_FLAG_RQ_RECOVERY) { + if (!SSSNIC_SUPPORT_RXQ_RECOVERY(nic_dev->nic_io)) { + nicif_info(nic_dev, drv, netdev, "Unsupport open rq recovery\n"); + return -EOPNOTSUPP; + } + + if (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + return 0; + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + nicif_info(nic_dev, drv, netdev, "Succss to open rq recovery\n"); + } else { + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + return 0; + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + nicif_info(nic_dev, drv, netdev, "Success to close rq recovery\n"); + } + + return 0; +} + +int 
sss_nic_set_symm_rss_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if ((flag & SSSNIC_PRIV_FLAG_SYMM_RSS) != 0) { + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + nicif_err(nic_dev, drv, netdev, "Fail to open Symmetric RSS while DCB is enabled\n"); + return -EOPNOTSUPP; + } + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) { + nicif_err(nic_dev, drv, netdev, "Fail to open Symmetric RSS while RSS is disabled\n"); + return -EOPNOTSUPP; + } + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX); + } else { + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX); + } + + return 0; +} + +void sss_nic_force_link_up(struct sss_nic_dev *nic_dev) +{ + if (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return; + + if (!SSS_CHANNEL_RES_VALID(nic_dev)) + return; + + if (netif_carrier_ok(nic_dev->netdev)) + return; + + nic_dev->link_status = true; + netif_carrier_on(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Set link up\n"); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, nic_dev->link_status); +} + +int sss_nic_force_link_down(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 link_status = 0; + + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return 0; + + if (!SSS_CHANNEL_RES_VALID(nic_dev)) + return 0; + + ret = sss_nic_get_hw_link_state(nic_dev, &link_status); + if (ret != 0) { + nicif_err(nic_dev, link, nic_dev->netdev, "Fail to get link state: %d\n", ret); + return ret; + } + + nic_dev->link_status = link_status; + + if (link_status != 0) { + if (netif_carrier_ok(nic_dev->netdev)) + return 0; + + netif_carrier_on(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Link state is up\n"); + } else { + if (!netif_carrier_ok(nic_dev->netdev)) + return 0; + + netif_carrier_off(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Link state is down\n"); + } + + if 
(!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, nic_dev->link_status); + + return ret; +} + +int sss_nic_set_force_link_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if ((flag & SSSNIC_PRIV_FLAG_LINK_UP) != 0) + sss_nic_force_link_up(nic_dev); + else + ret = sss_nic_force_link_down(nic_dev); + + return ret; +} + +int sss_nic_finish_loop_test(struct sss_nic_dev *nic_dev, + struct sk_buff *skb_tmp, u32 test_time) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *loop_test_rx_buf = nic_dev->loop_test_rx_buf; + u32 cnt = test_time * SSSNIC_TEST_TIME_MULTIPLE; + struct sk_buff *skb = NULL; + int lp_pkt_cnt = nic_dev->loop_pkt_len; + unsigned char pkt_mark_data; + u32 i; + u32 j; + + for (i = 0; i < cnt; i++) { + nic_dev->loop_test_rx_cnt = 0; + memset(loop_test_rx_buf, 0, SSSNIC_LP_PKT_CNT * lp_pkt_cnt); + + for (j = 0; j < SSSNIC_LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + nicif_err(nic_dev, drv, netdev, + "Fail to copy skb for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[lp_pkt_cnt - 1] = j; + + if (sss_nic_loop_start_xmit(skb, netdev) != NETDEV_TX_OK) { + dev_kfree_skb_any(skb); + nicif_err(nic_dev, drv, netdev, + "Fail to xmit pkt for loopback test\n"); + return -EBUSY; + } + } + + /* wait till all pkts received to RX buffer */ + msleep(SSSNIC_WAIT_PKTS_TO_RX_BUFFER); + + for (j = 0; j < SSSNIC_LP_PKT_CNT; j++) { + pkt_mark_data = *(loop_test_rx_buf + (j * lp_pkt_cnt) + (lp_pkt_cnt - 1)); + if (memcmp((loop_test_rx_buf + (j * lp_pkt_cnt)), + skb_tmp->data, (lp_pkt_cnt - 1)) != 0 || + pkt_mark_data != j) { + nicif_err(nic_dev, drv, netdev, + "Fail to compare pkt in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * SSSNIC_LP_PKT_CNT)), + (lp_pkt_cnt - 1), pkt_mark_data); + return -EIO; + } + } + } + + return 0; +} + +static struct sk_buff *sss_nic_alloc_loop_skb(struct 
sss_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct sk_buff *skb = NULL;
+	struct ethhdr *eth_hdr = NULL;
+	u8 *test_data = NULL;
+	u32 i;
+
+	skb = alloc_skb(nic_dev->loop_pkt_len, GFP_ATOMIC);
+	if (!skb)
+		return skb;
+
+	eth_hdr = __skb_put(skb, ETH_HLEN);
+	eth_hdr->h_proto = htons(ETH_P_ARP);
+	ether_addr_copy(eth_hdr->h_dest, nic_dev->netdev->dev_addr);
+	eth_zero_addr(eth_hdr->h_source);
+	skb_reset_mac_header(skb);
+
+	/* test_data points at the payload area just past the Ethernet
+	 * header and is loop_pkt_len - ETH_HLEN bytes long, so the index
+	 * must be rebased: the old test_data[i] wrote ETH_HLEN bytes past
+	 * the area reserved by __skb_put() and left the first ETH_HLEN
+	 * payload bytes uninitialized.
+	 */
+	test_data = __skb_put(skb, nic_dev->loop_pkt_len - ETH_HLEN);
+	for (i = ETH_HLEN; i < nic_dev->loop_pkt_len; i++)
+		test_data[i - ETH_HLEN] = i & 0xFF;
+
+	skb->queue_mapping = 0;
+	skb->dev = netdev;
+	skb->protocol = htons(ETH_P_ARP);
+
+	return skb;
+}
+
+/* Build the template loopback frame, run the echo test against it for
+ * @test_time seconds, then free the template on every path.
+ * Return 0 on success, negative errno on failure.
+ */
+static int sss_nic_run_loop_test(struct sss_nic_dev *nic_dev, u32 test_time)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct sk_buff *skb_tmp = NULL;
+	int ret;
+
+	skb_tmp = sss_nic_alloc_loop_skb(nic_dev);
+	if (!skb_tmp) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Fail to create lp test skb for loopback test\n");
+		return -ENOMEM;
+	}
+
+	ret = sss_nic_finish_loop_test(nic_dev, skb_tmp, test_time);
+
+	/* single exit: the template is freed whether the test passed or not */
+	dev_kfree_skb_any(skb_tmp);
+
+	if (ret != 0)
+		return ret;
+
+	nicif_info(nic_dev, drv, netdev, "Success to loopback test.\n");
+	return 0;
+}
+
+/* Select internal (chip) or external (cable) loopback per @flags, run
+ * the test, and restore the port mode afterwards. *test_index reports
+ * which test ran so the caller can flag the right ethtool result slot.
+ */
+static int sss_nic_do_loop_test(struct sss_nic_dev *nic_dev, u32 *flags,
+				u32 test_time, enum sss_nic_lp_test_type *test_index)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int ret = 0;
+
+	if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) {
+		*test_index = SSSNIC_INTERNAL_LP_TEST;
+		if (sss_nic_set_loopback_mode(nic_dev,
+					      SSSNIC_INTERNAL_LP_MODE, true)) {
+			nicif_err(nic_dev, drv, netdev,
+				  "Fail to set port loopback mode before loopback test\n");
+			return -EFAULT;
+		}
+
+		/* suspend 5000 ms, waiting for port to stop receiving frames */
+		msleep(SSSNIC_WAIT_LOOP_TEST_FINISH_TIMEOUT);
+	} else {
+		*test_index = SSSNIC_EXTERNAL_LP_TEST;
+	}
+
+	SSSNIC_SET_NIC_DEV_FLAG(nic_dev,
SSSNIC_LP_TEST); + + if (sss_nic_run_loop_test(nic_dev, test_time)) + ret = -EFAULT; + + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST); + msleep(SSSNIC_WAIT_CLEAR_LP_TEST); + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (sss_nic_set_loopback_mode(nic_dev, + SSSNIC_INTERNAL_LP_MODE, false)) { + nicif_err(nic_dev, drv, netdev, + "Fail to cancel port loopback mode after loopback test\n"); + ret = -EFAULT; + } + } else { + *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + return ret; +} + +void sss_nic_loop_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + enum sss_nic_lp_test_type test_type = SSSNIC_INTERNAL_LP_TEST; + u32 act_test_time = SSSNIC_LP_DEF_TIME; + u8 link_state = 0; + int ret; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_err(nic_dev, drv, netdev, + "Fail to entry loopback test when netdev is closed\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[SSSNIC_PORT_DOWN_ERR_ID] = 1; + return; + } + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ret = sss_nic_do_loop_test(nic_dev, ð_test->flags, act_test_time, &test_type); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_type] = 1; + } + + netif_tx_wake_all_queues(netdev); + + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (!ret && link_state) + netif_carrier_on(netdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h new file mode 100644 index 0000000000000..9cfb72b2668d5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_API_H +#define SSS_NIC_ETHTOOL_API_H + +#include + +#define SSSNIC_PRIV_FLAG_SYMM_RSS BIT(0) +#define SSSNIC_PRIV_FLAG_LINK_UP BIT(1) +#define SSSNIC_PRIV_FLAG_RQ_RECOVERY BIT(2) + 
+#define SSSNIC_COALESCE_ALL_QUEUE 0xFFFF + +#define SSSNIC_SFP_TYPE_EXT_FLAG 0x3 + +typedef void (*sss_nic_get_module_info_t)(struct ethtool_modinfo *modinfo, u8 sfp_type_ext); + +enum sss_nic_lp_test_type { + SSSNIC_INTERNAL_LP_TEST = 0, + SSSNIC_EXTERNAL_LP_TEST = 1, + SSSNIC_LP_TEST_TYPE_MAX = 2, +}; + +enum module_type { + SSSNIC_MODULE_TYPE_SFP = 0x3, + SSSNIC_MODULE_TYPE_QSFP = 0x0C, + SSSNIC_MODULE_TYPE_QSFP_PLUS = 0x0D, + SSSNIC_MODULE_TYPE_QSFP28 = 0x11, + SSSNIC_MODULE_TYPE_MAX, +}; + +void sss_nic_update_qp_depth(struct sss_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth); +int sss_nic_check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ringparam); +void sss_nic_intr_coal_to_ethtool_coal(struct ethtool_coalesce *ethtool_coal, + struct sss_nic_intr_coal_info *nic_coal); +int sss_nic_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethtool_coal, u16 queue); +int sss_nic_set_hw_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, struct sss_nic_intr_coal_info *coal); +int sss_nic_check_coal_param_support(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_check_coal_param_valid(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_check_coal_param_range(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_coalesce_check(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_set_coal_param_to_hw(struct sss_nic_dev *nic_dev, + struct sss_nic_intr_coal_info *intr_coal_info, u16 queue); +void sss_nic_coalesce_align_check(struct net_device *netdev, + struct ethtool_coalesce *coal); +void sss_nic_coalesce_change_check(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue); +void sss_nic_ethtool_coalesce_to_intr_coal_info(struct sss_nic_intr_coal_info *nic_coal, + struct ethtool_coalesce *ethtool_coal); +int sss_nic_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce 
*coal, u16 queue); +void sss_nic_module_type_sfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +void sss_nic_module_type_qsfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +void sss_nic_module_type_qsfp_plus(struct ethtool_modinfo *modinfo, u8 sfp_type_ext); +void sss_nic_module_type_qsfp28(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +int sss_nic_set_rq_recovery_flag(struct net_device *netdev, + u32 flag); +int sss_nic_set_symm_rss_flag(struct net_device *netdev, u32 flag); +void sss_nic_force_link_up(struct sss_nic_dev *nic_dev); +int sss_nic_force_link_down(struct sss_nic_dev *nic_dev); +int sss_nic_set_force_link_flag(struct net_device *netdev, u32 flag); +void sss_nic_loop_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c new file mode 100644 index 0000000000000..d5b80dfb2f8bc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_ethtool_stats_api.h" + +typedef int (*sss_nic_ss_handler_t)(struct sss_nic_dev *nic_dev); + +struct sss_nic_handler { + int type; + sss_nic_ss_handler_t handler_func; +}; + +typedef void (*sss_nic_strings_handler_t)(struct sss_nic_dev *nic_dev, + u8 *buffer); + +struct sss_nic_get_strings { + int type; + sss_nic_strings_handler_t handler_func; +}; + +int sss_nic_get_sset_count(struct 
net_device *netdev, int settings) +{ + int i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + struct sss_nic_handler handler[] = { + {ETH_SS_TEST, sss_nic_eth_ss_test}, + {ETH_SS_STATS, sss_nic_eth_ss_stats}, + {ETH_SS_PRIV_FLAGS, sss_nic_eth_ss_priv_flags}, + }; + + for (i = 0; i < ARRAY_LEN(handler); i++) + if (settings == handler[i].type) + return handler[i].handler_func(nic_dev); + + return -EOPNOTSUPP; +} + +void sss_nic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + u16 cnt; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + cnt = sss_nic_get_ethtool_dev_stats(nic_dev, data); + + cnt += sss_nic_get_ethtool_vport_stats(nic_dev, data + cnt); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + cnt += sss_nic_get_ethtool_port_stats(nic_dev, data + cnt); + + sss_nic_get_drv_queue_stats(nic_dev, data + cnt); +} + +void sss_nic_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + int i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + struct sss_nic_get_strings handler[] = { + {ETH_SS_TEST, sss_nic_get_test_strings}, + {ETH_SS_STATS, sss_nic_get_drv_stats_strings}, + {ETH_SS_PRIV_FLAGS, sss_nic_get_priv_flags_strings}, + }; + + for (i = 0; i < ARRAY_LEN(handler); i++) + if (stringset == handler[i].type) + return handler[i].handler_func(nic_dev, buf); + + nicif_err(nic_dev, drv, netdev, "Invalid string set %u.", stringset); +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *ksetting) +{ + int ret; + struct sss_nic_cmd_link_settings cmd = {0}; + + sss_nic_ethtool_ksetting_clear(ksetting, supported); + sss_nic_ethtool_ksetting_clear(ksetting, advertising); + + ret = sss_nic_get_link_setting(net_dev, &cmd); + if (ret != 0) + return ret; + + sss_nic_copy_ksetting(ksetting, &cmd); + + return 0; +} +#endif +#endif + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef 
XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ksettings) +{ + /* Only support to set autoneg and speed */ + return sssnic_set_link_settings(netdev, + ksettings->base.autoneg, ksettings->base.speed); +} +#endif +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h new file mode 100644 index 0000000000000..3e3d6e1aa8d63 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_STATS_H +#define SSS_NIC_ETHTOOL_STATS_H + +#include +#include + +#include "sss_kernel.h" + +void sss_nic_get_strings(struct net_device *netdev, u32 stringset, u8 *buf); +void sss_nic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +int sss_nic_get_sset_count(struct net_device *netdev, int settings); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *ksetting); +int sss_nic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ksettings); +#endif +#endif + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c new file mode 100644 index 0000000000000..a81bb15eab64a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c @@ -0,0 +1,1057 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include 
"sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_ethtool_stats_api.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" + +#define SSSNIC_SET_SUPPORTED_MODE 0 +#define SSSNIC_SET_ADVERTISED_MODE 1 + +#define SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported) +#define SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) + +#define SSSNIC_ETHTOOL_ADD_SPPED_LINK_MODE(ecmd, mode, op) \ +do { \ + u32 _link_mode; \ + unsigned long *val = ((op) == SSSNIC_SET_SUPPORTED_MODE) ? \ + (ecmd)->supported : (ecmd)->advertising; \ + for (_link_mode = 0; _link_mode < g_link_mode_table[mode].array_len; _link_mode++) { \ + if (g_link_mode_table[mode].array[_link_mode] >= \ + __ETHTOOL_LINK_MODE_MASK_NBITS) \ + continue; \ + set_bit(g_link_mode_table[mode].array[_link_mode], val); \ + } \ +} while (0) + +#define SSSNIC_NETDEV_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct rtnl_link_stats64, _item), \ + .offset = offsetof(struct rtnl_link_stats64, _item) \ +} + +#define SSSNIC_TX_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_tx_stats, _item), \ + .offset = offsetof(struct sss_nic_tx_stats, _item) \ +} + +#define SSSNIC_RQ_STATS(_item) { \ + .name = "rxq%d_"#_item, \ + .len = FIELD_SIZEOF(struct sss_nic_rq_stats, _item), \ + .offset = offsetof(struct sss_nic_rq_stats, _item) \ +} + +#define SSSNIC_SQ_STATS(_item) { \ + .name = "txq%d_"#_item, \ + .len = FIELD_SIZEOF(struct sss_nic_sq_stats, _item), \ + .offset = offsetof(struct sss_nic_sq_stats, _item) \ +} + +#define SSSNIC_FUNCTION_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_port_stats, _item), \ + .offset = offsetof(struct sss_nic_port_stats, _item) \ +} + +#define 
SSSNIC_PORT_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_mag_port_stats, _item), \ + .offset = offsetof(struct sss_nic_mag_port_stats, _item) \ +} + +#define SSSNIC_GET_VALUE_OF_PTR(len, ptr) ( \ + (len) == sizeof(u64) ? *(u64 *)(ptr) : \ + (len) == sizeof(u32) ? *(u32 *)(ptr) : \ + (len) == sizeof(u16) ? *(u16 *)(ptr) : *(u8 *)(ptr) \ +) + +#define SSSNIC_DEV_STATS_PACK(items, item_idx, array, stats_ptr) \ +do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, SSS_TOOL_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + SSSNIC_GET_VALUE_OF_PTR((array)[j].len, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define SSSNIC_QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) \ +do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + SSS_TOOL_SHOW_ITEM_LEN); \ + snprintf((items)[item_idx].name, SSS_TOOL_SHOW_ITEM_LEN, \ + (array)[j].name, (qid)); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + SSSNIC_GET_VALUE_OF_PTR((array)[j].len, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define SSSNIC_CONVERT_DATA_TYPE(len, p) (((len) == sizeof(u64)) ? *(u64 *)(p) : *(u32 *)(p)) +#define SSSNIC_AUTONEG_STRING(autoneg) ((autoneg) ? ("autong enable") : ("autong disable")) +#define SSSNIC_AUTONEG_ENABLE(autoneg) ((autoneg) ? 
SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF) + +#define SSSNIC_NEGATE_ZERO_U32 ((u32)~0) + +struct sss_nic_hw2ethtool_link_mode { + const u32 *array; + u32 array_len; + u32 speed; +}; + +typedef void (*sss_nic_port_type_handler_t)(struct sss_nic_cmd_link_settings *cmd); + +static void sss_nic_set_fibre_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_da_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_tp_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_none_port(struct sss_nic_cmd_link_settings *cmd); + +static char g_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +static char g_priv_flags_strings[][ETH_GSTRING_LEN] = { + "Symmetric-RSS", + "Force-Link-up", + "Rxq_Recovery", +}; + +static struct sss_nic_stats g_nic_sq_stats[] = { + SSSNIC_SQ_STATS(tx_packets), + SSSNIC_SQ_STATS(tx_bytes), + SSSNIC_SQ_STATS(tx_busy), + SSSNIC_SQ_STATS(wake), + SSSNIC_SQ_STATS(tx_dropped), +}; + +static struct sss_nic_stats g_nic_sq_stats_extern[] = { + SSSNIC_SQ_STATS(skb_pad_err), + SSSNIC_SQ_STATS(offload_err), + SSSNIC_SQ_STATS(dma_map_err), + SSSNIC_SQ_STATS(unknown_tunnel_proto), + SSSNIC_SQ_STATS(frag_size_zero), + SSSNIC_SQ_STATS(frag_len_overflow), + SSSNIC_SQ_STATS(rsvd1), + SSSNIC_SQ_STATS(rsvd2), +}; + +static struct sss_nic_stats g_nic_rq_stats[] = { + SSSNIC_RQ_STATS(rx_packets), + SSSNIC_RQ_STATS(rx_bytes), + SSSNIC_RQ_STATS(errors), + SSSNIC_RQ_STATS(csum_errors), + SSSNIC_RQ_STATS(other_errors), + SSSNIC_RQ_STATS(rx_dropped), +#ifdef HAVE_XDP_SUPPORT + SSSNIC_RQ_STATS(xdp_dropped), +#endif + SSSNIC_RQ_STATS(rx_buf_errors), +}; + +static struct sss_nic_stats g_nic_rq_stats_extern[] = { + SSSNIC_RQ_STATS(alloc_rx_dma_err), + SSSNIC_RQ_STATS(alloc_skb_err), + SSSNIC_RQ_STATS(reset_drop_sge), + SSSNIC_RQ_STATS(large_xdp_pkts), + SSSNIC_RQ_STATS(rsvd2), +}; + +static struct sss_nic_stats g_netdev_stats[] = { + SSSNIC_NETDEV_STATS(rx_packets), 
+ SSSNIC_NETDEV_STATS(tx_packets), + SSSNIC_NETDEV_STATS(rx_bytes), + SSSNIC_NETDEV_STATS(tx_bytes), + SSSNIC_NETDEV_STATS(rx_errors), + SSSNIC_NETDEV_STATS(tx_errors), + SSSNIC_NETDEV_STATS(rx_dropped), + SSSNIC_NETDEV_STATS(tx_dropped), + SSSNIC_NETDEV_STATS(multicast), + SSSNIC_NETDEV_STATS(collisions), + SSSNIC_NETDEV_STATS(rx_length_errors), + SSSNIC_NETDEV_STATS(rx_over_errors), + SSSNIC_NETDEV_STATS(rx_crc_errors), + SSSNIC_NETDEV_STATS(rx_frame_errors), + SSSNIC_NETDEV_STATS(rx_fifo_errors), + SSSNIC_NETDEV_STATS(rx_missed_errors), + SSSNIC_NETDEV_STATS(tx_aborted_errors), + SSSNIC_NETDEV_STATS(tx_carrier_errors), + SSSNIC_NETDEV_STATS(tx_fifo_errors), + SSSNIC_NETDEV_STATS(tx_heartbeat_errors), +}; + +static struct sss_nic_stats g_dev_stats[] = { + SSSNIC_TX_STATS(tx_timeout), +}; + +static struct sss_nic_stats g_dev_stats_extern[] = { + SSSNIC_TX_STATS(tx_drop), + SSSNIC_TX_STATS(tx_invalid_qid), + SSSNIC_TX_STATS(rsvd1), + SSSNIC_TX_STATS(rsvd2), +}; + +static struct sss_nic_stats g_function_stats[] = { + SSSNIC_FUNCTION_STATS(tx_unicast_pkts), + SSSNIC_FUNCTION_STATS(tx_unicast_bytes), + SSSNIC_FUNCTION_STATS(tx_multicast_pkts), + SSSNIC_FUNCTION_STATS(tx_multicast_bytes), + SSSNIC_FUNCTION_STATS(tx_broadcast_pkts), + SSSNIC_FUNCTION_STATS(tx_broadcast_bytes), + + SSSNIC_FUNCTION_STATS(rx_unicast_pkts), + SSSNIC_FUNCTION_STATS(rx_unicast_bytes), + SSSNIC_FUNCTION_STATS(rx_multicast_pkts), + SSSNIC_FUNCTION_STATS(rx_multicast_bytes), + SSSNIC_FUNCTION_STATS(rx_broadcast_pkts), + SSSNIC_FUNCTION_STATS(rx_broadcast_bytes), + + SSSNIC_FUNCTION_STATS(tx_discard), + SSSNIC_FUNCTION_STATS(rx_discard), + SSSNIC_FUNCTION_STATS(tx_err), + SSSNIC_FUNCTION_STATS(rx_err), +}; + +static struct sss_nic_stats g_port_stats[] = { + SSSNIC_PORT_STATS(tx_fragment_pkts), + SSSNIC_PORT_STATS(tx_undersize_pkts), + SSSNIC_PORT_STATS(tx_undermin_pkts), + SSSNIC_PORT_STATS(tx_64_oct_pkts), + SSSNIC_PORT_STATS(tx_65_127_oct_pkts), + SSSNIC_PORT_STATS(tx_128_255_oct_pkts), + 
SSSNIC_PORT_STATS(tx_256_511_oct_pkts), + SSSNIC_PORT_STATS(tx_512_1023_oct_pkts), + SSSNIC_PORT_STATS(tx_1024_1518_oct_pkts), + SSSNIC_PORT_STATS(tx_1519_2047_oct_pkts), + SSSNIC_PORT_STATS(tx_2048_4095_oct_pkts), + SSSNIC_PORT_STATS(tx_4096_8191_oct_pkts), + SSSNIC_PORT_STATS(tx_8192_9216_oct_pkts), + SSSNIC_PORT_STATS(tx_9217_12287_oct_pkts), + SSSNIC_PORT_STATS(tx_12288_16383_oct_pkts), + SSSNIC_PORT_STATS(tx_1519_max_bad_pkts), + SSSNIC_PORT_STATS(tx_1519_max_good_pkts), + SSSNIC_PORT_STATS(tx_oversize_pkts), + SSSNIC_PORT_STATS(tx_jabber_pkts), + SSSNIC_PORT_STATS(tx_bad_pkts), + SSSNIC_PORT_STATS(tx_bad_octs), + SSSNIC_PORT_STATS(tx_good_pkts), + SSSNIC_PORT_STATS(tx_good_octs), + SSSNIC_PORT_STATS(tx_total_pkts), + SSSNIC_PORT_STATS(tx_total_octs), + SSSNIC_PORT_STATS(tx_uni_pkts), + SSSNIC_PORT_STATS(tx_multi_pkts), + SSSNIC_PORT_STATS(tx_broad_pkts), + SSSNIC_PORT_STATS(tx_pauses), + SSSNIC_PORT_STATS(tx_pfc_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri0_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri1_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri2_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri3_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri4_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri5_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri6_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri7_pkts), + SSSNIC_PORT_STATS(tx_control_pkts), + SSSNIC_PORT_STATS(tx_err_all_pkts), + SSSNIC_PORT_STATS(tx_from_app_good_pkts), + SSSNIC_PORT_STATS(tx_from_app_bad_pkts), + + SSSNIC_PORT_STATS(rx_fragment_pkts), + SSSNIC_PORT_STATS(rx_undersize_pkts), + SSSNIC_PORT_STATS(rx_undermin_pkts), + SSSNIC_PORT_STATS(rx_64_oct_pkts), + SSSNIC_PORT_STATS(rx_65_127_oct_pkts), + SSSNIC_PORT_STATS(rx_128_255_oct_pkts), + SSSNIC_PORT_STATS(rx_256_511_oct_pkts), + SSSNIC_PORT_STATS(rx_512_1023_oct_pkts), + SSSNIC_PORT_STATS(rx_1024_1518_oct_pkts), + SSSNIC_PORT_STATS(rx_1519_2047_oct_pkts), + SSSNIC_PORT_STATS(rx_2048_4095_oct_pkts), + SSSNIC_PORT_STATS(rx_4096_8191_oct_pkts), + SSSNIC_PORT_STATS(rx_8192_9216_oct_pkts), + 
SSSNIC_PORT_STATS(rx_9217_12287_oct_pkts), + SSSNIC_PORT_STATS(rx_12288_16383_oct_pkts), + SSSNIC_PORT_STATS(rx_1519_max_bad_pkts), + SSSNIC_PORT_STATS(rx_1519_max_good_pkts), + SSSNIC_PORT_STATS(rx_oversize_pkts), + SSSNIC_PORT_STATS(rx_jabber_pkts), + SSSNIC_PORT_STATS(rx_bad_pkts), + SSSNIC_PORT_STATS(rx_bad_octs), + SSSNIC_PORT_STATS(rx_good_pkts), + SSSNIC_PORT_STATS(rx_good_octs), + SSSNIC_PORT_STATS(rx_total_pkts), + SSSNIC_PORT_STATS(rx_total_octs), + SSSNIC_PORT_STATS(rx_uni_pkts), + SSSNIC_PORT_STATS(rx_multi_pkts), + SSSNIC_PORT_STATS(rx_broad_pkts), + SSSNIC_PORT_STATS(rx_pauses), + SSSNIC_PORT_STATS(rx_pfc_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri0_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri1_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri2_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri3_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri4_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri5_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri6_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri7_pkts), + SSSNIC_PORT_STATS(rx_control_pkts), + SSSNIC_PORT_STATS(rx_sym_err_pkts), + SSSNIC_PORT_STATS(rx_fcs_err_pkts), + SSSNIC_PORT_STATS(rx_send_app_good_pkts), + SSSNIC_PORT_STATS(rx_send_app_bad_pkts), + SSSNIC_PORT_STATS(rx_unfilter_pkts), +}; + +static const u32 g_mag_link_mode_ge[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 g_mag_link_mode_10ge_base_r[] = { + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 g_mag_link_mode_25ge_base_r[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 g_mag_link_mode_40ge_base_r4[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + 
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +static const u32 g_mag_link_mode_50ge_base_r[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, +}; + +static const u32 g_mag_link_mode_50ge_base_r2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static const u32 g_mag_link_mode_200ge_base_r2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_200ge_base_r4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +static const struct sss_nic_hw2ethtool_link_mode + g_link_mode_table[SSSNIC_LINK_MODE_MAX_NUMBERS] = { + [SSSNIC_LINK_MODE_GE] = { + .array = g_mag_link_mode_ge, + .array_len = ARRAY_LEN(g_mag_link_mode_ge), + .speed = SPEED_1000, + }, + [SSSNIC_LINK_MODE_10GE_BASE_R] = { + .array = g_mag_link_mode_10ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_10ge_base_r), + .speed = SPEED_10000, + }, + [SSSNIC_LINK_MODE_25GE_BASE_R] = { + .array = g_mag_link_mode_25ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_25ge_base_r), + 
.speed = SPEED_25000, + }, + [SSSNIC_LINK_MODE_40GE_BASE_R4] = { + .array = g_mag_link_mode_40ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_40ge_base_r4), + .speed = SPEED_40000, + }, + [SSSNIC_LINK_MODE_50GE_BASE_R] = { + .array = g_mag_link_mode_50ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_50ge_base_r), + .speed = SPEED_50000, + }, + [SSSNIC_LINK_MODE_50GE_BASE_R2] = { + .array = g_mag_link_mode_50ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_50ge_base_r2), + .speed = SPEED_50000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R] = { + .array = g_mag_link_mode_100ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R2] = { + .array = g_mag_link_mode_100ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r2), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R4] = { + .array = g_mag_link_mode_100ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r4), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_200GE_BASE_R2] = { + .array = g_mag_link_mode_200ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_200ge_base_r2), + .speed = SPEED_200000, + }, + [SSSNIC_LINK_MODE_200GE_BASE_R4] = { + .array = g_mag_link_mode_200ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_200ge_base_r4), + .speed = SPEED_200000, + }, +}; + +/* Related to enum sss_nic_mag_opcode_port_speed */ +static u32 g_hw_to_ethtool_speed[] = { + (u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000 +}; + +static sss_nic_port_type_handler_t g_link_port_set_handler[] = { + NULL, + sss_nic_set_fibre_port, + sss_nic_set_fibre_port, + sss_nic_set_da_port, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + sss_nic_set_fibre_port, + sss_nic_set_tp_port, + sss_nic_set_none_port +}; + +u32 sss_nic_get_io_stats_size(const struct sss_nic_dev *nic_dev) +{ + u32 count; + + count = ARRAY_LEN(g_dev_stats) + + ARRAY_LEN(g_dev_stats_extern) + + (ARRAY_LEN(g_nic_sq_stats) + + ARRAY_LEN(g_nic_sq_stats_extern) + + ARRAY_LEN(g_nic_rq_stats) + + ARRAY_LEN(g_nic_rq_stats_extern)) * nic_dev->max_qp_num; + + return count; +} + +int sss_nic_eth_ss_test(struct sss_nic_dev *nic_dev) +{ + return ARRAY_LEN(g_test_strings); +} + +int sss_nic_eth_ss_stats(struct sss_nic_dev *nic_dev) +{ + int count; + int q_num; + + q_num = nic_dev->qp_res.qp_num; + count = ARRAY_LEN(g_netdev_stats) + ARRAY_LEN(g_dev_stats) + + ARRAY_LEN(g_function_stats) + (ARRAY_LEN(g_nic_sq_stats) + + ARRAY_LEN(g_nic_rq_stats)) * q_num; + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(g_port_stats); + + return count; +} + +int sss_nic_eth_ss_priv_flags(struct sss_nic_dev *nic_dev) +{ + return ARRAY_LEN(g_priv_flags_strings); +} + +static void sss_nic_get_ethtool_stats_data(char *ethtool_stats, + struct sss_nic_stats *stats, u16 stats_len, u64 *data) +{ + u16 i = 0; + u16 j = 0; + char *ptr = NULL; + + for (j = 0; j < stats_len; j++) { + ptr = ethtool_stats + stats[j].offset; + data[i] = SSSNIC_CONVERT_DATA_TYPE(stats[j].len, ptr); + i++; + } +} + +u16 sss_nic_get_ethtool_dev_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + u16 cnt = 0; +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats = NULL; + + net_stats = dev_get_stats(nic_dev->netdev, &temp); +#else + const struct net_device_stats *net_stats = NULL; + + net_stats = dev_get_stats(nic_dev->netdev); +#endif + + sss_nic_get_ethtool_stats_data((char *)net_stats, g_netdev_stats, + ARRAY_LEN(g_netdev_stats), data); + cnt += ARRAY_LEN(g_netdev_stats); + + 
sss_nic_get_ethtool_stats_data((char *)&nic_dev->tx_stats, g_dev_stats, + ARRAY_LEN(g_dev_stats), data + cnt); + cnt += ARRAY_LEN(g_dev_stats); + + return cnt; +} + +void sss_nic_get_drv_queue_stats(struct sss_nic_dev *nic_dev, u64 *data) +{ + u16 qid; + struct sss_nic_rq_stats rq_stats = {0}; + struct sss_nic_sq_stats sq_stats = {0}; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!nic_dev->sq_desc_group) + break; + + sss_nic_get_sq_stats(&nic_dev->sq_desc_group[qid], &sq_stats); + sss_nic_get_ethtool_stats_data((char *)&sq_stats, g_nic_sq_stats, + ARRAY_LEN(g_nic_sq_stats), + data + qid * ARRAY_LEN(g_nic_sq_stats)); + } + + data += ARRAY_LEN(g_nic_sq_stats) * nic_dev->qp_res.qp_num; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!nic_dev->rq_desc_group) + break; + + sss_nic_get_rq_stats(&nic_dev->rq_desc_group[qid], &rq_stats); + sss_nic_get_ethtool_stats_data((char *)&rq_stats, g_nic_rq_stats, + ARRAY_LEN(g_nic_rq_stats), + data + qid * ARRAY_LEN(g_nic_rq_stats)); + } +} + +int sss_nic_get_ethtool_vport_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + int ret; + struct sss_nic_port_stats vport_stats = {0}; + + ret = sss_nic_get_vport_stats(nic_dev, sss_get_global_func_id(nic_dev->hwdev), + &vport_stats); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get function stats from fw, ret:%d\n", ret); + return ARRAY_LEN(g_function_stats); + } + sss_nic_get_ethtool_stats_data((char *)&vport_stats, g_function_stats, + ARRAY_LEN(g_function_stats), data); + + return ARRAY_LEN(g_function_stats); +} + +u16 sss_nic_get_ethtool_port_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + int ret; + u16 i = 0; + struct sss_nic_mag_port_stats *stats = NULL; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + memset(&data[i], 0, ARRAY_LEN(g_port_stats) * sizeof(*data)); + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to Malloc port stats\n"); + return ARRAY_LEN(g_port_stats); + } + + ret = 
sss_nic_get_phy_port_stats(nic_dev, stats); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get port stats from fw\n"); + goto out; + } + + sss_nic_get_ethtool_stats_data((char *)stats, g_port_stats, + ARRAY_LEN(g_port_stats), data); + +out: + kfree(stats); + + return ARRAY_LEN(g_port_stats); +} + +u16 sss_nic_get_stats_strings(struct sss_nic_stats *stats, + u16 stats_len, char *buffer) +{ + u16 i; + + for (i = 0; i < stats_len; i++) { + memcpy(buffer, stats[i].name, ETH_GSTRING_LEN); + buffer += ETH_GSTRING_LEN; + } + + return i; +} + +u16 sss_nic_get_drv_dev_strings(struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 cnt = + sss_nic_get_stats_strings(g_netdev_stats, ARRAY_LEN(g_netdev_stats), buffer); + cnt += sss_nic_get_stats_strings(g_dev_stats, ARRAY_LEN(g_dev_stats), + buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +u16 sss_nic_get_hw_stats_strings(struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 cnt = sss_nic_get_stats_strings(g_function_stats, + ARRAY_LEN(g_function_stats), buffer); + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + return cnt; + + cnt += sss_nic_get_stats_strings(g_port_stats, + ARRAY_LEN(g_port_stats), buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +int sss_nic_get_queue_stats_cnt(const struct sss_nic_dev *nic_dev, + struct sss_nic_stats *stats, u16 stats_len, u16 qid, char *buffer) +{ + int ret; + u16 i; + + for (i = 0; i < stats_len; i++) { + ret = sprintf(buffer, stats[i].name, qid); + if (ret < 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to sprintf stats name:%s, qid: %u, stats id: %u\n", + stats[i].name, qid, i); + buffer += ETH_GSTRING_LEN; + } + + return i; +} + +u16 sss_nic_get_qp_stats_strings(const struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 qid = 0; + u16 cnt = 0; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) + cnt += sss_nic_get_queue_stats_cnt(nic_dev, g_nic_sq_stats, + ARRAY_LEN(g_nic_sq_stats), qid, + buffer + cnt * ETH_GSTRING_LEN); + + for (qid = 0; qid < 
nic_dev->qp_res.qp_num; qid++) + cnt += sss_nic_get_queue_stats_cnt(nic_dev, g_nic_rq_stats, + ARRAY_LEN(g_nic_rq_stats), qid, + buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +void sss_nic_get_test_strings(struct sss_nic_dev *nic_dev, u8 *buffer) +{ + memcpy(buffer, *g_test_strings, sizeof(g_test_strings)); +} + +void sss_nic_get_drv_stats_strings(struct sss_nic_dev *nic_dev, + u8 *buffer) +{ + u16 offset = 0; + + offset = sss_nic_get_drv_dev_strings(nic_dev, buffer); + offset += sss_nic_get_hw_stats_strings(nic_dev, buffer + offset * ETH_GSTRING_LEN); + sss_nic_get_qp_stats_strings(nic_dev, buffer + offset * ETH_GSTRING_LEN); +} + +void sss_nic_get_priv_flags_strings(struct sss_nic_dev *nic_dev, + u8 *buffer) +{ + memcpy(buffer, g_priv_flags_strings, sizeof(g_priv_flags_strings)); +} + +int sss_nic_get_speed_level(u32 speed) +{ + int level; + + for (level = 0; level < ARRAY_LEN(g_hw_to_ethtool_speed); level++) { + if (g_hw_to_ethtool_speed[level] == speed) + break; + } + + return level; +} + +void sss_nic_add_ethtool_link_mode(struct sss_nic_cmd_link_settings *cmd, + u32 hw_mode, u32 op) +{ + u32 i; + + for (i = 0; i < SSSNIC_LINK_MODE_MAX_NUMBERS; i++) { + if (test_bit(i, (unsigned long *)&hw_mode)) + SSSNIC_ETHTOOL_ADD_SPPED_LINK_MODE(cmd, i, op); + } +} + +void sss_nic_set_link_speed(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd, + struct sss_nic_port_info *port_info) +{ + int ret; + u8 state = 0; + + if (port_info->supported_mode != SSSNIC_LINK_MODE_UNKNOWN) + sss_nic_add_ethtool_link_mode(cmd, + port_info->supported_mode, + SSSNIC_SET_SUPPORTED_MODE); + if (port_info->advertised_mode != SSSNIC_LINK_MODE_UNKNOWN) + sss_nic_add_ethtool_link_mode(cmd, + port_info->advertised_mode, + SSSNIC_SET_ADVERTISED_MODE); + + ret = sss_nic_get_hw_link_state(nic_dev, &state); + if (ret != 0 || state == 0) { + cmd->duplex = DUPLEX_UNKNOWN; + cmd->speed = (u32)SPEED_UNKNOWN; + return; + } + + cmd->duplex = port_info->duplex; + cmd->speed = 
port_info->speed < ARRAY_LEN(g_hw_to_ethtool_speed) ? + g_hw_to_ethtool_speed[port_info->speed] : (u32)SPEED_UNKNOWN; +} + +static void sss_nic_set_fibre_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, FIBRE); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, FIBRE); + cmd->port = PORT_FIBRE; +} + +static void sss_nic_set_da_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, FIBRE); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, FIBRE); + cmd->port = PORT_DA; +} + +static void sss_nic_set_tp_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, TP); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, TP); + cmd->port = PORT_TP; +} + +static void sss_nic_set_none_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Backplane); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Backplane); + cmd->port = PORT_NONE; +} + +void sss_nic_link_port_type(struct sss_nic_cmd_link_settings *cmd, + u8 port_type) +{ + if (port_type >= ARRAY_LEN(g_link_port_set_handler)) { + cmd->port = PORT_OTHER; + return; + } + + if (!g_link_port_set_handler[port_type]) { + cmd->port = PORT_OTHER; + return; + } + + g_link_port_set_handler[port_type](cmd); +} + +int sss_nic_get_link_pause_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd) +{ + int ret; + struct sss_nic_pause_cfg pause_config = {0}; + + ret = sss_nic_get_hw_pause_info(nic_dev, &pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get pauseparam from hw\n"); + return ret; + } + + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Pause); + if (pause_config.rx_pause != 0 && pause_config.tx_pause != 0) { + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Pause); + return 0; + } + + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Asym_Pause); + if (pause_config.rx_pause != 0) + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Pause); + + 
return 0; +} + +int sss_nic_get_link_setting(struct net_device *net_dev, + struct sss_nic_cmd_link_settings *cmd) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(net_dev); + struct sss_nic_port_info info = {0}; + + ret = sss_nic_get_hw_port_info(nic_dev, &info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, net_dev, "Fail to get port info\n"); + return ret; + } + + sss_nic_set_link_speed(nic_dev, cmd, &info); + sss_nic_link_port_type(cmd, info.port_type); + + cmd->autoneg = info.autoneg_state == SSSNIC_PORT_CFG_AN_ON ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + if (info.autoneg_cap != 0) + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Autoneg); + if (info.autoneg_state == SSSNIC_PORT_CFG_AN_ON) + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Autoneg); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + ret = sss_nic_get_link_pause_setting(nic_dev, cmd); + + return ret; +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +void sss_nic_copy_ksetting(struct ethtool_link_ksettings *ksetting, + struct sss_nic_cmd_link_settings *cmd) +{ + struct ethtool_link_settings *setting = &ksetting->base; + + bitmap_copy(ksetting->link_modes.advertising, cmd->advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(ksetting->link_modes.supported, cmd->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + setting->speed = cmd->speed; + setting->duplex = cmd->duplex; + setting->port = cmd->port; + setting->autoneg = cmd->autoneg; +} +#endif +#endif + +bool sss_nic_is_support_speed(u32 support_mode, u32 speed) +{ + u32 link_mode; + + for (link_mode = 0; link_mode < SSSNIC_LINK_MODE_MAX_NUMBERS; link_mode++) { + if ((support_mode & BIT(link_mode)) == 0) + continue; + + if (g_link_mode_table[link_mode].speed == speed) + return true; + } + + return false; +} + +int sss_nic_get_link_settings_param(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *settings) +{ + struct sss_nic_port_info info = {0}; + int ret; + int level; + + ret = 
sss_nic_get_hw_port_info(nic_dev, &info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get port info\n"); + return -EAGAIN; + } + + if (info.autoneg_cap != 0) + *settings |= SSSNIC_LINK_SET_AUTONEG; + + if (autoneg == AUTONEG_ENABLE) { + if (info.autoneg_cap == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport autoneg\n"); + return -EOPNOTSUPP; + } + + return 0; + } + + if (speed != (u32)SPEED_UNKNOWN) { + if (info.supported_mode == SSSNIC_LINK_MODE_UNKNOWN || + info.advertised_mode == SSSNIC_LINK_MODE_UNKNOWN) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport link mode\n"); + return -EAGAIN; + } + + /* Set speed only when autoneg is disable */ + level = sss_nic_get_speed_level(speed); + if (level >= SSSNIC_PORT_SPEED_UNKNOWN || + !sss_nic_is_support_speed(info.supported_mode, speed)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport speed: %u\n", speed); + return -EINVAL; + } + + *settings |= SSSNIC_LINK_SET_SPEED; + return 0; + } + + nicif_err(nic_dev, drv, nic_dev->netdev, "Set speed when autoneg is off\n"); + return -EOPNOTSUPP; +} + +int sss_nic_set_settings_to_hw(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 settings) +{ + int ret; + int level = 0; + char cmd_str[128] = {0}; + struct sss_nic_link_ksettings cmd = {0}; + struct net_device *netdev = nic_dev->netdev; + char *str = (bool)((settings & SSSNIC_LINK_SET_AUTONEG) != 0) ? 
+ SSSNIC_AUTONEG_STRING((bool)autoneg) : ""; + + ret = snprintf(cmd_str, sizeof(cmd_str) - 1, "%s", str); + if (ret < 0) + return -EINVAL; + + if ((settings & SSSNIC_LINK_SET_SPEED) != 0) { + level = sss_nic_get_speed_level(speed); + ret = sprintf(cmd_str + strlen(cmd_str), "speed %u ", speed); + if (ret < 0) + return -EINVAL; + } + + cmd.valid_bitmap = settings; + cmd.autoneg = SSSNIC_AUTONEG_ENABLE((bool)autoneg); + cmd.speed = (u8)level; + + ret = sss_nic_set_link_settings(nic_dev, &cmd); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set %s\n", cmd_str); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set %s, ret: %d\n", cmd_str, ret); + return 0; +} + +int sssnic_set_link_settings(struct net_device *netdev, + u8 autoneg, u32 speed) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 settings = 0; + int ret; + + ret = sss_nic_get_link_settings_param(nic_dev, autoneg, speed, &settings); + if (ret != 0) + return ret; + + if (settings != 0) + return sss_nic_set_settings_to_hw(nic_dev, autoneg, speed, settings); + + nicif_info(nic_dev, drv, netdev, "Nothing change, exit.\n"); + + return 0; +} + +void sss_nic_get_io_stats(const struct sss_nic_dev *nic_dev, void *stats) +{ + struct sss_tool_show_item *items = stats; + int item_idx = 0; + u16 qid; + + SSSNIC_DEV_STATS_PACK(items, item_idx, g_dev_stats, &nic_dev->tx_stats); + SSSNIC_DEV_STATS_PACK(items, item_idx, g_dev_stats_extern, + &nic_dev->tx_stats); + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_sq_stats, + &nic_dev->sq_desc_group[qid].stats, qid); + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_sq_stats_extern, + &nic_dev->sq_desc_group[qid].stats, qid); + } + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_rq_stats, + &nic_dev->rq_desc_group[qid].stats, qid); + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_rq_stats_extern, + 
&nic_dev->rq_desc_group[qid].stats, qid); + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h new file mode 100644 index 0000000000000..cf2b1cbe894a1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_STATS_API_H +#define SSS_NIC_ETHTOOL_STATS_API_H + +#include +#include + +#include "sss_kernel.h" + +struct sss_nic_stats { + char name[ETH_GSTRING_LEN]; + u32 len; + int offset; +}; + +struct sss_nic_cmd_link_settings { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + + u32 speed; + u8 duplex; + u8 port; + u8 autoneg; +}; + +#define sss_nic_ethtool_ksetting_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) + +int sss_nic_eth_ss_test(struct sss_nic_dev *nic_dev); + +int sss_nic_eth_ss_stats(struct sss_nic_dev *nic_dev); + +int sss_nic_eth_ss_priv_flags(struct sss_nic_dev *nic_dev); + +u16 sss_nic_get_ethtool_dev_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +void sss_nic_get_drv_queue_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +int sss_nic_get_ethtool_vport_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +u16 sss_nic_get_ethtool_port_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +u16 sss_nic_get_stats_strings(struct sss_nic_stats *stats, + u16 stats_len, char *buffer); + +u16 sss_nic_get_drv_dev_strings(struct sss_nic_dev *nic_dev, + char *buffer); + +u16 sss_nic_get_hw_stats_strings(struct sss_nic_dev *nic_dev, + char *buffer); + +int sss_nic_get_queue_stats_cnt(const struct sss_nic_dev *nic_dev, + struct sss_nic_stats *stats, u16 stats_len, u16 qid, char *buffer); + +u16 sss_nic_get_qp_stats_strings(const struct sss_nic_dev *nic_dev, + char *buffer); + +void sss_nic_get_test_strings(struct sss_nic_dev 
*nic_dev, u8 *buffer); + +void sss_nic_get_drv_stats_strings(struct sss_nic_dev *nic_dev, + u8 *buffer); + +void sss_nic_get_priv_flags_strings(struct sss_nic_dev *nic_dev, + u8 *buffer); + +int sss_nic_get_speed_level(u32 speed); + +void sss_nic_add_ethtool_link_mode(struct sss_nic_cmd_link_settings *cmd, u32 hw_mode, u32 op); + +void sss_nic_set_link_speed(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd, + struct sss_nic_port_info *port_info); + +void sss_nic_link_port_type(struct sss_nic_cmd_link_settings *cmd, + u8 port_type); + +int sss_nic_get_link_pause_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd); + +int sss_nic_get_link_setting(struct net_device *net_dev, + struct sss_nic_cmd_link_settings *cmd); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +void sss_nic_copy_ksetting(struct ethtool_link_ksettings *ksetting, + struct sss_nic_cmd_link_settings *cmd); +#endif +#endif + +bool sss_nic_is_support_speed(u32 support_mode, u32 speed); + +int sss_nic_get_link_settings_param(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *settings); + +int sss_nic_set_settings_to_hw(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 settings); + +int sssnic_set_link_settings(struct net_device *netdev, + u8 autoneg, u32 speed); + +void sss_nic_get_io_stats(const struct sss_nic_dev *nic_dev, void *stats); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c new file mode 100644 index 0000000000000..6b8418bdfd18e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include 
"sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_event.h" + +#define SSSNIC_VF_UNREGISTER 0 + +static void sss_nic_dcb_state_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static void sss_nic_tx_pause_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static void sss_nic_bond_active_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_register_vf_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_get_vf_cos_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_get_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_set_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_del_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_update_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +static const struct nic_event_handler g_event_proc[] = { + { + .opcode = SSSNIC_MBX_OPCODE_GET_VF_COS, + .event_handler = sss_nic_dcb_state_event_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_TX_PAUSE_EXCP_NOTICE, + .event_handler = sss_nic_tx_pause_event_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE, + .event_handler = sss_nic_bond_active_event_handler, + }, +}; + +static const struct sss_nic_vf_msg_handler g_vf_cmd_proc[] = { + { + .opcode 
= SSSNIC_MBX_OPCODE_VF_REGISTER, + .msg_handler = sss_nic_register_vf_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_GET_VF_COS, + .msg_handler = sss_nic_get_vf_cos_msg_handler + }, + + { + .opcode = SSSNIC_MBX_OPCODE_GET_MAC, + .msg_handler = sss_nic_get_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_SET_MAC, + .msg_handler = sss_nic_set_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_DEL_MAC, + .msg_handler = sss_nic_del_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_UPDATE_MAC, + .msg_handler = sss_nic_update_vf_mac_msg_handler, + }, +}; + +static const struct nic_event_handler *sss_nic_get_event_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_event_proc); + + for (i = 0; i < cmd_num; i++) + if (g_event_proc[i].opcode == opcode) + return &g_event_proc[i]; + + return NULL; +} + +static const struct sss_nic_vf_msg_handler *sss_nic_get_vf_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_vf_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_vf_cmd_proc[i].opcode == opcode) + return &g_vf_cmd_proc[i]; + + return NULL; +} + +static int sss_nic_init_vf_config(struct sss_nic_io *nic_io, u16 vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + u16 func_id; + int ret; + + vf_info->specified_mac = false; + ether_addr_copy(vf_info->drv_mac, vf_info->user_mac); + + if (!is_zero_ether_addr(vf_info->drv_mac)) { + vf_info->specified_mac = true; + func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + ret = sss_nic_set_mac(nic_io->nic_dev, vf_info->drv_mac, + vf_info->pf_vlan, func_id, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set VF %d MAC, ret: %d\n", id, ret); + return ret; + } + } + + if (SSSNIC_GET_VLAN_PRIO(vf_info->pf_vlan, vf_info->pf_qos) != 0) { + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_ADD, + vf_info->pf_vlan, vf_info->pf_qos, vf_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to 
add VF %d VLAN_QOS, ret: %d\n", id, ret);
+			return ret;
+		}
+	}
+
+	if (vf_info->max_rate != 0) {
+		ret = sss_nic_set_vf_tx_rate_limit(nic_io, vf_id,
+						   vf_info->min_rate, vf_info->max_rate);
+		if (ret != 0) {
+			nic_err(nic_io->dev_hdl,
+				"Fail to set VF %d max rate %u, min rate %u, ret: %d\n",
+				id, vf_info->max_rate, vf_info->min_rate, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int sss_nic_attach_vf(struct sss_nic_io *nic_io, u16 vf_id, u32 extra_feature)
+{
+	u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id);
+	struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id];
+	int ret;
+
+	if (vf_id > nic_io->max_vf_num) { /* validate vf_id before any vf_info write (avoid OOB) */
+		nic_err(nic_io->dev_hdl, "Fail to register VF id %d out of range: [0-%d]\n",
+			SSSNIC_HW_VF_ID_TO_OS(vf_id), SSSNIC_HW_VF_ID_TO_OS(nic_io->max_vf_num));
+		return -EFAULT;
+	}
+
+	vf_info->extra_feature = extra_feature;
+
+	ret = sss_nic_init_vf_config(nic_io, vf_id);
+	if (ret != 0)
+		return ret;
+
+	vf_info->attach = true;
+
+	return 0;
+}
+
+int sss_nic_dettach_vf(struct sss_nic_io *nic_io, u16 vf_id)
+{
+	struct sss_nic_mbx_mac_addr cmd_set_mac = {0};
+	struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)];
+	u16 out_len;
+	int ret;
+
+	if (vf_id > nic_io->max_vf_num) { /* validate vf_id before any vf_info write (avoid OOB) */
+		nic_err(nic_io->dev_hdl, "Invalid vf_id %d, max_vf_num: %d\n",
+			vf_id, nic_io->max_vf_num);
+		return -EFAULT;
+	}
+
+	vf_info->extra_feature = 0;
+
+	vf_info->attach = false;
+
+	if (!vf_info->specified_mac && vf_info->pf_vlan == 0) {
+		memset(vf_info->drv_mac, 0, ETH_ALEN);
+		return 0;
+	}
+
+	out_len = sizeof(cmd_set_mac);
+	ether_addr_copy(cmd_set_mac.mac, vf_info->drv_mac);
+	cmd_set_mac.vlan_id = vf_info->pf_vlan;
+	cmd_set_mac.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id;
+
+	ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_DEL_MAC,
+					     &cmd_set_mac, sizeof(cmd_set_mac),
+					     &cmd_set_mac, &out_len);
+	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_mac)) {
+		nic_err(nic_io->dev_hdl,
+			"Fail to 
delete the mac of VF %d, ret: %d, status: 0x%x, out_len: 0x%x\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), ret, + cmd_set_mac.head.state, out_len); + return -EFAULT; + } + + memset(vf_info->drv_mac, 0, ETH_ALEN); + + return 0; +} + +static int sss_nic_register_vf_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + int ret; + struct sss_nic_mbx_attach_vf *in_info = in_buf; + struct sss_nic_mbx_attach_vf *out_info = out_buf; + + if (in_info->op_register == SSSNIC_VF_UNREGISTER) + ret = sss_nic_dettach_vf(nic_io, vf_id); + else + ret = sss_nic_attach_vf(nic_io, vf_id, in_info->extra_feature); + + *out_size = sizeof(*out_info); + if (ret != 0) + out_info->head.state = EFAULT; + + return 0; +} + +static int sss_nic_get_vf_cos_msg_handler(struct sss_nic_io *nic_io, u16 vf_id, + void *in_buf, u16 in_size, void *out_buf, + u16 *out_size) +{ + struct sss_nic_mbx_vf_dcb_cfg *out_state = out_buf; + + *out_size = sizeof(*out_state); + out_state->head.state = SSS_MGMT_CMD_SUCCESS; + memcpy(&out_state->dcb_info, &nic_io->dcb_info, sizeof(nic_io->dcb_info)); + + return 0; +} + +static int sss_nic_get_vf_mac_msg_handler(struct sss_nic_io *nic_io, u16 vf_id, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + struct sss_nic_mbx_mac_addr *out_info = out_buf; + int ret; + + if (SSSNIC_SUPPORT_VF_MAC(nic_io)) { + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_GET_MAC, + in_buf, in_size, out_buf, out_size); + if (ret == 0) { + if (is_zero_ether_addr(out_info->mac)) + ether_addr_copy(out_info->mac, vf_info->drv_mac); + } + return ret; + } + + *out_size = sizeof(*out_info); + ether_addr_copy(out_info->mac, vf_info->drv_mac); + out_info->head.state = SSS_MGMT_CMD_SUCCESS; + + return 0; +} + +static int sss_nic_cmd_vf_mac(struct sss_nic_io *nic_io, struct sss_nic_vf_info *vf_info, + u16 cmd, void *in_buf, u16 
in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + if (!vf_info->trust && vf_info->specified_mac && is_valid_ether_addr(in_mac->mac)) { + out_mac->head.state = SSSNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*out_mac); + nic_warn(nic_io->dev_hdl, + "PF has already set VF MAC address,and vf trust is off.\n"); + return 0; + } + if (is_valid_ether_addr(in_mac->mac)) + in_mac->vlan_id = vf_info->pf_vlan; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, cmd, in_buf, in_size, + out_buf, out_size); + if (ret != 0 || *out_size == 0) { + nic_warn(nic_io->dev_hdl, + "Fail to send vf mac, ret: %d,status: 0x%x, out size: 0x%x\n", + ret, out_mac->head.state, *out_size); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_set_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + ret = sss_nic_cmd_vf_mac(nic_io, vf_info, SSSNIC_MBX_OPCODE_SET_MAC, + in_buf, in_size, out_buf, out_size); + if (ret != 0) + return ret; + + if (is_valid_ether_addr(in_mac->mac) && + out_mac->head.state == SSS_MGMT_CMD_SUCCESS) + ether_addr_copy(vf_info->drv_mac, in_mac->mac); + + return 0; +} + +static int sss_nic_del_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + ret = sss_nic_cmd_vf_mac(nic_io, vf_info, SSSNIC_MBX_OPCODE_DEL_MAC, + in_buf, in_size, out_buf, out_size); + if (ret != 0) + return ret; + + if 
(is_valid_ether_addr(in_mac->mac) && + out_mac->head.state == SSS_MGMT_CMD_SUCCESS) + eth_zero_addr(vf_info->drv_mac); + + return 0; +} + +static int sss_nic_update_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_update *in_mac = in_buf; + struct sss_nic_mbx_mac_update *out_mac = out_buf; + int ret; + + if (!is_valid_ether_addr(in_mac->old_mac.mac)) { + nic_err(nic_io->dev_hdl, "Fail to update mac, Invalid mac.\n"); + return -EINVAL; + } + + if (!vf_info->trust && vf_info->specified_mac) { + out_mac->old_mac.head.state = SSSNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*out_mac); + nic_warn(nic_io->dev_hdl, + "PF has already set VF MAC address,and vf trust is off.\n"); + return 0; + } + + in_mac->old_mac.vlan_id = vf_info->pf_vlan; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, + SSSNIC_MBX_OPCODE_UPDATE_MAC, in_buf, in_size, + out_buf, out_size); + if (ret != 0 || *out_size == 0) { + nic_warn(nic_io->dev_hdl, + "Fail to update vf mac, ret: %d,status: 0x%x, out size: 0x%x\n", + ret, out_mac->old_mac.head.state, *out_size); + return -EFAULT; + } + + if (out_mac->old_mac.head.state == SSS_MGMT_CMD_SUCCESS) + ether_addr_copy(vf_info->drv_mac, in_mac->new_mac); + + return 0; +} + +static int _sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, + u16 *out_size, u16 channel) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + if (sss_nic_get_vf_cmd_proc(cmd)) + return sss_mbx_send_to_pf(hwdev, SSS_MOD_TYPE_L2NIC, cmd, in_buf, + in_size, out_buf, out_size, 0, channel); + + return sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_L2NIC, cmd, in_buf, + in_size, out_buf, out_size, 0, channel); +} + +int sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return 
_sss_nic_l2nic_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, out_buf, + out_size, SSS_CHANNEL_NIC); +} + +int sss_nic_l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + return _sss_nic_l2nic_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, out_buf, + out_size, channel); +} + +/* pf/ppf handler mbx msg from vf */ +int sss_nic_pf_mbx_handler(void *hwdev, u16 vf_id, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_io *nic_io = NULL; + const struct sss_nic_vf_msg_handler *handler = NULL; + + if (!hwdev) + return -EFAULT; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + handler = sss_nic_get_vf_cmd_proc(cmd); + if (handler) + return handler->msg_handler(nic_io, vf_id, in_buf, in_size, out_buf, out_size); + + nic_warn(nic_io->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +void sss_nic_notify_dcb_state_event(void *hwdev, + struct sss_nic_dcb_info *dcb_info) +{ + struct sss_event_info event_info = {0}; + + event_info.type = SSSNIC_EVENT_DCB_STATE_CHANGE; + event_info.service = SSS_EVENT_SRV_NIC; + memcpy((void *)event_info.event_data, dcb_info, sizeof(*dcb_info)); + + sss_do_event_callback(hwdev, &event_info); +} + +static void sss_nic_dcb_state_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_vf_dcb_cfg *dcb_cfg = in_buf; + + if (!dcb_cfg) + return; + + memcpy(&nic_io->dcb_info, &dcb_cfg->dcb_info, sizeof(dcb_cfg->dcb_info)); + sss_nic_notify_dcb_state_event(nic_io->hwdev, &dcb_cfg->dcb_info); +} + +static void sss_nic_tx_pause_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_msg_tx_pause_info *in_pause = in_buf; + + if (in_size != sizeof(*in_pause)) { + nic_err(nic_io->dev_hdl, "Invalid in buffer size 
value: %u,It should be %zu\n",
+			in_size, sizeof(*in_pause));
+		return;
+	}
+
+	nic_warn(nic_io->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n",
+		 in_pause->tx_pause_except, in_pause->except_level);
+	sss_fault_event_report(nic_io->hwdev, SSS_FAULT_SRC_TX_PAUSE_EXCP,
+			       (u16)in_pause->except_level);
+}
+
+static void sss_nic_bond_active_event_handler(struct sss_nic_io *nic_io,
+					      void *in_buf, u16 in_size,
+					      void *out_buf, u16 *out_size)
+{
+	struct sss_event_info in_info = {0};
+	struct sss_nic_msg_bond_active_info *bond_info = in_buf;
+
+	if (in_size != sizeof(*bond_info)) {
+		nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %zu\n",
+			in_size, sizeof(*bond_info));
+		return;
+	}
+
+	memcpy((void *)in_info.event_data, bond_info, sizeof(*bond_info));
+	in_info.type = SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE;
+	in_info.service = SSS_EVENT_SRV_NIC;
+	sss_do_event_callback(nic_io->hwdev, &in_info);
+}
+
+static int _sss_nic_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size,
+				  void *out_buf, u16 *out_size)
+{
+	struct sss_nic_io *nic_io = NULL;
+	const struct nic_event_handler *handler = NULL;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC);
+	if (!nic_io)
+		return -EINVAL;
+
+	*out_size = 0;
+
+	handler = sss_nic_get_event_proc(cmd);
+	if (handler) {
+		handler->event_handler(nic_io, in_buf, in_size, out_buf, out_size);
+		return 0;
+	}
+
+	((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED;
+	*out_size = sizeof(struct sss_mgmt_msg_head);
+	nic_warn(nic_io->dev_hdl, "Unsupport nic event, cmd: %u\n", cmd);
+
+	return 0;
+}
+
+int sss_nic_vf_event_handler(void *hwdev,
+			     u16 cmd, void *in_buf, u16 in_size,
+			     void *out_buf, u16 *out_size)
+{
+	return _sss_nic_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size);
+}
+
+void sss_nic_pf_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size,
+			      void *out_buf, u16 *out_size)
+{
+	_sss_nic_event_handler(hwdev, 
cmd, in_buf, in_size, out_buf, out_size); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h new file mode 100644 index 0000000000000..7c1e37929dc9f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_EVENT_H +#define SSS_NIC_EVENT_H + +#include +#include + +#include "sss_hw_common.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_cfg_mag_define.h" + +enum sss_nic_event_type { + SSSNIC_EVENT_LINK_DOWN, + SSSNIC_EVENT_LINK_UP, + SSSNIC_EVENT_PORT_MODULE_EVENT, + SSSNIC_EVENT_DCB_STATE_CHANGE, + SSSNIC_EVENT_MAX +}; + +struct sss_nic_vf_msg_handler { + u16 opcode; + int (*msg_handler)(struct sss_nic_io *nic_io, + u16 vf, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); +}; + +struct nic_event_handler { + u16 opcode; + void (*event_handler)(struct sss_nic_io *nic_io, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +int sss_nic_dettach_vf(struct sss_nic_io *nic_io, u16 vf_id); + +int sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int sss_nic_l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel); + +int sss_nic_pf_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void sss_nic_notify_dcb_state_event(void *hwdev, + struct sss_nic_dcb_info *dcb_info); + +int sss_nic_vf_event_handler(void *hwdev, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void sss_nic_pf_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c 
b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c new file mode 100644 index 0000000000000..f397aac0ed2d1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_cfg.h" + +enum sss_nic_rx_mode_state { + SSSNIC_PROMISC_ON, + SSSNIC_ALLMULTI_ON, + SSSNIC_PROMISC_FORCE_ON, + SSSNIC_ALLMULTI_FORCE_ON, +}; + +enum sss_nic_mac_filter_state { + SSSNIC_MAC_FILTER_WAIT_SYNC, + SSSNIC_MAC_FILTER_SYNCED, + SSSNIC_MAC_FILTER_WAIT_UNSYNC, + SSSNIC_MAC_FILTER_UNSYNCED, +}; + +struct sss_nic_mac_filter { + struct list_head list; + u8 address[ETH_ALEN]; + unsigned long status; +}; + +#define SSSNIC_DEFAULT_RX_MODE (SSSNIC_RX_MODE_UC | SSSNIC_RX_MODE_MC | SSSNIC_RX_MODE_BC) + +static bool mc_mac_filter = true; +module_param(mc_mac_filter, bool, 0444); +MODULE_PARM_DESC(mc_mac_filter, "Set multicast mac filter: 0 - disable, 1 - enable (default=1)"); + +static int sss_nic_sync_uc(struct net_device *netdev, u8 *address) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return sss_nic_set_mac(nic_dev, address, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); +} + +static int sss_nic_unsync_uc(struct net_device *netdev, u8 *address) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + /* The addr is in use */ + if (ether_addr_equal(address, netdev->dev_addr)) + return 0; + + return sss_nic_del_mac(nic_dev, address, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); +} + +void sss_nic_clean_mac_list_filter(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + struct net_device *netdev = 
nic_dev->netdev; + + list_for_each_entry_safe(filter, tmp_filter, &nic_dev->uc_filter_list, list) { + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + sss_nic_unsync_uc(netdev, filter->address); + list_del(&filter->list); + kfree(filter); + } + + list_for_each_entry_safe(filter, tmp_filter, &nic_dev->mc_filter_list, list) { + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + sss_nic_unsync_uc(netdev, filter->address); + list_del(&filter->list); + kfree(filter); + } +} + +static struct sss_nic_mac_filter *sss_nic_find_mac(const struct list_head *filter_list, + u8 *address) +{ + struct sss_nic_mac_filter *filter = NULL; + + list_for_each_entry(filter, filter_list, list) { + if (ether_addr_equal(address, filter->address)) + return filter; + } + return NULL; +} + +static struct sss_nic_mac_filter *sss_nic_add_filter(struct sss_nic_dev *nic_dev, + struct list_head *mac_filter_list, + u8 *address) +{ + struct sss_nic_mac_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + goto out; + + ether_addr_copy(filter->address, address); + + INIT_LIST_HEAD(&filter->list); + list_add_tail(&filter->list, mac_filter_list); + + filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + set_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + +out: + return filter; +} + +static void sss_nic_del_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_mac_filter *filter) +{ + set_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (filter->status == SSSNIC_MAC_FILTER_WAIT_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&filter->list); + kfree(filter); + return; + } + + filter->status = SSSNIC_MAC_FILTER_WAIT_UNSYNC; +} + +static struct sss_nic_mac_filter *sss_nic_copy_mac_filter_entry(const struct sss_nic_mac_filter *ft) +{ + struct sss_nic_mac_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return NULL; + + *filter = *ft; + INIT_LIST_HEAD(&filter->list); + + return filter; +} + +static void 
sss_nic_undo_del_filter_entry(struct list_head *filter_list, + const struct list_head *from) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + + list_for_each_entry_safe(filter, tmp_filter, from, list) { + if (sss_nic_find_mac(filter_list, filter->address)) + continue; + + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + filter->status = SSSNIC_MAC_FILTER_WAIT_UNSYNC; + + list_move_tail(&filter->list, filter_list); + } +} + +static void sss_nic_undo_add_filter_entry(struct list_head *filter_list, + const struct list_head *from) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + + list_for_each_entry_safe(filter, ftmp_filter, from, list) { + tmp_filter = sss_nic_find_mac(filter_list, filter->address); + if (tmp_filter && tmp_filter->status == SSSNIC_MAC_FILTER_SYNCED) + tmp_filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + } +} + +static void sss_nic_cleanup_filter_list(const struct list_head *head) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + + list_for_each_entry_safe(filter, ftmp_filter, head, list) { + list_del(&filter->list); + kfree(filter); + } +} + +static int sss_nic_sync_mac_filter_to_hw(struct sss_nic_dev *nic_dev, + struct list_head *del_list, + struct list_head *add_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + int add_num = 0; + + if (!list_empty(del_list)) { + list_for_each_entry_safe(filter, ftmp_filter, del_list, list) { + ret = sss_nic_unsync_uc(netdev, filter->address); + if (ret != 0) { /* ignore errors when delete mac */ + nic_err(nic_dev->dev_hdl, "Fail to delete mac\n"); + } + + list_del(&filter->list); + kfree(filter); + } + } + + if (!list_empty(add_list)) { + list_for_each_entry_safe(filter, ftmp_filter, add_list, list) { + ret = 
sss_nic_sync_uc(netdev, filter->address); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to add mac\n"); + return ret; + } + + add_num++; + list_del(&filter->list); + kfree(filter); + } + } + + return add_num; +} + +static int sss_nic_sync_mac_filter(struct sss_nic_dev *nic_dev, + struct list_head *mac_filter_list, bool uc) +{ + struct net_device *netdev = nic_dev->netdev; + struct list_head del_tmp_list; + struct list_head add_tmp_list; + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *fclone_filter = NULL; + int ret = 0; + int add_num = 0; + + INIT_LIST_HEAD(&del_tmp_list); + INIT_LIST_HEAD(&add_tmp_list); + + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_WAIT_UNSYNC) + continue; + + filter->status = SSSNIC_MAC_FILTER_UNSYNCED; + list_move_tail(&filter->list, &del_tmp_list); + } + + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_WAIT_SYNC) + continue; + + fclone_filter = sss_nic_copy_mac_filter_entry(filter); + if (!fclone_filter) { + ret = -ENOMEM; + break; + } + + filter->status = SSSNIC_MAC_FILTER_SYNCED; + list_add_tail(&fclone_filter->list, &add_tmp_list); + } + + if (ret != 0) { + sss_nic_undo_del_filter_entry(mac_filter_list, &del_tmp_list); + sss_nic_undo_add_filter_entry(mac_filter_list, &add_tmp_list); + nicif_err(nic_dev, drv, netdev, "Fail to clone mac_filter_entry\n"); + + sss_nic_cleanup_filter_list(&del_tmp_list); + sss_nic_cleanup_filter_list(&add_tmp_list); + return -ENOMEM; + } + + add_num = sss_nic_sync_mac_filter_to_hw(nic_dev, &del_tmp_list, &add_tmp_list); + if (list_empty(&add_tmp_list)) + return add_num; + + /* there are errors when add mac to hw, delete all mac in hw */ + sss_nic_undo_add_filter_entry(mac_filter_list, &add_tmp_list); + /* VF don't support to enter promisc mode, + * so we can't delete any other uc mac + */ + if 
(!SSSNIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) { + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_SYNCED) + continue; + + fclone_filter = sss_nic_copy_mac_filter_entry(filter); + if (!fclone_filter) + break; + + filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + list_add_tail(&fclone_filter->list, &del_tmp_list); + } + } + + sss_nic_cleanup_filter_list(&add_tmp_list); + sss_nic_sync_mac_filter_to_hw(nic_dev, &del_tmp_list, &add_tmp_list); + + /* need to enter promisc/allmulti mode */ + return -ENOMEM; +} + +static void sss_nic_sync_all_mac_filter(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int add_num; + + if (test_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags)) { + clear_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + add_num = sss_nic_sync_mac_filter(nic_dev, &nic_dev->uc_filter_list, true); + if (add_num < 0 && SSSNIC_SUPPORT_PROMISC(nic_dev->nic_io)) { + set_bit(SSSNIC_PROMISC_FORCE_ON, &nic_dev->rx_mode); + nicif_info(nic_dev, drv, netdev, " Force promisc mode on\n"); + } else if (add_num != 0) { + clear_bit(SSSNIC_PROMISC_FORCE_ON, &nic_dev->rx_mode); + } + + add_num = sss_nic_sync_mac_filter(nic_dev, &nic_dev->mc_filter_list, false); + if (add_num < 0 && SSSNIC_SUPPORT_ALLMULTI(nic_dev->nic_io)) { + set_bit(SSSNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mode); + nicif_info(nic_dev, drv, netdev, "Force allmulti mode on\n"); + } else if (add_num != 0) { + clear_bit(SSSNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mode); + } + } +} + +static void sss_nic_update_mac_filter(struct sss_nic_dev *nic_dev, + const struct netdev_hw_addr_list *src_list, + struct list_head *filter_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *f_filter = NULL; + struct netdev_hw_addr *hw_addr = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + 
netdev_hw_addr_list_for_each(hw_addr, src_list) { + filter = sss_nic_find_mac(filter_list, hw_addr->addr); + if (!filter) + sss_nic_add_filter(nic_dev, filter_list, hw_addr->addr); + else if (filter->status == SSSNIC_MAC_FILTER_WAIT_UNSYNC) + filter->status = SSSNIC_MAC_FILTER_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f_filter, ftmp_filter, filter_list, list) { + bool find = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(hw_addr, src_list) + if (ether_addr_equal(hw_addr->addr, f_filter->address)) { + find = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (find) + continue; + + sss_nic_del_filter(nic_dev, f_filter); + } +} + +#ifndef NETDEV_HW_ADDR_T_MULTICAST +static void sss_nic_update_mc_filter(struct sss_nic_dev *nic_dev, + struct list_head *filter_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *f_filter = NULL; + struct dev_mc_list *hw_addr = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(hw_addr, nic_dev->netdev) { + filter = sss_nic_find_mac(filter_list, hw_addr->da_addr); + if (!filter) + sss_nic_add_filter(nic_dev, filter_list, hw_addr->da_addr); + else if (filter->status == SSSNIC_MAC_FILTER_WAIT_UNSYNC) + filter->status = SSSNIC_MAC_FILTER_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f_filter, ftmp_filter, filter_list, list) { + bool find = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(hw_addr, nic_dev->netdev) + if (ether_addr_equal(hw_addr->da_addr, f_filter->address)) { + find = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (find) + continue; + + sss_nic_del_filter(nic_dev, f_filter); + } +} +#endif + +static void 
sss_nic_update_all_mac_filter(struct sss_nic_dev *nic_dev)
{
	struct net_device *netdev = nic_dev->netdev;

	/* Refresh uc (and optionally mc) SW filter lists from the netdev
	 * address lists when an update was requested.
	 */
	if (test_and_clear_bit(SSSNIC_UPDATE_MAC_FILTER, &nic_dev->flags)) {
		sss_nic_update_mac_filter(nic_dev, &netdev->uc,
					  &nic_dev->uc_filter_list);
		/* NOTE(review): mc_mac_filter is defined elsewhere in this
		 * file -- presumably a module parameter gating multicast
		 * filtering; confirm before relying on it.
		 */
		if (mc_mac_filter) {
#ifdef NETDEV_HW_ADDR_T_MULTICAST
			sss_nic_update_mac_filter(nic_dev, &netdev->mc, &nic_dev->mc_filter_list);
#else
			sss_nic_update_mc_filter(nic_dev, &nic_dev->mc_filter_list);
#endif
		}
	}
}

/* Program the requested allmulti/promisc rx mode into HW and mirror the
 * result into nic_dev->rx_mode bits. On HW failure the SW bits are left
 * unchanged so the next pass retries.
 */
static void sss_nic_sync_rx_mode_to_hw(struct sss_nic_dev *nic_dev, int allmulti_enter,
				       int promisc_enter)
{
	int ret;
	u32 rx_mode = SSSNIC_DEFAULT_RX_MODE;
	struct net_device *netdev = nic_dev->netdev;

	rx_mode |= (allmulti_enter ? SSSNIC_RX_MODE_MC_ALL : 0);
	rx_mode |= (promisc_enter ? SSSNIC_RX_MODE_PROMISC : 0);

	/* log only on state transitions */
	if (allmulti_enter !=
	    test_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode))
		nicif_info(nic_dev, drv, netdev,
			   "%s allmulti mode\n",
			   allmulti_enter ? "Enable" : "Disable");

	if (promisc_enter != test_bit(SSSNIC_PROMISC_ON,
				      &nic_dev->rx_mode))
		nicif_info(nic_dev, drv, netdev,
			   "%s promisc mode\n",
			   promisc_enter ? "Enable" : "Disable");

	ret = sss_nic_set_rx_mode(nic_dev, rx_mode);
	if (ret != 0) {
		nicif_err(nic_dev, drv, netdev, "Fail to set rx mode\n");
		return;
	}

	if (allmulti_enter != 0)
		set_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode);
	else
		clear_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode);

	if (promisc_enter != 0)
		set_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode);
	else
		clear_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode);
}

/* Deferred rx-mode worker: refresh SW filter lists, sync them to HW, then
 * reprogram allmulti/promisc if the effective state changed (either via
 * netdev flags or via the FORCE_ON bits set by a failed filter sync).
 */
void sss_nic_set_rx_mode_work(struct work_struct *work)
{
	struct sss_nic_dev *nic_dev =
		container_of(work, struct sss_nic_dev, rx_mode_work);
	struct net_device *netdev = nic_dev->netdev;
	int allmulti_enter = 0;
	int promisc_enter = 0;

	sss_nic_update_all_mac_filter(nic_dev);

	sss_nic_sync_all_mac_filter(nic_dev);

	if (SSSNIC_SUPPORT_ALLMULTI(nic_dev->nic_io))
		allmulti_enter = !!(netdev->flags & IFF_ALLMULTI) ||
			test_bit(SSSNIC_ALLMULTI_FORCE_ON,
				 &nic_dev->rx_mode);

	if (SSSNIC_SUPPORT_PROMISC(nic_dev->nic_io))
		promisc_enter = !!(netdev->flags & IFF_PROMISC) ||
			test_bit(SSSNIC_PROMISC_FORCE_ON,
				 &nic_dev->rx_mode);

	if (allmulti_enter !=
	    test_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode) ||
	    promisc_enter !=
	    test_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode))
		sss_nic_sync_rx_mode_to_hw(nic_dev, allmulti_enter, promisc_enter);
}
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h
new file mode 100644
index 0000000000000..65d13b459fc91
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_NIC_FILTER_H
#define SSS_NIC_FILTER_H

/* NOTE(review): include target lost in patch transfer (angle brackets
 * stripped) -- likely <linux/workqueue.h>; restore before applying.
 */
#include
#include "sss_nic_dev_define.h"

void sss_nic_set_rx_mode_work(struct work_struct *work);
void sss_nic_clean_mac_list_filter(struct sss_nic_dev *nic_dev);

#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c
b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c new file mode 100644 index 0000000000000..fc49b645d96fe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c @@ -0,0 +1,953 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_io.h" +#include "sss_nic_event.h" + +#define SSSNIC_DEAULT_DROP_THD_OFF 0 +#define SSSNIC_DEAULT_DROP_THD_ON (0xFFFF) +#define SSSNIC_DEAULT_TX_CI_PENDING_LIMIT 1 +#define SSSNIC_DEAULT_TX_CI_COALESCING_TIME 1 +#define SSSNIC_WQ_PREFETCH_MIN 1 +#define SSSNIC_WQ_PREFETCH_MAX 4 +#define SSSNIC_WQ_PREFETCH_THRESHOLD 256 +#define SSSNIC_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SSSNIC_CI_Q_ADDR_SIZE (64) + +#define SSSNIC_CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * SSSNIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define SSSNIC_CI_PADDR(base_paddr, qid) ((base_paddr) + \ + (qid) * SSSNIC_CI_Q_ADDR_SIZE) + +#define SSSNIC_CI_VADDR(base_addr, qid) ((u8 *)(base_addr) + \ + (qid) * SSSNIC_CI_Q_ADDR_SIZE) + +#define SSSNIC_SQ_CTX_SIZE(num_sqs) ((u16)(sizeof(struct sss_nic_qp_ctx_header) \ + + (num_sqs) * sizeof(struct sss_nic_sq_ctx))) + +#define SSSNIC_RQ_CTX_SIZE(num_rqs) ((u16)(sizeof(struct sss_nic_qp_ctx_header) \ + + (num_rqs) * sizeof(struct sss_nic_rq_ctx))) + +#define SSSNIC_CI_ID_HIGH_SHIFH 12 +#define SSSNIC_CI_HIGN_ID(val) ((val) >> SSSNIC_CI_ID_HIGH_SHIFH) + +#define SSSNIC_SQ_CTX_MODE_SP_FLAG_SHIFT 0 +#define SSSNIC_SQ_CTX_MODE_PKT_DROP_SHIFT 1 + +#define SSSNIC_SQ_CTX_MODE_SP_FLAG_MASK 0x1U +#define SSSNIC_SQ_CTX_MODE_PKT_DROP_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_MODE(val, member) \ 
+ (((val) & SSSNIC_SQ_CTX_MODE_##member##_MASK) \ + << SSSNIC_SQ_CTX_MODE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PI_ID_SHIFT 0 +#define SSSNIC_SQ_CTX_CI_ID_SHIFT 16 + +#define SSSNIC_SQ_CTX_PI_ID_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_CI_ID_MASK 0xFFFFU + +#define SSSNIC_SET_SQ_CTX_CI_PI(val, member) \ + (((val) & SSSNIC_SQ_CTX_##member##_MASK) \ + << SSSNIC_SQ_CTX_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_WQ_PAGE_HI_PFN_SHIFT 0 +#define SSSNIC_SQ_CTX_WQ_PAGE_OWNER_SHIFT 23 + +#define SSSNIC_SQ_CTX_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SSSNIC_SQ_CTX_WQ_PAGE_OWNER_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_WQ_PAGE(val, member) \ + (((val) & SSSNIC_SQ_CTX_WQ_PAGE_##member##_MASK) \ + << SSSNIC_SQ_CTX_WQ_PAGE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_GLOBAL_SQ_ID_SHIFT 0 + +#define SSSNIC_SQ_CTX_GLOBAL_SQ_ID_MASK 0x1FFFU + +#define SSSNIC_SET_SQ_CTX_GLOBAL_QUEUE_ID(val, member) \ + (((val) & SSSNIC_SQ_CTX_##member##_MASK) \ + << SSSNIC_SQ_CTX_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PKT_DROP_THD_ON_SHIFT 0 +#define SSSNIC_SQ_CTX_PKT_DROP_THD_OFF_SHIFT 16 + +#define SSSNIC_SQ_CTX_PKT_DROP_THD_ON_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_PKT_DROP_THD_OFF_MASK 0xFFFFU + +#define SSSNIC_SET_SQ_CTX_PKT_DROP_THD(val, member) \ + (((val) & SSSNIC_SQ_CTX_PKT_DROP_##member##_MASK) \ + << SSSNIC_SQ_CTX_PKT_DROP_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_CACHE_MAX_SHIFT 14 +#define SSSNIC_SQ_CTX_PREF_CACHE_MIN_SHIFT 25 + +#define SSSNIC_SQ_CTX_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SSSNIC_SQ_CTX_PREF_CACHE_MAX_MASK 0x7FFU +#define SSSNIC_SQ_CTX_PREF_CACHE_MIN_MASK 0x7FU + +#define SSSNIC_SQ_CTX_PREF_CI_HI_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_OWNER_SHIFT 4 + +#define SSSNIC_SQ_CTX_PREF_CI_HI_MASK 0xFU +#define SSSNIC_SQ_CTX_PREF_OWNER_MASK 0x1U + +#define SSSNIC_SQ_CTX_PREF_WQ_PFN_HI_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_CI_LOW_SHIFT 20 + +#define SSSNIC_SQ_CTX_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define 
SSSNIC_SQ_CTX_PREF_CI_LOW_MASK 0xFFFU + +#define SSSNIC_SET_SQ_CTX_PREF(val, member) \ + (((val) & SSSNIC_SQ_CTX_PREF_##member##_MASK) \ + << SSSNIC_SQ_CTX_PREF_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_WQ_PAGE_HI_PFN_SHIFT 0 +#define SSSNIC_RQ_CTX_WQ_PAGE_WQE_TYPE_SHIFT 28 +#define SSSNIC_RQ_CTX_WQ_PAGE_OWNER_SHIFT 31 + +#define SSSNIC_RQ_CTX_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SSSNIC_RQ_CTX_WQ_PAGE_WQE_TYPE_MASK 0x3U +#define SSSNIC_RQ_CTX_WQ_PAGE_OWNER_MASK 0x1U + +#define SSSNIC_SET_RQ_CTX_WQ_PAGE(val, member) \ + (((val) & SSSNIC_RQ_CTX_WQ_PAGE_##member##_MASK) << \ + SSSNIC_RQ_CTX_WQ_PAGE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_VLAN_TAG_SHIFT 0 +#define SSSNIC_SQ_CTX_VLAN_TYPE_SEL_SHIFT 16 +#define SSSNIC_SQ_CTX_VLAN_INSERT_MODE_SHIFT 19 +#define SSSNIC_SQ_CTX_VLAN_CEQ_EN_SHIFT 23 + +#define SSSNIC_SQ_CTX_VLAN_TAG_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_VLAN_TYPE_SEL_MASK 0x7U +#define SSSNIC_SQ_CTX_VLAN_INSERT_MODE_MASK 0x3U +#define SSSNIC_SQ_CTX_VLAN_CEQ_EN_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_VLAN_CEQ(val, member) \ + (((val) & SSSNIC_SQ_CTX_VLAN_##member##_MASK) \ + << SSSNIC_SQ_CTX_VLAN_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_PI_ID_SHIFT 0 +#define SSSNIC_RQ_CTX_CI_ID_SHIFT 16 + +#define SSSNIC_RQ_CTX_PI_ID_MASK 0xFFFFU +#define SSSNIC_RQ_CTX_CI_ID_MASK 0xFFFFU + +#define SSSNIC_SET_RQ_CTX_CI_PI(val, member) \ + (((val) & SSSNIC_RQ_CTX_##member##_MASK) \ + << SSSNIC_RQ_CTX_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_CEQ_ATTR_INTR_SHIFT 21 +#define SSSNIC_RQ_CTX_CEQ_ATTR_EN_SHIFT 31 + +#define SSSNIC_RQ_CTX_CEQ_ATTR_INTR_MASK 0x3FFU +#define SSSNIC_RQ_CTX_CEQ_ATTR_EN_MASK 0x1U + +#define SSSNIC_SET_RQ_CTX_CEQ_ATTR(val, member) \ + (((val) & SSSNIC_RQ_CTX_CEQ_ATTR_##member##_MASK) \ + << SSSNIC_RQ_CTX_CEQ_ATTR_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SSSNIC_SQ_CTX_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SSSNIC_SET_SQ_CTX_WQ_BLOCK(val, member) \ + (((val) & SSSNIC_SQ_CTX_WQ_BLOCK_##member##_MASK) \ + << 
SSSNIC_SQ_CTX_WQ_BLOCK_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_CACHE_MAX_SHIFT 14 +#define SSSNIC_RQ_CTX_PREF_CACHE_MIN_SHIFT 25 + +#define SSSNIC_RQ_CTX_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SSSNIC_RQ_CTX_PREF_CACHE_MAX_MASK 0x7FFU +#define SSSNIC_RQ_CTX_PREF_CACHE_MIN_MASK 0x7FU + +#define SSSNIC_RQ_CTX_PREF_CI_HI_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_OWNER_SHIFT 4 + +#define SSSNIC_RQ_CTX_PREF_CI_HI_MASK 0xFU +#define SSSNIC_RQ_CTX_PREF_OWNER_MASK 0x1U + +#define SSSNIC_RQ_CTX_PREF_WQ_PFN_HI_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_CI_LOW_SHIFT 20 + +#define SSSNIC_RQ_CTX_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SSSNIC_RQ_CTX_PREF_CI_LOW_MASK 0xFFFU + +#define SSSNIC_SET_RQ_CTX_PREF(val, member) \ + (((val) & SSSNIC_RQ_CTX_PREF_##member##_MASK) << \ + SSSNIC_RQ_CTX_PREF_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_CQE_LEN_SHIFT 28 + +#define SSSNIC_RQ_CTX_CQE_LEN_MASK 0x3U + +#define SSSNIC_SET_RQ_CTX_CQE_LEN(val, member) \ + (((val) & SSSNIC_RQ_CTX_##member##_MASK) << \ + SSSNIC_RQ_CTX_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SSSNIC_RQ_CTX_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SSSNIC_SET_RQ_CTX_WQ_BLOCK(val, member) \ + (((val) & SSSNIC_RQ_CTX_WQ_BLOCK_##member##_MASK) << \ + SSSNIC_RQ_CTX_WQ_BLOCK_##member##_SHIFT) + +#define SSSNIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> 12) +#define SSSNIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> 9) + +enum sss_nic_qp_ctx_type { + SSSNIC_QP_CTX_TYPE_SQ, + SSSNIC_QP_CTX_TYPE_RQ, +}; + +struct sss_nic_qp_ctx_header { + u16 q_num; + u16 q_type; + u16 start_qid; + u16 rsvd; +}; + +struct sss_nic_clear_q_ctx { + struct sss_nic_qp_ctx_header ctrlq_hdr; + u32 rsvd; +}; + +struct sss_nic_rq_ctx { + u32 ci_pi; + u32 ceq_attr; + u32 hi_wq_pfn; + u32 lo_wq_pfn; + + u32 rsvd[3]; + u32 cqe_sge_len; + + u32 pref_cache; + u32 pref_ci_owner; + u32 hi_pref_wq_pfn_ci; + u32 lo_pref_wq_pfn; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + u32 hi_wq_block_pfn; 
+ u32 lo_wq_block_pfn; +}; + +struct sss_nic_sq_ctx { + u32 ci_pi; + u32 drop_mode_sp; + u32 hi_wq_pfn; + u32 lo_wq_pfn; + + u32 rsvd0; + u32 pkt_drop_thd; + u32 global_sq_id; + u32 vlan_ceq_attr; + + u32 pref_cache; + u32 pref_ci_owner; + u32 hi_pref_wq_pfn_ci; + u32 lo_pref_wq_pfn; + + u32 rsvd8; + u32 rsvd9; + u32 hi_wq_block_pfn; + u32 lo_wq_block_pfn; +}; + +struct sss_nic_rq_ctx_block { + struct sss_nic_qp_ctx_header ctrlq_hdr; + struct sss_nic_rq_ctx rq_ctxt[SSSNIC_Q_CTXT_MAX]; +}; + +struct sss_nic_sq_ctx_block { + struct sss_nic_qp_ctx_header ctrlq_hdr; + struct sss_nic_sq_ctx sq_ctxt[SSSNIC_Q_CTXT_MAX]; +}; + +static int sss_nic_create_sq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *sq, + u16 qid, u32 sq_depth, u16 msix_id) +{ + int ret = 0; + + sq->qid = qid; + sq->msix_id = msix_id; + sq->owner = 1; + + ret = sss_create_wq(nic_io->hwdev, &sq->wq, sq_depth, + (u16)BIT(SSSNIC_SQ_WQEBB_SHIFT)); + if (ret != 0) + nic_err(nic_io->dev_hdl, "Fail to create sq(%u) wq\n", qid); + + return ret; +} + +static void sss_nic_destroy_sq(struct sss_nic_io_queue *sq) +{ + sss_destroy_wq(&sq->wq); +} + +static int sss_nic_create_rq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, + u16 qid, u32 rq_depth, u16 msix_id) +{ + int ret = 0; + + rq->qid = qid; + rq->msix_id = msix_id; + rq->wqe_type = SSSNIC_NORMAL_RQ_WQE; + + rq->rx.pi_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, PAGE_SIZE, + &rq->rx.pi_daddr, GFP_KERNEL); + if (!rq->rx.pi_vaddr) { + nic_err(nic_io->dev_hdl, "Fail to allocate rq pi virt addr\n"); + return -ENOMEM; + } + + ret = sss_create_wq(nic_io->hwdev, &rq->wq, rq_depth, + (u16)BIT(SSSNIC_RQ_WQEBB_SHIFT + SSSNIC_NORMAL_RQ_WQE)); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create rq(%u) wq\n", qid); + dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_vaddr, + rq->rx.pi_daddr); + return ret; + } + + return 0; +} + +static void sss_nic_destroy_rq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq) +{ + 
dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_vaddr, + rq->rx.pi_daddr); + + sss_destroy_wq(&rq->wq); +} + +static int sss_nic_create_qp(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, struct sss_nic_io_queue *sq, + u32 rq_depth, u32 sq_depth, u16 qid, u16 qp_msix_id) +{ + int ret = 0; + + ret = sss_nic_create_rq(nic_io, rq, qid, rq_depth, qp_msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create rq, qid: %u\n", qid); + return ret; + } + + ret = sss_nic_create_sq(nic_io, sq, qid, sq_depth, qp_msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create sq, qid: %u\n", qid); + sss_nic_destroy_rq(nic_io, rq); + } + + return ret; +} + +static void sss_nic_destroy_qp(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, struct sss_nic_io_queue *sq) +{ + sss_nic_destroy_rq(nic_io, rq); + sss_nic_destroy_sq(sq); +} + +int sss_nic_io_resource_init(struct sss_nic_io *nic_io) +{ + void __iomem *db_base = NULL; + int ret = 0; + + nic_io->max_qp_num = sss_get_max_sq_num(nic_io->hwdev); + + nic_io->ci_base_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, + PAGE_SIZE), + &nic_io->ci_base_daddr, GFP_KERNEL); + if (!nic_io->ci_base_vaddr) { + nic_err(nic_io->dev_hdl, "Fail to alloc ci dma buf\n"); + return -ENOMEM; + } + + ret = sss_alloc_db_addr(nic_io->hwdev, &db_base); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to alloc sq doorbell\n"); + goto out; + } + nic_io->sq_db_addr = (u8 *)db_base; + + ret = sss_alloc_db_addr(nic_io->hwdev, &db_base); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to alloc rq doorbell\n"); + sss_free_db_addr(nic_io->hwdev, nic_io->sq_db_addr); + goto out; + } + nic_io->rq_db_addr = (u8 *)db_base; + + return 0; + +out: + dma_free_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, PAGE_SIZE), + nic_io->ci_base_vaddr, nic_io->ci_base_daddr); + nic_io->ci_base_vaddr = NULL; + + return -ENOMEM; +} + +void sss_nic_io_resource_deinit(struct 
sss_nic_io *nic_io)
{
	/* Release the CI table and both doorbell pages allocated by
	 * sss_nic_io_resource_init().
	 */
	dma_free_coherent(nic_io->dev_hdl,
			  SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, PAGE_SIZE),
			  nic_io->ci_base_vaddr, nic_io->ci_base_daddr);

	sss_free_db_addr(nic_io->hwdev, nic_io->sq_db_addr);
	sss_free_db_addr(nic_io->hwdev, nic_io->rq_db_addr);
}

/* Allocate qp_info->qp_num rq/sq pairs, binding each to the MSI-X entry in
 * @qp_msix_arry. On partial failure every already-created pair is torn
 * down. Returns 0 on success, -EINVAL for a bad qp_num, -ENOMEM/errno
 * otherwise.
 */
int sss_nic_alloc_qp(struct sss_nic_io *nic_io,
		     struct sss_irq_desc *qp_msix_arry, struct sss_nic_qp_info *qp_info)
{
	u16 i;
	u16 qid;
	int ret = 0;
	struct sss_nic_io_queue *rq_group = NULL;
	struct sss_nic_io_queue *sq_group = NULL;

	if (qp_info->qp_num > nic_io->max_qp_num || qp_info->qp_num == 0)
		return -EINVAL;

	rq_group = kcalloc(qp_info->qp_num, sizeof(*rq_group), GFP_KERNEL);
	if (!rq_group)
		return -ENOMEM;

	sq_group = kcalloc(qp_info->qp_num, sizeof(*sq_group), GFP_KERNEL);
	if (!sq_group) {
		ret = -ENOMEM;
		nic_err(nic_io->dev_hdl, "Fail to allocate sq\n");
		goto alloc_sq_err;
	}

	for (qid = 0; qid < qp_info->qp_num; qid++) {
		ret = sss_nic_create_qp(nic_io, &rq_group[qid], &sq_group[qid],
					qp_info->rq_depth, qp_info->sq_depth, qid,
					qp_msix_arry[qid].msix_id);
		if (ret != 0) {
			nic_err(nic_io->dev_hdl,
				"Fail to allocate qp %u, err: %d\n", qid, ret);
			goto create_qp_err;
		}
	}

	qp_info->rq_group = rq_group;
	qp_info->sq_group = sq_group;

	return 0;

create_qp_err:
	/* unwind only the pairs created before the failing qid */
	for (i = 0; i < qid; i++)
		sss_nic_destroy_qp(nic_io, &rq_group[i], &sq_group[i]);

	kfree(sq_group);

alloc_sq_err:
	kfree(rq_group);

	return ret;
}

/* Destroy all qp pairs and free the group arrays. */
void sss_nic_free_qp(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info)
{
	u16 qid;

	for (qid = 0; qid < qp_info->qp_num; qid++)
		sss_nic_destroy_qp(nic_io, &qp_info->rq_group[qid],
				   &qp_info->sq_group[qid]);

	kfree(qp_info->rq_group);
	kfree(qp_info->sq_group);
	qp_info->rq_group = NULL;
	qp_info->sq_group = NULL;
}

/* Point every active queue at its doorbell page and CI slot, zeroing the
 * SQ consumer index.
 */
static void sss_nic_init_db_info(struct sss_nic_io *nic_io,
				 struct sss_nic_qp_info *qp_info)
{
	u16 qid;
	u16 *ci_addr = NULL;

	for (qid = 0; qid < nic_io->active_qp_num; qid++) {
		qp_info->rq_group[qid].db_addr = nic_io->rq_db_addr;
		qp_info->sq_group[qid].db_addr = nic_io->sq_db_addr;
		qp_info->sq_group[qid].tx.ci_addr =
			SSSNIC_CI_VADDR(nic_io->ci_base_vaddr, qid);
		ci_addr = (u16 *)qp_info->sq_group[qid].tx.ci_addr;
		*ci_addr = 0;
	}
}

/* Adopt the qp groups from @qp_info, wire up doorbells/CI slots and push
 * the queue contexts to HW.
 */
int sss_nic_init_qp_info(struct sss_nic_io *nic_io,
			 struct sss_nic_qp_info *qp_info)
{
	nic_io->rq_group = qp_info->rq_group;
	nic_io->sq_group = qp_info->sq_group;
	nic_io->active_qp_num = qp_info->qp_num;

	sss_nic_init_db_info(nic_io, qp_info);

	return sss_nic_init_qp_ctx(nic_io);
}

/* Hand the qp groups back to @qp_info and clear the HW root context. */
void sss_nic_deinit_qp_info(struct sss_nic_io *nic_io,
			    struct sss_nic_qp_info *qp_info)
{
	qp_info->qp_num = nic_io->active_qp_num;
	qp_info->rq_group = nic_io->rq_group;
	qp_info->sq_group = nic_io->sq_group;

	sss_nic_deinit_qp_ctx(nic_io->hwdev);
}

/* Fill and byte-swap the common header of a batched qp-context message. */
static void sss_nic_fill_qp_ctx_ctrlq_header(struct sss_nic_qp_ctx_header *qp_ctx_hdr,
					     enum sss_nic_qp_ctx_type ctx_type,
					     u16 queue_num, u16 qid)
{
	qp_ctx_hdr->rsvd = 0;
	qp_ctx_hdr->start_qid = qid;
	qp_ctx_hdr->q_num = queue_num;
	qp_ctx_hdr->q_type = ctx_type;
	sss_cpu_to_be32(qp_ctx_hdr, sizeof(*qp_ctx_hdr));
}

/* Build one SQ's HW context from its WQ state (PI/CI, WQ page and block
 * PFNs, prefetch parameters) -- continued below.
 */
static void sss_nic_fill_sq_ctx_ctrlq_body(struct sss_nic_io_queue *sq, u16 qid,
					   struct sss_nic_sq_ctx *sq_ctx)
{
	u16 ci_start;
	u16 pi_start;
	u32 lo_wq_block_pfn;
	u32 hi_wq_block_pfn;
	u32 lo_wq_page_pfn;
	u32 hi_wq_page_pfn;
	u64 wq_block_pfn;
	u64 wq_page_addr;
	u64 wq_page_pfn;

	pi_start = sss_nic_get_sq_local_pi(sq);
	ci_start = sss_nic_get_sq_local_ci(sq);

	wq_block_pfn = SSSNIC_WQ_BLOCK_PFN(sq->wq.block_paddr);
	lo_wq_block_pfn = lower_32_bits(wq_block_pfn);
	hi_wq_block_pfn = upper_32_bits(wq_block_pfn);

	wq_page_addr = sss_wq_get_first_wqe_page_addr(&sq->wq);
	wq_page_pfn = SSSNIC_WQ_PAGE_PFN(wq_page_addr);
	lo_wq_page_pfn = lower_32_bits(wq_page_pfn);
	hi_wq_page_pfn = upper_32_bits(wq_page_pfn);

	sq_ctx->rsvd0 = 0;

	sq_ctx->drop_mode_sp
= + SSSNIC_SET_SQ_CTX_MODE(0, SP_FLAG) | + SSSNIC_SET_SQ_CTX_MODE(0, PKT_DROP); + + sq_ctx->ci_pi = + SSSNIC_SET_SQ_CTX_CI_PI(ci_start, CI_ID) | + SSSNIC_SET_SQ_CTX_CI_PI(pi_start, PI_ID); + + sq_ctx->global_sq_id = + SSSNIC_SET_SQ_CTX_GLOBAL_QUEUE_ID(qid, GLOBAL_SQ_ID); + + sq_ctx->pkt_drop_thd = + SSSNIC_SET_SQ_CTX_PKT_DROP_THD(SSSNIC_DEAULT_DROP_THD_ON, THD_ON) | + SSSNIC_SET_SQ_CTX_PKT_DROP_THD(SSSNIC_DEAULT_DROP_THD_OFF, THD_OFF); + + sq_ctx->vlan_ceq_attr = + SSSNIC_SET_SQ_CTX_VLAN_CEQ(0, CEQ_EN) | + SSSNIC_SET_SQ_CTX_VLAN_CEQ(1, INSERT_MODE); + + sq_ctx->pref_ci_owner = + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_CI_HIGN_ID(ci_start), CI_HI) | + SSSNIC_SET_SQ_CTX_PREF(1, OWNER); + + sq_ctx->pref_cache = + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MIN, CACHE_MIN) | + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MAX, CACHE_MAX) | + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctx->lo_pref_wq_pfn = lo_wq_page_pfn; + + sq_ctx->hi_pref_wq_pfn_ci = + SSSNIC_SET_SQ_CTX_PREF(ci_start, CI_LOW) | + SSSNIC_SET_SQ_CTX_PREF(hi_wq_page_pfn, WQ_PFN_HI); + + sq_ctx->lo_wq_pfn = lo_wq_page_pfn; + + sq_ctx->hi_wq_pfn = + SSSNIC_SET_SQ_CTX_WQ_PAGE(hi_wq_page_pfn, HI_PFN) | + SSSNIC_SET_SQ_CTX_WQ_PAGE(1, OWNER); + + sq_ctx->lo_wq_block_pfn = lo_wq_block_pfn; + + sq_ctx->hi_wq_block_pfn = + SSSNIC_SET_SQ_CTX_WQ_BLOCK(hi_wq_block_pfn, PFN_HI); + + sss_cpu_to_be32(sq_ctx, sizeof(*sq_ctx)); +} + +static void sss_nic_fill_rq_ctx_ctrlq_body(struct sss_nic_io_queue *rq, + struct sss_nic_rq_ctx *rq_ctx) +{ + u16 wqe_type = rq->wqe_type; + u16 ci_start = (u16)((u32)sss_nic_get_rq_local_ci(rq) << wqe_type); + u16 pi_start = (u16)((u32)sss_nic_get_rq_local_pi(rq) << wqe_type); + u64 wq_page_addr = sss_wq_get_first_wqe_page_addr(&rq->wq); + u64 wq_page_pfn = SSSNIC_WQ_PAGE_PFN(wq_page_addr); + u64 wq_block_pfn = SSSNIC_WQ_BLOCK_PFN(rq->wq.block_paddr); + u32 lo_wq_page_pfn = lower_32_bits(wq_page_pfn); + u32 hi_wq_page_pfn = upper_32_bits(wq_page_pfn); + u32 
lo_wq_block_pfn = lower_32_bits(wq_block_pfn); + u32 hi_wq_block_pfn = upper_32_bits(wq_block_pfn); + + rq_ctx->ceq_attr = SSSNIC_SET_RQ_CTX_CEQ_ATTR(0, EN) | + SSSNIC_SET_RQ_CTX_CEQ_ATTR(rq->msix_id, INTR); + + rq_ctx->ci_pi = + SSSNIC_SET_RQ_CTX_CI_PI(ci_start, CI_ID) | + SSSNIC_SET_RQ_CTX_CI_PI(pi_start, PI_ID); + + rq_ctx->pref_cache = + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MIN, CACHE_MIN) | + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MAX, CACHE_MAX) | + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctx->pref_ci_owner = + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_CI_HIGN_ID(ci_start), CI_HI) | + SSSNIC_SET_RQ_CTX_PREF(1, OWNER); + + rq_ctx->lo_wq_pfn = lo_wq_page_pfn; + + rq_ctx->hi_wq_pfn = + SSSNIC_SET_RQ_CTX_WQ_PAGE(hi_wq_page_pfn, HI_PFN) | + SSSNIC_SET_RQ_CTX_WQ_PAGE(1, OWNER); + + if (wqe_type == SSSNIC_EXTEND_RQ_WQE) { + rq_ctx->hi_wq_pfn |= + SSSNIC_SET_RQ_CTX_WQ_PAGE(0, WQE_TYPE); + } else if (wqe_type == SSSNIC_NORMAL_RQ_WQE) { + rq_ctx->cqe_sge_len = SSSNIC_SET_RQ_CTX_CQE_LEN(1, CQE_LEN); + rq_ctx->hi_wq_pfn |= + SSSNIC_SET_RQ_CTX_WQ_PAGE(2, WQE_TYPE); + } else { + pr_err("Invalid rq wqe type: %u", wqe_type); + } + + rq_ctx->lo_pref_wq_pfn = lo_wq_page_pfn; + rq_ctx->hi_pref_wq_pfn_ci = + SSSNIC_SET_RQ_CTX_PREF(hi_wq_page_pfn, WQ_PFN_HI) | + SSSNIC_SET_RQ_CTX_PREF(ci_start, CI_LOW); + + rq_ctx->lo_wq_block_pfn = lo_wq_block_pfn; + rq_ctx->hi_wq_block_pfn = + SSSNIC_SET_RQ_CTX_WQ_BLOCK(hi_wq_block_pfn, PFN_HI); + + rq_ctx->pi_paddr_lo = lower_32_bits(rq->rx.pi_daddr); + rq_ctx->pi_paddr_hi = upper_32_bits(rq->rx.pi_daddr); + + sss_cpu_to_be32(rq_ctx, sizeof(*rq_ctx)); +} + +static int sss_nic_send_sq_ctx_by_ctrlq(struct sss_nic_io *nic_io, + struct sss_ctrl_msg_buf *msg_buf, u16 qid) +{ + u16 i; + u16 max_qp; + u64 out_param = 0; + int ret; + struct sss_nic_sq_ctx_block *sq_ctx_block = msg_buf->buf; + + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + sss_nic_fill_qp_ctx_ctrlq_header(&sq_ctx_block->ctrlq_hdr, + 
SSSNIC_QP_CTX_TYPE_SQ, max_qp, qid); + + for (i = 0; i < max_qp; i++) + sss_nic_fill_sq_ctx_ctrlq_body(&nic_io->sq_group[qid + i], qid + i, + &sq_ctx_block->sq_ctxt[i]); + + msg_buf->size = SSSNIC_SQ_CTX_SIZE(max_qp); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set sq ctxt, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + return -EFAULT; + } + + return 0; +} + +static int sss_nic_send_sq_ctx_to_hw(struct sss_nic_io *nic_io) +{ + int ret = 0; + u16 qid = 0; + u16 max_qp; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + while (qid < nic_io->active_qp_num) { + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + ret = sss_nic_send_sq_ctx_by_ctrlq(nic_io, msg_buf, qid); + if (ret) { + nic_err(nic_io->dev_hdl, + "Fail to set sq ctx, qid: %u\n", qid); + break; + } + + qid += max_qp; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_send_rq_ctx_by_ctrlq(struct sss_nic_io *nic_io, + struct sss_ctrl_msg_buf *msg_buf, u16 qid) +{ + u16 i; + u16 max_qp; + u64 out_param = 0; + int ret; + struct sss_nic_rq_ctx_block *rq_ctx_block = msg_buf->buf; + + rq_ctx_block = msg_buf->buf; + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + + sss_nic_fill_qp_ctx_ctrlq_header(&rq_ctx_block->ctrlq_hdr, + SSSNIC_QP_CTX_TYPE_RQ, max_qp, qid); + + for (i = 0; i < max_qp; i++) + sss_nic_fill_rq_ctx_ctrlq_body(&nic_io->rq_group[qid + i], + &rq_ctx_block->rq_ctxt[i]); + + msg_buf->size = SSSNIC_RQ_CTX_SIZE(max_qp); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 
0) { + nic_err(nic_io->dev_hdl, + "Fail to set rq ctx, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + return -EFAULT; + } + + return 0; +} + +static int sss_nic_send_rq_ctx_to_hw(struct sss_nic_io *nic_io) +{ + int ret = 0; + u16 qid = 0; + u16 max_qp; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + while (qid < nic_io->active_qp_num) { + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + + ret = sss_nic_send_rq_ctx_by_ctrlq(nic_io, msg_buf, qid); + if (ret) { + nic_err(nic_io->dev_hdl, + "Fail to set rq ctx, qid: %u\n", qid); + break; + } + + qid += max_qp; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_reset_hw_offload_ctx(struct sss_nic_io *nic_io, + enum sss_nic_qp_ctx_type ctx_type) +{ + int ret = 0; + u64 out_param = 0; + struct sss_ctrl_msg_buf *msg_buf = NULL; + struct sss_nic_clear_q_ctx *ctx_block = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx_block = msg_buf->buf; + ctx_block->ctrlq_hdr.start_qid = 0; + ctx_block->ctrlq_hdr.q_type = ctx_type; + ctx_block->ctrlq_hdr.q_num = nic_io->max_qp_num; + + sss_cpu_to_be32(ctx_block, sizeof(*ctx_block)); + + msg_buf->size = sizeof(*ctx_block); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_CLEAN_QUEUE_CONTEXT, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to clean queue offload ctxt, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + ret = -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_reset_hw_qp_offload_ctx(struct sss_nic_io *nic_io) +{ + int ret; + + ret = sss_nic_reset_hw_offload_ctx(nic_io, 
					   SSSNIC_QP_CTX_TYPE_SQ);
	if (ret != 0)
		return ret;

	ret = sss_nic_reset_hw_offload_ctx(nic_io, SSSNIC_QP_CTX_TYPE_RQ);

	return ret;
}

/* Program one SQ's CI-writeback attributes (DMA address, interrupt id,
 * coalescing) into the chip via the mailbox.
 */
static int sss_nic_set_hw_intr_attr(struct sss_nic_io *nic_io, u16 qid)
{
	struct sss_nic_mbx_intr_attr cmd_ci_attr = {0};
	u16 out_len = sizeof(cmd_ci_attr);
	int ret;

	cmd_ci_attr.func_id = sss_get_global_func_id(nic_io->hwdev);
	cmd_ci_attr.dma_attr_off = 0;
	cmd_ci_attr.pending_limit = SSSNIC_DEAULT_TX_CI_PENDING_LIMIT;
	cmd_ci_attr.coalescing_time = SSSNIC_DEAULT_TX_CI_COALESCING_TIME;
	cmd_ci_attr.intr_en = 1;
	cmd_ci_attr.intr_id = nic_io->sq_group[qid].msix_id;
	cmd_ci_attr.l2nic_sqn = qid;
	/* ci_addr is passed right-shifted by 2 -- presumably the HW field is
	 * in 4-byte units; TODO confirm against the chip spec.
	 */
	cmd_ci_attr.ci_addr = SSSNIC_CI_PADDR(nic_io->ci_base_daddr, qid) >> 0x2;

	ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SQ_CI_ATTR_SET,
					     &cmd_ci_attr, sizeof(cmd_ci_attr), &cmd_ci_attr,
					     &out_len);
	if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ci_attr)) {
		nic_err(nic_io->dev_hdl,
			"Fail to set ci attr table, ret: %d, status: 0x%x, out_len: 0x%x\n",
			ret, cmd_ci_attr.head.state, out_len);
		return -EFAULT;
	}

	return 0;
}

/* Apply CI-writeback attributes to every active SQ; stop on first error. */
static int sss_nic_set_qp_intr_attr(struct sss_nic_io *nic_io)
{
	u16 qid;
	int ret;

	for (qid = 0; qid < nic_io->active_qp_num; qid++) {
		ret = sss_nic_set_hw_intr_attr(nic_io, qid);
		if (ret != 0) {
			nic_err(nic_io->dev_hdl, "Fail to set ci table, qid:%u\n", qid);
			return ret;
		}
	}

	return 0;
}

/* Full qp-context bring-up: SQ ctxs, RQ ctxs, offload-context reset, root
 * context (depths/buffer length), then CI attributes. The root context is
 * rolled back if the last step fails.
 */
int sss_nic_init_qp_ctx(struct sss_nic_io *nic_io)
{
	u32 rq_depth;
	int ret;

	ret = sss_nic_send_sq_ctx_to_hw(nic_io);
	if (ret != 0) {
		nic_err(nic_io->dev_hdl, "Fail to send sq ctx to hw\n");
		return ret;
	}

	ret = sss_nic_send_rq_ctx_to_hw(nic_io);
	if (ret != 0) {
		nic_err(nic_io->dev_hdl, "Fail to send rq ctx to hw\n");
		return ret;
	}

	ret = sss_nic_reset_hw_qp_offload_ctx(nic_io);
	if (ret != 0) {
		nic_err(nic_io->dev_hdl, "Fail to reset qp offload ctx\n");
		return ret;
	}

	/* RQ depth is scaled by the wqe type shift, matching the ctx fill */
	rq_depth = nic_io->rq_group[0].wq.q_depth << nic_io->rq_group[0].wqe_type;
	ret = sss_chip_set_root_ctx(nic_io->hwdev, rq_depth, nic_io->sq_group[0].wq.q_depth,
				    nic_io->rx_buff_len, SSS_CHANNEL_NIC);
	if (ret != 0) {
		nic_err(nic_io->dev_hdl, "Fail to set root context\n");
		return ret;
	}

	ret = sss_nic_set_qp_intr_attr(nic_io);
	if (ret != 0) {
		sss_chip_clean_root_ctx(nic_io->hwdev, SSS_CHANNEL_NIC);
		nic_err(nic_io->dev_hdl, "Fail to set ci table\n");
	}

	return ret;
}

/* Clear the HW root context; tolerates a NULL hwdev. */
void sss_nic_deinit_qp_ctx(void *hwdev)
{
	if (!hwdev)
		return;
	sss_chip_clean_root_ctx(hwdev, SSS_CHANNEL_NIC);
}
EXPORT_SYMBOL_GPL(sss_nic_deinit_qp_ctx);
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h
new file mode 100644
index 0000000000000..ab2be037dfd55
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h
@@ -0,0 +1,106 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_NIC_IO_H
#define SSS_NIC_IO_H

#include "sss_hw.h"
#include "sss_hw_wq.h"
#include "sss_nic_io_define.h"

#define SSSNIC_RQ_WQEBB_SHIFT 3
#define SSSNIC_CQE_SIZE_SHIFT 4
#define SSSNIC_SQ_WQEBB_SHIFT 4
#define SSSNIC_MIN_QUEUE_DEPTH 128
#define SSSNIC_MAX_RX_QUEUE_DEPTH 16384
#define SSSNIC_MAX_TX_QUEUE_DEPTH 65536
#define SSSNIC_SQ_WQEBB_SIZE BIT(SSSNIC_SQ_WQEBB_SHIFT)

/* ******************** DOORBELL DEFINE INFO ******************** */
#define DB_INFO_CFLAG_SHIFT 23
#define DB_INFO_QID_SHIFT 0
#define DB_INFO_TYPE_SHIFT 27
#define DB_INFO_NON_FILTER_SHIFT 22
#define DB_INFO_COS_SHIFT 24

#define DB_INFO_COS_MASK 0x7U
#define DB_INFO_QID_MASK 0x1FFFU
#define DB_INFO_CFLAG_MASK 0x1U
#define DB_INFO_TYPE_MASK 0x1FU
#define DB_INFO_NON_FILTER_MASK 0x1U
#define SSSNIC_DB_INFO_SET(val, member) \
	(((u32)(val) & DB_INFO_##member##_MASK) << \
	 DB_INFO_##member##_SHIFT)

#define DB_PI_HIGH_MASK 0xFFU
#define DB_PI_LOW_MASK 0xFFU
#define
DB_PI_HI_SHIFT 8 +#define SRC_TYPE 1 +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_ADDR(queue, pi) ((u64 *)((queue)->db_addr) + DB_PI_LOW(pi)) + +#define sss_nic_get_sq_local_pi(sq) SSS_WQ_MASK_ID(&(sq)->wq, (sq)->wq.pi) +#define sss_nic_get_sq_local_ci(sq) SSS_WQ_MASK_ID(&(sq)->wq, (sq)->wq.ci) +#define sss_nic_get_sq_hw_ci(sq) \ + SSS_WQ_MASK_ID(&(sq)->wq, sss_hw_cpu16(*(u16 *)(sq)->tx.ci_addr)) + +#define sss_nic_get_rq_local_pi(rq) SSS_WQ_MASK_ID(&(rq)->wq, (rq)->wq.pi) +#define sss_nic_get_rq_local_ci(rq) SSS_WQ_MASK_ID(&(rq)->wq, (rq)->wq.ci) + +/* CFLAG_DATA_PATH */ +#define RQ_CFLAG_DP 1 +#define SQ_CFLAG_DP 0 + +enum sss_nic_queue_type { + SSSNIC_SQ, + SSSNIC_RQ, + SSSNIC_MAX_QUEUE_TYPE +}; + +struct sss_nic_db { + u32 db_info; + u32 pi_hi; +}; + +enum sss_nic_rq_wqe_type { + SSSNIC_COMPACT_RQ_WQE, + SSSNIC_NORMAL_RQ_WQE, + SSSNIC_EXTEND_RQ_WQE, +}; + +int sss_nic_io_resource_init(struct sss_nic_io *nic_io); +int sss_nic_init_qp_info(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +int sss_nic_alloc_qp(struct sss_nic_io *nic_io, + struct sss_irq_desc *qp_msix_arry, struct sss_nic_qp_info *qp_info); +void sss_nic_io_resource_deinit(struct sss_nic_io *nic_io); +void sss_nic_free_qp(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +void sss_nic_deinit_qp_info(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +int sss_nic_init_qp_ctx(struct sss_nic_io *nic_io); +void sss_nic_deinit_qp_ctx(void *hwdev); + +/* * + * @brief sss_nic_write_db - write doorbell + * @param queue: nic io queue + * @param cos: cos index + * @param cflag: 0--sq, 1--rq + * @param pi: product index + */ +static inline void sss_nic_write_db(struct sss_nic_io_queue *queue, + int cos, u8 cflag, u16 pi) +{ + struct sss_nic_db doorbell; + + doorbell.db_info = SSSNIC_DB_INFO_SET(SRC_TYPE, TYPE) | SSSNIC_DB_INFO_SET(cflag, CFLAG) | + SSSNIC_DB_INFO_SET(cos, COS) | 
SSSNIC_DB_INFO_SET(queue->qid, QID); + doorbell.pi_hi = DB_PI_HIGH(pi); + doorbell.db_info = sss_hw_be32(doorbell.db_info); + doorbell.pi_hi = sss_hw_be32(doorbell.pi_hi); + + /* make sure write correctly db to reg */ + wmb(); + + writeq(*((u64 *)&doorbell), DB_ADDR(queue, pi)); +} + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c new file mode 100644 index 0000000000000..3691cd3ccff15 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" + +#define SSSNIC_AVG_PKT_SMALL_SIZE 256U + +static int sss_nic_napi_poll(struct napi_struct *napi, int budget) +{ + int tx_pkt; + int rx_pkt; + + struct sss_nic_irq_cfg *nic_irq = container_of(napi, struct sss_nic_irq_cfg, napi); + struct sss_nic_dev *nic_dev = netdev_priv(nic_irq->netdev); + + rx_pkt = sss_nic_rx_poll(nic_irq->rq, budget); + tx_pkt = sss_nic_tx_poll(nic_irq->sq, budget); + + if (tx_pkt >= budget || rx_pkt >= budget) + return budget; + + napi_complete(napi); + + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, + SSS_MSIX_ENABLE); + + return max(tx_pkt, rx_pkt); +} + +static void sss_nic_add_napi(struct sss_nic_irq_cfg *nic_irq, int budget) +{ +#ifdef NEED_NETIF_NAPI_ADD_NO_WEIGHT + netif_napi_add_weight(nic_irq->netdev, &nic_irq->napi, sss_nic_napi_poll, budget); +#else + netif_napi_add(nic_irq->netdev, &nic_irq->napi, sss_nic_napi_poll, budget); +#endif + napi_enable(&nic_irq->napi); +} + +static void sss_nic_del_napi(struct sss_nic_irq_cfg *nic_irq) +{ + 
napi_disable(&nic_irq->napi);
+	netif_napi_del(&nic_irq->napi);
+}
+
+/*
+ * Per-queue-pair MSI-X interrupt handler: clear the resend bit so the
+ * vector does not re-fire, then defer the actual RX/TX work to NAPI.
+ */
+static irqreturn_t sss_nic_qp_irq(int irq, void *data)
+{
+	struct sss_nic_irq_cfg *nic_irq = (struct sss_nic_irq_cfg *)data;
+	struct sss_nic_dev *nic_dev = netdev_priv(nic_irq->netdev);
+
+	sss_chip_clear_msix_resend_bit(nic_dev->hwdev, nic_irq->msix_id, 1);
+
+	napi_schedule(&nic_irq->napi);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Set up NAPI for one queue pair, program the initial coalescing
+ * attributes of its MSI-X vector, request the IRQ and apply the CPU
+ * affinity hint prepared in nic_irq->affinity_mask.
+ * On failure the NAPI context is torn down again; returns negative errno.
+ */
+static int sss_nic_request_irq(struct sss_nic_dev *nic_dev, u16 qid)
+{
+	int ret;
+	struct sss_irq_cfg irq_cfg = {0};
+	struct sss_nic_irq_cfg *nic_irq = &nic_dev->qp_res.irq_cfg[qid];
+
+	sss_nic_add_napi(nic_irq, nic_dev->poll_budget);
+
+	irq_cfg.coalesc_intr_set = 1;
+	irq_cfg.msix_id = nic_irq->msix_id;
+	irq_cfg.pending = nic_dev->coal_info[qid].pending_limt;
+	irq_cfg.coalesc_timer =
+		nic_dev->coal_info[qid].coalesce_timer;
+	irq_cfg.resend_timer = nic_dev->coal_info[qid].resend_timer;
+	/* Seed the "last programmed" values used by adaptive coalescing. */
+	nic_dev->rq_desc_group[qid].last_coal_timer =
+		nic_dev->coal_info[qid].coalesce_timer;
+	nic_dev->rq_desc_group[qid].last_pending_limt =
+		nic_dev->coal_info[qid].pending_limt;
+	ret = sss_chip_set_msix_attr(nic_dev->hwdev, irq_cfg, SSS_CHANNEL_NIC);
+	if (ret != 0) {
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to set rx msix attr.\n");
+		goto out;
+	}
+
+	ret = request_irq(nic_irq->irq_id, &sss_nic_qp_irq, 0, nic_irq->irq_name, nic_irq);
+	if (ret != 0) {
+		nicif_err(nic_dev, drv, nic_irq->netdev, "Fail to request rx irq\n");
+		goto out;
+	}
+
+	irq_set_affinity_hint(nic_irq->irq_id, &nic_irq->affinity_mask);
+
+	return 0;
+
+out:
+	sss_nic_del_napi(nic_irq);
+	return ret;
+}
+
+/*
+ * Undo sss_nic_request_irq(): drop the affinity hint, wait for any
+ * in-flight handler, free the IRQ and remove the NAPI context.
+ */
+static void sss_nic_release_irq(struct sss_nic_irq_cfg *nic_irq)
+{
+	irq_set_affinity_hint(nic_irq->irq_id, NULL);
+	synchronize_irq(nic_irq->irq_id);
+	free_irq(nic_irq->irq_id, nic_irq);
+	sss_nic_del_napi(nic_irq);
+}
+
+/*
+ * Program new coalescing parameters (timer and pending limit) for one
+ * queue's MSI-X vector.  Returns 0 on success or a negative errno.
+ */
+static int sss_nic_set_hw_coal(struct sss_nic_dev *nic_dev,
+			       u16 qid, u8 coal_timer_cfg, u8 pending_limt)
+{
+	int ret;
+	struct sss_irq_cfg cmd_irq_cfg = {0};
+
+	
cmd_irq_cfg.coalesc_intr_set = 1;
+	cmd_irq_cfg.msix_id = nic_dev->qp_res.irq_cfg[qid].msix_id;
+	cmd_irq_cfg.pending = pending_limt;
+	cmd_irq_cfg.coalesc_timer = coal_timer_cfg;
+	cmd_irq_cfg.resend_timer =
+		nic_dev->coal_info[qid].resend_timer;
+
+	ret = sss_chip_set_msix_attr(nic_dev->hwdev, cmd_irq_cfg, SSS_CHANNEL_NIC);
+	if (ret != 0) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Fail to modify moderation for Queue: %u\n", qid);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Map a measured RX packet rate onto coalescing parameters.  Below
+ * pkt_rate_low and above pkt_rate_high the low/high presets are used
+ * directly; in between, the pending limit and the timer are linearly
+ * interpolated between the two presets.
+ */
+static void sss_nic_calculate_intr_coal(struct sss_nic_intr_coal_info *coal_info,
+					u64 rx_rate, u8 *coal_timer_cfg, u8 *pending_limt)
+{
+	if (rx_rate < coal_info->pkt_rate_low) {
+		*pending_limt = coal_info->rx_pending_limt_low;
+		*coal_timer_cfg = coal_info->rx_usecs_low;
+	} else if (rx_rate > coal_info->pkt_rate_high) {
+		*pending_limt = coal_info->rx_pending_limt_high;
+		*coal_timer_cfg = coal_info->rx_usecs_high;
+	} else {
+		u8 rx_pending_limt = coal_info->rx_pending_limt_high -
+				     coal_info->rx_pending_limt_low;
+		u8 rx_usecs = coal_info->rx_usecs_high - coal_info->rx_usecs_low;
+		u64 rx_rate_diff = rx_rate - coal_info->pkt_rate_low;
+		u64 pkt_rate = coal_info->pkt_rate_high - coal_info->pkt_rate_low;
+
+		/* Linear interpolation: low preset + rate offset * slope. */
+		*pending_limt = (u8)(rx_rate_diff * rx_pending_limt / pkt_rate +
+				     coal_info->rx_pending_limt_low);
+		*coal_timer_cfg = (u8)(rx_rate_diff * rx_usecs / pkt_rate +
+				       coal_info->rx_usecs_low);
+	}
+}
+
+/*
+ * Adaptive interrupt moderation for one RX queue: choose new coalescing
+ * parameters from the measured rates and program them only when they
+ * differ from the values last written to hardware.
+ */
+static void sss_nic_update_intr_coal(struct sss_nic_dev *nic_dev,
+				     u16 qid, u64 rx_rate, u64 tx_rate, u64 avg_pkt_size)
+{
+	u8 pending_limt;
+	u8 coal_timer_cfg;
+	struct sss_nic_intr_coal_info *coal_info = NULL;
+
+	coal_info = &nic_dev->coal_info[qid];
+
+	if (rx_rate > SSSNIC_RX_RATE_THRESH && avg_pkt_size > SSSNIC_AVG_PKT_SMALL_SIZE) {
+		sss_nic_calculate_intr_coal(coal_info, rx_rate, &coal_timer_cfg, &pending_limt);
+	} else {
+		/* Low traffic or small packets: favour latency over throughput. */
+		pending_limt = coal_info->rx_pending_limt_low;
+		coal_timer_cfg = SSSNIC_LOWEST_LATENCY;
+	}
+
+	if (coal_timer_cfg == 
nic_dev->rq_desc_group[qid].last_coal_timer && + pending_limt == nic_dev->rq_desc_group[qid].last_pending_limt) + return; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || qid >= nic_dev->qp_res.qp_num) + return; + + (void)sss_nic_set_hw_coal(nic_dev, qid, coal_timer_cfg, pending_limt); + + nic_dev->rq_desc_group[qid].last_pending_limt = pending_limt; + nic_dev->rq_desc_group[qid].last_coal_timer = coal_timer_cfg; +} + +static void sss_nic_adjust_coal_work(struct work_struct *work) +{ + u16 qid; + u64 avg_pkt_size; + u64 tx_pkts; + u64 tx_rate; + u64 rx_bytes; + u64 rx_pkts; + u64 rx_rate; + struct delayed_work *delay = to_delayed_work(work); + struct sss_nic_dev *nic_dev = + container_of(delay, struct sss_nic_dev, moderation_task); + unsigned long period; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) + return; + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + period = (unsigned long)(jiffies - nic_dev->last_jiffies); + + if (nic_dev->use_adaptive_rx_coalesce == 0 || period == 0) + return; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + rx_bytes = nic_dev->rq_desc_group[qid].stats.rx_bytes - + nic_dev->rq_desc_group[qid].last_rx_bytes; + rx_pkts = nic_dev->rq_desc_group[qid].stats.rx_packets - + nic_dev->rq_desc_group[qid].last_rx_pkts; + avg_pkt_size = (rx_pkts != 0) ? 
(rx_bytes / rx_pkts) : 0; + rx_rate = rx_pkts * HZ / period; + + tx_pkts = nic_dev->sq_desc_group[qid].stats.tx_packets - + nic_dev->sq_desc_group[qid].last_tx_pkts; + tx_rate = tx_pkts * HZ / period; + + nic_dev->rq_desc_group[qid].last_rx_bytes = + nic_dev->rq_desc_group[qid].stats.rx_bytes; + nic_dev->rq_desc_group[qid].last_rx_pkts = + nic_dev->rq_desc_group[qid].stats.rx_packets; + nic_dev->sq_desc_group[qid].last_tx_bytes = + nic_dev->sq_desc_group[qid].stats.tx_bytes; + nic_dev->sq_desc_group[qid].last_tx_pkts = + nic_dev->sq_desc_group[qid].stats.tx_packets; + + sss_nic_update_intr_coal(nic_dev, qid, rx_rate, tx_rate, avg_pkt_size); + } + + nic_dev->last_jiffies = jiffies; +} + +static void sss_nic_dev_irq_cfg_init(struct sss_nic_dev *nic_dev, u16 qid) +{ + struct sss_irq_desc *irq_desc = &nic_dev->irq_desc_group[qid]; + struct sss_nic_irq_cfg *nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + + nic_irq->netdev = nic_dev->netdev; + nic_irq->msix_id = irq_desc->msix_id; + nic_irq->irq_id = irq_desc->irq_id; + nic_irq->sq = &nic_dev->sq_desc_group[qid]; + nic_irq->rq = &nic_dev->rq_desc_group[qid]; + nic_dev->rq_desc_group[qid].irq_cfg = nic_irq; +} + +static void __sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev, + struct sss_nic_irq_cfg *nic_irq) +{ + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, SSS_MSIX_DISABLE); + sss_chip_set_msix_auto_mask(nic_dev->hwdev, + nic_irq->msix_id, SSS_CLR_MSIX_AUTO_MASK); + sss_nic_release_irq(nic_irq); +} + +int sss_nic_request_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 i; + u16 qid; + u32 cpuid; + int ret; + struct sss_nic_irq_cfg *nic_irq = NULL; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + sss_nic_dev_irq_cfg_init(nic_dev, qid); + + cpuid = cpumask_local_spread(qid, dev_to_node(nic_dev->dev_hdl)); + cpumask_set_cpu(cpuid, &nic_irq->affinity_mask); + + ret = snprintf(nic_irq->irq_name, sizeof(nic_irq->irq_name), + "%s_qp%u", nic_dev->netdev->name, qid); + 
if (ret < 0) { + ret = -EINVAL; + goto out; + } + + ret = sss_nic_request_irq(nic_dev, qid); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to request rx irq\n"); + goto out; + } + + sss_chip_set_msix_auto_mask(nic_dev->hwdev, nic_irq->msix_id, + SSS_SET_MSIX_AUTO_MASK); + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, + SSS_MSIX_ENABLE); + } + + INIT_DELAYED_WORK(&nic_dev->moderation_task, sss_nic_adjust_coal_work); + + return 0; + +out: + for (i = 0; i < qid; i++) + __sss_nic_release_qp_irq(nic_dev, &nic_dev->qp_res.irq_cfg[i]); + + return ret; +} + +void sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 qid; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) + __sss_nic_release_qp_irq(nic_dev, &nic_dev->qp_res.irq_cfg[qid]); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h new file mode 100644 index 0000000000000..9731e34712935 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IRQ_H +#define SSS_NIC_IRQ_H + +#include + +#include "sss_kernel.h" +#include "sss_nic_dev_define.h" + +int sss_nic_request_qp_irq(struct sss_nic_dev *nic_dev); +void sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c new file mode 100644 index 0000000000000..c11ec5a24515b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include 
"sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_event.h" + +struct sss_nic_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +#define SSSNIC_LOOP_MODE_MIN 1 +#define SSSNIC_LOOP_MODE_MAX 6 + +#define SSSNIC_LOOP_MODE_IS_INVALID(mode) \ + (unlikely(((mode) > SSSNIC_LOOP_MODE_MAX) || ((mode) < SSSNIC_LOOP_MODE_MIN))) + +#define SSSNIC_LINK_INFO_VALID 1 + +static int sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size); +static int sss_nic_mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size, u16 channel); + +int sss_nic_set_hw_port_state(struct sss_nic_dev *nic_dev, bool enable, u16 channel) +{ + struct sss_nic_mbx_set_port_mag_state port_state = {0}; + u16 out_len = sizeof(port_state); + int ret; + + if (!nic_dev) + return -EINVAL; + + if (sss_get_func_type(nic_dev->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + port_state.state = enable ? 
(SSSNIC_MAG_OPCODE_TX_ENABLE | SSSNIC_MAG_OPCODE_RX_ENABLE) : + SSSNIC_MAG_OPCODE_PORT_DISABLE; + port_state.function_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_PORT_ENABLE, + &port_state, sizeof(port_state), + &port_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &port_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set port state, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, port_state.head.state, out_len, channel); + return -EIO; + } + + return 0; +} + +int sss_nic_get_phy_port_stats(struct sss_nic_dev *nic_dev, struct sss_nic_mag_port_stats *stats) +{ + struct sss_nic_mbx_mag_port_stats_info stats_info = {0}; + struct sss_nic_mbx_mag_port_stats *port_stats = NULL; + u16 out_len = sizeof(*port_stats); + int ret; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + stats_info.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_PORT_STAT, + &stats_info, sizeof(stats_info), + port_stats, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, port_stats)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port statistics, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, port_stats->head.state, out_len); + ret = -EIO; + goto out; + } + + memcpy(stats, &port_stats->counter, sizeof(*stats)); + +out: + kfree(port_stats); + + return ret; +} + +int sss_nic_set_autoneg(struct sss_nic_dev *nic_dev, bool enable) +{ + struct sss_nic_link_ksettings settings = {0}; + u32 valid_bitmap = 0; + + valid_bitmap |= SSSNIC_LINK_SET_AUTONEG; + settings.valid_bitmap = valid_bitmap; + settings.autoneg = enable ? 
SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF; + + return sss_nic_set_link_settings(nic_dev, &settings); +} + +static int sss_nic_cfg_loopback_mode(struct sss_nic_dev *nic_dev, u8 opcode, + u8 *mode, u8 *enable) +{ + struct sss_nic_mbx_loopback_mode loopback_mode = {0}; + u16 out_len = sizeof(loopback_mode); + int ret; + + if (opcode == SSS_MGMT_MSG_SET_CMD) { + loopback_mode.mode = *mode; + loopback_mode.en = *enable; + } + loopback_mode.opcode = opcode; + loopback_mode.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_CFG_LOOPBACK_MODE, + &loopback_mode, sizeof(loopback_mode), + &loopback_mode, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &loopback_mode)) { + nic_err(nic_dev->dev_hdl, + "Fail to %s loopback mode, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSS_MGMT_MSG_SET_CMD ? "set" : "get", + ret, loopback_mode.head.state, out_len); + return -EIO; + } + + if (opcode == SSS_MGMT_MSG_GET_CMD) { + *enable = loopback_mode.en; + *mode = loopback_mode.mode; + } + + return 0; +} + +int sss_nic_set_loopback_mode(struct sss_nic_dev *nic_dev, u8 lp_mode, u8 enable) +{ + if (SSSNIC_LOOP_MODE_IS_INVALID(lp_mode)) { + nic_err(nic_dev->dev_hdl, "Invalid loopback mode %u to set\n", + lp_mode); + return -EINVAL; + } + + return sss_nic_cfg_loopback_mode(nic_dev, SSS_MGMT_MSG_SET_CMD, &lp_mode, &enable); +} + +int sss_nic_get_loopback_mode(struct sss_nic_dev *nic_dev, u8 *mode, u8 *enable) +{ + if (!nic_dev || !mode || !enable) + return -EINVAL; + + return sss_nic_cfg_loopback_mode(nic_dev, SSS_MGMT_MSG_GET_CMD, mode, + enable); +} + +int sss_nic_set_hw_led_state(struct sss_nic_dev *nic_dev, enum sss_nic_mag_led_type led_type, + enum sss_nic_mag_led_mode led_mode) +{ + struct sss_nic_mbx_set_led_cfg led_info = {0}; + u16 out_len = sizeof(led_info); + int ret; + + led_info.mode = led_mode; + led_info.type = led_type; + led_info.function_id = sss_get_global_func_id(nic_dev->hwdev); + + ret 
= sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_LED_CFG, + &led_info, sizeof(led_info), &led_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &led_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to set led state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, led_info.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_hw_port_info(struct sss_nic_dev *nic_dev, + struct sss_nic_port_info *port_info, u16 channel) +{ + struct sss_nic_mbx_get_port_info mbx_port_info = {0}; + u16 out_len = sizeof(mbx_port_info); + int ret; + + if (!nic_dev || !port_info) + return -EINVAL; + + mbx_port_info.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_PORT_INFO, + &mbx_port_info, sizeof(mbx_port_info), + &mbx_port_info, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &mbx_port_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port info, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, mbx_port_info.head.state, out_len, channel); + return -EIO; + } + + port_info->advertised_mode = mbx_port_info.advertised_mode; + port_info->duplex = mbx_port_info.duplex; + port_info->autoneg_cap = mbx_port_info.an_support; + port_info->fec = mbx_port_info.fec; + port_info->autoneg_state = mbx_port_info.an_en; + port_info->port_type = mbx_port_info.wire_type; + port_info->supported_mode = mbx_port_info.supported_mode; + port_info->speed = mbx_port_info.speed; + + return 0; +} + +int sss_nic_set_link_settings(struct sss_nic_dev *nic_dev, + struct sss_nic_link_ksettings *settings) +{ + struct sss_nic_mbx_mag_set_port_cfg port_cfg = {0}; + u16 out_len = sizeof(port_cfg); + int ret; + + port_cfg.autoneg = settings->autoneg; + port_cfg.port_id = sss_get_phy_port_id(nic_dev->hwdev); + port_cfg.fec = settings->fec; + port_cfg.config_bitmap = settings->valid_bitmap; + port_cfg.speed = settings->speed; + + ret = 
sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_PORT_CFG, + &port_cfg, sizeof(port_cfg), &port_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &port_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set link settings, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, port_cfg.head.state, out_len); + return -EIO; + } + + return port_cfg.head.state; +} + +int sss_nic_get_hw_link_state(struct sss_nic_dev *nic_dev, u8 *out_state) +{ + struct sss_nic_mbx_get_link_state link_state = {0}; + u16 out_len = sizeof(link_state); + int ret; + + if (!nic_dev || !out_state) + return -EINVAL; + + link_state.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_LINK_STATUS, + &link_state, sizeof(link_state), &link_state, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &link_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to get link state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, link_state.head.state, out_len); + return -EIO; + } + + *out_state = link_state.status; + + return 0; +} + +void sss_nic_notify_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, u8 state) +{ + struct sss_nic_mbx_get_link_state link_state = {0}; + u16 out_len = sizeof(link_state); + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + int ret; + + link_state.status = state; + link_state.port_id = sss_get_phy_port_id(nic_io->hwdev); + ret = sss_mbx_send_to_vf(nic_io->hwdev, vf_id, SSS_MOD_TYPE_SSSLINK, + SSSNIC_MAG_OPCODE_LINK_STATUS, + &link_state, sizeof(link_state), + &link_state, &out_len, 0, SSS_CHANNEL_NIC); + if (ret == SSS_MBX_ERRCODE_UNKNOWN_DES_FUNC) { + sss_nic_dettach_vf(nic_io, vf_id); + nic_warn(nic_io->dev_hdl, "VF %d not initialize, need to disconnect it\n", id); + } else if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &link_state)) { + nic_err(nic_io->dev_hdl, + "Fail to send VF %d the link state change event, ret:%d, state:0x%x, out_len:0x%x\n", + id, ret, link_state.head.state, out_len); + } +} + 
+void sss_nic_notify_all_vf_link_state(struct sss_nic_io *nic_io, u8 state) +{ + struct sss_nic_vf_info *vf_info = NULL; + u16 vf_id; + + nic_io->link_status = state; + for (vf_id = 1; vf_id <= nic_io->max_vf_num; vf_id++) { + vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + if (vf_info->link_forced || !vf_info->attach) + continue; + sss_nic_notify_vf_link_state(nic_io, vf_id, state); + } +} + +static int sss_nic_get_vf_link_status_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *buf_in, u16 in_len, + void *buf_out, u16 *out_len) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_mbx_get_link_state *link_state = buf_out; + struct sss_nic_vf_info *vf_info_group = nic_io->vf_info_group; + bool link_up = vf_info_group[id].link_up; + bool link_forced = vf_info_group[id].link_forced; + + if (link_forced) + link_state->status = link_up ? SSSNIC_LINK_UP : SSSNIC_LINK_DOWN; + else + link_state->status = nic_io->link_status; + + link_state->head.state = SSS_MGMT_CMD_SUCCESS; + *out_len = sizeof(*link_state); + + return 0; +} + +static void sss_nic_get_link_info(struct sss_nic_io *nic_io, + const struct sss_nic_mbx_get_link_state *link_state, + struct sss_nic_event_link_info *link_info) +{ + struct sss_nic_port_info port_info = {0}; + int ret; + + /* link event reported only after set vport enable */ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF || + link_state->status == SSSNIC_LINK_DOWN) + return; + + ret = sss_nic_get_hw_port_info(nic_io->nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_warn(nic_io->dev_hdl, "Fail to get port info\n"); + return; + } + + link_info->valid = SSSNIC_LINK_INFO_VALID; + link_info->duplex = port_info.duplex; + link_info->port_type = port_info.port_type; + link_info->speed = port_info.speed; + link_info->autoneg_state = port_info.autoneg_state; + link_info->autoneg_cap = port_info.autoneg_cap; +} + +static void sss_nic_link_status_event_handler(struct sss_nic_io *nic_io, + void *buf_in, 
u16 in_len,
+					      void *buf_out, u16 *out_len)
+{
+	struct sss_nic_mbx_get_link_state *in_link_state = buf_in;
+	struct sss_nic_mbx_get_link_state *out_link_state = buf_out;
+	struct sss_event_info event_info = {0};
+	struct sss_nic_event_link_info *link_info = (void *)event_info.event_data;
+
+	nic_info(nic_io->dev_hdl, "Link status report received, func_id: %u, status: %u\n",
+		 sss_get_global_func_id(nic_io->hwdev), in_link_state->status);
+
+	sss_update_link_stats(nic_io->hwdev, in_link_state->status);
+
+	/* Fill in port details (speed/duplex/autoneg) for link-up on a PF. */
+	sss_nic_get_link_info(nic_io, in_link_state, link_info);
+
+	event_info.type = (in_link_state->status == SSSNIC_LINK_DOWN) ?
+			  SSSNIC_EVENT_LINK_DOWN : SSSNIC_EVENT_LINK_UP;
+	event_info.service = SSS_EVENT_SRV_NIC;
+	sss_do_event_callback(nic_io->hwdev, &event_info);
+
+	/* Only a PF/PPF acknowledges the report and propagates it to VFs. */
+	if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF)
+		return;
+
+	*out_len = sizeof(*out_link_state);
+	out_link_state->head.state = SSS_MGMT_CMD_SUCCESS;
+	sss_nic_notify_all_vf_link_state(nic_io, in_link_state->status);
+}
+
+/*
+ * MPU "wire event" (cable plug/unplug) handler: invalidate the cached
+ * SFP info so it is re-read on the next access, acknowledge the message
+ * and forward a port-module event to the registered listeners.
+ */
+static void sss_nic_cable_plug_event_handler(struct sss_nic_io *nic_io,
+					     void *in_buf, u16 in_size,
+					     void *out_buf, u16 *out_size)
+{
+	struct sss_nic_mag_wire_event *in_wire_event = in_buf;
+	struct sss_nic_mag_wire_event *out_wire_event = out_buf;
+	struct sss_nic_cache_port_sfp *routine_cmd = NULL;
+	struct sss_event_info event_info = {0};
+	struct sss_nic_port_module_event *module_event = (void *)event_info.event_data;
+
+	/* Drop the cached SFP data; it may be stale after a plug event. */
+	routine_cmd = &nic_io->mag_cfg.rt_cmd;
+	mutex_lock(&nic_io->mag_cfg.sfp_mutex);
+	routine_cmd->mpu_send_sfp_info = false;
+	routine_cmd->mpu_send_sfp_abs = false;
+	mutex_unlock(&nic_io->mag_cfg.sfp_mutex);
+
+	*out_size = sizeof(*out_wire_event);
+	out_wire_event->head.state = SSS_MGMT_CMD_SUCCESS;
+
+	event_info.service = SSS_EVENT_SRV_NIC;
+	event_info.type = SSSNIC_EVENT_PORT_MODULE_EVENT;
+	/*
+	 * NOTE(review): "status != SSNSIC_PORT_PRESENT" maps to
+	 * CABLE_PLUGGED below -- confirm the polarity of the status field
+	 * against the MPU interface spec; it reads inverted at first glance.
+	 */
+	module_event->type = (in_wire_event->status != SSNSIC_PORT_PRESENT) ? 
+			     SSSNIC_PORT_MODULE_CABLE_PLUGGED : SSSNIC_PORT_MODULE_CABLE_UNPLUGGED;
+
+	sss_do_event_callback(nic_io->hwdev, &event_info);
+}
+
+/*
+ * MPU pushed fresh SFP EEPROM data: cache it under sfp_mutex so later
+ * queries can be answered without another mailbox round trip.
+ */
+static void sss_nic_port_sfp_event_handler(struct sss_nic_io *nic_io,
+					   void *in_buf, u16 in_size, void *out_buf, u16 *out_size)
+{
+	struct sss_nic_mbx_get_xsfp_info *in_xsfp_info = in_buf;
+	struct sss_nic_cache_port_sfp *routine_cmd = NULL;
+
+	if (in_size != sizeof(*in_xsfp_info)) {
+		/* NOTE(review): sizeof wants %zu, not %ld -- flag for follow-up. */
+		nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n",
+			in_size, sizeof(*in_xsfp_info));
+		return;
+	}
+
+	routine_cmd = &nic_io->mag_cfg.rt_cmd;
+	mutex_lock(&nic_io->mag_cfg.sfp_mutex);
+	routine_cmd->mpu_send_sfp_info = true;
+	memcpy(&routine_cmd->std_sfp_info, in_xsfp_info, sizeof(*in_xsfp_info));
+	mutex_unlock(&nic_io->mag_cfg.sfp_mutex);
+}
+
+/*
+ * MPU pushed the SFP presence state: cache it under sfp_mutex so
+ * sss_nic_if_sfp_absent() can use it without a mailbox query.
+ */
+static void sss_nic_port_sfp_absent_event_handler(struct sss_nic_io *nic_io,
+						  void *in_buf, u16 in_size,
+						  void *out_buf, u16 *out_size)
+{
+	struct sss_nic_mbx_get_xsfp_present *in_xsfp_present = in_buf;
+	struct sss_nic_cache_port_sfp *routine_cmd = NULL;
+
+	if (in_size != sizeof(*in_xsfp_present)) {
+		/* NOTE(review): sizeof wants %zu, not %ld -- flag for follow-up. */
+		nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n",
+			in_size, sizeof(*in_xsfp_present));
+		return;
+	}
+
+	routine_cmd = &nic_io->mag_cfg.rt_cmd;
+	mutex_lock(&nic_io->mag_cfg.sfp_mutex);
+	routine_cmd->mpu_send_sfp_abs = true;
+	memcpy(&routine_cmd->abs, in_xsfp_present, sizeof(*in_xsfp_present));
+	mutex_unlock(&nic_io->mag_cfg.sfp_mutex);
+}
+
+/*
+ * Return true when no SFP module is present (or the state cannot be
+ * determined).  Uses the cached presence report when the MPU has pushed
+ * one; otherwise queries the MPU synchronously.
+ */
+bool sss_nic_if_sfp_absent(struct sss_nic_dev *nic_dev)
+{
+	int ret;
+	bool sfp_abs_state;
+	struct sss_nic_cache_port_sfp *routine_cmd = NULL;
+	u8 port_id = sss_get_phy_port_id(nic_dev->hwdev);
+	struct sss_nic_mbx_get_xsfp_present xsfp_present = {0};
+	u16 out_len = sizeof(xsfp_present);
+
+	routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd;
+	mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex);
+	if (routine_cmd->mpu_send_sfp_abs) {
+		/* Cached report carried an error: report "absent" to be safe. */
+		if (routine_cmd->abs.head.state) {
+			mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex);
+			return 
true; + } + + sfp_abs_state = (bool)routine_cmd->abs.abs_status; + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return sfp_abs_state; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + xsfp_present.port_id = port_id; + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT, + &xsfp_present, sizeof(xsfp_present), &xsfp_present, + &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &xsfp_present)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port%u sfp absent status, ret: %d, status: 0x%x, out_len: 0x%x\n", + port_id, ret, xsfp_present.head.state, out_len); + return true; + } + + return !!xsfp_present.abs_status; +} + +int sss_nic_get_sfp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_mbx_get_xsfp_info *xsfp_info) +{ + int ret; + u16 out_len = sizeof(*xsfp_info); + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (!nic_dev || !xsfp_info) + return -EINVAL; + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_info) { + if (routine_cmd->std_sfp_info.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return -EIO; + } + + memcpy(xsfp_info, &routine_cmd->std_sfp_info, sizeof(*xsfp_info)); + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + xsfp_info->port_id = sss_get_phy_port_id(nic_dev->hwdev); + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_XSFP_INFO, + xsfp_info, sizeof(*xsfp_info), xsfp_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, xsfp_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port%u sfp eeprom information, ret: %d, status: 0x%x, out_len: 0x%x\n", + sss_get_phy_port_id(nic_dev->hwdev), ret, + xsfp_info->head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_sfp_eeprom(struct sss_nic_dev *nic_dev, u8 *data, u32 len) +{ + struct sss_nic_mbx_get_xsfp_info 
xsfp_info = {0}; + int ret; + + if (!nic_dev || !data) + return -EINVAL; + + if (sss_nic_if_sfp_absent(nic_dev)) + return -ENXIO; + + ret = sss_nic_get_sfp_info(nic_dev, &xsfp_info); + if (ret != 0) + return ret; + + memcpy(data, xsfp_info.sfp_info, len); + + return 0; +} + +int sss_nic_get_sfp_type(struct sss_nic_dev *nic_dev, u8 *sfp_type, u8 *sfp_type_ext) +{ + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + u8 sfp_data[SSSNIC_STD_SFP_INFO_MAX_SIZE]; + int ret; + + if (!nic_dev || !sfp_type || !sfp_type_ext) + return -EINVAL; + + if (sss_nic_if_sfp_absent(nic_dev)) + return -ENXIO; + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_info) { + if (routine_cmd->std_sfp_info.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return -EIO; + } + + *sfp_type_ext = routine_cmd->std_sfp_info.sfp_info[1]; + *sfp_type = routine_cmd->std_sfp_info.sfp_info[0]; + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + ret = sss_nic_get_sfp_eeprom(nic_dev, (u8 *)sfp_data, SSSNIC_STD_SFP_INFO_MAX_SIZE); + if (ret != 0) + return ret; + + *sfp_type = sfp_data[0]; + *sfp_type_ext = sfp_data[1]; + + return 0; +} + +int sss_nic_set_link_follow_state(struct sss_nic_dev *nic_dev, + enum sss_nic_link_follow_status state) +{ + int ret; + struct sss_nic_mbx_set_link_follow link_follow = {0}; + u16 out_len = sizeof(link_follow); + + link_follow.function_id = sss_get_global_func_id(nic_dev->hwdev); + link_follow.follow = state; + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_LINK_FOLLOW, + &link_follow, sizeof(link_follow), + &link_follow, &out_len); + if ((link_follow.head.state != SSS_MGMT_CMD_UNSUPPORTED && link_follow.head.state != 0) || + ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set link status follow, ret: %d, state: 0x%x, out size: 0x%x\n", + ret, 
link_follow.head.state, out_len); + return -EFAULT; + } + + return link_follow.head.state; +} + +static const struct sss_nic_vf_msg_handler g_sss_nic_vf_mag_cmd_proc[] = { + { + .opcode = SSSNIC_MAG_OPCODE_LINK_STATUS, + .msg_handler = sss_nic_get_vf_link_status_handler, + }, +}; + +static const struct sss_nic_vf_msg_handler *sss_nic_get_vf_mag_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_sss_nic_vf_mag_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_sss_nic_vf_mag_cmd_proc[i].opcode == opcode) + return &g_sss_nic_vf_mag_cmd_proc[i]; + + return NULL; +} + +/* pf/ppf handler mbx msg from vf */ +int sss_nic_pf_mag_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + const struct sss_nic_vf_msg_handler *handler = NULL; + struct sss_nic_io *nic_io; + + if (!hwdev) + return -EFAULT; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + handler = sss_nic_get_vf_mag_cmd_proc(cmd); + if (handler) + return handler->msg_handler(nic_io, vf_id, + in_buf, in_size, out_buf, out_size); + + nic_warn(nic_io->dev_hdl, "NO function found for mag cmd: %u received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +static struct nic_event_handler g_sss_nic_mag_cmd_proc[] = { + { + .opcode = SSSNIC_MAG_OPCODE_LINK_STATUS, + .event_handler = sss_nic_link_status_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_WIRE_EVENT, + .event_handler = sss_nic_cable_plug_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_GET_XSFP_INFO, + .event_handler = sss_nic_port_sfp_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT, + .event_handler = sss_nic_port_sfp_absent_event_handler, + }, +}; + +static const struct nic_event_handler *sss_nic_get_mag_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_sss_nic_mag_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_sss_nic_mag_cmd_proc[i].opcode == opcode) + return 
&g_sss_nic_mag_cmd_proc[i]; + + return NULL; +} + +static int _sss_nic_mag_event_handler(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + const struct nic_event_handler *handler = NULL; + struct sss_nic_io *nic_io = NULL; + struct sss_mgmt_msg_head *out_msg_head = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + *out_size = 0; + + handler = sss_nic_get_mag_cmd_proc(cmd); + if (handler) { + handler->event_handler(nic_io, in_buf, in_size, out_buf, out_size); + return 0; + } + + out_msg_head = out_buf; + out_msg_head->state = SSS_MGMT_CMD_UNSUPPORTED; + *out_size = sizeof(*out_msg_head); + + nic_warn(nic_io->dev_hdl, "Invalid mag event cmd: %u\n", cmd); + + return 0; +} + +int sss_nic_vf_mag_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_mag_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +/* pf/ppf handler mgmt cpu report ssslink event */ +void sss_nic_pf_mag_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + _sss_nic_mag_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +static int _sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + if (sss_nic_get_vf_mag_cmd_proc(cmd)) + return sss_mbx_send_to_pf(hwdev, SSS_MOD_TYPE_SSSLINK, cmd, + in_buf, in_size, out_buf, out_size, 0, channel); + + return sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_SSSLINK, + cmd, in_buf, in_size, out_buf, out_size, 0, channel); +} + +static int sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + return _sss_nic_mag_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, + out_buf, out_size, SSS_CHANNEL_NIC); +} + +static int 
sss_nic_mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + return _sss_nic_mag_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, + out_buf, out_size, channel); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h new file mode 100644 index 0000000000000..ef112925cf505 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_MAG_CFG_H +#define SSS_NIC_MAG_CFG_H + +#include + +#include "sss_nic_cfg_mag_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_dev_define.h" + +enum port_module_event_type { + SSSNIC_PORT_MODULE_CABLE_PLUGGED, + SSSNIC_PORT_MODULE_CABLE_UNPLUGGED, + SSSNIC_PORT_MODULE_LINK_ERR, + SSSNIC_PORT_MODULE_MAX_EVENT, +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +struct sss_nic_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +int sss_nic_set_hw_port_state(struct sss_nic_dev *nic_dev, bool enable, u16 channel); + +int sss_nic_get_hw_link_state(struct sss_nic_dev *nic_dev, u8 *link_state); + +void sss_nic_notify_all_vf_link_state(struct sss_nic_io *nic_io, u8 link_status); + +int sss_nic_get_hw_port_info(struct sss_nic_dev *nic_dev, struct sss_nic_port_info *port_info, + u16 channel); + +int sss_nic_get_phy_port_stats(struct sss_nic_dev *nic_dev, struct sss_nic_mag_port_stats *stats); + +int sss_nic_set_link_settings(struct sss_nic_dev *nic_dev, + struct sss_nic_link_ksettings *settings); + +int sss_nic_set_hw_led_state(struct sss_nic_dev *nic_dev, enum sss_nic_mag_led_type type, + enum sss_nic_mag_led_mode mode); + +int sss_nic_set_loopback_mode(struct sss_nic_dev *nic_dev, u8 mode, u8 enable); + +int sss_nic_set_autoneg(struct sss_nic_dev *nic_dev, bool enable); + +int 
sss_nic_get_sfp_type(struct sss_nic_dev *nic_dev, u8 *sfp_type, u8 *sfp_type_ext); +int sss_nic_get_sfp_eeprom(struct sss_nic_dev *nic_dev, u8 *data, u32 len); + +int sss_nic_set_link_follow_state(struct sss_nic_dev *nic_dev, + enum sss_nic_link_follow_status status); + +void sss_nic_notify_vf_link_state(struct sss_nic_io *nic_io, + u16 vf_id, u8 link_status); + +int sss_nic_vf_mag_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +void sss_nic_pf_mag_event_handler(void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int sss_nic_pf_mag_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int sss_nic_get_sfp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_mbx_get_xsfp_info *xsfp_info); + +bool sss_nic_if_sfp_absent(struct sss_nic_dev *nic_dev); + +int sss_nic_get_loopback_mode(struct sss_nic_dev *nic_dev, u8 *mode, u8 *enable); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c new file mode 100644 index 0000000000000..e20992f4ba40f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c @@ -0,0 +1,1077 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" +#include "sss_nic_rx.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_rx_reset.h" +#include "sss_nic_rss.h" +#include 
"sss_nic_dcb.h" +#include "sss_nic_ethtool.h" +#include "sss_nic_filter.h" +#include "sss_nic_netdev_ops.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_ntuple.h" +#include "sss_nic_event.h" +#include "sss_tool_nic_func.h" + +#define DEFAULT_POLL_BUDGET 64 +static u32 poll_budget = DEFAULT_POLL_BUDGET; +module_param(poll_budget, uint, 0444); +MODULE_PARM_DESC(poll_budget, "Number packets for NAPI budget (default=64)"); + +#define SSSNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define SSSNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 25 +#define SSSNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + +static u8 msix_pending_limit = SSSNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(msix_pending_limit, byte, 0444); +MODULE_PARM_DESC(msix_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)"); + +static u8 msix_coalesc_timer = + SSSNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; +module_param(msix_coalesc_timer, byte, 0444); +MODULE_PARM_DESC(msix_coalesc_timer, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=25)"); + +#define DEFAULT_RX_BUFF_LEN 2 +u16 rx_buff_size = DEFAULT_RX_BUFF_LEN; +module_param(rx_buff_size, ushort, 0444); +MODULE_PARM_DESC(rx_buff_size, "Set rx_buff size, buffer len must be 2^n. 
2 - 16, default is 2KB"); + +static u32 rx_poll_wqe = 256; +module_param(rx_poll_wqe, uint, 0444); +MODULE_PARM_DESC(rx_poll_wqe, "Number wqe for rx poll (default=256)"); + +static u8 link_follow_status = SSSNIC_LINK_FOLLOW_STATUS_MAX; +module_param(link_follow_status, byte, 0444); +MODULE_PARM_DESC(link_follow_status, "Set link follow status port status (0=default,1=follow,2=separate,3=unset"); + +#define SSSNIC_DEV_WQ_NAME "sssnic_dev_wq" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) + +#define QID_MASKED(qid, nic_dev) ((qid) & ((nic_dev)->qp_num - 1)) +#define WATCHDOG_TIMEOUT 5 + +#define SSSNIC_SQ_DEPTH 1024 +#define SSSNIC_RQ_DEPTH 1024 + +enum sss_nic_rx_buff_len { + RX_BUFF_VALID_2KB = 2, + RX_BUFF_VALID_4KB = 4, + RX_BUFF_VALID_8KB = 8, + RX_BUFF_VALID_16KB = 16, +}; + +#define CONVERT_UNIT 1024 +#define RX_BUFF_TO_BYTES(size) ((u16)((size) * CONVERT_UNIT)) +#define RX_BUFF_NUM_PER_PAGE 2 +#define RX_BUFF_TO_DMA_SIZE(rx_buff_len) (RX_BUFF_NUM_PER_PAGE * (rx_buff_len)) +#define DMA_SIZE_TO_PAGE_NUM(buff_size) ((buff_size) / PAGE_SIZE) +#define PAGE_NUM_TO_ORDER(page_num) ((page_num) > 0 ? 
ilog2(page_num) : 0) +#define BUFF_SIZE_TO_PAGE_ORDER(buff_size) PAGE_NUM_TO_ORDER(DMA_SIZE_TO_PAGE_NUM(buff_size)) + +#define POLL_BUDGET_IS_VALID(budget) ((budget) <= SSSNIC_MAX_RX_QUEUE_DEPTH) + +#define SSSNIC_NETDEV_DEFAULT_FEATURE (NETIF_F_SG | NETIF_F_HIGHDMA) + +#define SSSNIC_LP_PKT_LEN 60 + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + +#define SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1 +#define SSSNIC_VLAN_CLEAR_OFFLOAD (~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ + NETIF_F_ALL_TSO)) + +#define SSSNIC_DRV_DESC "Intelligent Network Interface Card Driver" + +static int sss_nic_netdev_event_handler(struct notifier_block *notifier, + unsigned long event, void *ptr); +typedef void (*sss_nic_port_module_event_handler_t)(struct sss_nic_dev *nic_dev, void *event_data); + +static DEFINE_MUTEX(g_netdev_notifier_mutex); +static int g_netdev_notifier_ref_cnt; + +typedef void (*sss_nic_event_handler_t)(struct sss_nic_dev *nic_dev, struct sss_event_info *event); + +static struct notifier_block g_netdev_notifier = { + .notifier_call = sss_nic_netdev_event_handler, +}; + +static void sss_nic_register_notifier(struct sss_nic_dev *nic_dev) +{ + int ret; + + mutex_lock(&g_netdev_notifier_mutex); + g_netdev_notifier_ref_cnt++; + if (g_netdev_notifier_ref_cnt == 1) { + ret = register_netdevice_notifier(&g_netdev_notifier); + if (ret != 0) { + nic_info(nic_dev->dev_hdl, + "Fail to register netdevice notifier, ret: %d\n", ret); + g_netdev_notifier_ref_cnt--; + } + } + mutex_unlock(&g_netdev_notifier_mutex); +} + +static void sss_nic_unregister_notifier(struct sss_nic_dev *nic_dev) +{ + mutex_lock(&g_netdev_notifier_mutex); + if (g_netdev_notifier_ref_cnt == 1) + unregister_netdevice_notifier(&g_netdev_notifier); + + if (g_netdev_notifier_ref_cnt > 0) + g_netdev_notifier_ref_cnt--; + mutex_unlock(&g_netdev_notifier_mutex); +} + +#if IS_ENABLED(CONFIG_VLAN_8021Q) +static u16 sss_nic_get_vlan_depth(struct net_device *dev) +{ + u16 vlan_depth = 0; + 
struct net_device *vlan_dev = dev; + + do { + vlan_depth++; + vlan_dev = vlan_dev_priv(vlan_dev)->real_dev; + } while (is_vlan_dev(vlan_dev)); + + return vlan_depth; +} + +static void sss_nic_clear_netdev_vlan_offload(struct net_device *dev, u16 vlan_depth) +{ + if (vlan_depth == SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + dev->vlan_features &= SSSNIC_VLAN_CLEAR_OFFLOAD; + } else if (vlan_depth > SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { +#ifdef HAVE_NDO_SET_FEATURES + dev->hw_features &= SSSNIC_VLAN_CLEAR_OFFLOAD; +#endif + dev->features &= SSSNIC_VLAN_CLEAR_OFFLOAD; + } +} +#endif + +static int sss_nic_netdev_event_handler(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ +#if IS_ENABLED(CONFIG_VLAN_8021Q) + u16 vlan_depth; +#endif + struct net_device *real_dev = NULL; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (!is_vlan_dev(dev)) + return NOTIFY_DONE; + + if (event != NETDEV_REGISTER) + return NOTIFY_DONE; + + dev_hold(dev); + + real_dev = vlan_dev_real_dev(dev); + if (!sss_nic_is_netdev_ops_match(real_dev)) + goto out; + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + vlan_depth = sss_nic_get_vlan_depth(dev); + sss_nic_clear_netdev_vlan_offload(dev, vlan_depth); +#endif +out: + dev_put(dev); + + return NOTIFY_DONE; +} +#endif + +static netdev_features_t sss_nic_default_cso_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_CSUM(nic_dev->nic_io)) + feature |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (SSSNIC_SUPPORT_SCTP_CRC(nic_dev->nic_io)) + feature |= NETIF_F_SCTP_CRC; + + return feature; +} + +static netdev_features_t sss_nic_default_gso_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_TSO(nic_dev->nic_io)) + feature |= NETIF_F_TSO | NETIF_F_TSO6; +#ifdef HAVE_ENCAPSULATION_TSO + if (SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->nic_io)) + feature |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; +#endif /* 
HAVE_ENCAPSULATION_TSO */ + + return feature; +} + +static netdev_features_t sss_nic_default_vlan_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_RXVLAN_FILTER(nic_dev->nic_io)) { +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + feature |= NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + feature |= NETIF_F_HW_VLAN_FILTER; +#endif + } + + if (SSSNIC_SUPPORT_VLAN_OFFLOAD(nic_dev->nic_io)) { +#if defined(NETIF_F_HW_VLAN_CTAG_TX) + feature |= NETIF_F_HW_VLAN_CTAG_TX; +#elif defined(NETIF_F_HW_VLAN_TX) + feature |= NETIF_F_HW_VLAN_TX; +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) + feature |= NETIF_F_HW_VLAN_CTAG_RX; +#elif defined(NETIF_F_HW_VLAN_RX) + feature |= NETIF_F_HW_VLAN_RX; +#endif + } + + return feature; +} + +static netdev_features_t sss_nic_default_lro_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_LRO(nic_dev->nic_io)) + feature = NETIF_F_LRO; + + return feature; +} + +static void sss_nic_init_netdev_hw_feature(struct sss_nic_dev *nic_dev, + netdev_features_t lro_feature) +{ + struct net_device *netdev = nic_dev->netdev; + netdev_features_t hw_features = 0; + + hw_features = netdev->hw_features; + + hw_features |= netdev->features | lro_feature; + + netdev->hw_features = hw_features; +} + +static void sss_nic_init_netdev_hw_enc_feature(struct sss_nic_dev *nic_dev, + netdev_features_t cso_feature, + netdev_features_t gso_feature) +{ + struct net_device *netdev = nic_dev->netdev; + +#ifdef HAVE_ENCAPSULATION_CSUM + netdev->hw_enc_features |= SSSNIC_NETDEV_DEFAULT_FEATURE; + if (SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->nic_io)) { + netdev->hw_enc_features |= cso_feature; +#ifdef HAVE_ENCAPSULATION_TSO + netdev->hw_enc_features |= gso_feature | NETIF_F_TSO_ECN; +#endif /* HAVE_ENCAPSULATION_TSO */ + } +#endif /* HAVE_ENCAPSULATION_CSUM */ +} + +static void sss_nic_init_netdev_feature(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = 
nic_dev->netdev; + netdev_features_t cso_feature = 0; + netdev_features_t gso_feature = 0; + netdev_features_t vlan_feature = 0; + netdev_features_t lro_feature = 0; + + cso_feature = sss_nic_default_cso_feature(nic_dev); + gso_feature = sss_nic_default_gso_feature(nic_dev); + vlan_feature = sss_nic_default_vlan_feature(nic_dev); + lro_feature = sss_nic_default_lro_feature(nic_dev); + + netdev->features |= SSSNIC_NETDEV_DEFAULT_FEATURE | + cso_feature | gso_feature | vlan_feature; + netdev->vlan_features |= SSSNIC_NETDEV_DEFAULT_FEATURE | + cso_feature | gso_feature; + + sss_nic_init_netdev_hw_feature(nic_dev, lro_feature); + sss_nic_init_netdev_hw_enc_feature(nic_dev, cso_feature, gso_feature); + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +} + +static void sss_nic_init_intr_coal_param(struct sss_nic_intr_coal_info *intr_coal, u16 max_qp) +{ + u16 i; + + for (i = 0; i < max_qp; i++) { + intr_coal[i].pkt_rate_low = SSSNIC_RX_RATE_LOW; + intr_coal[i].pkt_rate_high = SSSNIC_RX_RATE_HIGH; + intr_coal[i].rx_usecs_low = SSSNIC_RX_COAL_TIME_LOW; + intr_coal[i].rx_usecs_high = SSSNIC_RX_COAL_TIME_HIGH; + intr_coal[i].rx_pending_limt_low = SSSNIC_RX_PENDING_LIMIT_LOW; + intr_coal[i].rx_pending_limt_high = SSSNIC_RX_PENDING_LIMIT_HIGH; + intr_coal[i].pending_limt = msix_pending_limit; + intr_coal[i].coalesce_timer = msix_coalesc_timer; + intr_coal[i].resend_timer = SSSNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + } +} + +static int sss_nic_init_intr_coalesce(struct sss_nic_dev *nic_dev) +{ + u64 coalesce_size; + + coalesce_size = sizeof(*nic_dev->coal_info) * nic_dev->max_qp_num; + nic_dev->coal_info = kzalloc(coalesce_size, GFP_KERNEL); + if (!nic_dev->coal_info) + return -ENOMEM; + + sss_nic_init_intr_coal_param(nic_dev->coal_info, nic_dev->max_qp_num); + + if (test_bit(SSSNIC_INTR_ADAPT, &nic_dev->flags)) + nic_dev->use_adaptive_rx_coalesce = 1; + else + nic_dev->use_adaptive_rx_coalesce = 0; + + return 0; +} + +static void 
sss_nic_deinit_intr_coalesce(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->coal_info); + nic_dev->coal_info = NULL; +} + +static int sss_nic_alloc_lb_test_buf(struct sss_nic_dev *nic_dev) +{ + u8 *loop_test_rx_buf = NULL; + + loop_test_rx_buf = vmalloc(SSSNIC_LP_PKT_CNT * SSSNIC_LP_PKT_LEN); + if (!loop_test_rx_buf) + return -ENOMEM; + + nic_dev->loop_test_rx_buf = loop_test_rx_buf; + nic_dev->loop_pkt_len = SSSNIC_LP_PKT_LEN; + + return 0; +} + +static void sss_nic_free_lb_test_buf(struct sss_nic_dev *nic_dev) +{ + vfree(nic_dev->loop_test_rx_buf); + nic_dev->loop_test_rx_buf = NULL; +} + +static void sss_nic_dev_deinit(struct sss_nic_dev *nic_dev) +{ + sss_nic_free_lb_test_buf(nic_dev); + + sss_nic_deinit_intr_coalesce(nic_dev); + + sss_nic_free_rq_desc_group(nic_dev); + + sss_nic_free_sq_desc_group(nic_dev); + + sss_nic_clean_mac_list_filter(nic_dev); + + sss_nic_del_mac(nic_dev, nic_dev->netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + + sss_nic_free_rss_key(nic_dev); + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags)) + sss_nic_set_hw_dcb_state(nic_dev, + SSSNIC_MBX_OPCODE_SET_DCB_STATE, SSSNIC_DCB_STATE_DISABLE); +} + +static int sss_nic_init_mac_addr(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_get_default_mac(nic_dev, (u8 *)(netdev->dev_addr)); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to get MAC address\n"); + return ret; + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + nic_info(nic_dev->dev_hdl, + "Invalid default mac address %pM\n", netdev->dev_addr); + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + nic_err(nic_dev->dev_hdl, "Invalid default MAC address\n"); + return -EIO; + } + + eth_hw_addr_random(netdev); + nic_info(nic_dev->dev_hdl, + "Use random mac address %pM\n", netdev->dev_addr); + } + + ret = sss_nic_set_mac(nic_dev, netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + if (ret != 0 && ret != 
SSSNIC_PF_SET_VF_ALREADY) { + /* If it is a VF device, it is possible that the MAC address has been set by PF, + * and this situation is legal. + */ + nic_err(nic_dev->dev_hdl, "Fail to set default MAC\n"); + return ret; + } + + return 0; +} + +static void sss_nic_set_mtu_range(struct net_device *netdev) +{ + /* MTU range: 384 - 9600 */ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + netdev->min_mtu = SSSNIC_MIN_MTU_SIZE; + netdev->max_mtu = SSSNIC_MAX_JUMBO_FRAME_SIZE; +#endif + +#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = SSSNIC_MIN_MTU_SIZE; + netdev->extended->max_mtu = SSSNIC_MAX_JUMBO_FRAME_SIZE; +#endif +} + +static int sss_nic_dev_init(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + + /* get nic cap from hw */ + sss_get_nic_capability(nic_dev->hwdev, &nic_dev->nic_svc_cap); + + ret = sss_nic_dcb_init(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init dcb\n"); + return -EFAULT; + } + + sss_nic_try_to_enable_rss(nic_dev); + + ret = sss_nic_init_mac_addr(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init mac address\n"); + goto init_mac_addr_err; + } + + sss_nic_set_mtu_range(netdev); + + ret = sss_nic_alloc_sq_desc_group(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init sq\n"); + goto init_sq_err; + } + + ret = sss_nic_alloc_rq_desc_group(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init rq\n"); + goto init_rq_err; + } + + ret = sss_nic_init_intr_coalesce(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init interrupt and coalesce\n"); + goto init_intr_coalesce_err; + } + + ret = sss_nic_alloc_lb_test_buf(nic_dev); + if (ret) { + nic_err(nic_dev->dev_hdl, "Fail to alloc loopback test buf\n"); + goto alloc_lb_test_buf_err; + } + + return 0; + +alloc_lb_test_buf_err: + sss_nic_deinit_intr_coalesce(nic_dev); + +init_intr_coalesce_err: + sss_nic_free_rq_desc_group(nic_dev); + +init_rq_err: + 
sss_nic_free_sq_desc_group(nic_dev); + +init_sq_err: + sss_nic_del_mac(nic_dev, netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + +init_mac_addr_err: + sss_nic_free_rss_key(nic_dev); + + return ret; +} + +static void sss_nic_init_netdev_ops(struct sss_nic_dev *nic_dev) +{ + sss_nic_set_netdev_ops(nic_dev); + + sss_nic_set_ethtool_ops(nic_dev); + + nic_dev->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; +} + +static void sss_nic_validate_parameters(struct pci_dev *pdev) +{ + u16 i; + u16 valid_rx_buff_len_list[] = { + RX_BUFF_VALID_2KB, RX_BUFF_VALID_4KB, + RX_BUFF_VALID_8KB, RX_BUFF_VALID_16KB + }; + + if (!POLL_BUDGET_IS_VALID(poll_budget)) + poll_budget = DEFAULT_POLL_BUDGET; + + for (i = 0; i < ARRAY_LEN(valid_rx_buff_len_list); i++) { + if (rx_buff_size == valid_rx_buff_len_list[i]) + return; + } + + rx_buff_size = DEFAULT_RX_BUFF_LEN; +} + +static void sss_nic_periodic_work_handler(struct work_struct *work) +{ + struct delayed_work *delay_work = to_delayed_work(work); + struct sss_nic_dev *nic_dev = container_of(delay_work, struct sss_nic_dev, routine_work); + + if (SSSNIC_TEST_CLEAR_NIC_EVENT_FLAG(nic_dev, SSSNIC_EVENT_TX_TIMEOUT)) + sss_fault_event_report(nic_dev->hwdev, SSS_FAULT_SRC_TX_TIMEOUT, + SSS_FAULT_LEVEL_SERIOUS_FLR); + + queue_delayed_work(nic_dev->workq, &nic_dev->routine_work, HZ); +} + +static void sss_nic_dev_resource_destroy(struct sss_nic_dev *nic_dev) +{ + destroy_workqueue(nic_dev->workq); + kfree(nic_dev->vlan_bitmap); +} + +static int sss_nic_dev_params_init(struct net_device *netdev, + struct sss_hal_dev *uld_dev) +{ + struct pci_dev *pdev = uld_dev->pdev; + struct sss_nic_dev *nic_dev; + + nic_dev = (struct sss_nic_dev *)netdev_priv(netdev); + nic_dev->hwdev = uld_dev->hwdev; + nic_dev->netdev = netdev; + nic_dev->pdev = pdev; + nic_dev->dev_hdl = &pdev->dev; + nic_dev->uld_dev = uld_dev; + nic_dev->rx_buff_len = RX_BUFF_TO_BYTES(rx_buff_size); + nic_dev->rx_dma_buff_size = 
RX_BUFF_TO_DMA_SIZE(nic_dev->rx_buff_len); + nic_dev->page_order = BUFF_SIZE_TO_PAGE_ORDER(nic_dev->rx_dma_buff_size); + nic_dev->poll_budget = (int)poll_budget; + nic_dev->rx_poll_wqe = rx_poll_wqe; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->qp_res.sq_depth = SSSNIC_SQ_DEPTH; + nic_dev->qp_res.rq_depth = SSSNIC_RQ_DEPTH; + nic_dev->max_qp_num = sss_get_max_sq_num(nic_dev->hwdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + + mutex_init(&nic_dev->qp_mutex); + sema_init(&nic_dev->port_sem, 1); + + nic_dev->vlan_bitmap = kzalloc(SSSNIC_VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); + if (!nic_dev->vlan_bitmap) + return -ENOMEM; + + nic_dev->workq = create_singlethread_workqueue(SSSNIC_DEV_WQ_NAME); + if (!nic_dev->workq) { + nic_err(&pdev->dev, "Fail to initialize nic workqueue\n"); + kfree(nic_dev->vlan_bitmap); + return -ENOMEM; + } + + INIT_LIST_HEAD(&nic_dev->tcam_info.tcam_node_info.tcam_node_list); + INIT_LIST_HEAD(&nic_dev->tcam_info.tcam_list); + INIT_LIST_HEAD(&nic_dev->rx_rule.rule_list); + + INIT_LIST_HEAD(&nic_dev->mc_filter_list); + INIT_LIST_HEAD(&nic_dev->uc_filter_list); + + INIT_DELAYED_WORK(&nic_dev->routine_work, sss_nic_periodic_work_handler); + INIT_DELAYED_WORK(&nic_dev->rq_watchdog_work, sss_nic_rq_watchdog_handler); + INIT_WORK(&nic_dev->rx_mode_work, sss_nic_set_rx_mode_work); + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_INTR_ADAPT); + + return 0; +} + +static void sss_nic_set_default_link_follow(struct sss_nic_dev *nic_dev) +{ + int ret; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + return; + + if (link_follow_status >= SSSNIC_LINK_FOLLOW_STATUS_MAX) + return; + + ret = sss_nic_set_link_follow_state(nic_dev, link_follow_status); + if (ret == SSS_MGMT_CMD_UNSUPPORTED) + nic_warn(nic_dev->dev_hdl, + "Firmware doesn't support to set link status follow port status\n"); +} + +static int sss_nic_set_default_feature_to_hw(struct sss_nic_dev *nic_dev) +{ + int ret; + + sss_nic_set_default_link_follow(nic_dev); + + ret = 
sss_nic_set_feature_to_hw(nic_dev->nic_io); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to set nic feature\n"); + return ret; + } + + /* enable all features in netdev->features */ + ret = sss_nic_enable_netdev_feature(nic_dev); + if (ret != 0) { + sss_nic_update_nic_feature(nic_dev, 0); + sss_nic_set_feature_to_hw(nic_dev->nic_io); + nic_err(nic_dev->dev_hdl, "Fail to set netdev feature\n"); + return ret; + } + + if (SSSNIC_SUPPORT_RXQ_RECOVERY(nic_dev->nic_io)) + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + + return 0; +} + +static struct net_device *sss_nic_alloc_netdev(void *hwdev) +{ + u16 max_qps = sss_get_max_sq_num(hwdev); + + return alloc_etherdev_mq(sizeof(struct sss_nic_dev), max_qps); +} + +static void sss_nic_free_netdev(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->vlan_bitmap); + free_netdev(nic_dev->netdev); +} + +static int sss_nic_reset_function(void *hwdev) +{ + u16 glb_func_id = sss_get_global_func_id(hwdev); + + return sss_chip_reset_function(hwdev, glb_func_id, SSS_NIC_RESET, SSS_CHANNEL_NIC); +} + +static int sss_nic_init_netdev(struct sss_nic_dev *nic_dev) +{ + int ret; + + sss_nic_init_netdev_ops(nic_dev); + + sss_nic_init_netdev_feature(nic_dev); + + ret = sss_nic_set_default_feature_to_hw(nic_dev); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_nic_deinit_netdev(struct sss_nic_dev *nic_dev) +{ + sss_nic_update_nic_feature(nic_dev, 0); + sss_nic_set_feature_to_hw(nic_dev->nic_io); +} + +static int sss_nic_register_netdev(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_register_notifier(nic_dev); +#endif + + ret = register_netdev(netdev); + if (ret != 0) { +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_unregister_notifier(nic_dev); +#endif + nic_err(nic_dev->dev_hdl, "Fail to register netdev\n"); + return -ENOMEM; + } + + queue_delayed_work(nic_dev->workq, &nic_dev->routine_work, HZ); + + netif_carrier_off(netdev); 
+ + return 0; +} + +static void sss_nic_unregister_netdev(struct sss_nic_dev *nic_dev) +{ + unregister_netdev(nic_dev->netdev); + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_unregister_notifier(nic_dev); +#endif + cancel_delayed_work_sync(&nic_dev->routine_work); + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + cancel_work_sync(&nic_dev->rx_mode_work); + destroy_workqueue(nic_dev->workq); +} + +static int sss_nic_probe(struct sss_hal_dev *hal_dev, void **uld_dev, + char *uld_dev_name) +{ + struct pci_dev *pdev = hal_dev->pdev; + void *hwdev = hal_dev->hwdev; + struct sss_nic_dev *nic_dev = NULL; + struct net_device *netdev = NULL; + int ret; + + if (!sss_support_nic(hwdev)) { + nic_info(&pdev->dev, "Hw don't support nic\n"); + return 0; + } + + nic_info(&pdev->dev, "NIC probe begin\n"); + + sss_nic_validate_parameters(pdev); + + ret = sss_nic_reset_function(hwdev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to reset function\n"); + goto err_out; + } + + netdev = sss_nic_alloc_netdev(hwdev); + if (!netdev) { + nic_err(&pdev->dev, "Fail to allocate net device\n"); + ret = -ENOMEM; + goto err_out; + } + + ret = sss_nic_dev_params_init(netdev, hal_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic_dev params\n"); + goto nic_dev_params_init_err; + } + + nic_dev = (struct sss_nic_dev *)netdev_priv(netdev); + + ret = sss_nic_io_init(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic io\n"); + goto nic_io_init_err; + } + + ret = sss_nic_dev_init(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic dev\n"); + goto nic_dev_init_err; + } + + ret = sss_nic_init_netdev(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init net device\n"); + goto init_netdev_err; + } + + ret = sss_nic_register_netdev(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to register net device\n"); + goto register_netdev_err; + } + + *uld_dev = nic_dev; + nic_info(&pdev->dev, "Success to probe NIC\n"); + + return 0; + 
+register_netdev_err: + sss_nic_deinit_netdev(nic_dev); + +init_netdev_err: + sss_nic_dev_deinit(nic_dev); + +nic_dev_init_err: + sss_nic_io_deinit(nic_dev); + +nic_io_init_err: + sss_nic_dev_resource_destroy(nic_dev); + +nic_dev_params_init_err: + free_netdev(netdev); + +err_out: + nic_err(&pdev->dev, "Fail to run NIC probe\n"); + + return ret; +} + +static void sss_nic_remove(struct sss_hal_dev *hal_dev, void *adapter) +{ + struct sss_nic_dev *nic_dev = adapter; + + if (!nic_dev || !sss_support_nic(hal_dev->hwdev)) + return; + + nic_info(&hal_dev->pdev->dev, "NIC remove begin\n"); + + sss_nic_unregister_netdev(nic_dev); + + sss_nic_flush_tcam(nic_dev); + + sss_nic_deinit_netdev(nic_dev); + + sss_nic_dev_deinit(nic_dev); + + sss_nic_io_deinit(nic_dev); + + sss_nic_free_netdev(nic_dev); + + nic_info(&hal_dev->pdev->dev, "Success to remove NIC\n"); +} + +static void sss_nic_sriov_state_change(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_sriov_state_info *info = (void *)event->event_data; + + if (!info->enable) + sss_nic_clear_all_vf_info(nic_dev->nic_io); +} + +void sss_nic_port_module_cable_plug(struct sss_nic_dev *nic_dev, void *event_data) +{ + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable plugged\n"); +} + +void sss_nic_port_module_cable_unplug(struct sss_nic_dev *nic_dev, void *event_data) +{ + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable unplugged\n"); +} + +void sss_nic_port_module_link_err(struct sss_nic_dev *nic_dev, void *event_data) +{ + struct sss_nic_port_module_event *port_event = event_data; + enum link_err_type err_type = port_event->err_type; + + nicif_info(nic_dev, link, nic_dev->netdev, + "Fail to link, err_type: 0x%x\n", err_type); +} + +static void sss_nic_port_module_event_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_nic_port_module_event *port_event = (void *)event->event_data; + enum port_module_event_type type = 
port_event->type; + + sss_nic_port_module_event_handler_t handler[SSSNIC_PORT_MODULE_MAX_EVENT] = { + sss_nic_port_module_cable_plug, + sss_nic_port_module_cable_unplug, + sss_nic_port_module_link_err, + }; + + if (type >= SSSNIC_PORT_MODULE_MAX_EVENT) { + nicif_err(nic_dev, link, nic_dev->netdev, + "Unknown port module type %d\n", type); + return; + } + + if (handler[type]) + handler[type](nic_dev, event->event_data); +} + +static void sss_nic_link_down(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + test_bit(SSSNIC_LP_TEST, &nic_dev->flags) || + test_bit(SSSNIC_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (!netif_carrier_ok(netdev)) + return; + + netif_carrier_off(netdev); + nic_dev->link_status = false; + nicif_info(nic_dev, link, netdev, "Link is down\n"); +} + +static void sss_nic_link_up(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + test_bit(SSSNIC_LP_TEST, &nic_dev->flags) || + test_bit(SSSNIC_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (netif_carrier_ok(netdev)) + return; + + netif_carrier_on(netdev); + nic_dev->link_status = true; + + nicif_info(nic_dev, link, netdev, "Link is up\n"); +} + +static void sss_nic_comm_fail_envet_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_fault_event *fault = (void *)event->event_data; + + if (fault->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && + fault->info.chip.func_id == sss_get_global_func_id(nic_dev->hwdev)) + sss_nic_link_down(nic_dev, event); +} + +static void sss_nic_event_handler(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + sss_nic_event_handler_t handler[SSSNIC_EVENT_MAX] = { + sss_nic_link_down, + sss_nic_link_up, + sss_nic_port_module_event_handler, + NULL, + }; + + if (event->type >= SSSNIC_EVENT_MAX) + return; + + if 
(handler[event->type]) + handler[event->type](nic_dev, event); +} + +static void sss_nic_comm_event_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + sss_nic_event_handler_t handler[SSS_EVENT_MAX] = { + sss_nic_link_down, + sss_nic_link_down, + sss_nic_comm_fail_envet_handler, + sss_nic_sriov_state_change, + NULL, + sss_nic_link_down, + }; + + if (event->type >= SSS_EVENT_MAX) + return; + + if (handler[event->type]) + handler[event->type](nic_dev, event); +} + +static void sss_nic_event(struct sss_hal_dev *uld_dev, void *adapter, + struct sss_event_info *event) +{ + struct sss_nic_dev *nic_dev = adapter; + + if (!nic_dev || !event || !sss_support_nic(uld_dev->hwdev)) + return; + + if (event->service == SSS_EVENT_SRV_NIC) { + sss_nic_event_handler(nic_dev, event); + return; + } + + if (event->service == SSS_EVENT_SRV_COMM) { + sss_nic_comm_event_handler(nic_dev, event); + return; + } +} + +struct sss_uld_info g_nic_uld_info = { + .probe = sss_nic_probe, + .remove = sss_nic_remove, + .suspend = NULL, + .resume = NULL, + .event = sss_nic_event, + .ioctl = sss_tool_ioctl, +}; + +struct sss_uld_info *get_nic_uld_info(void) +{ + return &g_nic_uld_info; +} + +static __init int sss_nic_init(void) +{ + int ret; + + pr_info("%s - version %s\n", SSSNIC_DRV_DESC, + SSSNIC_DRV_VERSION); + + ret = sss_init_pci(); + if (ret) { + pr_err("SDK init failed.\n"); + return ret; + } + + ret = sss_register_uld(SSS_SERVICE_TYPE_NIC, &g_nic_uld_info); + if (ret != 0) { + pr_err("Fail to register sss_nic uld\n"); + sss_exit_pci(); + return ret; + } + + return 0; +} + +static __exit void sss_nic_exit(void) +{ + sss_unregister_uld(SSS_SERVICE_TYPE_NIC); + sss_exit_pci(); +} + +#ifndef _LLT_TEST_ +module_init(sss_nic_init); +module_exit(sss_nic_exit); +#endif + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Network Interface Card Driver"); +MODULE_VERSION(SSSNIC_DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c new file mode 100644 index 0000000000000..9cdee66cc83b9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c @@ -0,0 +1,799 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_dcb.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" + +#define SSSNIC_MAX_VLAN_ID 4094 +#define SSSNIC_MAX_QOS_NUM 7 + +#define SSSNIC_TX_RATE_TABLE_FULL 12 + +static int sss_nic_ndo_open(struct net_device *netdev) +{ + int ret; + struct sss_nic_qp_info qp_info = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_info(nic_dev, drv, netdev, "Netdev already open\n"); + return 0; + } + + ret = sss_nic_io_resource_init(nic_dev->nic_io); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init nic io resource\n"); + return ret; + } + + ret = sss_nic_dev_resource_init(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp resource\n"); + goto init_dev_res_err; + } + + ret = sss_nic_qp_resource_init(nic_dev, &qp_info, &nic_dev->qp_res); + if (ret != 0) + goto alloc_qp_res_err; + + ret = sss_nic_open_dev(nic_dev, &qp_info, &nic_dev->qp_res); + if (ret != 0) + goto open_chan_err; + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto vport_err; + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP); + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is 
up\n"); + + return 0; + +vport_err: + sss_nic_close_dev(nic_dev, &qp_info); + +open_chan_err: + sss_nic_qp_resource_deinit(nic_dev, &qp_info, &nic_dev->qp_res); + +alloc_qp_res_err: + sss_nic_dev_resource_deinit(nic_dev); + +init_dev_res_err: + sss_nic_io_resource_deinit(nic_dev->nic_io); + + return ret; +} + +static int sss_nic_ndo_stop(struct net_device *netdev) +{ + struct sss_nic_qp_info qp_info = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_info(nic_dev, drv, netdev, "Netdev already close\n"); + return 0; + } + + if (SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_CHANGE_RES_INVALID)) + goto out; + + sss_nic_vport_down(nic_dev); + sss_nic_close_dev(nic_dev, &qp_info); + sss_nic_qp_resource_deinit(nic_dev, &qp_info, &nic_dev->qp_res); + +out: + sss_nic_io_resource_deinit(nic_dev->nic_io); + sss_nic_dev_resource_deinit(nic_dev); + + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); + + return 0; +} + +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#else +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#endif +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb) +#endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + u8 cos; + u8 qp_num; + u16 sq_num; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + 
if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) + return sss_nic_select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); + + sq_num = +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) + netdev_pick_tx(netdev, skb, NULL); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV + fallback(netdev, skb, sb_dev); +#else + fallback(netdev, skb); +#endif +#else + skb_tx_hash(netdev, skb); +#endif + + if (likely(!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE))) + return sq_num; + + cos = sss_nic_get_cos(nic_dev, skb); + + qp_num = (nic_dev->hw_dcb_cfg.cos_qp_num[cos] != 0) ? + sq_num % nic_dev->hw_dcb_cfg.cos_qp_num[cos] : 0; + sq_num = nic_dev->hw_dcb_cfg.cos_qp_offset[cos] + qp_num; + + return sq_num; +} + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void sss_nic_ndo_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *sss_nic_ndo_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif + +#else /* !HAVE_NDO_GET_STATS64 */ +static struct net_device_stats *sss_nic_ndo_get_stats(struct net_device *netdev) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); +#ifndef HAVE_NDO_GET_STATS64 +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *stats = &netdev->stats; +#else + struct net_device_stats *stats = &nic_dev->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +#endif /* HAVE_NDO_GET_STATS64 */ + + sss_nic_get_tx_stats(nic_dev, stats); + sss_nic_get_rx_stats(nic_dev, stats); + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void sss_nic_ndo_tx_timeout(struct net_device *netdev, + unsigned int __maybe_unused queue) +#else +static void sss_nic_ndo_tx_timeout(struct net_device *netdev) +#endif +{ + struct sss_nic_io_queue *sq = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 sw_pi; + u32 hw_ci; + u8 qid; + + 
SSSNIC_STATS_TX_TIMEOUT_INC(nic_dev); + nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, qid))) + continue; + + sq = nic_dev->sq_desc_group[qid].sq; + sw_pi = sss_nic_get_sq_local_pi(sq); + hw_ci = sss_nic_get_sq_hw_ci(sq); + nicif_info(nic_dev, drv, netdev, + "Sq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi state: 0x%lx.\n", + qid, sw_pi, hw_ci, sss_nic_get_sq_local_ci(sq), + nic_dev->qp_res.irq_cfg[qid].napi.state); + + if (sw_pi != hw_ci) { + SSSNIC_SET_NIC_EVENT_FLAG(nic_dev, SSSNIC_EVENT_TX_TIMEOUT); + return; + } + } +} + +static int sss_nic_ndo_change_mtu(struct net_device *netdev, int new_mtu) +{ + int ret = 0; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_XDP_SUPPORT + u32 xdp_max_mtu; + + if (SSSNIC_IS_XDP_ENABLE(nic_dev)) { + xdp_max_mtu = SSSNIC_XDP_MAX_MTU(nic_dev); + if (new_mtu > xdp_max_mtu) { + nicif_err(nic_dev, drv, netdev, + "Fail to change mtu to %d, max mtu is %d\n", + new_mtu, xdp_max_mtu); + return -EINVAL; + } + } +#endif + + ret = sss_nic_set_dev_mtu(nic_dev, (u16)new_mtu); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to change mtu to %d\n", + new_mtu); + return ret; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Success to change mtu from %u to %d\n", + netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + + return 0; +} + +static int sss_nic_ndo_set_mac_address(struct net_device *netdev, void *mac_addr) +{ + int ret = 0; + struct sockaddr *set_addr = mac_addr; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (!is_valid_ether_addr(set_addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, set_addr->sa_data)) { + nicif_info(nic_dev, drv, netdev, + "Already using mac addr: %pM\n", set_addr->sa_data); + return 0; + } + + ret = sss_nic_update_mac(nic_dev, set_addr->sa_data); + if (ret) + return ret; + + ether_addr_copy((u8 *)(netdev->dev_addr), set_addr->sa_data); + 
+ nicif_info(nic_dev, drv, netdev, + "Success to set new mac addr: %pM\n", set_addr->sa_data); + + return 0; +} + +int sss_nic_ndo_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vlan_id) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (vlan_id == 0) + return 0; + + ret = sss_nic_config_vlan(nic_dev, SSSNIC_MBX_OPCODE_ADD, vlan_id); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to add vlan %u\n", vlan_id); + return ret; + } + + SSSNIC_SET_VLAN_BITMAP(nic_dev, vlan_id); + nicif_info(nic_dev, drv, netdev, "Success to add vlan %u\n", vlan_id); + + return 0; +} + +int sss_nic_ndo_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vlan_id) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (vlan_id == 0) + return 0; + + ret = sss_nic_config_vlan(nic_dev, SSSNIC_MBX_OPCODE_DEL, vlan_id); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to delete vlan\n"); + return ret; + } + + SSSNIC_CLEAR_VLAN_BITMAP(nic_dev, vlan_id); + nicif_info(nic_dev, drv, netdev, "Success to delete vlan %u\n", vlan_id); + + return 0; +} + +static netdev_features_t sss_nic_ndo_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t netdev_feature = features; + + /* If Rx checksum is disabled, then LRO should also be disabled */ + if ((netdev_feature & NETIF_F_RXCSUM) == 0) + netdev_feature &= ~NETIF_F_LRO; + + return netdev_feature; +} + +static int sss_nic_ndo_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return sss_nic_set_feature(nic_dev, nic_dev->netdev->features, features); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sss_nic_ndo_poll_controller(struct net_device *netdev) +{ + u16 i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + for (i = 0; i < nic_dev->qp_res.qp_num; i++) + napi_schedule(&nic_dev->qp_res.irq_cfg[i].napi); +} 
+#endif + +static void sss_nic_ndo_set_rx_mode(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_UPDATE_MAC_FILTER); + } + + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +static int sss_nic_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_io *nic_io = nic_dev->nic_io; + struct sss_nic_vf_info *vf_info = NULL; + + if (vf_id >= pci_num_vf(nic_dev->pdev) || + is_multicast_ether_addr(mac)) + return -EINVAL; + + vf_info = &nic_io->vf_info_group[vf_id]; + ether_addr_copy(vf_info->user_mac, mac); + + if (is_zero_ether_addr(mac)) + nic_info(nic_dev->dev_hdl, + "Success to delete mac on vf %d\n", vf_id); + else + nic_info(nic_dev->dev_hdl, + "Success to set mac %pM on vf %d\n", mac, vf_id); + + return 0; +} + +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX +static int sss_nic_ndo_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos, __be16 vlan_proto) +#else +static int sss_nic_ndo_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos) +#endif +{ + u16 pre_vlanprio; + u16 cur_vlanprio; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (qos > SSSNIC_MAX_QOS_NUM || vlan_id > SSSNIC_MAX_VLAN_ID || + vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + pre_vlanprio = SSSNIC_GET_VLAN_PRIO(vlan_id, qos); + cur_vlanprio = + sss_nic_vf_info_vlan_prio(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + if (pre_vlanprio == cur_vlanprio) + return 0; + + return sss_nic_set_hw_vf_vlan(nic_dev, cur_vlanprio, vf_id, vlan_id, qos); +} +#endif + +#ifdef 
HAVE_VF_SPOOFCHK_CONFIGURE +static int sss_nic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, + bool set_spoofchk) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + bool cur_spoofchk; + u16 id = SSSNIC_OS_VF_ID_TO_HW(vf_id); + int ret; + + if (vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; + + cur_spoofchk = SSSNIC_GET_VF_SPOOFCHK(nic_dev->nic_io, vf_id); + if (set_spoofchk == cur_spoofchk) + return 0; + + ret = sss_nic_set_vf_spoofchk(nic_dev->nic_io, id, set_spoofchk); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to %s spoofchk control for VF %d\n", + set_spoofchk ? "enable" : "disable", vf_id); + return ret; + } + + nicif_info(nic_dev, drv, netdev, + "Success to %s spoofchk control for VF %d\n", + set_spoofchk ? "enable" : "disable", vf_id); + return 0; +} +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +static int sss_nic_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool new_trust) +{ + bool old_trust; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev) || vf_id > nic_dev->nic_io->max_vf_num) { + nicif_err(nic_dev, drv, netdev, "Invalid vf id, VF: %d pci_num_vf: %d max_vfs: %d\n", + vf_id, pci_num_vf(nic_dev->pdev), nic_dev->nic_io->max_vf_num); + return -EINVAL; + } + + old_trust = !!nic_dev->nic_io->vf_info_group[vf_id].trust; + /* Same old and new, no need to set, return success directly */ + if (new_trust == old_trust) + return 0; + + nic_dev->nic_io->vf_info_group[vf_id].trust = !!new_trust; + + nicif_info(nic_dev, drv, netdev, "Success to set VF %d trust %d to %d\n", + vf_id, old_trust, new_trust); + + return 0; +} +#endif + +static int sss_nic_ndo_get_vf_config(struct net_device *netdev, + int vf_id, struct ifla_vf_info *ifla_vf) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; + + sss_nic_get_vf_attribute(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), ifla_vf); + + return 0; +} + +int 
sss_nic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev)) { + nicif_err(nic_dev, drv, netdev, + "Invalid VF Id %d, pci_num_vf %d\n", vf_id, pci_num_vf(nic_dev->pdev)); + return -EINVAL; + } + + ret = sss_nic_set_vf_link_state(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), link); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set VF %d link state %d\n", vf_id, link); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set VF %d link state %d\n", + vf_id, link); + + return 0; +} + +static int sss_nic_check_vf_bw_param(const struct sss_nic_dev *nic_dev, + int vf_id, int min_rate, int max_rate) +{ + if (!SSSNIC_SUPPORT_RATE_LIMIT(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport to set vf rate limit.\n"); + return -EOPNOTSUPP; + } + + if (vf_id >= pci_num_vf(nic_dev->pdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid VF number %d\n", + pci_num_vf(nic_dev->pdev)); + return -EINVAL; + } + + if (max_rate < min_rate) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid rate, maximum rate %d minimum rate %d\n", + max_rate, min_rate); + return -EINVAL; + } + + if (max_rate < 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid maximum rate %d\n", max_rate); + return -EINVAL; + } + + return 0; +} + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +static int sss_nic_ndo_set_vf_rate(struct net_device *netdev, + int vf_id, int min_tx_rate, int max_tx_rate) +#else +static int sss_nic_ndo_set_vf_tx_rate(struct net_device *netdev, int vf_id, + int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ +#ifndef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + int min_tx_rate = 0; +#endif + u8 link_status; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, 
SPEED_50000, SPEED_100000, + SPEED_200000 + }; + int ret; + + ret = sss_nic_check_vf_bw_param(nic_dev, vf_id, min_tx_rate, max_tx_rate); + if (ret != 0) + return ret; + + ret = sss_nic_get_hw_link_state(nic_dev, &link_status); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to get link status when set vf tx rate.\n"); + return -EIO; + } + + if (link_status == 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set vf tx rate. the link state is down.\n"); + return -EINVAL; + } + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, + SSS_CHANNEL_NIC); + if (ret != 0 || port_info.speed >= SSSNIC_PORT_SPEED_UNKNOWN) + return -EIO; + + if (max_tx_rate > speeds[port_info.speed]) { + nicif_err(nic_dev, drv, netdev, "Invalid max_tx_rate, it must be in [0 - %u]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + ret = sss_nic_set_vf_tx_rate_limit(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), + (u32)min_tx_rate, (u32)max_tx_rate); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set VF %d max rate %d min rate %d%s\n", + vf_id, max_tx_rate, min_tx_rate, + ret == SSSNIC_TX_RATE_TABLE_FULL ? + ", tx rate profile is full" : ""); + return -EIO; + } + + nicif_info(nic_dev, drv, netdev, + "Success to set VF %d tx rate [%u-%u]\n", + vf_id, min_tx_rate, max_tx_rate); + + return 0; +} + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF +static int sss_nic_ndo_bpf(struct net_device *netdev, struct netdev_bpf *xdp) +#else +static int sss_nic_ndo_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_XDP_QUERY_PROG + if (xdp->command == XDP_QUERY_PROG) { + xdp->prog_id = nic_dev->xdp_prog ? 
nic_dev->xdp_prog->aux->id : 0; + return 0; + } +#endif + if (xdp->command == XDP_SETUP_PROG) + return sss_nic_setup_xdp(nic_dev, xdp); + + return -EINVAL; +} +#endif + +static const struct net_device_ops g_nic_netdev_ops = { + .ndo_open = sss_nic_ndo_open, + .ndo_stop = sss_nic_ndo_stop, + .ndo_start_xmit = sss_nic_ndo_start_xmit, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = sss_nic_ndo_get_stats64, +#else + .ndo_get_stats = sss_nic_ndo_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = sss_nic_ndo_tx_timeout, + .ndo_select_queue = sss_nic_ndo_select_queue, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = sss_nic_ndo_change_mtu, +#else + .ndo_change_mtu = sss_nic_ndo_change_mtu, +#endif + .ndo_set_mac_address = sss_nic_ndo_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = sss_nic_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sss_nic_ndo_vlan_rx_kill_vid, +#endif + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = sss_nic_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = sss_nic_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = sss_nic_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = sss_nic_ndo_set_vf_rate, +#else + .ndo_set_vf_tx_rate = sss_nic_ndo_set_vf_tx_rate, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = sss_nic_ndo_set_vf_spoofchk, +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = sss_nic_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = sss_nic_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = sss_nic_ndo_get_vf_config, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sss_nic_ndo_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = sss_nic_ndo_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF + .ndo_bpf = sss_nic_ndo_bpf, +#else + .ndo_xdp = sss_nic_ndo_xdp, +#endif +#endif + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = sss_nic_ndo_set_vf_link_state, +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = sss_nic_ndo_fix_features, + .ndo_set_features = sss_nic_ndo_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +static const struct net_device_ops g_nicvf_netdev_ops = { + .ndo_open = sss_nic_ndo_open, + .ndo_stop = sss_nic_ndo_stop, + .ndo_start_xmit = sss_nic_ndo_start_xmit, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = sss_nic_ndo_get_stats64, +#else + .ndo_get_stats = sss_nic_ndo_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = sss_nic_ndo_tx_timeout, + .ndo_select_queue = sss_nic_ndo_select_queue, + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be 
defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = sss_nic_ndo_change_mtu, +#else + .ndo_change_mtu = sss_nic_ndo_change_mtu, +#endif + .ndo_set_mac_address = sss_nic_ndo_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = sss_nic_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sss_nic_ndo_vlan_rx_kill_vid, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sss_nic_ndo_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = sss_nic_ndo_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF + .ndo_bpf = sss_nic_ndo_bpf, +#else + .ndo_xdp = sss_nic_ndo_xdp, +#endif +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = sss_nic_ndo_fix_features, + .ndo_set_features = sss_nic_ndo_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +void sss_nic_set_netdev_ops(struct sss_nic_dev *nic_dev) +{ + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + nic_dev->netdev->netdev_ops = &g_nic_netdev_ops; + else + nic_dev->netdev->netdev_ops = &g_nicvf_netdev_ops; +} + +bool sss_nic_is_netdev_ops_match(const struct net_device *netdev) +{ + return netdev->netdev_ops == &g_nic_netdev_ops || + netdev->netdev_ops == &g_nicvf_netdev_ops; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h new file mode 100644 index 0000000000000..941dcca091f0d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ 
+ +#ifndef SSS_NIC_NETDEV_OPS_H +#define SSS_NIC_NETDEV_OPS_H + +#include +#include + +#include "sss_nic_dev_define.h" + +void sss_nic_set_netdev_ops(struct sss_nic_dev *nic_dev); +bool sss_nic_is_netdev_ops_match(const struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c new file mode 100644 index 0000000000000..c4ad4fe7bcd76 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c @@ -0,0 +1,1074 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_rx.h" +#include "sss_nic_dcb.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_irq.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" + +#define IPV4_VERSION 4 +#define IPV6_VERSION 6 + +#define SSSNIC_LRO_DEF_COAL_PKT_SIZE 32 +#define SSSNIC_LRO_DEF_TIME_LIMIT 16 +#define SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT 100 + +#define SSSNIC_IPV6_ADDR_SIZE 4 +#define SSSNIC_PKT_INFO_SIZE 9 +#define SSSNIC_BIT_PER_TUPLE 32 + +#define SSSNIC_RSS_VAL(val, type) \ + (((type) == SSSNIC_RSS_ENGINE_TOEP) ? ntohl(val) : (val)) + +/* Low 16 bits are sport, High 16 bits are dport */ +#define SSSNIC_RSS_VAL_BY_L4_PORT(l4_hdr) \ + (((u32)ntohs(*((u16 *)(l4_hdr) + 1U)) << 16) | ntohs(*(u16 *)(l4_hdr))) + +#define SSSNIC_GET_SQ_ID_BY_RSS_INDIR(nic_dev, sq_id) \ + ((u16)(nic_dev)->rss_indir_tbl[(sq_id) & 0xFF]) + +#define SSSNIC_GET_DSCP_PRI_OFFSET 2 + +#define SSSNIC_FEATURE_OP_STR(op) ((op) ? 
"Enable" : "Disable") + +#define SSSNIC_VLAN_TCI_TO_COS_ID(skb) \ + ((skb)->vlan_tci >> VLAN_PRIO_SHIFT) + +#define SSSNIC_IPV4_DSF_TO_COS_ID(skb) \ + (ipv4_get_dsfield(ip_hdr(skb)) >> SSSNIC_GET_DSCP_PRI_OFFSET) + +#define SSSNIC_IPV6_DSF_TO_COS_ID(skb) \ + (ipv6_get_dsfield(ipv6_hdr(skb)) >> SSSNIC_GET_DSCP_PRI_OFFSET) + +static int sss_nic_alloc_qp_mgmt_info(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + u16 qp_num = qp_res->qp_num; + u32 len; + + len = sizeof(*qp_res->irq_cfg) * qp_num; + qp_res->irq_cfg = kzalloc(len, GFP_KERNEL); + if (!qp_res->irq_cfg) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc irq config\n"); + return -ENOMEM; + } + + len = sizeof(*qp_res->rq_res_group) * qp_num; + qp_res->rq_res_group = kzalloc(len, GFP_KERNEL); + if (!qp_res->rq_res_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rq res info\n"); + goto alloc_rq_res_err; + } + + len = sizeof(*qp_res->sq_res_group) * qp_num; + qp_res->sq_res_group = kzalloc(len, GFP_KERNEL); + if (!qp_res->sq_res_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq res info\n"); + goto alloc_sq_res_err; + } + + return 0; + +alloc_sq_res_err: + kfree(qp_res->rq_res_group); + qp_res->rq_res_group = NULL; + +alloc_rq_res_err: + kfree(qp_res->irq_cfg); + qp_res->irq_cfg = NULL; + + return -ENOMEM; +} + +static void sss_nic_free_qp_mgmt_info(struct sss_nic_qp_resource *qp_res) +{ + kfree(qp_res->irq_cfg); + kfree(qp_res->rq_res_group); + kfree(qp_res->sq_res_group); + qp_res->irq_cfg = NULL; + qp_res->sq_res_group = NULL; + qp_res->rq_res_group = NULL; +} + +static int sss_nic_alloc_qp_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + + ret = sss_nic_alloc_qp_mgmt_info(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc qp mgmt info\n"); + return ret; + } + + ret = sss_nic_alloc_rq_res_group(nic_dev, qp_res); + if (ret != 0) { + 
nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rq resource\n"); + goto alloc_rq_res_err; + } + + ret = sss_nic_alloc_sq_resource(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq resource\n"); + goto alloc_sq_res_err; + } + + return 0; + +alloc_sq_res_err: + sss_nic_free_rq_res_group(nic_dev, qp_res); + +alloc_rq_res_err: + sss_nic_free_qp_mgmt_info(qp_res); + + return ret; +} + +static void sss_nic_free_qp_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + sss_nic_free_rq_res_group(nic_dev, qp_res); + sss_nic_free_sq_resource(nic_dev, qp_res); + sss_nic_free_qp_mgmt_info(qp_res); +} + +static int sss_nic_init_qp_wq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + + sss_nic_init_all_sq(nic_dev, qp_res); + + ret = sss_nic_init_rq_desc_group(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to configure rq\n"); + return ret; + } + + return 0; +} + +static void sss_nic_config_dcb_qp_map(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 cos_num; + u16 qp_num = nic_dev->qp_res.qp_num; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + sss_nic_update_sq_cos(nic_dev, 0); + return; + } + + cos_num = sss_nic_get_user_cos_num(nic_dev); + sss_nic_update_qp_cos_map(nic_dev, cos_num); + /* For now, we don't support to change cos_num */ + if (cos_num > nic_dev->max_cos_num || cos_num > qp_num) { + nicif_err(nic_dev, drv, netdev, + "Invalid cos_num: %u, qp_num: %u or RSS is disable, disable DCB\n", + cos_num, qp_num); + nic_dev->qp_res.cos_num = 0; + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + /* if we can't enable rss or get enough qp_num, + * need to sync default configure to hw + */ + sss_nic_update_dcb_cfg(nic_dev); + } + + sss_nic_update_sq_cos(nic_dev, 1); +} + +static int sss_nic_update_dev_cfg(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev 
= nic_dev->netdev; + int ret; + + ret = sss_nic_set_dev_mtu(nic_dev, (u16)netdev->mtu); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set mtu\n"); + return ret; + } + + sss_nic_config_dcb_qp_map(nic_dev); + + ret = sss_nic_update_rx_rss(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update rx rss\n"); + return ret; + } + + return 0; +} + +static u16 sss_nic_realloc_qp_irq(struct sss_nic_dev *nic_dev, + u16 new_qp_irq_num) +{ + struct sss_irq_desc *qps_irq_info = nic_dev->irq_desc_group; + u16 act_irq_num; + u16 extra_irq_num; + u16 id; + u16 i; + + if (new_qp_irq_num > nic_dev->irq_desc_num) { + extra_irq_num = new_qp_irq_num - nic_dev->irq_desc_num; + act_irq_num = sss_alloc_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + &qps_irq_info[nic_dev->irq_desc_num], + extra_irq_num); + if (act_irq_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc irq\n"); + return nic_dev->irq_desc_num; + } + + nic_dev->irq_desc_num += act_irq_num; + } else if (new_qp_irq_num < nic_dev->irq_desc_num) { + extra_irq_num = nic_dev->irq_desc_num - new_qp_irq_num; + for (i = 0; i < extra_irq_num; i++) { + id = (nic_dev->irq_desc_num - i) - 1; + sss_free_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + qps_irq_info[id].irq_id); + qps_irq_info[id].irq_id = 0; + qps_irq_info[id].msix_id = 0; + } + nic_dev->irq_desc_num = new_qp_irq_num; + } + + return nic_dev->irq_desc_num; +} + +static void sss_nic_update_dcb_cos_map(struct sss_nic_dev *nic_dev, + const struct sss_nic_qp_resource *qp_res) +{ + u8 cos_num = qp_res->cos_num; + u16 max_qp = qp_res->qp_num; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + if (cos_num == 0 || cos_num > nic_dev->max_cos_num || cos_num > max_qp) + return; /* will disable DCB */ + + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); +} + +static void sss_nic_update_qp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + u16 alloc_irq_num; + u16 dst_irq_num; + u16 cur_irq_num; + struct 
net_device *netdev = nic_dev->netdev; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) + qp_res->qp_num = 1; + + sss_nic_update_dcb_cos_map(nic_dev, qp_res); + + if (nic_dev->irq_desc_num >= qp_res->qp_num) + goto out; + + cur_irq_num = nic_dev->irq_desc_num; + + alloc_irq_num = sss_nic_realloc_qp_irq(nic_dev, qp_res->qp_num); + if (alloc_irq_num < qp_res->qp_num) { + qp_res->qp_num = alloc_irq_num; + sss_nic_update_dcb_cos_map(nic_dev, qp_res); + nicif_warn(nic_dev, drv, netdev, + "Fail to alloc enough irq, qp_num: %u\n", + qp_res->qp_num); + + dst_irq_num = (u16)max_t(u16, cur_irq_num, qp_res->qp_num); + sss_nic_realloc_qp_irq(nic_dev, dst_irq_num); + } + +out: + nicif_info(nic_dev, drv, netdev, "Finally qp_num: %u\n", + qp_res->qp_num); +} + +static int sss_nic_init_qp_irq(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u32 irq_info_len = sizeof(*nic_dev->irq_desc_group) * nic_dev->max_qp_num; + + nic_dev->irq_desc_num = 0; + + if (irq_info_len == 0) { + nicif_err(nic_dev, drv, netdev, "Invalid irq_info_len\n"); + return -EINVAL; + } + + nic_dev->irq_desc_group = kzalloc(irq_info_len, GFP_KERNEL); + if (!nic_dev->irq_desc_group) + return -ENOMEM; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) + nic_dev->qp_res.qp_num = 1; + + if (nic_dev->irq_desc_num >= nic_dev->qp_res.qp_num) { + nicif_info(nic_dev, drv, netdev, "Finally qp_num: %u\n", + nic_dev->qp_res.qp_num); + return 0; + } + + nic_dev->irq_desc_num = sss_alloc_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + nic_dev->irq_desc_group, nic_dev->qp_res.qp_num); + if (nic_dev->irq_desc_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc qp irq\n"); + kfree(nic_dev->irq_desc_group); + nic_dev->irq_desc_group = NULL; + return -ENOMEM; + } + + if (nic_dev->irq_desc_num < nic_dev->qp_res.qp_num) { + nic_dev->qp_res.qp_num = nic_dev->irq_desc_num; + nicif_warn(nic_dev, drv, netdev, + "Fail to alloc enough irq, now qp_num: %u\n", + 
nic_dev->qp_res.qp_num); + } + + return 0; +} + +static void sss_nic_deinit_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 id; + + for (id = 0; id < nic_dev->irq_desc_num; id++) + sss_free_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + nic_dev->irq_desc_group[id].irq_id); + + kfree(nic_dev->irq_desc_group); + nic_dev->irq_desc_group = NULL; +} + +int sss_nic_dev_resource_init(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_init_qp_irq(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init irq info\n"); + return ret; + } + + sss_nic_update_dcb_cos_map(nic_dev, &nic_dev->qp_res); + + return 0; +} + +void sss_nic_dev_resource_deinit(struct sss_nic_dev *nic_dev) +{ + sss_nic_deinit_qp_irq(nic_dev); +} + +static int sss_nic_set_port_state(struct sss_nic_dev *nic_dev, bool state) +{ + int ret; + + down(&nic_dev->port_sem); + + ret = sss_nic_set_hw_port_state(nic_dev, state, SSS_CHANNEL_NIC); + + up(&nic_dev->port_sem); + + return ret; +} + +static void sss_nic_update_link_state(struct sss_nic_dev *nic_dev, + u8 link_state) +{ + struct net_device *netdev = nic_dev->netdev; + + if (nic_dev->link_status == link_state) + return; + + nic_dev->link_status = link_state; + + nicif_info(nic_dev, link, netdev, "Link is %s\n", + (link_state ? 
"up" : "down")); +} + +int sss_nic_qp_resource_init(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + qp_info->sq_depth = qp_res->sq_depth; + qp_info->rq_depth = qp_res->rq_depth; + qp_info->qp_num = qp_res->qp_num; + + ret = sss_nic_alloc_qp(nic_dev->nic_io, nic_dev->irq_desc_group, qp_info); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to alloc qp\n"); + return ret; + } + + ret = sss_nic_alloc_qp_resource(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to alloc qp resource\n"); + sss_nic_free_qp(nic_dev->nic_io, qp_info); + return ret; + } + + return 0; +} + +void sss_nic_qp_resource_deinit(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + mutex_lock(&nic_dev->qp_mutex); + sss_nic_free_qp_resource(nic_dev, qp_res); + sss_nic_free_qp(nic_dev->nic_io, qp_info); + mutex_unlock(&nic_dev->qp_mutex); +} + +int sss_nic_open_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_init_qp_info(nic_dev->nic_io, qp_info); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp info\n"); + return ret; + } + + ret = sss_nic_init_qp_wq(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp wq\n"); + goto cfg_qp_err; + } + + ret = sss_nic_request_qp_irq(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to request qp irq\n"); + goto init_qp_irq_err; + } + + ret = sss_nic_update_dev_cfg(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update configure\n"); + goto cfg_err; + } + + return 0; + +cfg_err: + sss_nic_release_qp_irq(nic_dev); + +init_qp_irq_err: +cfg_qp_err: + sss_nic_deinit_qp_info(nic_dev->nic_io, qp_info); + + return ret; +} + +void 
sss_nic_close_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info) +{ + sss_nic_reset_rx_rss(nic_dev->netdev); + sss_nic_release_qp_irq(nic_dev); + sss_nic_deinit_qp_info(nic_dev->nic_io, qp_info); +} + +int sss_nic_vport_up(struct sss_nic_dev *nic_dev) +{ + u16 func_id; + u8 link_state = 0; + int ret; + struct net_device *netdev = nic_dev->netdev; + + func_id = sss_get_global_func_id(nic_dev->hwdev); + ret = sss_nic_set_hw_vport_state(nic_dev, func_id, true, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set vport enable\n"); + goto set_vport_state_err; + } + + ret = sss_nic_set_port_state(nic_dev, true); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set port enable\n"); + goto set_port_state_err; + } + + netif_set_real_num_rx_queues(netdev, nic_dev->qp_res.qp_num); + netif_set_real_num_tx_queues(netdev, nic_dev->qp_res.qp_num); + netif_tx_wake_all_queues(netdev); + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) { + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (ret == 0 && link_state != 0) + netif_carrier_on(netdev); + } else { + link_state = true; + netif_carrier_on(netdev); + } + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + + sss_nic_update_link_state(nic_dev, link_state); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, link_state); + + return 0; + +set_port_state_err: + sss_nic_set_hw_vport_state(nic_dev, func_id, false, SSS_CHANNEL_NIC); + +set_vport_state_err: + sss_nic_clear_hw_qp_resource(nic_dev); + /* No packets will be sent to the host within 100 ms after the vport is disabled */ + msleep(SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT); + + return ret; +} + +void sss_nic_vport_down(struct sss_nic_dev *nic_dev) +{ + u16 func_id; + 
netif_carrier_off(nic_dev->netdev); + netif_tx_disable(nic_dev->netdev); + + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + cancel_delayed_work_sync(&nic_dev->moderation_task); + + if (sss_get_dev_present_flag(nic_dev->hwdev) == 0) + return; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev) == 0) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, 0); + + sss_nic_set_port_state(nic_dev, false); + + func_id = sss_get_global_func_id(nic_dev->hwdev); + sss_nic_set_hw_vport_state(nic_dev, func_id, false, SSS_CHANNEL_NIC); + + sss_nic_flush_all_sq(nic_dev); + msleep(SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT); + sss_nic_clear_hw_qp_resource(nic_dev); +} + +int sss_nic_update_channel_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res, + sss_nic_reopen_handler_t reopen_hdl, + const void *priv_data) +{ + struct net_device *netdev = nic_dev->netdev; + struct sss_nic_qp_info cur_qp_info = {0}; + struct sss_nic_qp_info new_qp_info = {0}; + int ret; + + sss_nic_update_qp_info(nic_dev, qp_res); + + ret = sss_nic_qp_resource_init(nic_dev, &new_qp_info, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to alloc channel resource\n"); + return ret; + } + + if (!SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_CHANGE_RES_INVALID)) { + sss_nic_vport_down(nic_dev); + sss_nic_close_dev(nic_dev, &cur_qp_info); + sss_nic_qp_resource_deinit(nic_dev, &cur_qp_info, + &nic_dev->qp_res); + } + + if (nic_dev->irq_desc_num > qp_res->qp_num) + sss_nic_realloc_qp_irq(nic_dev, qp_res->qp_num); + nic_dev->qp_res = *qp_res; + + if (reopen_hdl) + reopen_hdl(nic_dev, priv_data); + + ret = sss_nic_open_dev(nic_dev, &new_qp_info, qp_res); + if (ret != 0) + goto open_channel_err; + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto up_vport_err; + + clear_bit(SSSNIC_CHANGE_RES_INVALID, &nic_dev->flags); + nicif_info(nic_dev, drv, netdev, "Success to update channel settings\n"); + + return 0; + +up_vport_err: + sss_nic_close_dev(nic_dev, &new_qp_info); + 
+open_channel_err: + sss_nic_qp_resource_deinit(nic_dev, &new_qp_info, qp_res); + + return ret; +} + +static u32 sss_nic_calc_xor_rss(u8 *rss_tunple, u32 size) +{ + u32 count; + u32 hash_value; + + hash_value = rss_tunple[0]; + for (count = 1; count < size; count++) + hash_value = hash_value ^ rss_tunple[count]; + + return hash_value; +} + +static u32 sss_nic_calc_toep_rss(const u32 *rss_tunple, u32 size, const u32 *rss_key) +{ + u32 i; + u32 j; + u32 rss = 0; + u32 tunple; + + for (i = 0; i < size; i++) { + for (j = 0; j < SSSNIC_BIT_PER_TUPLE; j++) { + tunple = rss_tunple[i] & + ((u32)1 << (u32)((SSSNIC_BIT_PER_TUPLE - 1) - j)); + if (tunple != 0) + rss ^= (rss_key[i] << j) | + ((u32)((u64)rss_key[i + 1] >> (SSSNIC_BIT_PER_TUPLE - j))); + } + } + + return rss; +} + +static u8 sss_nic_parse_ipv6_info(struct sk_buff *skb, u8 hash_engine, + u32 *rss_tunple, u32 *size) +{ + struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); + u32 *daddr = (u32 *)&ipv6hdr->daddr; + u32 *saddr = (u32 *)&ipv6hdr->saddr; + u32 offset; + u8 i; + + for (i = 0; i < SSSNIC_IPV6_ADDR_SIZE; i++) { + rss_tunple[i] = SSSNIC_RSS_VAL(daddr[i], hash_engine); + /* the saddr words are stored SSSNIC_IPV6_ADDR_SIZE entries after the daddr words */ + offset = (u32)(i + SSSNIC_IPV6_ADDR_SIZE); + rss_tunple[offset] = SSSNIC_RSS_VAL(saddr[i], hash_engine); + } + *size = SSSNIC_IPV6_ADDR_SIZE << 1; + + return (skb_network_header(skb) + sizeof(*ipv6hdr) == + skb_transport_header(skb)) ? 
ipv6hdr->nexthdr : 0; +} + +u16 sss_nic_select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int max_sq_num) +{ + struct iphdr *iphdr = NULL; + unsigned char *l4_hdr = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(dev); + struct sss_nic_rss_type rss_type = nic_dev->rss_type; + u8 l4_proto; + u32 sq_id = 0; + u32 cnt = 0; + u8 hash_engine = nic_dev->rss_hash_engine; + u32 rss_tunple[SSSNIC_PKT_INFO_SIZE] = {0}; + bool convert_flag; + + if (skb_rx_queue_recorded(skb)) { + sq_id = skb_get_rx_queue(skb); + if (unlikely(sq_id >= max_sq_num)) + sq_id %= max_sq_num; + + return (u16)sq_id; + } + + iphdr = ip_hdr(skb); + + if (iphdr->version != IPV4_VERSION && iphdr->version != IPV6_VERSION) + return (u16)sq_id; + + if (iphdr->version == IPV4_VERSION) { + rss_tunple[cnt++] = SSSNIC_RSS_VAL(iphdr->daddr, hash_engine); + rss_tunple[cnt++] = SSSNIC_RSS_VAL(iphdr->saddr, hash_engine); + l4_proto = iphdr->protocol; + convert_flag = ((l4_proto == IPPROTO_UDP) && rss_type.udp_ipv4) || + ((l4_proto == IPPROTO_TCP) && rss_type.tcp_ipv4); + } else { + l4_proto = sss_nic_parse_ipv6_info(skb, hash_engine, (u32 *)rss_tunple, &cnt); + convert_flag = ((l4_proto == IPPROTO_UDP) && rss_type.udp_ipv6) || + ((l4_proto == IPPROTO_TCP) && rss_type.tcp_ipv6); + } + + if (convert_flag) { + l4_hdr = skb_transport_header(skb); + rss_tunple[cnt++] = SSSNIC_RSS_VAL_BY_L4_PORT(l4_hdr); + } + + if (hash_engine == SSSNIC_RSS_ENGINE_TOEP) + sq_id = sss_nic_calc_toep_rss((u32 *)rss_tunple, cnt, nic_dev->rss_key_big); + else + sq_id = sss_nic_calc_xor_rss((u8 *)rss_tunple, cnt * (u32)sizeof(cnt)); + + return SSSNIC_GET_SQ_ID_BY_RSS_INDIR(nic_dev, sq_id); +} + +static inline u8 sss_nic_get_cos_by_dscp(struct sss_nic_dev *nic_dev, struct sk_buff *skb) +{ + int dscp_cp; + + dscp_cp = (skb->protocol == htons(ETH_P_IP)) ? SSSNIC_IPV4_DSF_TO_COS_ID(skb) : + (skb->protocol == htons(ETH_P_IPV6) ? 
SSSNIC_IPV6_DSF_TO_COS_ID(skb) : + nic_dev->hw_dcb_cfg.default_cos); + return nic_dev->hw_dcb_cfg.dscp2cos[dscp_cp]; +} + +static inline u8 sss_nic_get_cos_by_pcp(struct sss_nic_dev *nic_dev, + struct sk_buff *skb) +{ + return skb->vlan_tci ? + nic_dev->hw_dcb_cfg.pcp2cos[SSSNIC_VLAN_TCI_TO_COS_ID(skb)] : + nic_dev->hw_dcb_cfg.default_cos; +} + +u8 sss_nic_get_cos(struct sss_nic_dev *nic_dev, struct sk_buff *skb) +{ + if (nic_dev->hw_dcb_cfg.trust == DCB_PCP) + return sss_nic_get_cos_by_pcp(nic_dev, skb); + + return sss_nic_get_cos_by_dscp(nic_dev, skb); +} + +#ifdef NEED_VLAN_RESTORE +static int sss_nic_restore_vlan(struct sss_nic_dev *nic_dev) +{ + int ret = 0; +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + u16 i; + struct net_device *netdev = nic_dev->netdev; + struct net_device *vlandev = NULL; + + rcu_read_lock(); + for (i = 0; i < VLAN_N_VID; i++) { +#ifdef HAVE_VLAN_FIND_DEV_DEEP_RCU + vlandev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), i); +#else + vlandev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), i); +#endif + + if (!vlandev && SSSNIC_TEST_VLAN_BIT(nic_dev, i) != 0) { + ret = netdev->netdev_ops->ndo_vlan_rx_kill_vid(netdev, + htons(ETH_P_8021Q), i); + if (ret != 0) { + sss_nic_err(nic_dev, drv, + "Fail to delete vlan %u, ret: %d\n", i, ret); + break; + } + } else if (vlandev && SSSNIC_TEST_VLAN_BIT(nic_dev, i) == 0) { + ret = netdev->netdev_ops->ndo_vlan_rx_add_vid(netdev, + htons(ETH_P_8021Q), i); + if (ret != 0) { + sss_nic_err(nic_dev, drv, + "Fail to restore vlan %u, ret: %d\n", i, ret); + break; + } + } + } + rcu_read_unlock(); +#endif +#endif + return ret; +} +#endif + +static int sss_nic_set_lro_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature, netdev_features_t *fail_feature) +{ + int ret; + bool change = !!((new_feature ^ old_feature) & NETIF_F_LRO); + bool en = !!(new_feature & 
NETIF_F_LRO); + + if (!change) + return 0; + +#ifdef HAVE_XDP_SUPPORT + if (en && SSSNIC_IS_XDP_ENABLE(nic_dev)) { + *fail_feature |= NETIF_F_LRO; + sss_nic_err(nic_dev, drv, "Fail to enable LRO when xdp is enable\n"); + return -EINVAL; + } +#endif + ret = sss_nic_set_rx_lro_state(nic_dev, en, + SSSNIC_LRO_DEF_TIME_LIMIT, SSSNIC_LRO_DEF_COAL_PKT_SIZE); + if (ret != 0) { + *fail_feature |= NETIF_F_LRO; + sss_nic_err(nic_dev, drv, "Fail to set lro %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set lro %s\n", SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +static int sss_nic_set_rx_cvlan_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature, + netdev_features_t *fail_feature) +{ + int ret; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; +#else + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_RX; +#endif + bool change = !!((old_feature ^ new_feature) & vlan_feature); + bool en = !!(new_feature & vlan_feature); + + if (!change) + return 0; + + ret = sss_nic_set_rx_vlan_offload(nic_dev, en); + if (ret != 0) { + *fail_feature |= vlan_feature; + sss_nic_err(nic_dev, drv, "Fail to set %s rx vlan offload\n", + SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set rx vlan offload %s\n", + SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +static int sss_nic_set_vlan_filter_feature(struct sss_nic_dev *nic_dev, + netdev_features_t old_feature, + netdev_features_t new_feature, + netdev_features_t *fail_feature) +{ + int ret = 0; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + netdev_features_t filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t filter_feature = NETIF_F_HW_VLAN_FILTER; +#endif + bool change = !!((new_feature ^ old_feature) & filter_feature); + bool en = !!(new_feature & filter_feature); + + if (!change) + return 0; + +#ifdef 
NEED_VLAN_RESTORE + if (en) { + ret = sss_nic_restore_vlan(nic_dev); + if (ret != 0) { + *fail_feature |= filter_feature; + sss_nic_err(nic_dev, drv, + "Fail to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + } +#endif + ret = sss_nic_set_vlan_fliter(nic_dev, en); + if (ret != 0) { + *fail_feature |= filter_feature; + sss_nic_err(nic_dev, drv, + "Fail to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +int sss_nic_set_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature) +{ + u32 ret = 0; + netdev_features_t fail_feature = 0; + + ret |= (u32)sss_nic_set_lro_feature(nic_dev, old_feature, new_feature, &fail_feature); + ret |= (u32)sss_nic_set_rx_cvlan_feature(nic_dev, old_feature, new_feature, &fail_feature); + ret |= (u32)sss_nic_set_vlan_filter_feature(nic_dev, old_feature, + new_feature, &fail_feature); + if (ret != 0) { + nic_dev->netdev->features = new_feature ^ fail_feature; + return -EIO; + } + + return 0; +} + +int sss_nic_enable_netdev_feature(struct sss_nic_dev *nic_dev) +{ + /* enable all feature in netdev->features */ + return sss_nic_set_feature(nic_dev, ~nic_dev->netdev->features, nic_dev->netdev->features); +} + +#ifdef IFLA_VF_MAX +int sss_nic_set_hw_vf_vlan(struct sss_nic_dev *nic_dev, + u16 cur_vlanprio, int vf_id, u16 vlan_id, u8 qos) +{ + int ret = 0; + u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; + + if (vlan_id == 0 && qos == 0) { + ret = sss_nic_destroy_vf_vlan(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + } else { + if (cur_vlanprio != 0) { + ret = sss_nic_destroy_vf_vlan(nic_dev->nic_io, + SSSNIC_OS_VF_ID_TO_HW(vf_id)); + if (ret != 0) + return ret; + } + ret = sss_nic_create_vf_vlan(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id), + vlan_id, qos); + } + + ret = sss_nic_update_mac_vlan(nic_dev, old_vlan, vlan_id, 
SSSNIC_OS_VF_ID_TO_HW(vf_id)); + return ret; +} +#endif + +#ifdef HAVE_XDP_SUPPORT +static void sss_nic_put_prog(struct sss_nic_dev *nic_dev, struct bpf_prog *prog) +{ + int i; + struct bpf_prog *pre_prog = NULL; + + pre_prog = xchg(&nic_dev->xdp_prog, prog); + for (i = 0; i < nic_dev->max_qp_num; i++) + xchg(&nic_dev->rq_desc_group[i].xdp_prog, nic_dev->xdp_prog); + + if (pre_prog) + bpf_prog_put(pre_prog); +} + +#ifdef HAVE_NDO_BPF_NETDEV_BPF +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_bpf *xdp) +#else +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_xdp *xdp) +#endif +{ + struct net_device *netdev = nic_dev->netdev; + struct netlink_ext_ack *extack = xdp->extack; + int xdp_max_mtu = SSSNIC_XDP_MAX_MTU(nic_dev); + + if (netdev->mtu > xdp_max_mtu) { + NL_SET_ERR_MSG_MOD(extack, "Invalid mtu for loading xdp program"); + nicif_err(nic_dev, drv, netdev, + "Fail to setup xdp, netdev mtu %d is larger than xdp allowed mtu %d\n", + netdev->mtu, xdp_max_mtu); + + return -EINVAL; + } + + if ((netdev->features & NETIF_F_LRO) != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Fail to setup xdp when LRO is on\n"); + nicif_err(nic_dev, drv, netdev, + "Fail to setup xdp when LRO is on\n"); + + return -EINVAL; + } + + sss_nic_put_prog(nic_dev, xdp->prog); + + return 0; +} +#endif + +void sss_nic_get_tx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_sq_stats *sq_stats = NULL; + unsigned int start; + int qid; + + stats->tx_bytes = 0; + stats->tx_packets = 0; + stats->tx_dropped = 0; + + if (!nic_dev->sq_desc_group) + return; + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_stats = &sq_desc->stats; + do { + start = u64_stats_fetch_begin(&sq_stats->stats_sync); + stats->tx_dropped += sq_stats->tx_dropped; + stats->tx_packets += sq_stats->tx_packets; + stats->tx_bytes += sq_stats->tx_bytes; + } while 
(u64_stats_fetch_retry(&sq_stats->stats_sync, start)); + } +} + +void sss_nic_get_rx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + struct sss_nic_rq_stats *rq_stats = NULL; + unsigned int start; + int qid; + + stats->rx_errors = 0; + stats->rx_dropped = 0; + stats->rx_packets = 0; + stats->rx_bytes = 0; + + if (!nic_dev->rq_desc_group) + return; + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + rq_stats = &rq_desc->stats; + do { + start = u64_stats_fetch_begin(&rq_stats->stats_sync); + stats->rx_dropped += rq_stats->rx_dropped; + stats->rx_errors += rq_stats->csum_errors + + rq_stats->other_errors; + stats->rx_packets += rq_stats->rx_packets; + stats->rx_bytes += rq_stats->rx_bytes; + } while (u64_stats_fetch_retry(&rq_stats->stats_sync, start)); + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h new file mode 100644 index 0000000000000..bb8bfce43c01c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NETDEV_OPS_API_H +#define SSS_NIC_NETDEV_OPS_API_H + +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_io_define.h" + +typedef void (*sss_nic_reopen_handler_t)(struct sss_nic_dev *nic_dev, + const void *priv_data); + +int sss_nic_dev_resource_init(struct sss_nic_dev *nic_dev); +void sss_nic_dev_resource_deinit(struct sss_nic_dev *nic_dev); +int sss_nic_qp_resource_init(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +void sss_nic_qp_resource_deinit(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + 
struct sss_nic_qp_resource *qp_res); +int sss_nic_open_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +void sss_nic_close_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info); +int sss_nic_vport_up(struct sss_nic_dev *nic_dev); +void sss_nic_vport_down(struct sss_nic_dev *nic_dev); +int sss_nic_update_channel_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res, + sss_nic_reopen_handler_t reopen_handler, + const void *priv_data); +u16 sss_nic_select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int num_tx_queues); +u8 sss_nic_get_cos(struct sss_nic_dev *nic_dev, struct sk_buff *skb); +int sss_nic_set_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature); + +int sss_nic_enable_netdev_feature(struct sss_nic_dev *nic_dev); + +#ifdef IFLA_VF_MAX +int sss_nic_set_hw_vf_vlan(struct sss_nic_dev *nic_dev, + u16 cur_vlanprio, int vf, u16 vlan, u8 qos); +#endif + +#ifdef HAVE_XDP_SUPPORT +#define SSSNIC_XDP_MAX_MTU(nic_dev) ((nic_dev)->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)) +#ifdef HAVE_NDO_BPF_NETDEV_BPF +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_bpf *xdp); +#else +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_xdp *xdp); +#endif +#endif +void sss_nic_get_tx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats); +void sss_nic_get_rx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats); + +u32 sss_nic_get_io_stats_size(const struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c new file mode 100644 index 0000000000000..341a37bbfb674 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c @@ -0,0 +1,919 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ 
+ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_MAX_ETHTOOL_NTUPLE_RULE BIT(9) + +#define SSSNIC_TCAM_IP_TYPE_MASK 0x1 +#define SSSNIC_TCAM_TUNNEL_TYPE_MASK 0xF +#define SSSNIC_TCAM_FUNC_ID_MASK 0x7FFF + +#define SSSNIC_TCAM_IPV4_TYPE 0 +#define SSSNIC_TCAM_IPV6_TYPE 1 + +#ifndef UNSUPPORT_NTUPLE_IPV6 +enum sss_nic_ipv6_parse_res { + SSSNIC_IPV6_MASK_INVALID, + SSSNIC_IPV6_MASK_ALL_MASK, + SSSNIC_IPV6_MASK_ALL_ZERO, +}; + +enum sss_nic_ipv6_index { + SSSNIC_IPV6_ID0, + SSSNIC_IPV6_ID1, + SSSNIC_IPV6_ID2, + SSSNIC_IPV6_ID3, +}; +#endif + +struct sss_nic_ethtool_rx_flow_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; +}; + +static void sss_nic_calculate_tcam_key_y(u8 *key_y, const u8 *src_input, const u8 *mask, u8 len) +{ + u8 id; + + for (id = 0; id < len; id++) + key_y[id] = src_input[id] & mask[id]; +} + +static void sss_nic_calculate_tcam_key_x(u8 *key_x, const u8 *key_y, const u8 *mask, u8 len) +{ + u8 id; + + for (id = 0; id < len; id++) + key_x[id] = key_y[id] ^ mask[id]; +} + +static void sss_nic_calculate_tcam_key(struct sss_nic_tcam_key_tag *tcam_key, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + sss_nic_calculate_tcam_key_y(fdir_tcam_rule->key.key_y, + (u8 *)(&tcam_key->key_info_ipv4), + (u8 *)(&tcam_key->key_mask_ipv4), SSSNIC_TCAM_FLOW_KEY_SIZE); + sss_nic_calculate_tcam_key_x(fdir_tcam_rule->key.key_x, fdir_tcam_rule->key.key_y, + (u8 *)(&tcam_key->key_mask_ipv4), SSSNIC_TCAM_FLOW_KEY_SIZE); +} + +static int sss_nic_parse_ipv4_base(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + u32 temp; + struct ethtool_tcpip4_spec *val = 
&flow_spec->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *mask = &flow_spec->m_u.tcp_ip4_spec;

	/* The TCAM key can only express "match exactly" (all-ones mask) or
	 * "don't care" (all-zero mask) per IPv4 address; anything else is
	 * rejected. Addresses are split into high/low 16-bit halves for the
	 * hardware key layout.
	 */
	if (mask->ip4src == U32_MAX) {
		temp = ntohl(val->ip4src);
		tcam_key->key_info_ipv4.sipv4_l = low_16_bits(temp);
		tcam_key->key_info_ipv4.sipv4_h = high_16_bits(temp);

		tcam_key->key_mask_ipv4.sipv4_l = U16_MAX;
		tcam_key->key_mask_ipv4.sipv4_h = U16_MAX;

	} else if (mask->ip4src != 0) {
		nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid source ip mask\n");
		return -EINVAL;
	}

	if (mask->ip4dst == U32_MAX) {
		temp = ntohl(val->ip4dst);
		tcam_key->key_info_ipv4.dipv4_l = low_16_bits(temp);
		tcam_key->key_info_ipv4.dipv4_h = high_16_bits(temp);

		tcam_key->key_mask_ipv4.dipv4_l = U16_MAX;
		tcam_key->key_mask_ipv4.dipv4_h = U16_MAX;

	} else if (mask->ip4dst != 0) {
		nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid destination ip mask\n");
		return -EINVAL;
	}

	tcam_key->key_mask_ipv4.ip_type = SSSNIC_TCAM_IP_TYPE_MASK;
	tcam_key->key_info_ipv4.ip_type = SSSNIC_TCAM_IPV4_TYPE;

	/* Scope the rule to this PF/VF so functions cannot match each
	 * other's traffic.
	 */
	tcam_key->key_info_ipv4.func_id = sss_get_global_func_id(nic_dev->hwdev);
	tcam_key->key_mask_ipv4.func_id = SSSNIC_TCAM_FUNC_ID_MASK;

	return 0;
}

/* Build a TCAM key for a TCP/UDP-over-IPv4 ethtool flow spec: IPv4
 * addresses (via sss_nic_parse_ipv4_base()) plus L4 ports and IP proto.
 * Returns 0 on success or -EINVAL on an unsupported mask.
 */
static int sss_nic_init_ipv4_l4_fdir_tcam(struct sss_nic_dev *nic_dev,
					  struct ethtool_rx_flow_spec *flow_spec,
					  struct sss_nic_tcam_key_tag *tcam_key)
{
	struct ethtool_tcpip4_spec *l4_val = &flow_spec->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_mask = &flow_spec->m_u.tcp_ip4_spec;
	int ret;

	ret = sss_nic_parse_ipv4_base(nic_dev, flow_spec, tcam_key);
	if (ret != 0)
		return ret;

	/* NOTE(review): the port values are byte-swapped (ntohs) but the
	 * port masks are stored as-is; this is only equivalent for all-ones
	 * or all-zero masks - confirm against the TCAM key byte layout.
	 */
	tcam_key->key_info_ipv4.dport = ntohs(l4_val->pdst);
	tcam_key->key_mask_ipv4.dport = l4_mask->pdst;

	tcam_key->key_info_ipv4.sport = ntohs(l4_val->psrc);
	tcam_key->key_mask_ipv4.sport = l4_mask->psrc;

	/* Protocol is implied by the ethtool flow type and always matched. */
	tcam_key->key_mask_ipv4.ip_proto = U8_MAX;
	if (flow_spec->flow_type == TCP_V4_FLOW)
		tcam_key->key_info_ipv4.ip_proto = IPPROTO_TCP;
	else
		tcam_key->key_info_ipv4.ip_proto = IPPROTO_UDP;

	return 0;
}

/* Build a TCAM key for a generic (IP_USER_FLOW) IPv4 spec: addresses
 * plus a user-supplied protocol number and mask.
 */
static int sss_nic_init_ipv4_fdir_tcam(struct sss_nic_dev *nic_dev,
				       struct ethtool_rx_flow_spec *flow_spec,
				       struct sss_nic_tcam_key_tag *tcam_key)
{
	int ret;
	struct ethtool_usrip4_spec *l3_val = &flow_spec->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_mask = &flow_spec->m_u.usr_ip4_spec;

	ret = sss_nic_parse_ipv4_base(nic_dev, flow_spec, tcam_key);
	if (ret != 0)
		return ret;

	tcam_key->key_mask_ipv4.ip_proto = l3_mask->proto;
	tcam_key->key_info_ipv4.ip_proto = l3_val->proto;

	return 0;
}

#ifndef UNSUPPORT_NTUPLE_IPV6
/* Classify a 128-bit IPv6 mask (four u32 words) as all-zero, all-ones,
 * or invalid - partial IPv6 masks are not supported by the TCAM key.
 */
static int sss_nic_parse_ipv6_mask(const u32 *ipv6_mask)
{
	if (ipv6_mask[SSSNIC_IPV6_ID0] == 0 && ipv6_mask[SSSNIC_IPV6_ID1] == 0 &&
	    ipv6_mask[SSSNIC_IPV6_ID2] == 0 && ipv6_mask[SSSNIC_IPV6_ID3] == 0)
		return SSSNIC_IPV6_MASK_ALL_ZERO;

	if (ipv6_mask[SSSNIC_IPV6_ID0] == U32_MAX &&
	    ipv6_mask[SSSNIC_IPV6_ID1] == U32_MAX &&
	    ipv6_mask[SSSNIC_IPV6_ID2] == U32_MAX && ipv6_mask[SSSNIC_IPV6_ID3] == U32_MAX)
		return SSSNIC_IPV6_MASK_ALL_MASK;

	return SSSNIC_IPV6_MASK_INVALID;
}

/* Fill the IPv6 part of the TCAM key (src/dst addresses split into
 * eight 16-bit key words each, ip_type, func_id). Returns -EINVAL on a
 * partial (unsupported) IPv6 mask.
 */
static int sss_nic_parse_ipv6_base(struct sss_nic_dev *nic_dev,
				   struct ethtool_rx_flow_spec *flow_spec,
				   struct sss_nic_tcam_key_tag *tcam_key)
{
	int parse_res;
	u32 temp;
	struct ethtool_tcpip6_spec *val = &flow_spec->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *mask = &flow_spec->m_u.tcp_ip6_spec;

	parse_res = sss_nic_parse_ipv6_mask((u32 *)mask->ip6src);
	if (parse_res == SSSNIC_IPV6_MASK_ALL_MASK) {
		tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX;
		tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX;

		/* Each u32 word becomes two 16-bit key halves, high first. */
		temp = ntohl(val->ip6src[SSSNIC_IPV6_ID0]);
		tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp);
		tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp);
		temp = ntohl(val->ip6src[SSSNIC_IPV6_ID1]);
		tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp);
		tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp);
		temp = ntohl(val->ip6src[SSSNIC_IPV6_ID2]);
		tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp);
		tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp);
		temp = ntohl(val->ip6src[SSSNIC_IPV6_ID3]);
		tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp);
		tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp);

	} else if (parse_res == SSSNIC_IPV6_MASK_INVALID) {
		nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid src_ipv6 mask\n");
		return -EINVAL;
	}

	parse_res = sss_nic_parse_ipv6_mask((u32 *)mask->ip6dst);
	if (parse_res == SSSNIC_IPV6_MASK_ALL_MASK) {
		tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX;
		tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX;

		temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID0]);
		tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp);
		tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp);
		temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID1]);
		tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp);
		tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp);
		temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID2]);
		tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp);
		tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp);
		temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID3]);
		tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp);
		tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp);

	} else if (parse_res == SSSNIC_IPV6_MASK_INVALID) {
		nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid dst_ipv6 mask\n");
		return -EINVAL;
	}

	tcam_key->key_mask_ipv6.ip_type = SSSNIC_TCAM_IP_TYPE_MASK;
	tcam_key->key_info_ipv6.ip_type = SSSNIC_TCAM_IPV6_TYPE;

	tcam_key->key_info_ipv6.func_id =
		sss_get_global_func_id(nic_dev->hwdev);
	tcam_key->key_mask_ipv6.func_id = SSSNIC_TCAM_FUNC_ID_MASK;

	return 0;
}

/* Build a TCAM key for a TCP/UDP-over-IPv6 flow spec: addresses plus
 * L4 ports and the IPv6 next-header value.
 */
static int sss_nic_init_ipv6_l4_fdir_tcam(struct sss_nic_dev *nic_dev,
					  struct ethtool_rx_flow_spec *flow_spec,
					  struct sss_nic_tcam_key_tag *tcam_key)
{
	int ret;
	struct ethtool_tcpip6_spec *l4_val = &flow_spec->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_mask = &flow_spec->m_u.tcp_ip6_spec;

	ret = sss_nic_parse_ipv6_base(nic_dev, flow_spec, tcam_key);
	if (ret != 0)
		return ret;

	/* NOTE(review): same value/mask byte-order asymmetry as the IPv4
	 * L4 path - see sss_nic_init_ipv4_l4_fdir_tcam().
	 */
	tcam_key->key_mask_ipv6.dport = l4_mask->pdst;
	tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst);

	tcam_key->key_mask_ipv6.sport = l4_mask->psrc;
	tcam_key->key_info_ipv6.sport = ntohs(l4_val->psrc);

	tcam_key->key_mask_ipv6.ip_proto = U8_MAX;
	if (flow_spec->flow_type == TCP_V6_FLOW)
		tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP;
	else
		tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP;

	return 0;
}

/* Build a TCAM key for a generic (IPV6_USER_FLOW) spec: addresses plus
 * a user-supplied L4 protocol number and mask.
 */
static int sss_nic_init_ipv6_fdir_tcam(struct sss_nic_dev *nic_dev,
				       struct ethtool_rx_flow_spec *flow_spec,
				       struct sss_nic_tcam_key_tag *tcam_key)
{
	int ret;
	struct ethtool_usrip6_spec *l3_mask = &flow_spec->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val = &flow_spec->h_u.usr_ip6_spec;

	ret = sss_nic_parse_ipv6_base(nic_dev, flow_spec, tcam_key);
	if (ret != 0)
		return ret;

	tcam_key->key_mask_ipv6.ip_proto = l3_mask->l4_proto;
	tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto;

	return 0;
}
#endif

/* Translate an ethtool flow spec into a TCAM key and rule config:
 * dispatch on flow type, then fill the common fields (tunnel type,
 * destination queue) and pack the key via sss_nic_calculate_tcam_key().
 * Returns 0, a negative errno from the parser, or -EOPNOTSUPP for an
 * unsupported flow type.
 */
static int sss_nic_init_fdir_tcam_info(struct sss_nic_dev *nic_dev,
				       struct ethtool_rx_flow_spec *flow_spec,
				       struct sss_nic_tcam_key_tag *tcam_key,
				       struct sss_nic_tcam_rule_cfg *fdir_tcam_rule)
{
	int ret;

	switch (flow_spec->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = sss_nic_init_ipv4_l4_fdir_tcam(nic_dev, flow_spec, tcam_key);
		if (ret != 0)
			return ret;
		break;
	case IP_USER_FLOW:
		ret = sss_nic_init_ipv4_fdir_tcam(nic_dev, flow_spec, tcam_key);
		if (ret != 0)
			return ret;
		break;
#ifndef UNSUPPORT_NTUPLE_IPV6
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = sss_nic_init_ipv6_l4_fdir_tcam(nic_dev, flow_spec, tcam_key);
		if (ret != 0)
			return ret;
		break;
	case IPV6_USER_FLOW:
		ret = sss_nic_init_ipv6_fdir_tcam(nic_dev, flow_spec, tcam_key);
		if (ret != 0)
			return ret;
		break;
#endif
	default:
		return -EOPNOTSUPP;
	}

	/* Only non-tunnelled packets are matched. */
	tcam_key->key_mask_ipv4.tunnel_type = SSSNIC_TCAM_TUNNEL_TYPE_MASK;
	tcam_key->key_info_ipv4.tunnel_type = 0;

	fdir_tcam_rule->data.qid = (u32)flow_spec->ring_cookie;
	sss_nic_calculate_tcam_key(tcam_key, fdir_tcam_rule);

	return 0;
}

/* Free every software TCAM filter entry. Does not touch hardware. */
void sss_nic_flush_tcam_list(struct sss_nic_tcam_info *tcam_info)
{
	struct sss_nic_tcam_filter *filter_tmp = NULL;
	struct sss_nic_tcam_filter *filter = NULL;
	struct list_head *tcam_list = &tcam_info->tcam_list;

	if (list_empty(tcam_list))
		return;

	list_for_each_entry_safe(filter, filter_tmp,
				 tcam_list, tcam_filter_list) {
		list_del(&filter->tcam_filter_list);
		kfree(filter);
	}
}

/* Free every dynamically allocated TCAM block node. */
void sss_nic_flush_tcam_node_list(struct sss_nic_tcam_info *tcam_info)
{
	struct sss_nic_tcam_node *block_tmp = NULL;
	struct sss_nic_tcam_node *block = NULL;
	struct list_head *dynamic_list =
		&tcam_info->tcam_node_info.tcam_node_list;

	if (list_empty(dynamic_list))
		return;

	list_for_each_entry_safe(block, block_tmp, dynamic_list, block_list) {
		list_del(&block->block_list);
		kfree(block);
	}
}

/* Free every saved ethtool rx flow rule. */
void sss_nic_flush_rx_flow_rule(struct sss_nic_rx_rule *rx_flow_rule)
{
	struct sss_nic_ethtool_rx_flow_rule *rule_tmp = NULL;
	struct sss_nic_ethtool_rx_flow_rule *rule = NULL;
	struct list_head *rule_list = &rx_flow_rule->rule_list;

	if (list_empty(rule_list))
		return;

	list_for_each_entry_safe(rule, rule_tmp, rule_list, list) {
		list_del(&rule->list);
		kfree(rule);
	}
}
+ +void sss_nic_flush_tcam(struct sss_nic_dev *nic_dev) +{ + sss_nic_flush_tcam_list(&nic_dev->tcam_info); + + sss_nic_flush_tcam_node_list(&nic_dev->tcam_info); + + sss_nic_flush_rx_flow_rule(&nic_dev->rx_rule); + + if (SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + sss_nic_flush_tcam_rule(nic_dev); + sss_nic_set_fdir_tcam_rule_filter(nic_dev, false); + } +} + +static struct sss_nic_tcam_node * +sss_nic_alloc_tcam_block_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_info *nic_tcam_info, + u16 block_id) +{ + struct sss_nic_tcam_node *dynamic_block_ptr = NULL; + + dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); + if (!dynamic_block_ptr) + return NULL; + + dynamic_block_ptr->block_id = block_id; + list_add_tail(&dynamic_block_ptr->block_list, + &nic_tcam_info->tcam_node_info.tcam_node_list); + + nic_tcam_info->tcam_node_info.block_cnt++; + + return dynamic_block_ptr; +} + +static void sss_nic_free_tcam_block_resource(struct sss_nic_tcam_info *nic_tcam_info, + struct sss_nic_tcam_node *block_ptr) +{ + if (!block_ptr) + return; + + list_del(&block_ptr->block_list); + kfree(block_ptr); + + nic_tcam_info->tcam_node_info.block_cnt--; +} + +static struct sss_nic_tcam_node * +sss_nic_dynamic_lookup_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule, + const struct sss_nic_tcam_info *tcam_info, + struct sss_nic_tcam_filter *tcam_filter, + u16 *tcam_index) +{ + u16 index; + struct sss_nic_tcam_node *ptr = NULL; + + list_for_each_entry(ptr, + &tcam_info->tcam_node_info.tcam_node_list, + block_list) + if (ptr->index_cnt < SSSNIC_TCAM_BLOCK_SIZE) + break; + + if (!ptr || ptr->index_cnt >= SSSNIC_TCAM_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to lookup index for fdir filter dynamic\n"); + return NULL; + } + + for (index = 0; index < SSSNIC_TCAM_BLOCK_SIZE; index++) + if (ptr->index_used[index] == 0) + break; + + if (index == SSSNIC_TCAM_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, 
"tcam block 0x%x supports filter rules is full\n", + ptr->block_id); + return NULL; + } + + tcam_filter->block_id = ptr->block_id; + tcam_filter->index = index; + *tcam_index = index; + + fdir_tcam_rule->index = index + + SSSNIC_PKT_TCAM_INDEX_START(ptr->block_id); + + return ptr; +} + +static int sss_nic_add_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_filter *tcam_filter, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + struct sss_nic_tcam_node *dynamic_block_ptr = NULL; + struct sss_nic_tcam_node *tmp = NULL; + u16 block_cnt = tcam_info->tcam_node_info.block_cnt; + u16 tcam_block_index = 0; + int block_alloc_flag = 0; + u16 index = 0; + + if (tcam_info->tcam_rule_num >= + block_cnt * SSSNIC_TCAM_BLOCK_SIZE) { + if (block_cnt >= (SSSNIC_TCAM_FILTERS_MAX / + SSSNIC_TCAM_BLOCK_SIZE)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc, dynamic tcam block is full\n"); + goto failed; + } + + ret = sss_nic_alloc_tcam_block(nic_dev, &tcam_block_index); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to fdir filter dynamic tcam alloc block\n"); + goto failed; + } + + block_alloc_flag = 1; + + dynamic_block_ptr = + sss_nic_alloc_tcam_block_resource(nic_dev, tcam_info, + tcam_block_index); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to Fdir filter dynamic alloc block memory\n"); + goto block_alloc_failed; + } + } + + tmp = sss_nic_dynamic_lookup_tcam_filter(nic_dev, + fdir_tcam_rule, tcam_info, + tcam_filter, &index); + if (!tmp) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to dynamic lookup tcam filter\n"); + goto lookup_tcam_index_failed; + } + + ret = sss_nic_add_tcam_rule(nic_dev, fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to add fdir_tcam_rule\n"); + goto add_tcam_rules_failed; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Add fdir tcam rule, func_id: 0x%x, 
tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_num: %d succeed\n", + sss_get_global_func_id(nic_dev->hwdev), + tcam_filter->block_id, index, fdir_tcam_rule->index, + fdir_tcam_rule->data.qid, tcam_info->tcam_rule_num + 1); + + if (tcam_info->tcam_rule_num == 0) { + ret = sss_nic_set_fdir_tcam_rule_filter(nic_dev, true); + if (ret != 0) + goto enable_failed; + } + + list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); + + tmp->index_used[index] = 1; + tmp->index_cnt++; + + tcam_info->tcam_rule_num++; + + return 0; + +enable_failed: + sss_nic_del_tcam_rule(nic_dev, fdir_tcam_rule->index); + +add_tcam_rules_failed: +lookup_tcam_index_failed: + if (block_alloc_flag == 1) + sss_nic_free_tcam_block_resource(tcam_info, + dynamic_block_ptr); + +block_alloc_failed: + if (block_alloc_flag == 1) + sss_nic_free_tcam_block(nic_dev, &tcam_block_index); + +failed: + return -EFAULT; +} + +static int sss_nic_del_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_filter *tcam_filter) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + u16 block_id = tcam_filter->block_id; + struct sss_nic_tcam_node *ptr = NULL; + u32 index = 0; + + list_for_each_entry(ptr, + &tcam_info->tcam_node_info.tcam_node_list, + block_list) { + if (ptr->block_id == block_id) + break; + } + if (!ptr || ptr->block_id != block_id) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to lookup block for fdir filter del dynamic\n"); + return -EFAULT; + } + + index = SSSNIC_PKT_TCAM_INDEX_START(ptr->block_id) + + tcam_filter->index; + + ret = sss_nic_del_tcam_rule(nic_dev, index); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to del fdir_tcam_rule\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Del fdir_tcam_dynamic_rule func_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", + sss_get_global_func_id(nic_dev->hwdev), 
block_id, + tcam_filter->index, index, ptr->index_cnt - 1, + tcam_info->tcam_rule_num - 1); + + ptr->index_used[tcam_filter->index] = 0; + ptr->index_cnt--; + tcam_info->tcam_rule_num--; + if (ptr->index_cnt == 0) { + sss_nic_free_tcam_block(nic_dev, &block_id); + sss_nic_free_tcam_block_resource(tcam_info, ptr); + } + + if (tcam_info->tcam_rule_num == 0) + sss_nic_set_fdir_tcam_rule_filter(nic_dev, false); + + list_del(&tcam_filter->tcam_filter_list); + kfree(tcam_filter); + + return 0; +} + +static inline struct sss_nic_tcam_filter * +sss_nic_lookup_tcam_filter(const struct list_head *filter_list, + struct sss_nic_tcam_key_tag *key) +{ + struct sss_nic_tcam_filter *ptr; + + list_for_each_entry(ptr, filter_list, tcam_filter_list) { + if (memcmp(key, &ptr->tcam_key, + sizeof(*key)) == 0) + return ptr; + } + + return NULL; +} + +static void sss_nic_del_ethtool_rule(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *eth_rule) +{ + list_del(ð_rule->list); + nic_dev->rx_rule.rule_cnt--; + + kfree(eth_rule); +} + +static int sss_nic_del_one_rule(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *eth_rule) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + struct sss_nic_tcam_filter *tcam_filter; + struct sss_nic_tcam_rule_cfg fdir_tcam_rule = {0}; + struct sss_nic_tcam_key_tag tcam_key = {0}; + + ret = sss_nic_init_fdir_tcam_info(nic_dev, ð_rule->flow_spec, + &tcam_key, &fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init fdir info\n"); + return ret; + } + + tcam_filter = sss_nic_lookup_tcam_filter(&tcam_info->tcam_list, + &tcam_key); + if (!tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not exists\n"); + return -EEXIST; + } + + ret = sss_nic_del_tcam_filter(nic_dev, tcam_filter); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to delete tcam filter\n"); + return ret; + } + + sss_nic_del_ethtool_rule(nic_dev, eth_rule); + + 
return 0; +} + +static void sss_nic_add_rule_to_list(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *rule) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + struct list_head *head = &nic_dev->rx_rule.rule_list; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.location > rule->flow_spec.location) + break; + head = &ptr->list; + } + nic_dev->rx_rule.rule_cnt++; + list_add(&rule->list, head); +} + +static int sss_nic_add_one_rule(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec) +{ + int ret; + struct sss_nic_tcam_key_tag tcam_key = {0}; + struct sss_nic_tcam_rule_cfg fdir_tcam_rule = {0}; + struct sss_nic_tcam_filter *tcam_filter = NULL; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + + ret = sss_nic_init_fdir_tcam_info(nic_dev, flow_spec, &tcam_key, + &fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init fdir info\n"); + return ret; + } + + tcam_filter = sss_nic_lookup_tcam_filter(&tcam_info->tcam_list, + &tcam_key); + if (tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); + return -EEXIST; + } + + tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); + if (!tcam_filter) + return -ENOMEM; + memcpy(&tcam_filter->tcam_key, + &tcam_key, sizeof(tcam_key)); + tcam_filter->qid = (u16)fdir_tcam_rule.data.qid; + + ret = sss_nic_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); + if (ret != 0) + goto add_tcam_filter_fail; + + /* driver save new rule filter */ + eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); + if (!eth_rule) { + ret = -ENOMEM; + goto alloc_eth_rule_fail; + } + + eth_rule->flow_spec = *flow_spec; + sss_nic_add_rule_to_list(nic_dev, eth_rule); + + return 0; + +alloc_eth_rule_fail: + sss_nic_del_tcam_filter(nic_dev, tcam_filter); +add_tcam_filter_fail: + kfree(tcam_filter); + return ret; +} + +static struct 
sss_nic_ethtool_rx_flow_rule * +sss_nic_ethtool_find_rule(const struct sss_nic_dev *nic_dev, u32 location) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.location == location) + return ptr; + } + return NULL; +} + +static int sss_nic_validate_flow(struct sss_nic_dev *nic_dev, + const struct ethtool_rx_flow_spec *flow_spec) +{ + int i; + u32 flow_type[] = { + TCP_V4_FLOW, UDP_V4_FLOW, IP_USER_FLOW, +#ifndef UNSUPPORT_NTUPLE_IPV6 + TCP_V6_FLOW, UDP_V6_FLOW, IPV6_USER_FLOW, +#endif + }; + + if (flow_spec->ring_cookie >= nic_dev->qp_res.qp_num) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Action larger than queue number %u\n", + nic_dev->qp_res.qp_num); + return -EINVAL; + } + + if (flow_spec->location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid location out of range: [0,%lu]\n", + SSSNIC_MAX_ETHTOOL_NTUPLE_RULE); + return -EINVAL; + } + + for (i = 0; i < ARRAY_LEN(flow_type); i++) { + if (flow_spec->flow_type == flow_type[i]) + return 0; + } + + nicif_err(nic_dev, drv, nic_dev->netdev, "flow type not supported\n"); + return -EOPNOTSUPP; +} + +int sss_nic_ethtool_update_flow(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec) +{ + int ret; + struct ethtool_rx_flow_spec flow_spec_temp; + int loc_exit_flag = 0; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport ntuple function\n"); + return -EOPNOTSUPP; + } + + ret = sss_nic_validate_flow(nic_dev, flow_spec); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "flow is not valid %d\n", ret); + return ret; + } + + eth_rule = sss_nic_ethtool_find_rule(nic_dev, flow_spec->location); + /* when location is same, delete old location rule. 
*/ + if (eth_rule) { + memcpy(&flow_spec_temp, ð_rule->flow_spec, + sizeof(flow_spec_temp)); + ret = sss_nic_del_one_rule(nic_dev, eth_rule); + if (ret != 0) + return ret; + + loc_exit_flag = 1; + } + + /* add new rule filter */ + ret = sss_nic_add_one_rule(nic_dev, flow_spec); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to add new rule filter\n"); + if (loc_exit_flag) + sss_nic_add_one_rule(nic_dev, &flow_spec_temp); + + return -ENOENT; + } + + return 0; +} + +int sss_nic_ethtool_delete_flow(struct sss_nic_dev *nic_dev, u32 location) +{ + int ret; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) + return -ENOSPC; + + eth_rule = sss_nic_ethtool_find_rule(nic_dev, location); + if (!eth_rule) + return -ENOENT; + + ret = sss_nic_del_one_rule(nic_dev, eth_rule); + + return ret; +} + +int sss_nic_ethtool_get_flow(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location) +{ + struct sss_nic_ethtool_rx_flow_rule *nic_eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) + return -EINVAL; + + list_for_each_entry(nic_eth_rule, &nic_dev->rx_rule.rule_list, list) { + if (nic_eth_rule->flow_spec.location == location) { + info->fs = nic_eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int sss_nic_ethtool_get_all_flows(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + int id = 0; + struct sss_nic_ethtool_rx_flow_rule *nic_eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + info->data = 
SSSNIC_MAX_ETHTOOL_NTUPLE_RULE;
	/* NOTE(review): rule_locs[] is filled for every list entry without
	 * bounding id by info->rule_cnt first; the caller sizes the array
	 * from rule_cnt, so confirm the list can never exceed it.
	 */
	list_for_each_entry(nic_eth_rule, &nic_dev->rx_rule.rule_list, list)
		rule_locs[id++] = nic_eth_rule->flow_spec.location;

	return info->rule_cnt == id ? 0 : -ENOENT;
}

/* Check that every installed ntuple rule still targets a queue below
 * @q_num; used to veto a channel-count change that would strand rules.
 */
bool sss_nic_validate_channel_setting_in_ntuple(const struct sss_nic_dev *nic_dev, u32 q_num)
{
	struct sss_nic_ethtool_rx_flow_rule *ptr = NULL;

	list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) {
		if (ptr->flow_spec.ring_cookie >= q_num) {
			nicif_err(nic_dev, drv, nic_dev->netdev,
				  "User defined filter %u assigns flow to queue %llu. Queue number %u is Invalid\n",
				  ptr->flow_spec.location, ptr->flow_spec.ring_cookie, q_num);
			return false;
		}
	}

	return true;
}
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h
new file mode 100644
index 0000000000000..3712434b05103
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h
@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#ifndef SSS_NIC_NTUPLE_H
#define SSS_NIC_NTUPLE_H

/* NOTE(review): the two include targets below were lost in extraction
 * (angle-bracket names stripped); restore from the original patch.
 */
#include
#include

#include "sss_nic_dev_define.h"

void sss_nic_flush_tcam(struct sss_nic_dev *nic_dev);

int sss_nic_ethtool_update_flow(struct sss_nic_dev *nic_dev,
				struct ethtool_rx_flow_spec *fs);

int sss_nic_ethtool_delete_flow(struct sss_nic_dev *nic_dev, u32 location);

int sss_nic_ethtool_get_flow(const struct sss_nic_dev *nic_dev,
			     struct ethtool_rxnfc *info, u32 location);

int sss_nic_ethtool_get_all_flows(const struct sss_nic_dev *nic_dev,
				  struct ethtool_rxnfc *info, u32 *rule_locs);

bool sss_nic_validate_channel_setting_in_ntuple(const struct sss_nic_dev *nic_dev, u32 q_num);

#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c
new file mode 100644
index 0000000000000..eb00a311597c7
--- /dev/null
+++
b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c
@@ -0,0 +1,1002 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 3snic Technologies Co., Ltd */

#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt

/* NOTE(review): the bare #include lines below lost their angle-bracket
 * targets during extraction (kernel headers); restore from the original
 * patch before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "sss_kernel.h"
#include "sss_hw.h"
#include "sss_nic_cfg.h"
#include "sss_nic_rss_cfg.h"
#include "sss_nic_dev_define.h"
#include "sss_nic_rss.h"
#include "sss_nic_ntuple.h"
#include "sss_nic_netdev_ops_api.h"
#include "sss_nic_dcb.h"

#define SSSNIC_INVALID_TC_ID		0xFF

/* Default 40-byte Toeplitz/XOR RSS hash key, one byte per macro. */
#define SSSNIC_DEF_RSS_KEY_0	0x6d
#define SSSNIC_DEF_RSS_KEY_1	0x5a
#define SSSNIC_DEF_RSS_KEY_2	0x56
#define SSSNIC_DEF_RSS_KEY_3	0xda
#define SSSNIC_DEF_RSS_KEY_4	0x25
#define SSSNIC_DEF_RSS_KEY_5	0x5b
#define SSSNIC_DEF_RSS_KEY_6	0x0e
#define SSSNIC_DEF_RSS_KEY_7	0xc2
#define SSSNIC_DEF_RSS_KEY_8	0x41
#define SSSNIC_DEF_RSS_KEY_9	0x67
#define SSSNIC_DEF_RSS_KEY_10	0x25
#define SSSNIC_DEF_RSS_KEY_11	0x3d
#define SSSNIC_DEF_RSS_KEY_12	0x43
#define SSSNIC_DEF_RSS_KEY_13	0xa3
#define SSSNIC_DEF_RSS_KEY_14	0x8f
#define SSSNIC_DEF_RSS_KEY_15	0xb0
#define SSSNIC_DEF_RSS_KEY_16	0xd0
#define SSSNIC_DEF_RSS_KEY_17	0xca
#define SSSNIC_DEF_RSS_KEY_18	0x2b
#define SSSNIC_DEF_RSS_KEY_19	0xcb
#define SSSNIC_DEF_RSS_KEY_20	0xae
#define SSSNIC_DEF_RSS_KEY_21	0x7b
#define SSSNIC_DEF_RSS_KEY_22	0x30
#define SSSNIC_DEF_RSS_KEY_23	0xb4
#define SSSNIC_DEF_RSS_KEY_24	0x77
#define SSSNIC_DEF_RSS_KEY_25	0xcb
#define SSSNIC_DEF_RSS_KEY_26	0x2d
#define SSSNIC_DEF_RSS_KEY_27	0xa3
#define SSSNIC_DEF_RSS_KEY_28	0x80
#define SSSNIC_DEF_RSS_KEY_29	0x30
#define SSSNIC_DEF_RSS_KEY_30	0xf2
#define SSSNIC_DEF_RSS_KEY_31	0x0c
#define SSSNIC_DEF_RSS_KEY_32	0x6a
#define SSSNIC_DEF_RSS_KEY_33	0x42
#define SSSNIC_DEF_RSS_KEY_34	0xb7
#define SSSNIC_DEF_RSS_KEY_35	0x3b
#define SSSNIC_DEF_RSS_KEY_36	0xbe
#define SSSNIC_DEF_RSS_KEY_37	0xac
#define
SSSNIC_DEF_RSS_KEY_38 0x01 +#define SSSNIC_DEF_RSS_KEY_39 0xfa + +#define SSSNIC_COS_CHANGE_OFFSET 4 + +#define SSSNIC_RXH_PORT (RXH_L4_B_0_1 | RXH_L4_B_2_3) +#define SSSNIC_RXH_IP (RXH_IP_DST | RXH_IP_SRC) +#define SSSNIC_SUPPORT_RXH (SSSNIC_RXH_IP | SSSNIC_RXH_PORT) + +static int sss_nic_set_hw_rss(struct net_device *netdev, u8 *cos_map, u8 cos_num); + +static u16 max_qp_num; +module_param(max_qp_num, ushort, 0444); +MODULE_PARM_DESC(max_qp_num, "Number of Queue Pairs (default=0)"); + +static void sss_nic_fill_indir_tbl(struct sss_nic_dev *nic_dev, u8 cos_num, u32 *indir) +{ + int i = 0; + u16 k; + u16 group_size; + u16 start_qid = 0; + u16 qp_num = 0; + u8 cur_cos = 0; + u8 j; + u8 default_cos; + u8 cos_map = sss_nic_get_valid_cos_map(nic_dev); + + if (cos_num == 0) { + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir[i] = i % nic_dev->qp_res.qp_num; + return; + } + + group_size = SSSNIC_RSS_INDIR_SIZE / cos_num; + for (j = 0; j < cos_num; j++) { + while (cur_cos < SSSNIC_DCB_COS_MAX && + nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos] == 0) + cur_cos++; + + if (cur_cos < SSSNIC_DCB_COS_MAX) { + qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos]; + start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[cur_cos]; + } else { + if (BIT(nic_dev->hw_dcb_cfg.default_cos) & cos_map) + default_cos = nic_dev->hw_dcb_cfg.default_cos; + else + default_cos = (u8)fls(cos_map) - 1; + qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[default_cos]; + start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[default_cos]; + } + + for (k = 0; k < group_size; k++) + indir[i++] = start_qid + k % qp_num; + + cur_cos++; + } +} + +static void sss_nic_get_dcb_cos_map(struct sss_nic_dev *nic_dev, + u8 *cos_map, u8 *cos_num) +{ + u8 i; + u8 num; + u8 cfg_map[SSSNIC_DCB_UP_MAX]; + bool dcb_en = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + if (!dcb_en) + return; + + if (nic_dev->hw_dcb_cfg.trust == 0) { + memcpy(cfg_map, nic_dev->hw_dcb_cfg.pcp2cos, sizeof(cfg_map)); + } else if (nic_dev->hw_dcb_cfg.trust == 1) 
{
		for (i = 0; i < SSSNIC_DCB_UP_MAX; i++)
			cfg_map[i] = nic_dev->hw_dcb_cfg.dscp2cos[i * SSSNIC_DCB_DSCP_NUM];
	}

	/* Swap the upper and lower halves of the priority map into cos_map. */
	for (i = 0; i < SSSNIC_COS_CHANGE_OFFSET; i++)
		cos_map[SSSNIC_COS_CHANGE_OFFSET + i] = cfg_map[i];

	for (i = 0; i < SSSNIC_COS_CHANGE_OFFSET; i++)
		cos_map[i] = cfg_map[SSSNIC_DCB_UP_MAX - (i + 1)];

	/* Round the user CoS count up to the next power of two. */
	num = sss_nic_get_user_cos_num(nic_dev);
	while (num & (num - 1))
		num++;

	*cos_num = num;
}

/* Recompute the CoS map and push the full RSS configuration to hw. */
int sss_nic_update_rss_cfg(struct sss_nic_dev *nic_dev)
{
	int ret;
	u8 cos_num = 0;
	u8 cos_map[SSSNIC_DCB_UP_MAX] = {0};
	struct net_device *netdev = nic_dev->netdev;

	sss_nic_get_dcb_cos_map(nic_dev, cos_map, &cos_num);

	ret = sss_nic_set_hw_rss(netdev, cos_map, cos_num);
	if (ret != 0)
		return ret;

	return ret;
}

/* Disable RSS in hardware (single queue, empty CoS map). */
void sss_nic_reset_rss_cfg(struct sss_nic_dev *nic_dev)
{
	u8 cos_map[SSSNIC_DCB_UP_MAX] = {0};

	sss_nic_config_rss_to_hw(nic_dev, 0, cos_map, 1, 0);
}

/* Enable hashing for all supported L3/L4 flow types; XOR engine. */
static void sss_nic_init_rss_type(struct sss_nic_dev *nic_dev)
{
	nic_dev->rss_type.ipv4 = 1;
	nic_dev->rss_type.ipv6 = 1;
	nic_dev->rss_type.ipv6_ext = 1;
	nic_dev->rss_type.tcp_ipv4 = 1;
	nic_dev->rss_type.tcp_ipv6 = 1;
	nic_dev->rss_type.tcp_ipv6_ext = 1;
	nic_dev->rss_type.udp_ipv4 = 1;
	nic_dev->rss_type.udp_ipv6 = 1;
	nic_dev->rss_hash_engine = SSSNIC_RSS_ENGINE_XOR;
}

/* Free the RSS key and indirection table. rss_key_big points into the
 * rss_key allocation, so it is only NULLed, not freed separately.
 */
void sss_nic_free_rss_key(struct sss_nic_dev *nic_dev)
{
	kfree(nic_dev->rss_key);
	nic_dev->rss_key = NULL;
	nic_dev->rss_key_big = NULL;

	kfree(nic_dev->rss_indir_tbl);
	nic_dev->rss_indir_tbl = NULL;
}

/* Request a rebuild of the default indirection table on next apply. */
void sss_nic_set_default_rss_indir(struct net_device *netdev)
{
	struct sss_nic_dev *nic_dev = netdev_priv(netdev);

	set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags);
}

/* Force the default indirection table when DCB is on or when any
 * existing entry points at a queue that no longer exists.
 */
static void sss_nic_maybe_reset_rss_indir(struct net_device *netdev, bool dcb_en)
{
	struct sss_nic_dev *nic_dev = netdev_priv(netdev);
	int i;

	if (dcb_en) {
		nicif_info(nic_dev, drv, netdev, "DCB is enabled, set default rss indir\n");
		set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags);
		return;
	}

	for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) {
		if (nic_dev->rss_indir_tbl[i] >= nic_dev->qp_res.qp_num) {
			set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags);
			return;
		}
	}
}

/* Count online CPUs on the device's NUMA node; fall back to all online
 * CPUs when none match.
 * NOTE(review): iterating 0..num_online_cpus()-1 assumes online CPU ids
 * are contiguous from 0 - for_each_online_cpu() would be robust to
 * holes; confirm intent.
 */
static u16 sss_nic_get_online_cpu(struct pci_dev *pdev)
{
	int i;
	int node;
	u16 cpu_num = 0;

	for (i = 0; i < (int)num_online_cpus(); i++) {
		node = (int)cpu_to_node(i);
		if (node == dev_to_node(&pdev->dev))
			cpu_num++;
	}

	if (cpu_num == 0)
		cpu_num = (u16)num_online_cpus();

	return cpu_num;
}

/* Pick the queue-pair count: hardware max, clamped by the firmware
 * default, optionally overridden by the max_qp_num module parameter,
 * and finally capped by the local-node CPU count.
 */
static void sss_nic_init_qp_num(struct sss_nic_dev *nic_dev)
{
	u16 cpu_num = 0;
	u16 qp_num = nic_dev->max_qp_num;
	u16 default_qp_num = nic_dev->nic_svc_cap.def_queue_num;

	if (default_qp_num != 0 && default_qp_num < qp_num)
		qp_num = default_qp_num;

	/* module parameter overrides the firmware default, but never
	 * exceeds the hardware maximum
	 */
	if (max_qp_num > nic_dev->max_qp_num)
		qp_num = nic_dev->max_qp_num;
	else if (max_qp_num > 0)
		qp_num = max_qp_num;

	cpu_num = sss_nic_get_online_cpu(nic_dev->pdev);

	nic_dev->qp_res.qp_num = (u16)min_t(u16, qp_num, cpu_num);
}

/* Store @key and keep a second, big-endian copy for the hardware. */
static void sss_nic_set_rss_hkey(struct sss_nic_dev *nic_dev, const u8 *key)
{
	u32 i;
	u32 *rss_hkey = (u32 *)nic_dev->rss_key;

	memcpy(nic_dev->rss_key, key, SSSNIC_RSS_KEY_SIZE);

	/* make a copy of the key, and convert it to Big Endian */
	for (i = 0; i < SSSNIC_RSS_KEY_SIZE / sizeof(u32); i++)
		nic_dev->rss_key_big[i] = cpu_to_be32(rss_hkey[i]);
}

/* Install the built-in default 40-byte RSS hash key. */
static void sss_nic_init_rss_default_key(struct sss_nic_dev *nic_dev)
{
	u8 default_key[SSSNIC_RSS_KEY_SIZE] = {
		SSSNIC_DEF_RSS_KEY_0, SSSNIC_DEF_RSS_KEY_1, SSSNIC_DEF_RSS_KEY_2,
		SSSNIC_DEF_RSS_KEY_3, SSSNIC_DEF_RSS_KEY_4, SSSNIC_DEF_RSS_KEY_5,
		SSSNIC_DEF_RSS_KEY_6, SSSNIC_DEF_RSS_KEY_7, SSSNIC_DEF_RSS_KEY_8,
		SSSNIC_DEF_RSS_KEY_9, SSSNIC_DEF_RSS_KEY_10, SSSNIC_DEF_RSS_KEY_11,
		SSSNIC_DEF_RSS_KEY_12, SSSNIC_DEF_RSS_KEY_13, SSSNIC_DEF_RSS_KEY_14,
		SSSNIC_DEF_RSS_KEY_15, SSSNIC_DEF_RSS_KEY_16, SSSNIC_DEF_RSS_KEY_17,
SSSNIC_DEF_RSS_KEY_18, SSSNIC_DEF_RSS_KEY_19, SSSNIC_DEF_RSS_KEY_20,
		SSSNIC_DEF_RSS_KEY_21, SSSNIC_DEF_RSS_KEY_22, SSSNIC_DEF_RSS_KEY_23,
		SSSNIC_DEF_RSS_KEY_24, SSSNIC_DEF_RSS_KEY_25, SSSNIC_DEF_RSS_KEY_26,
		SSSNIC_DEF_RSS_KEY_27, SSSNIC_DEF_RSS_KEY_28, SSSNIC_DEF_RSS_KEY_29,
		SSSNIC_DEF_RSS_KEY_30, SSSNIC_DEF_RSS_KEY_31, SSSNIC_DEF_RSS_KEY_32,
		SSSNIC_DEF_RSS_KEY_33, SSSNIC_DEF_RSS_KEY_34, SSSNIC_DEF_RSS_KEY_35,
		SSSNIC_DEF_RSS_KEY_36, SSSNIC_DEF_RSS_KEY_37, SSSNIC_DEF_RSS_KEY_38,
		SSSNIC_DEF_RSS_KEY_39
	};

	sss_nic_set_rss_hkey(nic_dev, default_key);
}

/* Allocate the RSS key (double-sized: plain + big-endian copy) and the
 * indirection table. Returns 0 or -ENOMEM with nothing allocated.
 */
static int sss_nic_alloc_rss_key(struct sss_nic_dev *nic_dev)
{
	/* We need double the space to store the RSS key,
	 * with the second space used to store the RSS key in big-endian mode.
	 */
	nic_dev->rss_key =
		kzalloc(SSSNIC_RSS_KEY_SIZE * SSSNIC_RSS_KEY_RSV_NUM, GFP_KERNEL);
	if (!nic_dev->rss_key) {
		nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc memory for rss_hkey\n");
		return -ENOMEM;
	}

	nic_dev->rss_indir_tbl = kzalloc(sizeof(u32) * SSSNIC_RSS_INDIR_SIZE, GFP_KERNEL);
	if (!nic_dev->rss_indir_tbl) {
		kfree(nic_dev->rss_key);
		nic_dev->rss_key = NULL;
		return -ENOMEM;
	}

	/* The second space is for big edian hash key */
	nic_dev->rss_key_big = (u32 *)(nic_dev->rss_key + SSSNIC_RSS_KEY_SIZE);

	return 0;
}

/* Push indirection table, hash-type selection and hash engine to hw. */
static int sss_nic_config_rss_hw_resource(struct sss_nic_dev *nic_dev, u32 *indir)
{
	int ret;
	u8 engine_type = nic_dev->rss_hash_engine;

	ret = sss_nic_set_rss_indir_tbl(nic_dev, indir);
	if (ret != 0)
		return ret;

	ret = sss_nic_set_rss_type(nic_dev, nic_dev->rss_type);
	if (ret != 0)
		return ret;

	return sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &engine_type);
}

/* Apply the full RSS configuration: hash key, (possibly rebuilt)
 * indirection table, hash types/engine, then the CoS/queue mapping.
 */
static int sss_nic_set_hw_rss(struct net_device *netdev, u8 *cos_map, u8 cos_num)
{
	int ret;
	struct sss_nic_dev *nic_dev = netdev_priv(netdev);
	bool dcb_en = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE);

	ret = sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, nic_dev->rss_key);
	if (ret != 0)
		return ret;

	sss_nic_maybe_reset_rss_indir(netdev, dcb_en);

	if (test_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags))
		sss_nic_fill_indir_tbl(nic_dev, cos_num, nic_dev->rss_indir_tbl);

	ret = sss_nic_config_rss_hw_resource(nic_dev, nic_dev->rss_indir_tbl);
	if (ret != 0)
		return ret;

	ret = sss_nic_config_rss_to_hw(nic_dev, cos_num, cos_map,
				       nic_dev->qp_res.qp_num, 1);
	if (ret != 0)
		return ret;

	return 0;
}

/* Initialize default key, queue count, hash types and a default
 * indirection table in software only.
 */
static void sss_nic_init_rss_key(struct sss_nic_dev *nic_dev)
{
	sss_nic_init_rss_default_key(nic_dev);

	sss_nic_init_qp_num(nic_dev);

	sss_nic_init_rss_type(nic_dev);

	sss_nic_fill_indir_tbl(nic_dev, 0, nic_dev->rss_indir_tbl);
}

/* Program the current software RSS state (key, table, types, engine)
 * into hardware and initialize hardware RSS for qp_num queues.
 */
static int sss_nic_set_rss_key_to_hw(struct sss_nic_dev *nic_dev)
{
	int ret;
	u8 engine_type = nic_dev->rss_hash_engine;

	ret = sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, nic_dev->rss_key);
	if (ret != 0)
		return ret;

	ret = sss_nic_set_rss_indir_tbl(nic_dev, nic_dev->rss_indir_tbl);
	if (ret != 0)
		return ret;

	ret = sss_nic_set_rss_type(nic_dev, nic_dev->rss_type);
	if (ret != 0)
		return ret;

	ret = sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &engine_type);
	if (ret != 0)
		return ret;

	ret = sss_nic_init_hw_rss(nic_dev, nic_dev->qp_res.qp_num);
	if (ret != 0)
		return ret;

	return 0;
}

/* Enable RSS when supported and more than one queue pair exists;
 * otherwise (or on any failure) fall back to a single queue.
 */
void sss_nic_try_to_enable_rss(struct sss_nic_dev *nic_dev)
{
	int ret = 0;

	if (!SSSNIC_SUPPORT_RSS(nic_dev->nic_io) || nic_dev->max_qp_num <= 1) {
		nic_dev->qp_res.qp_num = nic_dev->max_qp_num;
		return;
	}

	ret = sss_nic_alloc_rss_key(nic_dev);
	if (ret != 0)
		goto disable_rss;

	set_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags);
	set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags);
	sss_nic_init_rss_key(nic_dev);

	ret = sss_nic_set_rss_key_to_hw(nic_dev);
	if (ret != 0) {
		sss_nic_free_rss_key(nic_dev);
		nic_err(nic_dev->dev_hdl, "Fail to
set hardware rss parameters\n"); + goto disable_rss; + } + + return; + +disable_rss: + clear_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->max_qp_num = 1; + nic_dev->qp_res.qp_num = nic_dev->max_qp_num; +} + +/* for ethtool */ +static int sss_nic_set_l4_rss_hash_type(const struct ethtool_rxnfc *cmd, + struct sss_nic_rss_type *rss_type) +{ + u8 rss_l4_en = 0; + + if ((cmd->data & SSSNIC_RXH_PORT) == 0) + rss_l4_en = 0; + else if ((cmd->data & SSSNIC_RXH_PORT) == SSSNIC_RXH_PORT) + rss_l4_en = 1; + else + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sss_nic_update_rss_type(struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd, + struct sss_nic_rss_type *rss_type) +{ + int ret; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = sss_nic_set_l4_rss_hash_type(cmd, rss_type); + if (ret != 0) + return ret; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupport flow type\n"); + return -EINVAL; + } + + return 0; +} + +static inline int sss_nic_check_cmd_data(struct ethtool_rxnfc *cmd) +{ + /* RSS only support hashing to queues based src and dst IP and port */ + if (cmd->data & ~SSSNIC_SUPPORT_RXH) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(cmd->data & SSSNIC_RXH_IP)) + return -EINVAL; + + return 0; +} + +static int sss_nic_set_rss_hash_type(struct sss_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_rss_type *rss_type = &nic_dev->rss_type; + int ret; + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) == 
0) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS disable, no support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + if (sss_nic_check_cmd_data(cmd) != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid ethool rxnfc cmd data\n"); + return -EINVAL; + } + + ret = sss_nic_get_rss_type(nic_dev, rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get rss type\n"); + return -EFAULT; + } + + ret = sss_nic_update_rss_type(nic_dev, cmd, rss_type); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, *rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Success to set rss hash options\n"); + + return 0; +} + +static void translate_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) +{ + if (rss_opt != 0) + cmd->data |= SSSNIC_RXH_PORT; +} + +static int sss_nic_translate_rss_type(struct sss_nic_dev *nic_dev, + struct sss_nic_rss_type *rss_type, + struct ethtool_rxnfc *cmd) +{ + cmd->data = SSSNIC_RXH_IP; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + translate_rss_type(rss_type->tcp_ipv4, cmd); + break; + case UDP_V4_FLOW: + translate_rss_type(rss_type->udp_ipv4, cmd); + break; + case TCP_V6_FLOW: + translate_rss_type(rss_type->tcp_ipv6, cmd); + break; + case UDP_V6_FLOW: + translate_rss_type(rss_type->udp_ipv6, cmd); + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +static int sss_nic_get_rss_hash_type(struct sss_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_rss_type rss_type = {0}; + int ret; + + cmd->data = 0; + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) == 0) + return 0; + + ret = sss_nic_get_rss_type(nic_dev, &rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get rss type\n"); + 
return ret; + } + + return sss_nic_translate_rss_type(nic_dev, &rss_type, cmd); +} + +int sss_nic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->qp_res.qp_num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = (u32)nic_dev->rx_rule.rule_cnt; + break; + case ETHTOOL_GRXCLSRULE: + ret = sss_nic_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + ret = sss_nic_ethtool_get_all_flows(nic_dev, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = sss_nic_get_rss_hash_type(nic_dev, cmd); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +int sss_nic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = sss_nic_set_rss_hash_type(nic_dev, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = sss_nic_ethtool_update_flow(nic_dev, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = sss_nic_ethtool_delete_flow(nic_dev, cmd->fs.location); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static u16 sss_nic_channels_max(struct sss_nic_dev *nic_dev) +{ + u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + + return tcs ? nic_dev->max_qp_num / tcs : nic_dev->max_qp_num; +} + +static u16 sss_nic_curr_channels(struct sss_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) + return nic_dev->qp_res.qp_num ? 
+ nic_dev->qp_res.qp_num : 1; + else + return (u16)min_t(u16, sss_nic_channels_max(nic_dev), + nic_dev->qp_res.qp_num); +} + +void sss_nic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + channels->tx_count = 0; + channels->rx_count = 0; + channels->other_count = 0; + channels->max_tx = 0; + channels->max_rx = 0; + channels->max_other = 0; + channels->max_combined = sss_nic_channels_max(nic_dev); + /* report flow director queues as maximum channels */ + channels->combined_count = sss_nic_curr_channels(nic_dev); +} + +static int sss_nic_check_channel_parameter(struct net_device *netdev, + const struct ethtool_channels *channels) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int combined_count = channels->combined_count; + u16 max_channel = sss_nic_channels_max(nic_dev); + + if (combined_count == 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupport combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count != 0 || channels->rx_count != 0 || + channels->other_count != 0) { + nicif_err(nic_dev, drv, netdev, + "Set rx/tx/other count no support\n"); + return -EINVAL; + } + + if (combined_count > max_channel) { + nicif_err(nic_dev, drv, netdev, + "Invalid combined_count %u out of range %u\n", combined_count, + max_channel); + return -EINVAL; + } + + return 0; +} + +static void sss_nic_change_num_channel_reopen_handler(struct sss_nic_dev *nic_dev, + const void *priv_data) +{ + sss_nic_set_default_rss_indir(nic_dev->netdev); +} + +int sss_nic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct sss_nic_qp_resource q_param = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int combined_count = channels->combined_count; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + int ret; + + if (sss_nic_check_channel_parameter(netdev, channels)) + return -EINVAL; + + if (!test_bit(SSSNIC_RSS_ENABLE, 
&nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "This function not support RSS, only support 1 queue pair\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags)) { + if (combined_count < user_cos_num) { + nicif_err(nic_dev, drv, netdev, + "DCB is on, channel num should more than valid cos num:%u\n", + user_cos_num); + return -EOPNOTSUPP; + } + } + + if (SSSNIC_SUPPORT_FDIR(nic_dev->nic_io) && + !sss_nic_validate_channel_setting_in_ntuple(nic_dev, combined_count)) + return -EOPNOTSUPP; + + nicif_info(nic_dev, drv, netdev, "Set max combine queue number from %u to %u\n", + nic_dev->qp_res.qp_num, combined_count); + + if (netif_running(netdev)) { + q_param = nic_dev->qp_res; + q_param.irq_cfg = NULL; + q_param.rq_res_group = NULL; + q_param.sq_res_group = NULL; + q_param.qp_num = (u16)combined_count; + + nicif_info(nic_dev, drv, netdev, "Restart channel\n"); + ret = sss_nic_update_channel_setting(nic_dev, &q_param, + sss_nic_change_num_channel_reopen_handler, + NULL); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to change channel setting\n"); + return -EFAULT; + } + } else { + /* Discard user configured rss */ + sss_nic_set_default_rss_indir(netdev); + nic_dev->qp_res.qp_num = (u16)combined_count; + } + + return 0; +} + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +u32 sss_nic_get_rxfh_indir_size(struct net_device *netdev) +{ + return SSSNIC_RSS_INDIR_SIZE; +} +#endif + +static int sss_nic_set_rss_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *hash_key) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (indir) { + ret = sss_nic_set_rss_indir_tbl(nic_dev, indir); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set rss indir table\n"); + return -EFAULT; + } + clear_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + + memcpy(nic_dev->rss_indir_tbl, indir, + sizeof(u32) * SSSNIC_RSS_INDIR_SIZE); + nicif_info(nic_dev, drv, netdev, "Success to set rss indir\n"); + } + + if 
(hash_key) { + ret = sss_nic_set_rss_hash_key(nic_dev, hash_key); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set rss key\n"); + return -EFAULT; + } + + sss_nic_set_rss_hkey(nic_dev, hash_key); + nicif_info(nic_dev, drv, netdev, "Success to set rss key\n"); + } + + return 0; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 sss_nic_get_rxfh_key_size(struct net_device *netdev) +{ + return SSSNIC_RSS_KEY_SIZE; +} + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key, u8 *hfunc) +#else +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = nic_dev->rss_hash_engine ? + ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; +#endif + + if (indir) { + ret = sss_nic_get_rss_indir_tbl(nic_dev, indir); + if (ret != 0) + return -EFAULT; + } + + if (hash_key) + memcpy(hash_key, nic_dev->rss_key, SSSNIC_RSS_KEY_SIZE); + + return ret; +} + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *hash_key, + const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +int sss_nic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key) +#else +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *hash_key) +#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "No support to set rss parameters when rss disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "No support to set indir when DCB enable\n"); + return -EOPNOTSUPP; 
+ } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, + "No support to set hfunc type except TOP and XOR\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + SSSNIC_RSS_ENGINE_XOR : + SSSNIC_RSS_ENGINE_TOEP; + ret = sss_nic_set_rss_hash_engine(nic_dev, + nic_dev->rss_hash_engine); + if (ret != 0) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, + "Success to set hfunc to RSS_HASH_%s\n", + (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); + } +#endif + ret = sss_nic_set_rss_rxfh(netdev, indir, hash_key); + + return ret; +} + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *rxfh_indir) +#else +int sss_nic_get_rxfh_indir(struct net_device *netdev, u32 *indir) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + u32 *indir = NULL; + + /* In a low version kernel(eg:suse 11.2), call the interface twice. + * First call to get the size value, + * and second call to get the rxfh indir according to the size value. 
+ */ + if (rxfh_indir->size == 0) { + rxfh_indir->size = SSSNIC_RSS_INDIR_SIZE; + return 0; + } + + if (rxfh_indir->size < SSSNIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get rss indir, rss size(%d) less than default rss size(%u).\n", + rxfh_indir->size, SSSNIC_RSS_INDIR_SIZE); + return -EINVAL; + } + + indir = rxfh_indir->ring_index; +#endif + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "No support to get rss when rss disable\n"); + return -EOPNOTSUPP; + } + + if (indir) + ret = sss_nic_get_rss_indir_tbl(nic_dev, indir); + + return ret; +} + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *rxfh_indir) +#else +int sss_nic_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + const u32 *indir = NULL; + + if (rxfh_indir->size != SSSNIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to set rss indir, rss size(%d) is less than default rss size(%u).\n", + rxfh_indir->size, SSSNIC_RSS_INDIR_SIZE); + return -EINVAL; + } + + indir = rxfh_indir->ring_index; +#endif + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "No support to set rss indir when rss disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "No support to set indir when DCB enable\n"); + return -EOPNOTSUPP; + } + + return sss_nic_set_rss_rxfh(netdev, indir, NULL); +} + +#endif /* defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) */ diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h new file mode 100644 index 0000000000000..93b7dee999518 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h @@ -0,0 +1,75 
@@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RSS_H +#define SSS_NIC_RSS_H + +#include "sss_nic_dev_define.h" + +#define SSS_NIC_NUM_IQ_PER_FUNC 8 + +int sss_nic_update_rss_cfg(struct sss_nic_dev *nic_dev); + +void sss_nic_reset_rss_cfg(struct sss_nic_dev *nic_dev); + +void sss_nic_set_default_rss_indir(struct net_device *netdev); + +void sss_nic_try_to_enable_rss(struct sss_nic_dev *nic_dev); + +void sss_nic_free_rss_key(struct sss_nic_dev *nic_dev); + +/* for ethtool */ +int sss_nic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + +int sss_nic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); + +void sss_nic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +int sss_nic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +u32 sss_nic_get_rxfh_indir_size(struct net_device *netdev); +#endif /* NOT_HAVE_GET_RXFH_INDIR_SIZE */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 sss_nic_get_rxfh_key_size(struct net_device *netdev); + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +#else /* HAVE_RXFH_HASHFUNC */ +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#endif /* HAVE_RXFH_HASHFUNC */ + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc); +#else +#ifdef HAVE_RXFH_NONCONST +int sss_nic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#else +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key); +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir 
*indir1); +#else +int sss_nic_get_rxfh_indir(struct net_device *netdev, u32 *indir); +#endif + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *indir1); +#else +int sss_nic_set_rxfh_indir(struct net_device *netdev, const u32 *indir); +#endif /* NOT_HAVE_GET_RXFH_INDIR_SIZE */ + +#endif /* (defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c new file mode 100644 index 0000000000000..a8c3a4a447d17 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_event.h" + +int sss_nic_cfg_rss_hash_key(struct sss_nic_dev *nic_dev, u8 opcode, u8 *hash_key) +{ + int ret; + struct sss_nic_mbx_rss_key_cfg cmd_rss_hash_key = {0}; + u16 out_len = sizeof(cmd_rss_hash_key); + + if (opcode == SSSNIC_MBX_OPCODE_SET) + memcpy(cmd_rss_hash_key.key, hash_key, SSSNIC_RSS_KEY_SIZE); + + cmd_rss_hash_key.opcode = opcode; + cmd_rss_hash_key.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_RSS_HASH_KEY, + &cmd_rss_hash_key, sizeof(cmd_rss_hash_key), + &cmd_rss_hash_key, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_hash_key)) { + nic_err(nic_dev->dev_hdl, + "Fail to hash key,opcode: %d ret: %d, status: 0x%x, out_len: 0x%x\n", + opcode, ret, cmd_rss_hash_key.head.state, out_len); + return -EINVAL; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) + 
memcpy(hash_key, cmd_rss_hash_key.key, SSSNIC_RSS_KEY_SIZE); + + return 0; +} + +int sss_nic_set_rss_hash_key(struct sss_nic_dev *nic_dev, const u8 *hash_key) +{ + u8 rss_hash_key[SSSNIC_RSS_KEY_SIZE]; + + memcpy(rss_hash_key, hash_key, SSSNIC_RSS_KEY_SIZE); + return sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, rss_hash_key); +} + +int sss_nic_get_rss_indir_tbl(struct sss_nic_dev *nic_dev, u32 *indir_tbl) +{ + int i; + int ret; + u16 *temp_tbl = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (!nic_dev || !indir_tbl) + return -EINVAL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(struct sss_nic_rss_indirect_table); + ret = sss_ctrlq_detail_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_GET_RSS_INDIR_TABLE, + msg_buf, msg_buf, NULL, 0, + SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to get rss indir tbl\n"); + goto get_tbl_fail; + } + + temp_tbl = (u16 *)msg_buf->buf; + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir_tbl[i] = *(temp_tbl + i); + +get_tbl_fail: + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return ret; +} + +static void sss_nic_fill_indir_tbl(struct sss_nic_rss_indirect_table *indir_tbl, + const u32 *indir_table) +{ + u32 i; + u32 tbl_size; + u32 *temp_entry = NULL; + + memset(indir_tbl, 0, sizeof(*indir_tbl)); + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = (u16)indir_table[i]; + + temp_entry = (u32 *)indir_tbl->entry; + tbl_size = sizeof(indir_tbl->entry) / (sizeof(u32)); + for (i = 0; i < tbl_size; i++) + temp_entry[i] = cpu_to_be32(temp_entry[i]); +} + +int sss_nic_set_rss_indir_tbl(struct sss_nic_dev *nic_dev, const u32 *indir_tbl) +{ + int ret; + u64 output_param = 0; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (!nic_dev || !indir_tbl) + return -EINVAL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if 
(!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(struct sss_nic_rss_indirect_table); + + sss_nic_fill_indir_tbl(msg_buf->buf, indir_tbl); + + ret = sss_ctrlq_direct_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_SET_RSS_INDIR_TABLE, + msg_buf, &output_param, + 0, SSS_CHANNEL_NIC); + if (ret != 0 || output_param != 0) { + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + nic_err(nic_dev->dev_hdl, "Fail to set rss indir tbl\n"); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + return ret; +} + +static int sss_nic_set_rss_type_by_ctrlq(struct sss_nic_dev *nic_dev, u32 ctx) +{ + int ret; + u64 output_param = 0; + struct sss_nic_rss_ctx_table *rss_ctx_tbl = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + rss_ctx_tbl = (struct sss_nic_rss_ctx_table *)msg_buf->buf; + memset(rss_ctx_tbl, 0, sizeof(*rss_ctx_tbl)); + rss_ctx_tbl->ctx = cpu_to_be32(ctx); + msg_buf->size = sizeof(*rss_ctx_tbl); + + ret = sss_ctrlq_direct_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_SET_RSS_CONTEXT_TABLE, msg_buf, + &output_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || output_param != 0) { + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + nic_err(nic_dev->dev_hdl, "Fail to set rss ctx, ret: %d\n", ret); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return 0; +} + +static int sss_nic_set_rss_type_by_mbx(struct sss_nic_dev *nic_dev, u32 ctx) +{ + struct sss_nic_mbx_rss_ctx ctx_tbl = {0}; + u16 out_len = sizeof(ctx_tbl); + int ret; + + ctx_tbl.func_id = sss_get_global_func_id(nic_dev->hwdev); + ctx_tbl.context = ctx; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_SET_RSS_CTX_TBL_INTO_FUNC, + &ctx_tbl, sizeof(ctx_tbl), &ctx_tbl, &out_len); + + 
if (ctx_tbl.head.state == SSS_MGMT_CMD_UNSUPPORTED) { + return SSS_MGMT_CMD_UNSUPPORTED; + } else if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &ctx_tbl)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss ctx, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, ctx_tbl.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type rss_type) +{ + int ret; + u32 ctx = 0; + + ctx |= SSSNIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + SSSNIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + SSSNIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6) | + SSSNIC_RSS_TYPE_SET(1, VALID); + + ret = sss_nic_set_rss_type_by_mbx(nic_dev, ctx); + if (ret == SSS_MGMT_CMD_UNSUPPORTED) + ret = sss_nic_set_rss_type_by_ctrlq(nic_dev, ctx); + + return ret; +} + +int sss_nic_get_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type *rss_type) +{ + int ret; + struct sss_nic_mbx_rss_ctx rss_ctx_tbl = {0}; + u16 out_len = sizeof(rss_ctx_tbl); + + if (!nic_dev || !rss_type) + return -EINVAL; + + rss_ctx_tbl.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_RSS_CTX_TBL, + &rss_ctx_tbl, sizeof(rss_ctx_tbl), + &rss_ctx_tbl, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &rss_ctx_tbl)) { + nic_err(nic_dev->dev_hdl, "Fail to get hash type, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, rss_ctx_tbl.head.state, out_len); + return -EINVAL; + } + + rss_type->ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV4); + rss_type->ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV6); + rss_type->ipv6_ext = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = 
SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV6_EXT); + rss_type->udp_ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, UDP_IPV6); + + return 0; +} + +int sss_nic_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 cmd, u8 *hash_engine) +{ + int ret; + struct sss_nic_mbx_rss_engine_cfg cmd_rss_engine = {0}; + u16 out_len = sizeof(cmd_rss_engine); + + cmd_rss_engine.opcode = cmd; + cmd_rss_engine.func_id = sss_get_global_func_id(nic_dev->hwdev); + + if (cmd == SSSNIC_MBX_OPCODE_SET) + cmd_rss_engine.hash_engine = *hash_engine; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_ENGINE, + &cmd_rss_engine, sizeof(cmd_rss_engine), + &cmd_rss_engine, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_engine)) { + nic_err(nic_dev->dev_hdl, "Fail to handle hash engine,opcode:%d, ret: %d, status: 0x%x, out_len: 0x%x\n", + cmd, ret, cmd_rss_engine.head.state, out_len); + + return -EIO; + } + + if (cmd == SSSNIC_MBX_OPCODE_GET) + *hash_engine = cmd_rss_engine.hash_engine; + + return 0; +} + +int sss_nic_set_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 hash_engine) +{ + return sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &hash_engine); +} + +int sss_nic_config_rss_to_hw(struct sss_nic_dev *nic_dev, + u8 cos_num, u8 *cos_map, u16 qp_num, u8 rss_en) +{ + int ret; + struct sss_nic_mbx_rss_cfg cmd_rss_cfg = {0}; + u16 out_len = sizeof(cmd_rss_cfg); + + if (!nic_dev || !cos_map || (cos_num & (cos_num - 1)) != 0) + return -EINVAL; + + cmd_rss_cfg.rss_en = rss_en; + cmd_rss_cfg.qp_num = qp_num; + cmd_rss_cfg.rq_priority_number = (cos_num > 0) ? 
(u8)ilog2(cos_num) : 0; + cmd_rss_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + memcpy(cmd_rss_cfg.prio_tc, cos_map, SSSNIC_DCB_UP_MAX); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_RSS_CFG, + &cmd_rss_cfg, sizeof(cmd_rss_cfg), + &cmd_rss_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_rss_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_init_hw_rss(struct sss_nic_dev *nic_dev, u16 qp_num) +{ + int ret; + struct sss_nic_mbx_rss_cfg cmd_rss_cfg = {0}; + u16 out_len = sizeof(cmd_rss_cfg); + + if (!nic_dev) + return -EINVAL; + + cmd_rss_cfg.qp_num = qp_num; + cmd_rss_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_RSS_CFG, + &cmd_rss_cfg, sizeof(cmd_rss_cfg), + &cmd_rss_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_rss_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h new file mode 100644 index 0000000000000..e5515c1e11cf2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RSS_CFG_H +#define SSS_NIC_RSS_CFG_H + +#include + +#include "sss_nic_cfg_rss_define.h" + +int sss_nic_set_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type rss_type); + +int sss_nic_get_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type *rss_type); + +int sss_nic_set_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 hash_engine); + +int sss_nic_rss_hash_engine(struct 
sss_nic_dev *nic_dev, u8 cmd, u8 *hash_engine); + +int sss_nic_config_rss_to_hw(struct sss_nic_dev *nic_dev, + u8 cos_num, u8 *prio_tc, u16 qp_num, u8 rss_en); + +int sss_nic_init_hw_rss(struct sss_nic_dev *nic_dev, u16 qp_num); + +int sss_nic_set_rss_hash_key(struct sss_nic_dev *nic_dev, const u8 *hash_key); + +int sss_nic_cfg_rss_hash_key(struct sss_nic_dev *nic_dev, u8 opcode, u8 *hash_key); + +int sss_nic_set_rss_indir_tbl(struct sss_nic_dev *nic_dev, const u32 *indir_tbl); + +int sss_nic_get_rss_indir_tbl(struct sss_nic_dev *nic_dev, u32 *indir_tbl); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c new file mode 100644 index 0000000000000..d26cc00fe0282 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c @@ -0,0 +1,904 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +/* rx cqe checksum err */ +#define SSSNIC_RX_IP_CSUM_ERR BIT(0) +#define SSSNIC_RX_TCP_CSUM_ERR BIT(1) +#define SSSNIC_RX_UDP_CSUM_ERR BIT(2) +#define SSSNIC_RX_IGMP_CSUM_ERR BIT(3) +#define SSSNIC_RX_ICMPV4_CSUM_ERR BIT(4) +#define SSSNIC_RX_ICMPV6_CSUM_ERR BIT(5) +#define SSSNIC_RX_SCTP_CRC_ERR BIT(6) +#define SSSNIC_RX_CSUM_HW_CHECK_NONE BIT(7) +#define SSSNIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) + +#define LRO_PKT_HDR_LEN_IPV4 66 +#define LRO_PKT_HDR_LEN_IPV6 86 +#define LRO_PKT_HDR_LEN(cqe) \ + (SSSNIC_GET_RX_IP_TYPE(sss_hw_cpu32((cqe)->offload_type)) == \ + SSSNIC_RX_IPV6_PKT ? 
LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) + +#define SSSNIC_MAX_NUM_RQ 256 + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define SSSNIC_GET_RX_PKT_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) +#define SSSNIC_GET_RX_IP_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) +#define SSSNIC_GET_RX_ENC_L3_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) +#define SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) + +#define SSSNIC_GET_RX_PKT_UMBCAST(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define SSSNIC_GET_RSS_TYPES(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define SSSNIC_RQ_CQE_SGE_VLAN_SHIFT 0 +#define SSSNIC_RQ_CQE_SGE_LEN_SHIFT 16 + +#define SSSNIC_RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define SSSNIC_RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define 
SSSNIC_RQ_CQE_SGE_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_SGE_##member##_SHIFT) & SSSNIC_RQ_CQE_SGE_##member##_MASK) + +#define SSSNIC_GET_RX_VLAN_TAG(vlan_len) SSSNIC_RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define SSSNIC_GET_RX_PKT_LEN(vlan_len) SSSNIC_RQ_CQE_SGE_GET(vlan_len, LEN) + +#define SSSNIC_GET_RX_CSUM_ERR(status) SSSNIC_RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define SSSNIC_GET_RX_FLUSH(status) SSSNIC_RQ_CQE_STATUS_GET(status, FLUSH) + +#define SSSNIC_GET_RX_BP_EN(status) SSSNIC_RQ_CQE_STATUS_GET(status, BP_EN) + +#define SSSNIC_GET_RX_NUM_LRO(status) SSSNIC_RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define SSSNIC_RX_IS_DECRY_PKT(status) SSSNIC_RQ_CQE_STATUS_GET(status, DECRY_PKT) + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_SHIFT 0 +#define SSSNIC_RQ_CQE_PKT_NUM_SHIFT 1 +#define SSSNIC_RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define SSSNIC_RQ_CQE_PKT_FIRST_LEN_SHIFT 19 + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_MASK 0x1 +#define SSSNIC_RQ_CQE_PKT_NUM_MASK 0x1FU +#define SSSNIC_RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define SSSNIC_RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU + +#define SSSNIC_RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_PKT_##member##_SHIFT) & SSSNIC_RQ_CQE_PKT_##member##_MASK) +#define SSSNIC_GET_RQ_CQE_PKT_NUM(pkt_info) SSSNIC_RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_##member##_SHIFT) & SSSNIC_RQ_CQE_##member##_MASK) +#define SSSNIC_GET_SUPER_CQE_EN(pkt_info) \ + SSSNIC_RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SSSNIC_RX_HDR_SIZE 256 +#define SSSNIC_RX_BUFFER_WRITE 16 + +#define SSSNIC_RX_TCP_PKT 0x3 +#define SSSNIC_RX_UDP_PKT 0x4 +#define SSSNIC_RX_SCTP_PKT 0x7 + +#define SSSNIC_RX_IPV4_PKT 0 +#define SSSNIC_RX_IPV6_PKT 1 +#define SSSNIC_RX_INVALID_IP_TYPE 2 + +#define SSSNIC_RX_PKT_FORMAT_NON_TUNNEL 0 +#define SSSNIC_RX_PKT_FORMAT_VXLAN 1 + +#ifdef HAVE_XDP_SUPPORT +enum sss_nic_xdp_pkt { + SSSNIC_XDP_PKT_PASS, 
+ SSSNIC_XDP_PKT_DROP, +}; +#endif + +#define SSSNIC_LRO_PKT_HDR_LEN_IPV4 66 +#define SSSNIC_LRO_PKT_HDR_LEN_IPV6 86 +#define SSSNIC_LRO_PKT_HDR_LEN(cqe) \ + (SSSNIC_GET_RX_IP_TYPE(sss_hw_cpu32((cqe)->offload_type)) == \ + SSSNIC_RX_IPV6_PKT ? SSSNIC_LRO_PKT_HDR_LEN_IPV6 : SSSNIC_LRO_PKT_HDR_LEN_IPV4) + +#define SSSNIC_GET_SGE_NUM(pkt_len, rxq) \ + ((u8)(((pkt_len) >> (rxq)->buff_size_shift) + \ + (((pkt_len) & ((rxq)->buf_len - 1)) ? 1 : 0))) + +bool sss_nic_rx_alloc_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + struct page *page = rx_desc->page; + dma_addr_t dma_addr = rx_desc->buf_daddr; + + if (likely(dma_addr != 0)) + return true; + + page = alloc_pages_node(NUMA_NO_NODE, + GFP_ATOMIC | __GFP_COLD | __GFP_COMP, nic_dev->page_order); + if (unlikely(!page)) + return false; + + dma_addr = dma_map_page(nic_dev->dev_hdl, page, 0, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(nic_dev->dev_hdl, dma_addr) != 0)) { + __free_pages(page, nic_dev->page_order); + return false; + } + + rx_desc->page = page; + rx_desc->buf_daddr = dma_addr; + rx_desc->page_offset = 0; + + return true; +} + +u32 sss_nic_fill_bd_sge(struct sss_nic_rq_desc *rq_desc) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_rqe *rqe = NULL; + u32 idle_wqe = rq_desc->delta - 1; + dma_addr_t dma_addr; + u32 i; + + for (i = 0; i < idle_wqe; i++) { + rx_desc = &rq_desc->rx_desc_group[rq_desc->pi]; + rqe = rx_desc->rqe; + + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, rx_desc))) { + SSSNIC_RQ_STATS_INC(rq_desc, alloc_rx_dma_err); + break; + } + + dma_addr = rx_desc->buf_daddr + rx_desc->page_offset; + + if (rq_desc->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { + rqe->extend_rqe.bd_sect.sge.low_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->extend_rqe.bd_sect.sge.high_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } else { + 
rqe->normal_rqe.bd_lo_addr = sss_hw_be32(lower_32_bits(dma_addr)); + rqe->normal_rqe.bd_hi_addr = sss_hw_be32(upper_32_bits(dma_addr)); + } + rq_desc->pi = (u16)((rq_desc->pi + 1) & rq_desc->qid_mask); + } + + if (likely(i != 0)) { + sss_nic_write_db(rq_desc->rq, rq_desc->qid & (SSSNIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rq_desc->pi << rq_desc->rq->wqe_type)); + + rq_desc->delta -= i; + rq_desc->backup_pi = rq_desc->pi; + } else if (idle_wqe == rq_desc->q_depth - 1) { + SSSNIC_RQ_STATS_INC(rq_desc, rx_buf_errors); + } + + return i; +} + +#define SSS_NIC_FILL_BD_SGE(rq_desc) \ +do { \ + struct sss_nic_dev *nic_dev = netdev_priv((rq_desc)->netdev); \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + struct sss_nic_rqe *_rqe = NULL; \ + u32 _idle_wqe = (rq_desc)->delta - 1; \ + dma_addr_t _dma_addr; \ + u32 _id; \ +\ + for (_id = 0; _id < _idle_wqe; _id++) { \ + _rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->pi]; \ + _rqe = _rx_desc->rqe; \ +\ + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, _rx_desc))) { \ + SSSNIC_RQ_STATS_INC((rq_desc), alloc_rx_dma_err); \ + break; \ + } \ +\ + _dma_addr = _rx_desc->buf_daddr + _rx_desc->page_offset; \ +\ + if ((rq_desc)->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { \ + _rqe->extend_rqe.bd_sect.sge.low_addr = \ + sss_hw_be32(lower_32_bits(_dma_addr)); \ + _rqe->extend_rqe.bd_sect.sge.high_addr = \ + sss_hw_be32(upper_32_bits(_dma_addr)); \ + } else { \ + _rqe->normal_rqe.bd_lo_addr = sss_hw_be32(lower_32_bits(_dma_addr)); \ + _rqe->normal_rqe.bd_hi_addr = sss_hw_be32(upper_32_bits(_dma_addr)); \ + } \ + (rq_desc)->pi = (u16)(((rq_desc)->pi + 1) & (rq_desc)->qid_mask); \ + } \ +\ + if (likely(_id != 0)) { \ + sss_nic_write_db((rq_desc)->rq, (rq_desc)->qid & (SSSNIC_DCB_COS_MAX - 1), \ + RQ_CFLAG_DP, \ + (u16)((u32)(rq_desc)->pi << (rq_desc)->rq->wqe_type)); \ +\ + (rq_desc)->delta -= _id; \ + (rq_desc)->backup_pi = (rq_desc)->pi; \ + } else if (_idle_wqe == (rq_desc)->q_depth - 1) { \ + SSSNIC_RQ_STATS_INC((rq_desc), 
rx_buf_errors); \ + } \ +} while (0) + +#define sss_nic_rx_reuse_dma_page(rq_desc, old_rqe_desc) \ +do { \ + u16 _pi = (rq_desc)->backup_pi; \ + struct sss_nic_rx_desc *new_rqe_desc; \ +\ + new_rqe_desc = &(rq_desc)->rx_desc_group[_pi++]; \ +\ + (rq_desc)->backup_pi = (_pi < (rq_desc)->q_depth) ? _pi : 0; \ +\ + new_rqe_desc->page = (old_rqe_desc)->page; \ + new_rqe_desc->page_offset = (old_rqe_desc)->page_offset; \ + new_rqe_desc->buf_daddr = (old_rqe_desc)->buf_daddr; \ +\ + dma_sync_single_range_for_device((rq_desc)->dev, new_rqe_desc->buf_daddr, \ + new_rqe_desc->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ +} while (0) + +#if L1_CACHE_BYTES < 128 +#define PREFETCH_L1_CACHE(vaddr) prefetch((vaddr) + L1_CACHE_BYTES) +#else +#define PREFETCH_L1_CACHE(vaddr) do {} while (0) +#endif + +#define sss_nic_skb_add_rx_frag(rq_desc, rx_desc, skb, size, ret_flag) \ +do { \ + u8 *vaddr; \ + struct page *page; \ +\ + page = (rx_desc)->page; \ + vaddr = (u8 *)page_address(page) + (rx_desc)->page_offset; \ + prefetch(vaddr); \ + PREFETCH_L1_CACHE(vaddr); \ +\ + dma_sync_single_range_for_cpu((rq_desc)->dev, (rx_desc)->buf_daddr, \ + (rx_desc)->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ +\ + if ((size) <= SSSNIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { \ + memcpy(__skb_put((skb), (size)), vaddr, ALIGN((size), sizeof(long))); \ + if (likely(page_to_nid(page) == numa_node_id())) \ + *(ret_flag) = true; \ + else { \ + put_page(page); \ + *(ret_flag) = false; \ + } \ + } else { \ + skb_add_rx_frag((skb), skb_shinfo(skb)->nr_frags, page, \ + (int)(rx_desc)->page_offset, (int)(size), (rq_desc)->buf_len); \ + if (unlikely(page_count(page) != 1)) \ + *(ret_flag) = false; \ + else if (unlikely(page_to_nid(page) != numa_node_id())) \ + *(ret_flag) = false; \ + else { \ + (rx_desc)->page_offset ^= (rq_desc)->buf_len; \ + get_page(page); \ + *(ret_flag) = true; \ + } \ + } \ +} while (0) + +#define sss_nic_combine_skb(rq_desc, head_skb, sge_num, pkt_size) \ +do { \ 
+ struct sss_nic_rx_desc *_rx_desc = NULL; \ + struct sk_buff *_skb = NULL; \ + u8 _frag_num = 0; \ + u32 tmp_pkt_sz = (pkt_size); \ + u8 tmp_sge_num = (sge_num); \ + u32 _size; \ + u32 _ci; \ + u8 _ret; \ +\ + _skb = (head_skb); \ + _ci = (rq_desc)->ci & (rq_desc)->qid_mask; \ + while (tmp_sge_num > 0) { \ + _rx_desc = &(rq_desc)->rx_desc_group[_ci]; \ + if (unlikely(tmp_pkt_sz > (rq_desc)->buf_len)) { \ + _size = (rq_desc)->buf_len; \ + tmp_pkt_sz -= (rq_desc)->buf_len; \ + } else { \ + _size = tmp_pkt_sz; \ + } \ +\ + if (unlikely(_frag_num == MAX_SKB_FRAGS)) { \ + if (_skb == (head_skb)) \ + _skb = skb_shinfo(_skb)->frag_list; \ + else \ + _skb = _skb->next; \ +\ + _frag_num = 0; \ + } \ +\ + if (unlikely(_skb != (head_skb))) { \ + (head_skb)->truesize += (rq_desc)->buf_len; \ + (head_skb)->len += _size; \ + (head_skb)->data_len += _size; \ + } \ +\ + sss_nic_skb_add_rx_frag((rq_desc), _rx_desc, _skb, _size, &_ret); \ + if (likely(_ret)) \ + sss_nic_rx_reuse_dma_page((rq_desc), _rx_desc); \ + else \ + dma_unmap_page((rq_desc)->dev, _rx_desc->buf_daddr, \ + (rq_desc)->dma_buff_size, DMA_FROM_DEVICE); \ +\ + _rx_desc->buf_daddr = 0; \ + _rx_desc->page = NULL; \ + tmp_sge_num--; \ + _frag_num++; \ + _ci = (_ci + 1) & (rq_desc)->qid_mask; \ + } \ +} while (0) + +#define sss_nic_fetch_one_skb(rq_desc, pkt_size, ret_skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ + struct sk_buff *head_skb = NULL; \ + struct sk_buff *next_skb = NULL; \ + struct sk_buff *_skb = NULL; \ + u8 sge_num; \ + u8 skb_num; \ +\ + head_skb = netdev_alloc_skb_ip_align((rq_desc)->netdev, SSSNIC_RX_HDR_SIZE); \ + if (likely(head_skb)) { \ + sge_num = SSSNIC_GET_SGE_NUM((pkt_size), (rq_desc)); \ + if (likely(sge_num <= MAX_SKB_FRAGS)) \ + skb_num = 1; \ + else \ + skb_num = (sge_num / MAX_SKB_FRAGS) + \ + ((sge_num % MAX_SKB_FRAGS) ? 
1 : 0); \ +\ + while (unlikely(skb_num > 1)) { \ + next_skb = netdev_alloc_skb_ip_align(_netdev, SSSNIC_RX_HDR_SIZE); \ + if (unlikely(!next_skb)) { \ + dev_kfree_skb_any(head_skb); \ + break; \ + } \ +\ + if (!_skb) { \ + skb_shinfo(head_skb)->frag_list = next_skb; \ + _skb = next_skb; \ + } else { \ + _skb->next = next_skb; \ + _skb = next_skb; \ + } \ +\ + skb_num--; \ + } \ +\ + if (likely(skb_num <= 1)) { \ + prefetchw(head_skb->data); \ + sss_nic_combine_skb((rq_desc), head_skb, sge_num, (pkt_size)); \ +\ + (rq_desc)->delta += sge_num; \ + (rq_desc)->ci += sge_num; \ +\ + (ret_skb) = head_skb; \ + } else { \ + (ret_skb) = NULL; \ + } \ + } else { \ + (ret_skb) = NULL; \ + } \ +} while (0) + +void sss_nic_get_rq_stats(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_stats *stats) +{ + struct sss_nic_rq_stats *rq_stats = &rq_desc->stats; + unsigned int start; + + u64_stats_update_begin(&stats->stats_sync); + do { + start = u64_stats_fetch_begin(&rq_stats->stats_sync); + stats->rx_bytes = rq_stats->rx_bytes; + stats->rx_packets = rq_stats->rx_packets; + stats->csum_errors = rq_stats->csum_errors; + stats->other_errors = rq_stats->other_errors; + stats->errors = rq_stats->csum_errors + rq_stats->other_errors; + stats->rx_dropped = rq_stats->rx_dropped; + stats->xdp_dropped = rq_stats->xdp_dropped; + stats->rx_buf_errors = rq_stats->rx_buf_errors; + } while (u64_stats_fetch_retry(&rq_stats->stats_sync, start)); + u64_stats_update_end(&stats->stats_sync); +} + +static unsigned int sss_nic_eth_get_headlen(struct sk_buff *skb, + unsigned char *data, + unsigned int max_hlen) +{ +#ifdef HAVE_ETH_GET_HEADLEN_FUNC +#ifdef ETH_GET_HEADLEN_NEED_DEV + return eth_get_headlen(skb->dev, data, SSSNIC_RX_HDR_SIZE); +#else + return eth_get_headlen(data, SSSNIC_RX_HDR_SIZE); +#endif +#else +#define IP_FRAG_OFFSET 0x1FFF +#define FCOE_HLEN 38 +#define TCP_HEAD_OFFSET 12 + u8 nexthdr = 0; + u16 proto; + u8 hlen; + union { + struct ethhdr *eth; + struct vlan_ethhdr *vlan; + 
struct iphdr *ipv4; + struct ipv6hdr *ipv6; + unsigned char *data; + } header; + + if (unlikely(max_hlen < ETH_HLEN)) + return max_hlen; + + header.data = data; + proto = header.eth->h_proto; + + if (proto == htons(ETH_P_8021AD) || proto == htons(ETH_P_8021Q)) { + if (unlikely(max_hlen < ETH_HLEN + VLAN_HLEN)) + return max_hlen; + + proto = header.vlan->h_vlan_encapsulated_proto; + header.data += sizeof(struct vlan_ethhdr); + } else { + header.data += ETH_HLEN; + } + + if (proto == htons(ETH_P_IP)) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct iphdr))) + return max_hlen; + + hlen = (header.data[0] & 0x0F) << 2; + if (hlen < sizeof(struct iphdr)) + return (unsigned int)(header.data - data); + + if ((header.ipv4->frag_off & htons(IP_FRAG_OFFSET)) == 0) + nexthdr = header.ipv4->proto; + + header.data += hlen; + } else if (proto == htons(ETH_P_IPV6)) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct ipv6hdr))) + return max_hlen; + + nexthdr = header.ipv6->nexthdr; + header.data += sizeof(struct ipv6hdr); + } else if (proto == htons(ETH_P_FCOE)) { + header.data += FCOE_HLEN; + } else { + return (unsigned int)(header.data - data); + } + + if (nexthdr == IPPROTO_TCP) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct tcphdr))) + return max_hlen; + + if (SSSNIC_HEADER_LEN_TO_BYTE(header.data[TCP_HEAD_OFFSET] & 0xF0) > + sizeof(struct tcphdr)) + header.data += SSSNIC_HEADER_LEN_TO_BYTE(header.data[TCP_HEAD_OFFSET] & + 0xF0); + else + header.data += sizeof(struct tcphdr); + } else if (nexthdr == IPPROTO_UDP || nexthdr == IPPROTO_UDPLITE) { + header.data += sizeof(struct udphdr); + } else if (nexthdr == IPPROTO_SCTP) { + header.data += sizeof(struct sctphdr); + } + + if ((header.data - data) > max_hlen) + return max_hlen; + else + return (unsigned int)(header.data - data); +#endif +} + +#define sss_nic_pull_tail(skb) \ +do { \ + skb_frag_t *_frag = &skb_shinfo(skb)->frags[0]; \ + unsigned int _len; \ + unsigned char *_data 
= NULL; \ +\ + _data = skb_frag_address(_frag); \ +\ + _len = sss_nic_eth_get_headlen((skb), _data, SSSNIC_RX_HDR_SIZE); \ +\ + skb_copy_to_linear_data((skb), _data, ALIGN(_len, sizeof(long))); \ +\ + skb_frag_size_sub(_frag, (int)_len); \ + skb_frag_off_add(_frag, (int)_len); \ +\ + (skb)->tail += _len; \ + (skb)->data_len -= _len; \ +} while (0) + +#define sss_nic_check_rx_csum(rq_desc, offload_type, status, skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ + u32 pkt_fmt = SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type); \ + u32 pkt_type = SSSNIC_GET_RX_PKT_TYPE(offload_type); \ + u32 ip_type = SSSNIC_GET_RX_IP_TYPE(offload_type); \ + u32 chksum_err; \ +\ + chksum_err = SSSNIC_GET_RX_CSUM_ERR(status); \ + if (unlikely(chksum_err == SSSNIC_RX_CSUM_IPSU_OTHER_ERR)) \ + (rq_desc)->stats.other_errors++; \ +\ + if ((_netdev->features & NETIF_F_RXCSUM)) { \ + if (unlikely(chksum_err != 0)) { \ + if ((chksum_err & \ + (SSSNIC_RX_CSUM_HW_CHECK_NONE | \ + SSSNIC_RX_CSUM_IPSU_OTHER_ERR)) == 0) \ + (rq_desc)->stats.csum_errors++; \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } else if (ip_type == SSSNIC_RX_INVALID_IP_TYPE || \ + !(pkt_fmt == SSSNIC_RX_PKT_FORMAT_NON_TUNNEL || \ + pkt_fmt == SSSNIC_RX_PKT_FORMAT_VXLAN)) { \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } else if (pkt_type == SSSNIC_RX_TCP_PKT || \ + pkt_type == SSSNIC_RX_UDP_PKT || \ + pkt_type == SSSNIC_RX_SCTP_PKT) \ + (skb)->ip_summed = CHECKSUM_UNNECESSARY; \ + else \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } \ +} while (0) + +#ifdef HAVE_SKBUFF_CSUM_LEVEL +#define sss_nic_check_rx_gso(rq_desc, offload_type, skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ +\ + if (_netdev->features & NETIF_F_GRO) { \ + if (SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) == \ + SSSNIC_RX_PKT_FORMAT_VXLAN && \ + (skb)->ip_summed == CHECKSUM_UNNECESSARY) \ + (skb)->csum_level = 1; \ + } \ +} while (0) +#else +#define sss_nic_check_rx_gso(rq_desc, offload_type, skb) do {} while (0) +#endif /* 
HAVE_SKBUFF_CSUM_LEVEL */ + +static void sss_nic_loop_copy_data(struct sss_nic_dev *nic_dev, + struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *loop_test_rx_buf = nic_dev->loop_test_rx_buf; + int loop_pkt_len = nic_dev->loop_pkt_len; + void *frag_data = NULL; + int frag_size; + int pkt_off; + int i; + + if (nic_dev->loop_test_rx_cnt == SSSNIC_LP_PKT_CNT) { + nic_dev->loop_test_rx_cnt = 0; + nicif_warn(nic_dev, rx_err, netdev, "Loopback test received too many pkts\n"); + } + + if (skb->len != loop_pkt_len) { + nicif_warn(nic_dev, rx_err, netdev, "Invalid packet length\n"); + nic_dev->loop_test_rx_cnt++; + return; + } + + pkt_off = nic_dev->loop_test_rx_cnt * loop_pkt_len; + frag_size = (int)skb_headlen(skb); + memcpy(loop_test_rx_buf + pkt_off, skb->data, (size_t)(u32)frag_size); + + pkt_off += frag_size; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_size = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy(loop_test_rx_buf + pkt_off, frag_data, (size_t)(u32)frag_size); + + pkt_off += frag_size; + } + nic_dev->loop_test_rx_cnt++; +} + +#define sss_nic_update_gso_params(skb, gso) \ +do { \ + struct ethhdr *_ether = (struct ethhdr *)((skb)->data); \ + __be16 _protocol; \ +\ + _protocol = __vlan_get_protocol((skb), _ether->h_proto, NULL); \ +\ + skb_shinfo(skb)->gso_segs = gso; \ + skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP(((skb)->len - skb_headlen(skb)), \ + gso); \ + skb_shinfo(skb)->gso_type = (_protocol == htons(ETH_P_IP)) ? 
\ + SKB_GSO_TCPV4 : SKB_GSO_TCPV6; \ +} while (0) + +#ifdef HAVE_XDP_SUPPORT +#define sss_nic_xdp_update_rx_info(rq_desc, wqe_num) \ +do { \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + u16 _wqe_cnt = wqe_num; \ +\ + while (_wqe_cnt > 0) { \ + _rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->ci & (rq_desc)->qid_mask]; \ + if (likely(page_to_nid(_rx_desc->page) == numa_node_id())) \ + sss_nic_rx_reuse_dma_page((rq_desc), _rx_desc); \ +\ + (rq_desc)->ci++; \ + (rq_desc)->delta++; \ + _rx_desc->buf_daddr = 0; \ + _rx_desc->page = NULL; \ +\ + _wqe_cnt--; \ + } \ +} while (0) + +#ifdef HAVE_XDP_FRAME_SZ +#define SSSNIC_SET_XDP_FRAME_SZ(xdp, len) ((xdp)->frame_sz = (len)) +#else +#define SSSNIC_SET_XDP_FRAME_SZ(xdp, len) do {} while (0) +#endif + +#ifdef HAVE_XDP_DATA_META +#define SSSNIC_XDP_SET_DATA_META_INVALID(xdp) xdp_set_data_meta_invalid(xdp) +#else +#define SSSNIC_XDP_SET_DATA_META_INVALID(xdp) do {} while (0) +#endif + +#ifdef HAVE_BFP_WARN_NETDEV_PARAM +#define SSSNIC_BDF_WARN_INVALID_XDP_ACTION(netdev, xdp_prog, ret) \ + bpf_warn_invalid_xdp_action(netdev, xdp_prog, ret) +#else +#define SSSNIC_BDF_WARN_INVALID_XDP_ACTION(netdev, xdp_prog, ret) \ + bpf_warn_invalid_xdp_action(ret) +#endif + +#define sss_nic_bpf_prog_run_xdp(rq_desc, pkt_size, result) \ +do { \ + struct bpf_prog *xdp_prog = NULL; \ + struct sss_nic_rx_desc *rx_desc = NULL; \ + struct xdp_buff xdp; \ + u16 _wqe_num = 1; \ + u8 *_data = NULL; \ + u32 _ret; \ +\ + rcu_read_lock(); \ +\ + xdp_prog = READ_ONCE((rq_desc)->xdp_prog); \ + if (!xdp_prog) { \ + *(result) = SSSNIC_XDP_PKT_PASS; \ + } else if (unlikely((pkt_size) > (rq_desc)->buf_len)) { \ + SSSNIC_RQ_STATS_INC((rq_desc), large_xdp_pkts); \ + _wqe_num = (u16)((pkt_size) >> (rq_desc)->buff_size_shift) + \ + (((pkt_size) & ((rq_desc)->buf_len - 1)) ? 
1 : 0); \ + SSSNIC_RQ_STATS_INC((rq_desc), xdp_dropped); \ + sss_nic_xdp_update_rx_info((rq_desc), _wqe_num); \ + *(result) = SSSNIC_XDP_PKT_DROP; \ + } else { \ + rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->ci & (rq_desc)->qid_mask]; \ + _data = (u8 *)page_address(rx_desc->page) + rx_desc->page_offset; \ + prefetch(_data); \ + dma_sync_single_range_for_cpu((rq_desc)->dev, rx_desc->buf_daddr, \ + rx_desc->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ + xdp.data = _data; \ + xdp.data_hard_start = xdp.data; \ + xdp.data_end = xdp.data + (pkt_size); \ + SSSNIC_SET_XDP_FRAME_SZ(&xdp, (rq_desc)->buf_len); \ + SSSNIC_XDP_SET_DATA_META_INVALID(&xdp); \ + prefetchw(xdp.data_hard_start); \ +\ + _ret = bpf_prog_run_xdp(xdp_prog, &xdp); \ + if (_ret == XDP_PASS) { \ + *(result) = SSSNIC_XDP_PKT_PASS; \ + } else { \ + *(result) = SSSNIC_XDP_PKT_DROP; \ + if (_ret != XDP_DROP) { \ + SSSNIC_BDF_WARN_INVALID_XDP_ACTION((rq_desc)->netdev, \ + xdp_prog, _ret); \ + } \ + SSSNIC_RQ_STATS_INC((rq_desc), xdp_dropped); \ + sss_nic_xdp_update_rx_info((rq_desc), _wqe_num); \ + } \ + } \ +\ + rcu_read_unlock(); \ +} while (0) +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) +#define sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len) \ +do { \ + u16 vlan_id; \ + if (((netdev)->features & NETIF_F_HW_VLAN_CTAG_RX) != 0 && \ + SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) != 0) { \ + vlan_id = SSSNIC_GET_RX_VLAN_TAG(vlan_len); \ +\ + /* if the packet is a vlan pkt, the vid may be 0 */ \ + __vlan_hwaccel_put_tag((skb), htons(ETH_P_8021Q), vlan_id); \ + } \ +} while (0) +#else +#define sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len) \ +do { \ + u16 vlan_id; \ + if (((netdev)->features & NETIF_F_HW_VLAN_RX) != 0 && \ + SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) != 0) { \ + vlan_id = SSSNIC_GET_RX_VLAN_TAG(vlan_len); \ +\ + /* if the packet is a vlan pkt, the vid may be 0 */ \ + __vlan_hwaccel_put_tag((skb), htons(ETH_P_8021Q), vlan_id); \ + } \ +} while (0) +#endif + 
+static int sss_nic_recv_one_packet(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_cqe *rx_cqe, u32 pkt_len, + u32 vlan_len, u32 status) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + struct sk_buff *skb = NULL; + u32 offload_type; + u16 lro_segs; + +#ifdef HAVE_XDP_SUPPORT + u32 xdp_result; + + sss_nic_bpf_prog_run_xdp(rq_desc, pkt_len, &xdp_result); + if (xdp_result == SSSNIC_XDP_PKT_DROP) + return 0; +#endif + + sss_nic_fetch_one_skb(rq_desc, pkt_len, skb); + if (unlikely(!skb)) { + SSSNIC_RQ_STATS_INC(rq_desc, alloc_skb_err); + return -ENOMEM; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + sss_nic_pull_tail(skb); + + offload_type = sss_hw_cpu32(rx_cqe->offload_type); + sss_nic_check_rx_csum(rq_desc, offload_type, status, skb); + sss_nic_check_rx_gso(rq_desc, offload_type, skb); + sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len); + + if (unlikely(test_bit(SSSNIC_LP_TEST, &nic_dev->flags))) + sss_nic_loop_copy_data(nic_dev, skb); + + lro_segs = SSSNIC_GET_RX_NUM_LRO(status); + if (lro_segs > 0) + sss_nic_update_gso_params(skb, lro_segs); + + skb_record_rx_queue(skb, rq_desc->qid); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { +#ifdef HAVE_NAPI_GRO_FLUSH_OLD + napi_gro_flush(&rq_desc->irq_cfg->napi, false); +#else + napi_gro_flush(&rq_desc->irq_cfg->napi); +#endif + netif_receive_skb(skb); + } else { + napi_gro_receive(&rq_desc->irq_cfg->napi, skb); + } + + return 0; +} + +int sss_nic_rx_poll(struct sss_nic_rq_desc *rq_desc, int budget) +{ + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + struct sss_nic_cqe *rx_cqe = NULL; + u64 rx_bytes = 0; + int pkts = 0; + int rx_packets = 0; + u16 wqe_num = 0; + u16 lro_segs; + u32 ci; + u32 status; + u32 pkt_len; + u32 vlan_len; + + while (likely(pkts < budget)) { + ci = rq_desc->ci & rq_desc->qid_mask; + rx_cqe = rq_desc->rx_desc_group[ci].cqe; + status = 
sss_hw_cpu32(rx_cqe->state); + if (!SSSNIC_GET_RX_DONE(status)) + break; + + /* read rx cqe firstly */ + rmb(); + + vlan_len = sss_hw_cpu32(rx_cqe->vlan_len); + pkt_len = SSSNIC_GET_RX_PKT_LEN(vlan_len); + if (sss_nic_recv_one_packet(rq_desc, rx_cqe, pkt_len, vlan_len, status)) + break; + + rx_bytes += pkt_len; + pkts++; + rx_packets++; + + lro_segs = SSSNIC_GET_RX_NUM_LRO(status); + if (lro_segs > 0) { + rx_bytes += ((lro_segs - 1) * SSSNIC_LRO_PKT_HDR_LEN(rx_cqe)); + wqe_num += SSSNIC_GET_SGE_NUM(pkt_len, rq_desc); + } + + rx_cqe->state = 0; + + if (wqe_num >= nic_dev->rx_poll_wqe) + break; + } + + if (rq_desc->delta >= SSSNIC_RX_BUFFER_WRITE) + SSS_NIC_FILL_BD_SGE(rq_desc); + + u64_stats_update_begin(&rq_desc->stats.stats_sync); + rq_desc->stats.rx_packets += (u64)(u32)rx_packets; + rq_desc->stats.rx_bytes += rx_bytes; + u64_stats_update_end(&rq_desc->stats.stats_sync); + + return pkts; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h new file mode 100644 index 0000000000000..15df34e5b1745 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_H +#define SSS_NIC_RX_H + +#include +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_HEADER_LEN_TO_BYTE(header) ((header) >> 2) + +#define SSSNIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define SSSNIC_RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define SSSNIC_RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define SSSNIC_RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define SSSNIC_RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define SSSNIC_RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define SSSNIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define SSSNIC_RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 +#define SSSNIC_RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define SSSNIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define 
SSSNIC_RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define SSSNIC_RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define SSSNIC_RQ_CQE_STATUS_FLUSH_MASK 0x1U +#define SSSNIC_RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U + +#define SSSNIC_RQ_CQE_STATUS_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_STATUS_##member##_SHIFT) & \ + SSSNIC_RQ_CQE_STATUS_##member##_MASK) + +#define SSSNIC_GET_RQ_CQE_STATUS(rq_desc, id) \ + sss_hw_cpu32((rq_desc)->rx_desc_group[id].cqe->state) + +#define SSSNIC_GET_RX_DONE(status) SSSNIC_RQ_CQE_STATUS_GET(status, RXDONE) + +bool sss_nic_rx_alloc_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc); +u32 sss_nic_fill_bd_sge(struct sss_nic_rq_desc *rq_desc); +void sss_nic_get_rq_stats(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_stats *stats); +int sss_nic_rx_poll(struct sss_nic_rq_desc *rq_desc, int budget); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c new file mode 100644 index 0000000000000..d3b2e523afb98 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +static void sss_nic_rx_free_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + if (rx_desc->buf_daddr != 0) { + dma_unmap_page(nic_dev->dev_hdl, 
rx_desc->buf_daddr, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + rx_desc->buf_daddr = 0; + } + + if (rx_desc->page) { + __free_pages(rx_desc->page, nic_dev->page_order); + rx_desc->page = NULL; + } +} + +static u32 sss_nic_rx_alloc_dma_buffer(struct sss_nic_dev *nic_dev, + u32 rq_depth, struct sss_nic_rx_desc *rx_desc_group) +{ + u32 i; + + for (i = 0; i < rq_depth - 1; i++) + if (!sss_nic_rx_alloc_dma_page(nic_dev, &rx_desc_group[i])) + break; + + return i; +} + +static void sss_nic_rx_free_dma_buffer(struct sss_nic_dev *nic_dev, + u32 rq_depth, struct sss_nic_rx_desc *rx_desc_group) +{ + u32 id; + + for (id = 0; id < rq_depth; id++) + sss_nic_rx_free_dma_page(nic_dev, &rx_desc_group[id]); +} + +static void _sss_nic_free_rq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_resource *rq_res, u32 rq_depth) +{ + u64 size = sizeof(struct sss_nic_cqe) * rq_depth; + + sss_nic_rx_free_dma_buffer(nic_dev, rq_depth, rq_res->rx_desc_group); + dma_free_coherent(nic_dev->dev_hdl, size, rq_res->cqe_vaddr, rq_res->cqe_paddr); + kfree(rq_res->rx_desc_group); + rq_res->cqe_vaddr = NULL; + rq_res->rx_desc_group = NULL; +} + +int sss_nic_alloc_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int i; + int id; + u32 page_num; + u64 size; + u64 cqe_dma_size = sizeof(struct sss_nic_cqe) * qp_res->rq_depth; + struct sss_nic_rq_resource *rq_res = NULL; + + for (id = 0; id < qp_res->qp_num; id++) { + rq_res = &qp_res->rq_res_group[id]; + rq_res->cqe_vaddr = dma_zalloc_coherent(nic_dev->dev_hdl, cqe_dma_size, + &rq_res->cqe_paddr, GFP_KERNEL); + if (!rq_res->cqe_vaddr) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc cqe dma buf, rq%d\n", id); + goto alloc_cqe_dma_err; + } + + size = sizeof(*rq_res->rx_desc_group) * qp_res->rq_depth; + rq_res->rx_desc_group = kzalloc(size, GFP_KERNEL); + if (!rq_res->rx_desc_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rx info, rq%d\n", id); + goto 
alloc_rqe_desc_group_err; + } + + page_num = sss_nic_rx_alloc_dma_buffer(nic_dev, qp_res->rq_depth, + rq_res->rx_desc_group); + if (page_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rx buffer, rq%d\n", id); + goto alloc_rx_buf_err; + } + rq_res->page_num = (u16)page_num; + } + return 0; + +alloc_rx_buf_err: + kfree(rq_res->rx_desc_group); + rq_res->rx_desc_group = NULL; + +alloc_rqe_desc_group_err: + dma_free_coherent(nic_dev->dev_hdl, cqe_dma_size, rq_res->cqe_vaddr, + rq_res->cqe_paddr); + rq_res->cqe_vaddr = NULL; + +alloc_cqe_dma_err: + for (i = 0; i < id; i++) + _sss_nic_free_rq_resource(nic_dev, &qp_res->rq_res_group[i], + qp_res->rq_depth); + + return -ENOMEM; +} + +void sss_nic_free_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int id; + + for (id = 0; id < qp_res->qp_num; id++) + _sss_nic_free_rq_resource(nic_dev, &qp_res->rq_res_group[id], + qp_res->rq_depth); +} + +static void sss_nic_init_rq_desc(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_qp_resource *qp_res, + struct sss_nic_rq_resource *rq_res, + struct sss_irq_desc *irq_desc) +{ + u32 id; + dma_addr_t dma_addr; + struct sss_nic_cqe *rq_cqe; + + rq_desc->irq_id = irq_desc->irq_id; + rq_desc->msix_id = irq_desc->msix_id; + rq_desc->pi = 0; + rq_desc->backup_pi = rq_res->page_num; + rq_desc->q_depth = qp_res->rq_depth; + rq_desc->delta = rq_desc->q_depth; + rq_desc->qid_mask = rq_desc->q_depth - 1; + rq_desc->ci = 0; + rq_desc->last_sw_pi = rq_desc->q_depth - 1; + rq_desc->last_sw_ci = 0; + rq_desc->last_hw_ci = 0; + rq_desc->check_err_cnt = 0; + rq_desc->print_err_cnt = 0; + rq_desc->rx_pkts = 0; + rq_desc->reset_wqe_num = 0; + rq_desc->rx_desc_group = rq_res->rx_desc_group; + + dma_addr = rq_res->cqe_paddr; + rq_cqe = (struct sss_nic_cqe *)rq_res->cqe_vaddr; + for (id = 0; id < qp_res->rq_depth; id++) { + rq_desc->rx_desc_group[id].cqe = rq_cqe; + rq_desc->rx_desc_group[id].cqe_daddr = dma_addr; + dma_addr += 
sizeof(*rq_desc->rx_desc_group[id].cqe); + rq_cqe++; + } +} + +static void sss_nic_fill_cqe_sge(struct sss_nic_rq_desc *rq_desc) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_rqe *rqe = NULL; + u32 i; + + for (i = 0; i < rq_desc->q_depth; i++) { + rx_desc = &rq_desc->rx_desc_group[i]; + rqe = sss_wq_wqebb_addr(&rq_desc->rq->wq, (u16)i); + + if (rq_desc->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { + sss_set_sge(&rqe->extend_rqe.cqe_sect.sge, rx_desc->cqe_daddr, + (sizeof(struct sss_nic_cqe) >> SSSNIC_CQE_SIZE_SHIFT)); + + rqe->extend_rqe.bd_sect.sge.len = nic_dev->rx_buff_len; + } else { + rqe->normal_rqe.cqe_lo_addr = lower_32_bits(rx_desc->cqe_daddr); + rqe->normal_rqe.cqe_hi_addr = upper_32_bits(rx_desc->cqe_daddr); + } + + rx_desc->rqe = rqe; + } +} + +int sss_nic_init_rq_desc_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + u16 qid; + u32 pkt; + + nic_dev->get_rq_fail_cnt = 0; + for (qid = 0; qid < qp_res->qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + rq_desc->rq = &nic_dev->nic_io->rq_group[rq_desc->qid]; + + sss_nic_init_rq_desc(rq_desc, qp_res, &qp_res->rq_res_group[qid], + &nic_dev->irq_desc_group[qid]); + + sss_nic_fill_cqe_sge(rq_desc); + + pkt = sss_nic_fill_bd_sge(rq_desc); + if (pkt == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to fill rx buffer\n"); + return -ENOMEM; + } + } + + return 0; +} + +void sss_nic_free_rq_desc_group(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->rq_desc_group); + nic_dev->rq_desc_group = NULL; +} + +int sss_nic_alloc_rq_desc_group(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + u16 rq_num = nic_dev->max_qp_num; + u16 i; + + nic_dev->rq_desc_group = kcalloc(rq_num, sizeof(*nic_dev->rq_desc_group), GFP_KERNEL); + if (!nic_dev->rq_desc_group) + return -ENOMEM; + + for (i = 0; i < rq_num; i++) 
{ + rq_desc = &nic_dev->rq_desc_group[i]; + rq_desc->dev = nic_dev->dev_hdl; + rq_desc->netdev = nic_dev->netdev; + rq_desc->qid = i; + rq_desc->qid_mask = nic_dev->qp_res.rq_depth - 1; + rq_desc->q_depth = nic_dev->qp_res.rq_depth; + rq_desc->dma_buff_size = nic_dev->rx_dma_buff_size; + rq_desc->buff_size_shift = (u32)ilog2(nic_dev->rx_buff_len); + rq_desc->buf_len = nic_dev->rx_buff_len; + u64_stats_init(&rq_desc->stats.stats_sync); + } + + return 0; +} + +int sss_nic_update_rx_rss(struct sss_nic_dev *nic_dev) +{ + int ret; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) { + ret = sss_nic_update_rss_cfg(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init rss\n"); + return -EFAULT; + } + } + + return 0; +} + +void sss_nic_reset_rx_rss(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) != 0) + sss_nic_reset_rss_cfg(nic_dev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h new file mode 100644 index 0000000000000..1273262c49fec --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_INIT_H +#define SSS_NIC_RX_INIT_H + +#include +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +int sss_nic_alloc_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +void sss_nic_free_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +int sss_nic_init_rq_desc_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +int sss_nic_alloc_rq_desc_group(struct sss_nic_dev *nic_dev); + +void sss_nic_free_rq_desc_group(struct sss_nic_dev *nic_dev); + +int sss_nic_update_rx_rss(struct 
sss_nic_dev *nic_dev); + +void sss_nic_reset_rx_rss(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c new file mode 100644 index 0000000000000..4674cc5dd5a95 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +#define SSSNIC_RQ_GET_ERR_CNT_THRESHOLD 3 +#define SSSNIC_RQ_CHECK_ERR_CNT_THRESHOLD 2 +#define SSSNIC_RQ_PRINT_CNT_THRESHOLD 3 + +static inline void sss_nic_fill_wqe_sge(struct sss_nic_rx_desc *rx_desc, + u8 wqe_type) +{ + dma_addr_t dma_addr = rx_desc->buf_daddr + rx_desc->page_offset; + struct sss_nic_rqe *rqe = rx_desc->rqe; + + if (unlikely(wqe_type == SSSNIC_EXTEND_RQ_WQE)) { + rqe->extend_rqe.bd_sect.sge.low_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->extend_rqe.bd_sect.sge.high_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } else { + rqe->normal_rqe.bd_lo_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->normal_rqe.bd_hi_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } +} + +static inline void sss_nic_free_wqe_buffer(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + if (rx_desc->buf_daddr) { + dma_unmap_page(nic_dev->dev_hdl, rx_desc->buf_daddr, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + rx_desc->buf_daddr = 0; + } + + if (rx_desc->page) { + __free_pages(rx_desc->page, nic_dev->page_order); + rx_desc->page = NULL; + } +} + +static inline int 
sss_nic_fill_idle_wqe(struct sss_nic_rq_desc *rq_desc, + u32 wqebb_num, u32 start_pi) +{ + u32 pi = start_pi; + u32 i; + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + + for (i = 0; i < wqebb_num; i++) { + rx_desc = &rq_desc->rx_desc_group[pi]; + + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, rx_desc))) { + rq_desc->reset_pi = (u16)((rq_desc->reset_pi + i) & rq_desc->qid_mask); + SSSNIC_RQ_STATS_INC(rq_desc, alloc_rx_dma_err); + return -ENOMEM; + } + + sss_nic_fill_wqe_sge(rx_desc, rq_desc->rq->wqe_type); + + pi = (u16)((pi + 1) & rq_desc->qid_mask); + rq_desc->reset_wqe_num++; + } + + return 0; +} + +static int sss_nic_reset_rq(struct sss_nic_dev *nic_dev, u16 qid, u16 hw_ci) +{ + int ret; + u32 i; + u32 total; + u32 ci; + u32 pi; + struct sss_nic_rq_desc *rq_desc = &nic_dev->rq_desc_group[qid]; + u32 idle_wqebb = rq_desc->delta - rq_desc->reset_wqe_num; + struct sss_nic_rx_desc *rx_desc = NULL; + + if (rq_desc->delta < rq_desc->reset_wqe_num) + return -EINVAL; + + if (rq_desc->reset_wqe_num == 0) + rq_desc->reset_pi = rq_desc->pi; + + ci = rq_desc->ci & rq_desc->qid_mask; + total = ci + rq_desc->q_depth - rq_desc->pi; + if ((total % rq_desc->q_depth) != rq_desc->delta) + return -EINVAL; + + ret = sss_nic_fill_idle_wqe(rq_desc, idle_wqebb, rq_desc->reset_pi); + if (ret) + return ret; + + nic_info(nic_dev->dev_hdl, "Reset rq: rq %u, restore_buf_num:%u\n", qid, + rq_desc->reset_wqe_num); + + pi = (hw_ci + rq_desc->q_depth - 1) & rq_desc->qid_mask; + rx_desc = &rq_desc->rx_desc_group[pi]; + sss_nic_free_wqe_buffer(nic_dev, rx_desc); + + rq_desc->delta = 1; + rq_desc->reset_wqe_num = 0; + rq_desc->pi = (u16)pi; + rq_desc->backup_pi = rq_desc->pi; + rq_desc->ci = (u16)((rq_desc->pi + 1) & rq_desc->qid_mask); + + for (i = 0; i < rq_desc->q_depth; i++) { + if (!SSSNIC_GET_RX_DONE(sss_hw_cpu32(rq_desc->rx_desc_group[i].cqe->state))) + continue; + + rq_desc->rx_desc_group[i].cqe->state = 0; + 
SSSNIC_RQ_STATS_INC(rq_desc, reset_drop_sge); + } + + ret = sss_nic_cache_out_qp_resource(nic_dev->nic_io); + if (ret) { + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + return ret; + } + + sss_nic_write_db(rq_desc->rq, rq_desc->qid & (SSSNIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rq_desc->pi << rq_desc->rq->wqe_type)); + + return 0; +} + +static bool sss_nic_rq_is_normal(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_pc_info check_info) +{ + u32 status; + u32 sw_ci = rq_desc->ci & rq_desc->qid_mask; + + if (check_info.hw_pi != check_info.hw_ci || + check_info.hw_ci != rq_desc->last_hw_ci) + return true; + + if (rq_desc->stats.rx_packets != rq_desc->rx_pkts || + rq_desc->pi != rq_desc->last_sw_pi) + return true; + + status = SSSNIC_GET_RQ_CQE_STATUS(rq_desc, sw_ci); + if (SSSNIC_GET_RX_DONE(status)) + return true; + + if (sw_ci != rq_desc->last_sw_ci || rq_desc->pi != check_info.hw_pi) + return true; + + return false; +} + +void sss_nic_rq_watchdog_handler(struct work_struct *work) +{ + int ret; + u16 qid; + struct sss_nic_rq_pc_info *check_info = NULL; + struct sss_nic_rq_desc *rq_desc = NULL; + struct delayed_work *delay = to_delayed_work(work); + struct sss_nic_dev *nic_dev = container_of(delay, struct sss_nic_dev, rq_watchdog_work); + u64 size = sizeof(*check_info) * nic_dev->qp_res.qp_num; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) + return; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + + if (!size) + return; + check_info = kzalloc(size, GFP_KERNEL); + if (!check_info) + return; + + ret = sss_nic_rq_hw_pc_info(nic_dev, check_info, nic_dev->qp_res.qp_num, + nic_dev->rq_desc_group[0].rq->wqe_type); + if (ret) { + nic_dev->get_rq_fail_cnt++; + if (nic_dev->get_rq_fail_cnt >= SSSNIC_RQ_GET_ERR_CNT_THRESHOLD) + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + goto free_rq_info; + } + + for (qid = 0; qid < 
nic_dev->qp_res.qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + if (!sss_nic_rq_is_normal(rq_desc, check_info[qid])) { + rq_desc->check_err_cnt++; + if (rq_desc->check_err_cnt < SSSNIC_RQ_CHECK_ERR_CNT_THRESHOLD) + continue; + + if (rq_desc->print_err_cnt <= SSSNIC_RQ_PRINT_CNT_THRESHOLD) { + nic_warn(nic_dev->dev_hdl, + "Rq handle: rq(%u) wqe abnormal, hw_pi:%u, hw_ci:%u, sw_pi:%u, sw_ci:%u delta:%u\n", + qid, check_info[qid].hw_pi, check_info[qid].hw_ci, + rq_desc->pi, + rq_desc->ci & rq_desc->qid_mask, rq_desc->delta); + rq_desc->print_err_cnt++; + } + + ret = sss_nic_reset_rq(nic_dev, qid, check_info[qid].hw_ci); + if (ret) + continue; + } + + rq_desc->last_hw_ci = check_info[qid].hw_ci; + rq_desc->rx_pkts = rq_desc->stats.rx_packets; + rq_desc->last_sw_pi = rq_desc->pi; + rq_desc->last_sw_ci = rq_desc->ci & rq_desc->qid_mask; + rq_desc->print_err_cnt = 0; + rq_desc->check_err_cnt = 0; + } + + nic_dev->get_rq_fail_cnt = 0; + +free_rq_info: + kfree(check_info); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h new file mode 100644 index 0000000000000..6d588e690cca7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_RESET_H +#define SSS_NIC_RX_RESET_H + +#include +#include +#include +#include +#include +#include + +void sss_nic_rq_watchdog_handler(struct work_struct *work); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c new file mode 100644 index 0000000000000..2bda358869df5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" + +#define SSSNIC_DEFAULT_MSS 0x3E00 +#define SSSNIC_MIN_MSS 0x50 +#define SSSNIC_SKB_LEN_MIN 32 +#define SSSNIC_SKB_LEN_MAX 16383 +#define SSSNIC_PAYLOAD_OFFSET_MAX 221 + +#define SSSNIC_IPV4_VERSION 4 +#define SSSNIC_IPV6_VERSION 6 +#define SSSNIC_TCP_DOFF_TO_BYTES(doff) ((doff) << 2) +#define SSSNIC_VXLAN_OFFLOAD_PORT 46354 + +#define SSSNIC_TRANSPORT_OFFSET(hdr, skb) ((u32)((hdr) - (skb)->data)) + +// SQE CTRL +#define SSSNIC_SQE_CTRL_SECT_BD0_LEN_SHIFT 0 +#define SSSNIC_SQE_CTRL_SECT_RSVD_SHIFT 18 +#define SSSNIC_SQE_CTRL_SECT_BUFDESC_NUM_SHIFT 19 +#define SSSNIC_SQE_CTRL_SECT_TASKSECT_LEN_SHIFT 27 +#define SSSNIC_SQE_CTRL_SECT_DATA_FORMAT_SHIFT 28 +#define SSSNIC_SQE_CTRL_SECT_DIRECT_SHIFT 29 +#define SSSNIC_SQE_CTRL_SECT_EXTENDED_SHIFT 30 +#define SSSNIC_SQE_CTRL_SECT_OWNER_SHIFT 31 + +#define SSSNIC_SQE_CTRL_SECT_BD0_LEN_MASK 0x3FFFFU +#define SSSNIC_SQE_CTRL_SECT_RSVD_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_BUFDESC_NUM_MASK 0xFFU +#define SSSNIC_SQE_CTRL_SECT_TASKSECT_LEN_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_DATA_FORMAT_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_DIRECT_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_EXTENDED_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_OWNER_MASK 0x1U + +#define SSSNIC_SQE_CTRL_SECT_SET(val, member) \ +(((u32)(val) & SSSNIC_SQE_CTRL_SECT_##member##_MASK) << SSSNIC_SQE_CTRL_SECT_##member##_SHIFT) + +// SQ CTRL QINFO +#define SSSNIC_SQE_CTRL_SECT_QINFO_PKT_TYPE_SHIFT 0 +#define SSSNIC_SQE_CTRL_SECT_QINFO_PLDOFF_SHIFT 2 +#define SSSNIC_SQE_CTRL_SECT_QINFO_UFO_SHIFT 10 +#define SSSNIC_SQE_CTRL_SECT_QINFO_TSO_SHIFT 11 +#define SSSNIC_SQE_CTRL_SECT_QINFO_TCPUDP_CS_SHIFT 12 +#define 
SSSNIC_SQE_CTRL_SECT_QINFO_MSS_SHIFT 13 +#define SSSNIC_SQE_CTRL_SECT_QINFO_SCTP_SHIFT 27 +#define SSSNIC_SQE_CTRL_SECT_QINFO_UC_SHIFT 28 +#define SSSNIC_SQE_CTRL_SECT_QINFO_PRI_SHIFT 29 + +#define SSSNIC_SQE_CTRL_SECT_QINFO_PKT_TYPE_MASK 0x3U +#define SSSNIC_SQE_CTRL_SECT_QINFO_PLDOFF_MASK 0xFFU +#define SSSNIC_SQE_CTRL_SECT_QINFO_UFO_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_TSO_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_TCPUDP_CS_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_MSS_MASK 0x3FFFU +#define SSSNIC_SQE_CTRL_SECT_QINFO_SCTP_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_UC_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_PRI_MASK 0x7U + +#define SSSNIC_SQE_CTRL_SECT_QINFO_SET(val, member) \ + (((u32)(val) & SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK) << \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT) + +#define SSSNIC_SQE_CTRL_SECT_QINFO_GET(val, member) \ + (((val) >> SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT) & \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK) + +#define SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR(val, member) \ + ((val) & (~(SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK << \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT))) + +// SQ TASK +#define SSSNIC_SQE_TASK_SECT_VALUE0_TUNNEL_FLAG_SHIFT 19 +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_NEXT_PROTO_SHIFT 22 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_EN_SHIFT 24 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L3_EN_SHIFT 25 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_PSEUDO_SHIFT 26 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_EN_SHIFT 27 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L3_EN_SHIFT 28 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_PSEUDO_SHIFT 29 +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_OFFLOAD_SHIFT 30 +#define SSSNIC_SQE_TASK_SECT_VALUE0_IPSEC_PROTO_SHIFT 31 + +#define SSSNIC_SQE_TASK_SECT_VALUE0_TUNNEL_FLAG_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_NEXT_PROTO_MASK 0x3U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L3_EN_MASK 
0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_PSEUDO_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L3_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_PSEUDO_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_OFFLOAD_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_IPSEC_PROTO_MASK 0x1U + +#define SSSNIC_SQE_TASK_SECT_VALUE0_SET(val, member) \ + (((u32)(val) & SSSNIC_SQE_TASK_SECT_VALUE0_##member##_MASK) << \ + SSSNIC_SQE_TASK_SECT_VALUE0_##member##_SHIFT) + +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_SHIFT 0 +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TYPE_SHIFT 16 +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_VALID_SHIFT 19 + +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_MASK 0xFFFFU +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TYPE_MASK 0x7U +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_VALID_MASK 0x1U + +#define SSSNIC_SQE_TASK_SECT_VALUE3_SET(val, member) \ + (((val) & SSSNIC_SQE_TASK_SECT_VALUE3_##member##_MASK) << \ + SSSNIC_SQE_TASK_SECT_VALUE3_##member##_SHIFT) + +#define SSSNIC_VLAN_INSERT_MODE_MAX 5 +#define SSSNIC_TSO_CS_EN 1 +#define SSSNIC_DEF_PKT_CNT 1 + +#define SSSNIC_SQ_STATS_INC(sq_desc, field) \ +do { \ + u64_stats_update_begin(&(sq_desc)->stats.stats_sync); \ + (sq_desc)->stats.field++; \ + u64_stats_update_end(&(sq_desc)->stats.stats_sync); \ +} while (0) + +enum sss_nic_check_tx_offload_type { + SSSNIC_OFFLOAD_TSO = BIT(0), + SSSNIC_OFFLOAD_TX_CSUM = BIT(1), + SSSNIC_OFFLOAD_TX_VLAN = BIT(2), + SSSNIC_OFFLOAD_TX_DISABLE = BIT(3), + SSSNIC_OFFLOAD_TX_ESP = BIT(4), +}; + +union sss_nic_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +struct sss_nic_sqe_ctrl_section { + u32 sect_len; + u32 qinfo; + u32 addr_high; + u32 addr_low; +}; + +/* Engine only pass first 12B TS field directly to uCode through metadata + * vlan_offoad is used for hardware when vlan insert in tx + */ +struct sss_nic_sqe_task_section { + u32 value[4]; +}; + +struct 
sss_nic_sqe_bd_section { + u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ + u32 rsvd; + u32 addr_high; + u32 addr_low; +}; + +/* use section pointer for support non continuous wqe */ +struct sss_nic_sqe { + struct sss_nic_sqe_ctrl_section *ctrl_sect; + struct sss_nic_sqe_task_section *task_sect; + struct sss_nic_sqe_bd_section *bd_sect0; + struct sss_nic_sqe_bd_section *bd_sect1; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +/* ************* SQ_CTRL ************** */ +enum sss_nic_sqe_data_format { + SSSNIC_NORMAL_SQE = 0, +}; + +enum sss_nic_sqe_type { + SSSNIC_SQE_COMPACT_TYPE = 0, + SSSNIC_SQE_EXTENDED_TYPE = 1, +}; + +enum sss_nic_sqe_task_len { + SSSNIC_SQE_TASK_LEN_46BITS = 0, + SSSNIC_SQE_TASK_LEN_128BITS = 1, +}; + +union sss_nic_transport_header { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum sss_nic_sq_l3_proto_type { + SSSNIC_UNSUPPORT_L3_PORTO_TYPE = 0, + SSSNIC_IPV6_PKT = 1, + SSSNIC_IPV4_PKT_NO_CSO = 2, + SSSNIC_IPV4_PKT_WITH_CSO = 3, +}; + +enum sss_nic_sq_l4_offload_type { + SSSNIC_DISABLE_OFFLOAD = 0, + SSSNIC_TCP_OFFLOAD = 1, + SSSNIC_SCTP_OFFLOAD = 2, + SSSNIC_UDP_OFFLOAD = 3, +}; + +static inline __sum16 sss_nic_csum_magic(union sss_nic_ip *ip, + unsigned short proto) +{ + return (ip->v4->version == SSSNIC_IPV4_VERSION) ? 
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +#define sss_nic_set_vlan_tx_offload(task_sect, vlan_tag, vlan_type) \ + ((task_sect)->value[3] = SSSNIC_SQE_TASK_SECT_VALUE3_SET((vlan_tag), VLAN_TAG) | \ + SSSNIC_SQE_TASK_SECT_VALUE3_SET((vlan_type), VLAN_TYPE) | \ + SSSNIC_SQE_TASK_SECT_VALUE3_SET(1U, VLAN_TAG_VALID)) + +void sss_nic_get_sq_stats(struct sss_nic_sq_desc *sq_desc, + struct sss_nic_sq_stats *stats) +{ + struct sss_nic_sq_stats *sq_stats = &sq_desc->stats; + unsigned int begin; + + u64_stats_update_begin(&stats->stats_sync); + do { + begin = u64_stats_fetch_begin(&sq_stats->stats_sync); + stats->tx_bytes = sq_stats->tx_bytes; + stats->tx_packets = sq_stats->tx_packets; + stats->tx_busy = sq_stats->tx_busy; + stats->wake = sq_stats->wake; + stats->tx_dropped = sq_stats->tx_dropped; + } while (u64_stats_fetch_retry(&sq_stats->stats_sync, begin)); + u64_stats_update_end(&stats->stats_sync); +} + +#define sss_nic_init_bd_sect(bd_sect, addr, bd_len) \ +do { \ + (bd_sect)->addr_high = sss_hw_be32(upper_32_bits(addr)); \ + (bd_sect)->addr_low = sss_hw_be32(lower_32_bits(addr)); \ + (bd_sect)->len = sss_hw_be32(bd_len); \ +} while (0) + +#define sss_nic_unmap_dma_page(nic_dev, nr_frags, dma_group) \ +do { \ + struct pci_dev *_pdev = (nic_dev)->pdev; \ + int _frag_id; \ +\ + for (_frag_id = 1; _frag_id < (nr_frags) + 1; _frag_id++) \ + dma_unmap_page(&_pdev->dev, (dma_group)[_frag_id].dma, \ + (dma_group)[_frag_id].len, DMA_TO_DEVICE); \ + dma_unmap_single(&_pdev->dev, (dma_group)[0].dma, (dma_group)[0].len, \ + DMA_TO_DEVICE); \ +} while (0) + +static int sss_nic_map_dma_page(struct sss_nic_dev *nic_dev, + struct sk_buff *skb, u16 valid_nr_frag, + struct sss_nic_sq_desc *sq_desc, + struct sss_nic_tx_desc *tx_desc, + struct sss_nic_sqe *sqe) +{ + struct sss_nic_sqe_ctrl_section *ctrl_sect = sqe->ctrl_sect; + struct sss_nic_sqe_bd_section *bd_sect = sqe->bd_sect0; + struct 
sss_nic_dma_info *dma_group = tx_desc->dma_group; + struct pci_dev *pdev = nic_dev->pdev; + skb_frag_t *frag = NULL; + u32 flag; + int ret; + + dma_group[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_group[0].dma)) { + SSSNIC_SQ_STATS_INC(sq_desc, dma_map_err); + return -EFAULT; + } + + dma_group[0].len = skb_headlen(skb); + + ctrl_sect->addr_high = sss_hw_be32(upper_32_bits(dma_group[0].dma)); + ctrl_sect->addr_low = sss_hw_be32(lower_32_bits(dma_group[0].dma)); + ctrl_sect->sect_len = dma_group[0].len; + + for (flag = 0; flag < valid_nr_frag;) { + frag = &(skb_shinfo(skb)->frags[flag]); + if (unlikely(flag == sqe->first_bds_num)) + bd_sect = sqe->bd_sect1; + + flag++; + dma_group[flag].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_group[flag].dma)) { + SSSNIC_SQ_STATS_INC(sq_desc, dma_map_err); + flag--; + ret = -EFAULT; + goto frag_map_err; + } + dma_group[flag].len = skb_frag_size(frag); + + sss_nic_init_bd_sect(bd_sect, dma_group[flag].dma, + dma_group[flag].len); + bd_sect++; + } + return 0; + +frag_map_err: + sss_nic_unmap_dma_page(nic_dev, flag, dma_group); + return ret; +} + +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO +#define sss_nic_ipv6_frag_id(task_sect, skb, ip) \ +do { \ + if ((ip)->v4->version == 6) \ + (task_sect)->value[1] = be32_to_cpu(skb_shinfo(skb)->ip6_frag_id); \ +} while (0) +#else +#define sss_nic_ipv6_frag_id(task_sect, skb, ip) do {} while (0) +#endif + +#define sss_nic_get_inner_transport_info(task_sect, skb, ip, l4, l4_proto, offset, l4_offload) \ +do { \ + if ((l4_proto) == IPPROTO_TCP) { \ + (l4)->tcp->check = ~sss_nic_csum_magic((ip), IPPROTO_TCP); \ + *(l4_offload) = SSSNIC_TCP_OFFLOAD; \ + *(offset) = SSSNIC_TCP_DOFF_TO_BYTES((l4)->tcp->doff) + \ + SSSNIC_TRANSPORT_OFFSET((l4)->hdr, (skb)); \ + } else if ((l4_proto) == IPPROTO_UDP) { \ + sss_nic_ipv6_frag_id(task_sect, (skb), (ip)); \ 
+ *(l4_offload) = SSSNIC_UDP_OFFLOAD; \ + *(offset) = SSSNIC_TRANSPORT_OFFSET((l4)->hdr, (skb)); \ + } \ +} while (0) + +#define sss_nic_check_enc_tx_csum(sq_desc, task_sect, skb, offload) \ +do { \ + union sss_nic_ip _ip; \ + u8 _l4_proto; \ +\ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, TUNNEL_FLAG); \ + _ip.hdr = skb_network_header(skb); \ + if (_ip.v4->version == SSSNIC_IPV4_VERSION) { \ + _l4_proto = _ip.v4->protocol; \ + } else if (_ip.v4->version == SSSNIC_IPV6_VERSION) { \ + union sss_nic_transport_header l4; \ + unsigned char *exthdr; \ + __be16 frag_off; \ +\ + exthdr = _ip.hdr + sizeof(*_ip.v6); \ + _l4_proto = _ip.v6->nexthdr; \ + l4.hdr = skb_transport_header(skb); \ + if (l4.hdr != exthdr) \ + ipv6_skip_exthdr((skb), exthdr - (skb)->data, &_l4_proto, &frag_off); \ + } else { \ + _l4_proto = IPPROTO_RAW; \ + } \ + if (((struct udphdr *)skb_transport_header(skb))->dest != \ + SSSNIC_VXLAN_OFFLOAD_PORT || \ + _l4_proto != IPPROTO_UDP) { \ + SSSNIC_SQ_STATS_INC((sq_desc), unknown_tunnel_proto); \ + /* disable checksum offload */ \ + skb_checksum_help(skb); \ + } else { \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + *(offload) = SSSNIC_OFFLOAD_TX_CSUM; \ + } \ +} while (0) + +#define sss_nic_check_tx_csum(sq_desc, task_sect, skb, offload) \ +do { \ + if ((skb)->ip_summed == CHECKSUM_PARTIAL) {\ + if ((skb)->encapsulation) \ + sss_nic_check_enc_tx_csum((sq_desc), (task_sect), (skb), (offload)); \ + else {\ + (task_sect)->value[0] |= \ + SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + *(offload) = SSSNIC_OFFLOAD_TX_CSUM; \ + } \ + } \ +} while (0) + +#define sss_nic_get_inner_proto_type(skb, ip, l4, l4_proto) \ +do { \ + unsigned char *_ext_hdr = NULL; \ + __be16 _frag_off = 0; \ +\ + if ((ip)->v4->version == SSSNIC_IPV4_VERSION) { \ + *(l4_proto) = (ip)->v4->protocol; \ + } else if ((ip)->v4->version == SSSNIC_IPV6_VERSION) { \ + _ext_hdr = (ip)->hdr + sizeof(*((ip)->v6)); \ + *(l4_proto) = 
(ip)->v6->nexthdr; \ + if (_ext_hdr != (l4)->hdr) \ + ipv6_skip_exthdr((skb), (int)(_ext_hdr - (skb)->data), \ + (l4_proto), &_frag_off); \ + } else { \ + *(l4_proto) = 0; \ + } \ +} while (0) + +#define sss_nic_set_tso_info(task_sect, qinfo, l4_offload, offset, mss) \ +do { \ + if ((l4_offload) == SSSNIC_TCP_OFFLOAD) { \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, TSO); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + } else if ((l4_offload) == SSSNIC_UDP_OFFLOAD) { \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, UFO); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + } \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L3_EN); \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET((offset) >> 1, PLDOFF); \ + *(qinfo) = SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR(*(qinfo), MSS); \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET((mss), MSS); \ +} while (0) + +#define sss_nic_get_proto_hdr(task_sect, skb, ip, l4) \ +do { \ + if ((skb)->encapsulation) { \ + u32 gso_type = skb_shinfo(skb)->gso_type; \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, OUT_L3_EN); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, TUNNEL_FLAG); \ +\ + (l4)->hdr = skb_transport_header(skb); \ + (ip)->hdr = skb_network_header(skb); \ +\ + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { \ + (l4)->udp->check = ~sss_nic_csum_magic((ip), IPPROTO_UDP); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, OUT_L4_EN); \ + } \ +\ + (ip)->hdr = skb_inner_network_header(skb); \ + (l4)->hdr = skb_inner_transport_header(skb); \ + } else { \ + (ip)->hdr = skb_network_header(skb); \ + (l4)->hdr = skb_transport_header(skb); \ + } \ +} while (0) + +#define sss_nic_check_tso(task_sect, qinfo, skb, offload) \ +do { \ + enum sss_nic_sq_l4_offload_type _l4_offload = SSSNIC_DISABLE_OFFLOAD; \ + union sss_nic_ip _ip; \ + union sss_nic_transport_header _l4; \ + u32 _offset = 0; \ + u8 _l4_proto; \ + int 
_ret; \ +\ + _ret = skb_cow_head((skb), 0); \ + if (_ret < 0) \ + *(offload) = SSSNIC_OFFLOAD_TX_DISABLE; \ + else { \ + sss_nic_get_proto_hdr((task_sect), (skb), &_ip, &_l4); \ + sss_nic_get_inner_proto_type(skb, &_ip, &_l4, &_l4_proto); \ + sss_nic_get_inner_transport_info((task_sect), (skb), &_ip, &_l4, \ + _l4_proto, &_offset, &_l4_offload); \ + sss_nic_set_tso_info((task_sect), (qinfo), _l4_offload, _offset, \ + skb_shinfo(skb)->gso_size); \ +\ + if (unlikely(SSSNIC_SQE_CTRL_SECT_QINFO_GET(*(qinfo), PLDOFF) > \ + SSSNIC_PAYLOAD_OFFSET_MAX)) \ + *(offload) = SSSNIC_OFFLOAD_TX_DISABLE; \ + else \ + *(offload) = SSSNIC_OFFLOAD_TSO; \ + } \ +} while (0) + +#define sss_nic_check_tx_offload(sq_desc, task_sect, skb, qinfo, offload) \ +do { \ + if (skb_is_gso(skb) == 0) \ + sss_nic_check_tx_csum((sq_desc), (task_sect), (skb), (offload)); \ + else \ + sss_nic_check_tso((task_sect), (qinfo), (skb), (offload)); \ +\ + if (*(offload) != SSSNIC_OFFLOAD_TX_DISABLE) { \ + if (unlikely(skb_vlan_tag_present(skb))) { \ + sss_nic_set_vlan_tx_offload((task_sect), skb_vlan_tag_get(skb), \ + (sq_desc)->qid % \ + SSSNIC_VLAN_INSERT_MODE_MAX); \ + *(offload) |= SSSNIC_OFFLOAD_TX_VLAN; \ + } \ + } \ +} while (0) + +#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET +#define sss_nic_get_inner_ihs(skb) \ + (skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb)) +#else +#define sss_nic_get_inner_ihs(skb) \ + ((skb_inner_transport_header(skb) - (skb)->data) + inner_tcp_hdrlen(skb)) +#endif + +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && defined(HAVE_SK_BUFF_ENCAPSULATION)) +#define sss_nic_get_ihs(skb, ihs) \ +do { \ + if ((skb)->encapsulation) \ + (ihs) = sss_nic_get_inner_ihs(skb); \ + else \ + (ihs) = skb_transport_offset(skb) + tcp_hdrlen(skb); \ +} while (0) +#else +#define sss_nic_get_ihs(skb, ihs) \ + ((ihs) = skb_transport_offset(skb) + tcp_hdrlen(skb)) +#endif + +#define sss_nic_get_pkt_stats(tx_desc, skb) \ +do { \ + u32 _ihs; \ + u32 _hdr_len; \ +\ + if (skb_is_gso(skb)) { \ + 
sss_nic_get_ihs((skb), _ihs); \ + _hdr_len = (skb_shinfo(skb)->gso_segs - 1) * _ihs; \ + (tx_desc)->bytes = (skb)->len + (u64)_hdr_len; \ + } else { \ + (tx_desc)->bytes = (skb)->len > ETH_ZLEN ? (skb)->len : ETH_ZLEN; \ + } \ + (tx_desc)->nr_pkt_cnt = SSSNIC_DEF_PKT_CNT; \ +} while (0) + +#define sss_nic_get_sq_free_wqebbs(sq) sss_wq_free_wqebb(&(sq)->wq) + +static inline int sss_nic_check_tx_stop(struct sss_nic_sq_desc *sq_desc, + u16 wqebb_cnt) +{ + if (likely(sss_nic_get_sq_free_wqebbs(sq_desc->sq) >= wqebb_cnt)) + return 0; + + /* We need to check again in a case another CPU has free room available. */ + netif_stop_subqueue(sq_desc->netdev, sq_desc->qid); + + if (likely(sss_nic_get_sq_free_wqebbs(sq_desc->sq) < wqebb_cnt)) + return -EBUSY; + + /* wake up queue when there are enough wqebbs */ + netif_start_subqueue(sq_desc->netdev, sq_desc->qid); + + return 0; +} + +#define sss_nic_get_and_update_sq_owner(sq, owner_ptr, curr_pi, wqebb_cnt) \ +do { \ + if (unlikely((curr_pi) + (wqebb_cnt) >= (sq)->wq.q_depth)) \ + (sq)->owner = !(sq)->owner; \ + *(owner_ptr) = (sq)->owner; \ +} while (0) + +#define sss_nic_combo_sqe(sq, sqe, task, curr_pi, owner, offload, sge_cnt) \ +do { \ + void *_wqebb = NULL; \ + void *_second_part_wqebbs_addr = NULL; \ + u16 _tmp_pi; \ + u16 _first_part_wqebbs_num; \ + int _id; \ +\ + (sqe)->ctrl_sect = sss_wq_get_one_wqebb(&(sq)->wq, (curr_pi)); \ + if ((offload) == 0 && (sge_cnt) == 1) { \ + (sqe)->wqe_type = SSSNIC_SQE_COMPACT_TYPE; \ + sss_nic_get_and_update_sq_owner((sq), (owner), *(curr_pi), 1); \ + } else { \ + (sqe)->wqe_type = SSSNIC_SQE_EXTENDED_TYPE; \ +\ + if ((offload) != 0) { \ + (sqe)->task_sect = sss_wq_get_one_wqebb(&(sq)->wq, &_tmp_pi); \ + (sqe)->task_type = SSSNIC_SQE_TASK_LEN_128BITS; \ +\ + for (_id = 0; _id < ARRAY_LEN((sqe)->task_sect->value); _id++) \ + (sqe)->task_sect->value[_id] = sss_hw_be32((task)->value[_id]); \ +\ + } else { \ + (sqe)->task_type = SSSNIC_SQE_TASK_LEN_46BITS; \ + } \ +\ + if ((sge_cnt) > 1) { 
\ + /* first wqebb contain bd0, so use weqbb_cnt(sge_num-1) */ \ + _wqebb = sss_wq_get_multi_wqebb(&(sq)->wq, (sge_cnt) - 1, &_tmp_pi, \ + &_second_part_wqebbs_addr, \ + &_first_part_wqebbs_num); \ + (sqe)->first_bds_num = _first_part_wqebbs_num; \ + (sqe)->bd_sect1 = _second_part_wqebbs_addr; \ + (sqe)->bd_sect0 = _wqebb; \ + } \ +\ + sss_nic_get_and_update_sq_owner((sq), (owner), *(curr_pi), \ + (sge_cnt) + (u16)!!(offload)); \ + } \ +} while (0) + +#define SSSNIC_FILL_COMPACT_WQE_CTRL_SECT(sqe, ctrl_sect, owner) \ +do { \ + (ctrl_sect)->sect_len |= \ + SSSNIC_SQE_CTRL_SECT_SET((owner), OWNER) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->wqe_type, EXTENDED) | \ + SSSNIC_SQE_CTRL_SECT_SET(SSSNIC_NORMAL_SQE, DATA_FORMAT); \ + (ctrl_sect)->sect_len = sss_hw_be32((ctrl_sect)->sect_len); \ + (ctrl_sect)->qinfo = 0; \ +} while (0) + +#define SSSNIC_FILL_EXTEND_WQE_CTRL_SECT(sqe, ctrl_sect, info, sge_cnt, owner) \ +do { \ + (ctrl_sect)->sect_len |= SSSNIC_SQE_CTRL_SECT_SET((sge_cnt), BUFDESC_NUM) | \ + SSSNIC_SQE_CTRL_SECT_SET((owner), OWNER) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->task_type, TASKSECT_LEN) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->wqe_type, EXTENDED) | \ + SSSNIC_SQE_CTRL_SECT_SET(SSSNIC_NORMAL_SQE, DATA_FORMAT); \ +\ + (ctrl_sect)->sect_len = sss_hw_be32((ctrl_sect)->sect_len); \ + (ctrl_sect)->qinfo = (info); \ + (ctrl_sect)->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, UC); \ +\ + if (!SSSNIC_SQE_CTRL_SECT_QINFO_GET((ctrl_sect)->qinfo, MSS)) { \ + (ctrl_sect)->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(SSSNIC_DEFAULT_MSS, MSS); \ + } else if (SSSNIC_SQE_CTRL_SECT_QINFO_GET((ctrl_sect)->qinfo, MSS) < SSSNIC_MIN_MSS) { \ + /* mss should not less than 80 */ \ + (ctrl_sect)->qinfo = SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR((ctrl_sect)->qinfo, MSS); \ + ctrl_sect->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(SSSNIC_MIN_MSS, MSS); \ + } \ + (ctrl_sect)->qinfo = sss_hw_be32((ctrl_sect)->qinfo); \ +} while (0) + +#define sss_nic_init_sq_ctrl(sqe, info, sge_cnt, owner) \ +do { \ + if 
((sqe)->wqe_type == SSSNIC_SQE_COMPACT_TYPE) \ + SSSNIC_FILL_COMPACT_WQE_CTRL_SECT((sqe), (sqe)->ctrl_sect, (owner)); \ + else \ + SSSNIC_FILL_EXTEND_WQE_CTRL_SECT((sqe), (sqe)->ctrl_sect, \ + (info), (sge_cnt), (owner)); \ +} while (0) + +#define sss_nic_rollback_sq_wqebbs(sq, wqebb_cnt, owner) \ +do { \ + if ((owner) != (sq)->owner) \ + (sq)->owner = (u8)(owner); \ + (sq)->wq.pi -= (wqebb_cnt); \ +} while (0) + +#define sss_nic_update_sq_local_ci(sq, wqebb_cnt) \ + sss_update_wq_ci(&(sq)->wq, (wqebb_cnt)) + +static netdev_tx_t sss_nic_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct sss_nic_sq_desc *sq_desc) +{ + u32 qinfo = 0; + u32 offload = 0; + u16 pi = 0; + u16 owner; + u16 sge_cnt; + u16 nr_frags = 0; + u16 wqebb_cnt; + bool find_zero_len = false; + int ret; + int frag_id; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_tx_desc *tx_desc = NULL; + struct sss_nic_sqe sqe = {0}; + struct sss_nic_sqe_task_section task_sect = {0}; + + if (unlikely(skb->len < SSSNIC_SKB_LEN_MIN)) { + if (skb_pad(skb, (int)(SSSNIC_SKB_LEN_MIN - skb->len))) { + SSSNIC_SQ_STATS_INC(sq_desc, skb_pad_err); + goto tx_drop_pad_err; + } + + skb->len = SSSNIC_SKB_LEN_MIN; + } + + for (frag_id = 0; frag_id < skb_shinfo(skb)->nr_frags; frag_id++) { + if (skb_frag_size(&skb_shinfo(skb)->frags[frag_id]) == 0) { + find_zero_len = true; + continue; + } else if (find_zero_len) { + SSSNIC_SQ_STATS_INC(sq_desc, frag_size_zero); + goto tx_drop_pkts; + } + nr_frags++; + } + sge_cnt = nr_frags + 1; + wqebb_cnt = sge_cnt + 1; /* task info need 1 wqebb */ + + if (unlikely(sss_nic_check_tx_stop(sq_desc, wqebb_cnt))) { + SSSNIC_SQ_STATS_INC(sq_desc, tx_busy); + return NETDEV_TX_BUSY; + } + + sss_nic_check_tx_offload(sq_desc, &task_sect, skb, &qinfo, &offload); + if (unlikely(offload == SSSNIC_OFFLOAD_TX_DISABLE)) { + SSSNIC_SQ_STATS_INC(sq_desc, offload_err); + goto tx_drop_pkts; + } else if (offload == 0) { + /* no TS in current wqe */ + wqebb_cnt -= 1; + if 
(unlikely(sge_cnt == 1 && skb->len > SSSNIC_SKB_LEN_MAX)) + goto tx_drop_pkts; + } + + sss_nic_combo_sqe(sq_desc->sq, &sqe, &task_sect, &pi, &owner, offload, sge_cnt); + + tx_desc = &sq_desc->tx_desc_group[pi]; + tx_desc->nr_frags = nr_frags; + tx_desc->wqebb_cnt = wqebb_cnt; + tx_desc->skb = skb; + ret = sss_nic_map_dma_page(nic_dev, skb, nr_frags, sq_desc, tx_desc, &sqe); + if (ret != 0) { + sss_nic_rollback_sq_wqebbs(sq_desc->sq, wqebb_cnt, owner); + goto tx_drop_pkts; + } + sss_nic_get_pkt_stats(tx_desc, skb); + sss_nic_init_sq_ctrl(&sqe, qinfo, sge_cnt, owner); + sss_nic_write_db(sq_desc->sq, sq_desc->cos, SQ_CFLAG_DP, + sss_nic_get_sq_local_pi(sq_desc->sq)); + return NETDEV_TX_OK; + +tx_drop_pkts: + dev_kfree_skb_any(skb); +tx_drop_pad_err: + SSSNIC_SQ_STATS_INC(sq_desc, tx_dropped); + return NETDEV_TX_OK; +} + +netdev_tx_t sss_nic_loop_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid = skb_get_queue_mapping(skb); + struct sss_nic_sq_desc *sq_desc = &nic_dev->sq_desc_group[qid]; + + return sss_nic_send_one_skb(skb, netdev, sq_desc); +} + +netdev_tx_t sss_nic_ndo_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid = skb_get_queue_mapping(skb); + + if (unlikely(!netif_carrier_ok(netdev))) { + SSSNIC_STATS_TX_DROP_INC(nic_dev); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(qid >= nic_dev->qp_res.qp_num)) { + SSSNIC_STATS_TX_INVALID_QID_INC(nic_dev); + goto out; + } + sq_desc = &nic_dev->sq_desc_group[qid]; + return sss_nic_send_one_skb(skb, netdev, sq_desc); + +out: + dev_kfree_skb_any(skb); + sq_desc = &nic_dev->sq_desc_group[0]; + SSSNIC_SQ_STATS_INC(sq_desc, tx_dropped); + return NETDEV_TX_OK; +} + +#define sss_nic_tx_free_skb(nic_dev, tx_desc) \ +do { \ + sss_nic_unmap_dma_page((nic_dev), (tx_desc)->nr_frags, (tx_desc)->dma_group); \ + 
dev_kfree_skb_any((tx_desc)->skb); \ + (tx_desc)->skb = NULL; \ +} while (0) + +void sss_nic_free_all_skb(struct sss_nic_dev *nic_dev, u32 sq_depth, + struct sss_nic_tx_desc *tx_desc_group) +{ + struct sss_nic_tx_desc *tx_desc = NULL; + u32 i; + + for (i = 0; i < sq_depth; i++) { + tx_desc = &tx_desc_group[i]; + if (tx_desc->skb) + sss_nic_tx_free_skb(nic_dev, tx_desc); + } +} + +#define sss_nic_stop_subqueue(nic_dev, sq_desc, wake) \ +do { \ + u16 _qid = (sq_desc)->sq->qid; \ + u64 _wake = 0; \ + struct netdev_queue *_netdev_sq; \ +\ + if (unlikely(__netif_subqueue_stopped((nic_dev)->netdev, _qid) && \ + sss_nic_get_sq_free_wqebbs((sq_desc)->sq) >= 1 && \ + test_bit(SSSNIC_INTF_UP, &(nic_dev)->flags))) { \ + _netdev_sq = netdev_get_tx_queue((sq_desc)->netdev, _qid); \ +\ + __netif_tx_lock(_netdev_sq, smp_processor_id()); \ + if (__netif_subqueue_stopped((nic_dev)->netdev, _qid)) { \ + netif_wake_subqueue((nic_dev)->netdev, _qid); \ + _wake++; \ + } \ + __netif_tx_unlock(_netdev_sq); \ + } \ +\ + *(wake) = _wake; \ +} while (0) + +int sss_nic_tx_poll(struct sss_nic_sq_desc *sq_desc, int budget) +{ + struct sss_nic_tx_desc *tx_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(sq_desc->netdev); + u64 tx_byte_cnt = 0; + u64 nr_pkt_cnt = 0; + u64 wake = 0; + u16 sw_ci; + u16 hw_ci; + u16 wqebb_cnt = 0; + int pkt_cnt = 0; + + hw_ci = sss_nic_get_sq_hw_ci(sq_desc->sq); + dma_rmb(); + sw_ci = sss_nic_get_sq_local_ci(sq_desc->sq); + + do { + tx_desc = &sq_desc->tx_desc_group[sw_ci]; + + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & sq_desc->qid_mask) < tx_desc->wqebb_cnt) + break; + + sw_ci = (sw_ci + tx_desc->wqebb_cnt) & (u16)sq_desc->qid_mask; + prefetch(&sq_desc->tx_desc_group[sw_ci]); + + tx_byte_cnt += tx_desc->bytes; + nr_pkt_cnt += tx_desc->nr_pkt_cnt; + wqebb_cnt += tx_desc->wqebb_cnt; + pkt_cnt++; + + sss_nic_tx_free_skb(nic_dev, tx_desc); + } while (likely(pkt_cnt < budget)); + + sss_nic_update_sq_local_ci(sq_desc->sq, wqebb_cnt); + + 
sss_nic_stop_subqueue(nic_dev, sq_desc, &wake); + + u64_stats_update_begin(&sq_desc->stats.stats_sync); + sq_desc->stats.tx_bytes += tx_byte_cnt; + sq_desc->stats.tx_packets += nr_pkt_cnt; + sq_desc->stats.wake += wake; + u64_stats_update_end(&sq_desc->stats.stats_sync); + + return pkt_cnt; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h new file mode 100644 index 0000000000000..faeca6a936858 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_H +#define SSS_NIC_TX_H + +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +void sss_nic_free_all_skb(struct sss_nic_dev *nic_dev, u32 sq_depth, + struct sss_nic_tx_desc *tx_desc_group); +netdev_tx_t sss_nic_loop_start_xmit(struct sk_buff *skb, + struct net_device *netdev); +netdev_tx_t sss_nic_ndo_start_xmit(struct sk_buff *skb, + struct net_device *netdev); +void sss_nic_get_sq_stats(struct sss_nic_sq_desc *sq_desc, + struct sss_nic_sq_stats *stats); +int sss_nic_tx_poll(struct sss_nic_sq_desc *sq_desc, int budget); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c new file mode 100644 index 0000000000000..c6f6b95b814fb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include 
"sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" + +#define SSSNIC_SQ_EXTRA_SGE 18 + +#define SSSNIC_FLUSH_SQ_TIMEOUT 1000 + +#define SSSNIC_STOP_SQ_WAIT_TIME_MIN 900 +#define SSSNIC_STOP_SQ_WAIT_TIME_MAX 1000 +#define SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MIN 9900 +#define SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MAX 10000 + +#define SSSNIC_SQ_WQEBB_BD (SSSNIC_SQ_WQEBB_SIZE / 16) + +int sss_nic_alloc_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + int qid; + int id; + u64 bds_size; + u64 len; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_res = &qp_res->sq_res_group[qid]; + bds_size = sizeof(*sq_res->dma_group) * + (qp_res->sq_depth * SSSNIC_SQ_WQEBB_BD + SSSNIC_SQ_EXTRA_SGE); + sq_res->dma_group = kzalloc(bds_size, GFP_KERNEL); + if (!sq_res->dma_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to allocate sq %d dma info\n", qid); + goto error; + } + + len = sizeof(*sq_res->tx_desc_group) * qp_res->sq_depth; + sq_res->tx_desc_group = kzalloc(len, GFP_KERNEL); + if (!sq_res->tx_desc_group) { + kfree(sq_res->dma_group); + sq_res->dma_group = NULL; + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq %d tx desc\n", qid); + goto error; + } + } + + return 0; + +error: + for (id = 0; id < qid; id++) { + sq_res = &qp_res->sq_res_group[id]; + kfree(sq_res->dma_group); + kfree(sq_res->tx_desc_group); + sq_res->dma_group = NULL; + sq_res->tx_desc_group = NULL; + } + + return -ENOMEM; +} + +void sss_nic_free_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + u16 qid; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_res = &qp_res->sq_res_group[qid]; + + sss_nic_free_all_skb(nic_dev, qp_res->sq_depth, sq_res->tx_desc_group); + kfree(sq_res->dma_group); + kfree(sq_res->tx_desc_group); + sq_res->dma_group = NULL; + sq_res->tx_desc_group = NULL; + } +} + +void 
sss_nic_init_all_sq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + struct sss_nic_sq_desc *sq_desc = NULL; + u16 qid; + u32 did; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_res = &qp_res->sq_res_group[qid]; + + sq_desc->q_depth = qp_res->sq_depth; + sq_desc->qid_mask = qp_res->sq_depth - 1; + + sq_desc->tx_desc_group = sq_res->tx_desc_group; + for (did = 0; did < qp_res->sq_depth; did++) + sq_desc->tx_desc_group[did].dma_group = + &sq_res->dma_group[did * SSSNIC_SQ_WQEBB_BD]; + + sq_desc->sq = &nic_dev->nic_io->sq_group[qid]; + } +} + +int sss_nic_alloc_sq_desc_group(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_sq_stats *sq_stats = NULL; + u16 sq_num = nic_dev->max_qp_num; + u16 qid; + + nic_dev->sq_desc_group = kcalloc(sq_num, sizeof(*nic_dev->sq_desc_group), GFP_KERNEL); + if (!nic_dev->sq_desc_group) + return -ENOMEM; + + for (qid = 0; qid < sq_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_stats = &sq_desc->stats; + sq_desc->qid = qid; + sq_desc->dev = nic_dev->dev_hdl; + sq_desc->netdev = nic_dev->netdev; + sq_desc->qid_mask = nic_dev->qp_res.sq_depth - 1; + sq_desc->q_depth = nic_dev->qp_res.sq_depth; + u64_stats_init(&sq_stats->stats_sync); + } + + return 0; +} + +void sss_nic_free_sq_desc_group(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->sq_desc_group); + nic_dev->sq_desc_group = NULL; +} + +static bool sss_nic_sq_is_null(struct sss_nic_io_queue *sq) +{ + u16 sw_pi = sss_nic_get_sq_local_pi(sq); + u16 hw_ci = sss_nic_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +static int sss_nic_stop_sq(struct sss_nic_dev *nic_dev, u16 qid) +{ + int ret; + unsigned long timeout; + struct sss_nic_io_queue *sq = nic_dev->sq_desc_group[qid].sq; + + timeout = msecs_to_jiffies(SSSNIC_FLUSH_SQ_TIMEOUT) + jiffies; + do { + if (sss_nic_sq_is_null(sq)) + return 0; + + 
usleep_range(SSSNIC_STOP_SQ_WAIT_TIME_MIN, SSSNIC_STOP_SQ_WAIT_TIME_MAX); + } while (time_before(jiffies, timeout)); + + timeout = msecs_to_jiffies(SSSNIC_FLUSH_SQ_TIMEOUT) + jiffies; + do { + if (sss_nic_sq_is_null(sq)) + return 0; + + ret = sss_nic_force_drop_tx_pkt(nic_dev); + if (ret != 0) + break; + + usleep_range(SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MIN, + SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MAX); + } while (time_before(jiffies, timeout)); + + if (!sss_nic_sq_is_null(sq)) + return -EFAULT; + + return 0; +} + +void sss_nic_flush_all_sq(struct sss_nic_dev *nic_dev) +{ + u16 qid = 0; + int ret = 0; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + ret = sss_nic_stop_sq(nic_dev, qid); + if (ret != 0) + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to stop sq%u\n", qid); + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h new file mode 100644 index 0000000000000..c72af131707eb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_INIT_H +#define SSS_NIC_TX_INIT_H + +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +int sss_nic_alloc_sq_desc_group(struct sss_nic_dev *nic_dev); +void sss_nic_free_sq_desc_group(struct sss_nic_dev *nic_dev); +int sss_nic_alloc_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_free_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_init_all_sq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_flush_all_sq(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c new file mode 100644 index 0000000000000..1c585ad7a15f6 --- 
/dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_event.h" + +static u8 vf_link_state; +module_param(vf_link_state, byte, 0444); +MODULE_PARM_DESC(vf_link_state, + "Set vf link state, 0 - link auto, 1 - always link up, 2 - always link down. - default is 0."); + +/* In order to adapt different linux version */ +enum { + SSSNIC_IFLA_VF_LINK_STATE_AUTO, + SSSNIC_IFLA_VF_LINK_STATE_ENABLE, + SSSNIC_IFLA_VF_LINK_STATE_DISABLE, + SSSNIC_IFLA_VF_LINK_STATE_MAX +}; + +#define SSSNIC_CVLAN_INSERT_ENABLE 0x1 +#define SSSNIC_QINQ_INSERT_ENABLE 0X3 + +#define SSSNIC_GET_VLAN_TAG(vlan_id, qos) ((vlan_id) + (u16)((qos) << VLAN_PRIO_SHIFT)) + +typedef void (*sss_nic_link_vf_handler_t)(struct sss_nic_vf_info *); +typedef u8 (*sss_nic_link_state_handler_t)(struct sss_nic_io *nic_io, u16 vf_id); + +static int sss_nic_set_vlan_mode(struct sss_nic_io *nic_io, u16 func_id, + u16 vlan_tag, u16 qid, u32 vlan_mode) +{ + int ret; + u64 out_param = 0; + struct sss_nic_vlan_ctx *vlan_ctx = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate send buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(*vlan_ctx); + vlan_ctx = (struct sss_nic_vlan_ctx *)msg_buf->buf; + vlan_ctx->sel = 0; /* TPID0 in IPSU */ + vlan_ctx->func_id = func_id; + vlan_ctx->mode = vlan_mode; + vlan_ctx->qid = qid; + vlan_ctx->tag = vlan_tag; + + sss_cpu_to_be32(vlan_ctx, 
sizeof(*vlan_ctx)); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_VLAN_CTX, msg_buf, + &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Fail to set vlan ctx, ret: %d, out_param: 0x%llx\n", + ret, out_param); + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return 0; +} + +int sss_nic_set_vf_vlan(struct sss_nic_io *nic_io, u8 opcode, u16 vlan_id, u8 qos, int vf_id) +{ + int ret; + u32 vlan_mode; + u16 os_id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + u16 vlan_tag = SSSNIC_GET_VLAN_TAG(vlan_id, qos); + u16 func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + struct sss_nic_mbx_vf_vlan_cfg cmd_config_info = {0}; + u16 out_len = sizeof(cmd_config_info); + + if (vlan_id == 0 && opcode == SSSNIC_MBX_OPCODE_DEL) + return 0; + + cmd_config_info.vlan_id = vlan_id; + cmd_config_info.func_id = func_id; + cmd_config_info.opcode = opcode; + cmd_config_info.qos = qos; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CFG_VF_VLAN, + &cmd_config_info, sizeof(cmd_config_info), + &cmd_config_info, &out_len); + if (ret != 0 || out_len == 0 || cmd_config_info.head.state != SSS_MGMT_CMD_SUCCESS) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d vlan, ret: %d, status: 0x%x, out_len: 0x%x\n", + os_id, ret, cmd_config_info.head.state, out_len); + return -EFAULT; + } + + vlan_mode = (opcode == SSSNIC_MBX_OPCODE_ADD) ? + SSSNIC_QINQ_INSERT_ENABLE : SSSNIC_CVLAN_INSERT_ENABLE; + + ret = sss_nic_set_vlan_mode(nic_io, func_id, vlan_tag, + SSSNIC_CONFIG_ALL_QUEUE_VLAN_CTX, vlan_mode); + if (ret != 0) { + cmd_config_info.opcode = (opcode == SSSNIC_MBX_OPCODE_DEL) ? 
+ SSSNIC_MBX_OPCODE_ADD : SSSNIC_MBX_OPCODE_DEL; + sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CFG_VF_VLAN, + &cmd_config_info, sizeof(cmd_config_info), + &cmd_config_info, &out_len); + nic_err(nic_io->dev_hdl, + "Fail to set VF %d vlan context, ret: %d\n", os_id, ret); + } + + return ret; +} + +int sss_nic_create_vf_vlan(struct sss_nic_io *nic_io, int vf_id, u16 vlan, u8 qos) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_ADD, vlan, qos, vf_id); + if (ret != 0) + return ret; + + nic_io->vf_info_group[id].pf_qos = qos; + nic_io->vf_info_group[id].pf_vlan = vlan; + + nic_info(nic_io->dev_hdl, "Add vf vlan VLAN %u, QOS 0x%x on VF %d\n", + vlan, qos, id); + + return 0; +} + +int sss_nic_destroy_vf_vlan(struct sss_nic_io *nic_io, int vf_id) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info_group; + + vf_info_group = nic_io->vf_info_group; + + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_DEL, + vf_info_group[id].pf_vlan, + vf_info_group[id].pf_qos, vf_id); + if (ret != 0) + return ret; + + nic_info(nic_io->dev_hdl, "Kill vf VLAN %u on VF %d\n", + vf_info_group[id].pf_vlan, id); + + vf_info_group[id].pf_qos = 0; + vf_info_group[id].pf_vlan = 0; + + return 0; +} + +u16 sss_nic_vf_info_vlan_prio(struct sss_nic_io *nic_io, int vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + u16 vlan_prio; + u16 pf_vlan; + u8 pf_qos; + + pf_vlan = nic_io->vf_info_group[id].pf_vlan; + pf_qos = nic_io->vf_info_group[id].pf_qos; + + vlan_prio = SSSNIC_GET_VLAN_PRIO(pf_vlan, pf_qos); + + return vlan_prio; +} + +static u8 sss_nic_ifla_vf_link_state_auto(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = false; + nic_io->vf_info_group[id].link_up = !!nic_io->link_status; + + return nic_io->link_status; +} + +static u8 sss_nic_ifla_vf_link_state_enable(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = true; 
+ nic_io->vf_info_group[id].link_up = true; + + return SSSNIC_LINK_UP; +} + +static u8 sss_nic_ifla_vf_link_state_disable(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = true; + nic_io->vf_info_group[id].link_up = false; + + return SSSNIC_LINK_DOWN; +} + +int sss_nic_set_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, int link) +{ + u8 link_status = 0; + struct sss_nic_vf_info *vf_info = NULL; + + sss_nic_link_state_handler_t handler[SSSNIC_IFLA_VF_LINK_STATE_MAX] = { + sss_nic_ifla_vf_link_state_auto, + sss_nic_ifla_vf_link_state_enable, + sss_nic_ifla_vf_link_state_disable, + }; + + if (link >= SSSNIC_IFLA_VF_LINK_STATE_MAX) + return -EINVAL; + + if (handler[link]) + link_status = handler[link](nic_io, SSSNIC_HW_VF_ID_TO_OS(vf_id)); + + /* Notify the VF of its new link state */ + vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + if (vf_info->attach) + sss_nic_notify_vf_link_state(nic_io, vf_id, link_status); + + return 0; +} + +int sss_nic_set_vf_spoofchk(struct sss_nic_io *nic_io, u16 vf_id, bool spoofchk) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = NULL; + struct sss_nic_mbx_set_spoofchk cmd_spoofchk_cfg = {0}; + u16 out_len = sizeof(cmd_spoofchk_cfg); + + cmd_spoofchk_cfg.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + cmd_spoofchk_cfg.state = !!spoofchk; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SET_SPOOPCHK_STATE, + &cmd_spoofchk_cfg, + sizeof(cmd_spoofchk_cfg), &cmd_spoofchk_cfg, + &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_spoofchk_cfg)) { + nic_err(nic_io->dev_hdl, "Fail to set VF(%d) spoofchk, ret: %d, status: 0x%x, out_len: 0x%x\n", + id, ret, cmd_spoofchk_cfg.head.state, out_len); + ret = -EINVAL; + } + + vf_info = nic_io->vf_info_group; + vf_info[id].spoofchk = !!spoofchk; + + return ret; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int sss_nic_set_vf_trust(struct sss_nic_io *nic_io, u16 vf_id, bool 
trust) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + if (vf_id > nic_io->max_vf_num) + return -EINVAL; + + nic_io->vf_info_group[id].trust = !!trust; + + return 0; +} + +bool sss_nic_get_vf_trust(struct sss_nic_io *nic_io, int vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + if (vf_id > nic_io->max_vf_num) + return -EINVAL; + + return !!nic_io->vf_info_group[id].trust; +} +#endif + +int sss_nic_set_vf_tx_rate_limit(struct sss_nic_io *nic_io, u16 vf_id, u32 min_rate, u32 max_rate) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_mbx_tx_rate_cfg cmd_cfg = {0}; + u16 out_len = sizeof(cmd_cfg); + + cmd_cfg.min_rate = min_rate; + cmd_cfg.max_rate = max_rate; + cmd_cfg.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + &cmd_cfg, sizeof(cmd_cfg), &cmd_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_cfg)) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d max_rate %u, min_rate %u, ret: %d, status: 0x%x, out_len: 0x%x\n", + id, max_rate, min_rate, ret, cmd_cfg.head.state, + out_len); + return -EIO; + } + + nic_io->vf_info_group[id].max_rate = max_rate; + nic_io->vf_info_group[id].min_rate = min_rate; + + return 0; +} + +void sss_nic_get_vf_attribute(struct sss_nic_io *nic_io, u16 vf_id, + struct ifla_vf_info *ifla_vf) +{ + struct sss_nic_vf_info *vf_info; + + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + + ether_addr_copy(ifla_vf->mac, vf_info->user_mac); + ifla_vf->vf = SSSNIC_HW_VF_ID_TO_OS(vf_id); + ifla_vf->qos = vf_info->pf_qos; + ifla_vf->vlan = vf_info->pf_vlan; + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ifla_vf->spoofchk = vf_info->spoofchk; +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST + ifla_vf->trusted = vf_info->trust; +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ifla_vf->min_tx_rate = vf_info->min_rate; + ifla_vf->max_tx_rate = vf_info->max_rate; +#else + ifla_vf->tx_rate = vf_info->max_rate; 
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (!vf_info->link_forced) + ifla_vf->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf_info->link_up) + ifla_vf->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ifla_vf->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +} + +static void sss_nic_init_link_disable_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = true; + vf_info->link_up = false; +} + +static void sss_nic_init_link_enable_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = true; + vf_info->link_up = true; +} + +static void sss_nic_init_link_auto_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = false; +} + +static int sss_nic_init_vf_info(struct sss_nic_io *nic_io, u16 vf_id) +{ + u8 link_state; + struct sss_nic_vf_info *vf_info_group = nic_io->vf_info_group; + sss_nic_link_vf_handler_t handler[SSSNIC_IFLA_VF_LINK_STATE_MAX] = { + sss_nic_init_link_auto_vf, + sss_nic_init_link_enable_vf, + sss_nic_init_link_disable_vf + }; + + if (vf_link_state >= SSSNIC_IFLA_VF_LINK_STATE_MAX) { + vf_link_state = SSSNIC_IFLA_VF_LINK_STATE_AUTO; + nic_warn(nic_io->dev_hdl, "Invalid vf_link_state: %u out of range[%u - %u], adjust to %d\n", + vf_link_state, SSSNIC_IFLA_VF_LINK_STATE_AUTO, + SSSNIC_IFLA_VF_LINK_STATE_DISABLE, SSSNIC_IFLA_VF_LINK_STATE_AUTO); + } + + link_state = vf_link_state; + if (link_state < SSSNIC_IFLA_VF_LINK_STATE_MAX) { + handler[link_state](&vf_info_group[vf_id]); + } else { + nic_err(nic_io->dev_hdl, "Fail to input vf_link_state: %u\n", + link_state); + return -EINVAL; + } + + return 0; +} + +static int sss_nic_register_vf_to_hw(struct sss_nic_io *nic_io) +{ + u16 out_len; + int ret; + struct sss_nic_mbx_attach_vf cmd_register_info = {0}; + + cmd_register_info.op_register = 1; + out_len = sizeof(cmd_register_info); + ret = sss_mbx_send_to_pf(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_MBX_OPCODE_VF_REGISTER, + &cmd_register_info, sizeof(cmd_register_info), + 
&cmd_register_info, &out_len, 0, + SSS_CHANNEL_NIC); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_register_info)) { + nic_err(nic_io->dev_hdl, "Fail to register VF, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_register_info.head.state, out_len); + return -EIO; + } + + return 0; +} + +static void sss_nic_unregister_vf_to_hw(struct sss_nic_io *nic_io) +{ + int ret; + struct sss_nic_mbx_attach_vf cmd_register_info = {0}; + u16 out_len = sizeof(cmd_register_info); + + cmd_register_info.op_register = 0; + + ret = sss_mbx_send_to_pf(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, SSSNIC_MBX_OPCODE_VF_REGISTER, + &cmd_register_info, sizeof(cmd_register_info), &cmd_register_info, + &out_len, 0, SSS_CHANNEL_NIC); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_register_info)) + nic_err(nic_io->dev_hdl, + "Fail to unregister VF, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_register_info.head.state, out_len); +} + +static void sss_nic_vf_unregister(struct sss_nic_io *nic_io) +{ + sss_nic_unregister_vf_to_hw(nic_io); + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); +} + +static int sss_nic_vf_register(struct sss_nic_io *nic_io) +{ + int ret; + + ret = sss_register_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_vf_event_handler); + if (ret != 0) + return ret; + + ret = sss_register_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_vf_mag_event_handler); + if (ret != 0) + goto reg_cb_error; + + ret = sss_nic_register_vf_to_hw(nic_io); + if (ret != 0) + goto register_vf_error; + + return 0; + +register_vf_error: + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + +reg_cb_error: + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + + return ret; +} + +void sss_nic_deinit_pf_vf_info(struct sss_nic_io *nic_io) +{ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return; + 
kfree(nic_io->vf_info_group); + nic_io->vf_info_group = NULL; +} + +int sss_nic_init_pf_vf_info(struct sss_nic_io *nic_io) +{ + u16 i; + int ret; + u32 len; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + nic_io->max_vf_num = sss_get_max_vf_num(nic_io->hwdev); + if (nic_io->max_vf_num == 0) + return 0; + + len = sizeof(*nic_io->vf_info_group) * nic_io->max_vf_num; + nic_io->vf_info_group = kzalloc(len, GFP_KERNEL); + if (!nic_io->vf_info_group) + return -ENOMEM; + + for (i = 0; i < nic_io->max_vf_num; i++) { + ret = sss_nic_init_vf_info(nic_io, i); + if (ret != 0) + goto init_vf_info_error; + } + + return 0; + +init_vf_info_error: + kfree(nic_io->vf_info_group); + nic_io->vf_info_group = NULL; + + return ret; +} + +int sss_nic_register_io_callback(struct sss_nic_io *nic_io) +{ + int ret; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return sss_nic_vf_register(nic_io); + + ret = sss_register_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_pf_event_handler); + if (ret != 0) + return ret; + + ret = sss_register_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_pf_mag_event_handler); + if (ret != 0) + goto register_pf_mag_event_handler; + + ret = sss_register_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_pf_mbx_handler); + if (ret != 0) + goto register_pf_mbx_cb_error; + + ret = sss_register_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_pf_mag_mbx_handler); + if (ret != 0) + goto register_pf_mag_mbx_cb_error; + + return 0; + +register_pf_mag_mbx_cb_error: + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + +register_pf_mbx_cb_error: + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + +register_pf_mag_event_handler: + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + + return ret; +} + +void sss_nic_unregister_io_callback(struct sss_nic_io *nic_io) 
+{ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) { + sss_nic_vf_unregister(nic_io); + } else { + if (nic_io->vf_info_group) { + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + } + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + } +} + +static void sss_nic_clear_vf_info(struct sss_nic_io *nic_io, u16 vf_id) +{ + u16 func_id; + struct sss_nic_vf_info *vf_info; + + func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + if (vf_info->specified_mac) + sss_nic_del_mac(nic_io->nic_dev, vf_info->drv_mac, + vf_info->pf_vlan, func_id, SSS_CHANNEL_NIC); + + if (sss_nic_vf_info_vlan_prio(nic_io, vf_id)) + sss_nic_destroy_vf_vlan(nic_io, vf_id); + + if (vf_info->max_rate && SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + sss_nic_set_vf_tx_rate_limit(nic_io, vf_id, 0, 0); + + if (vf_info->spoofchk) + sss_nic_set_vf_spoofchk(nic_io, vf_id, false); + +#ifdef HAVE_NDO_SET_VF_TRUST + if (vf_info->trust) + sss_nic_set_vf_trust(nic_io, vf_id, false); +#endif + + memset(vf_info, 0, sizeof(*vf_info)); + sss_nic_init_vf_info(nic_io, SSSNIC_HW_VF_ID_TO_OS(vf_id)); +} + +void sss_nic_clear_all_vf_info(struct sss_nic_io *nic_io) +{ + u16 i; + + for (i = 0; i < nic_io->max_vf_num; i++) + sss_nic_clear_vf_info(nic_io, SSSNIC_OS_VF_ID_TO_HW(i)); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h new file mode 100644 index 0000000000000..4256e118558e1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_VF_CFG_H +#define SSS_NIC_VF_CFG_H + +#include "sss_nic_cfg_vf_define.h" +#include "sss_nic_io_define.h" + +#define 
SSSNIC_GET_VF_SPOOFCHK(nic_io, vf_id) \ + (!!(nic_io)->vf_info_group[vf_id].spoofchk) + +int sss_nic_set_vf_spoofchk(struct sss_nic_io *nic_io, u16 vf_id, bool spoofchk); + +int sss_nic_create_vf_vlan(struct sss_nic_io *nic_io, int vf_id, u16 vlan, u8 qos); + +int sss_nic_destroy_vf_vlan(struct sss_nic_io *nic_io, int vf_id); + +u16 sss_nic_vf_info_vlan_prio(struct sss_nic_io *nic_io, int vf_id); + +int sss_nic_set_vf_tx_rate_limit(struct sss_nic_io *nic_io, u16 vf_id, u32 min_rate, u32 max_rate); + +void sss_nic_get_vf_attribute(struct sss_nic_io *nic_io, u16 vf_id, + struct ifla_vf_info *ifla_vf); + +int sss_nic_set_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, int link); + +void sss_nic_clear_all_vf_info(struct sss_nic_io *nic_io); + +#ifdef HAVE_NDO_SET_VF_TRUST +bool sss_nic_get_vf_trust(struct sss_nic_io *nic_io, int vf_id); +int sss_nic_set_vf_trust(struct sss_nic_io *nic_io, u16 vf_id, bool trust); +#endif + +int sss_nic_set_vf_vlan(struct sss_nic_io *nic_io, u8 opcode, u16 vid, + u8 qos, int vf_id); + +int sss_nic_register_io_callback(struct sss_nic_io *nic_io); + +void sss_nic_unregister_io_callback(struct sss_nic_io *nic_io); + +int sss_nic_init_pf_vf_info(struct sss_nic_io *nic_io); + +void sss_nic_deinit_pf_vf_info(struct sss_nic_io *nic_io); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h new file mode 100644 index 0000000000000..4aa2a96675563 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_H +#define SSS_TOOL_NIC_H + +#define SSS_TOOL_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ + +#define SSS_TOOL_MSG_QOS_DEV_TRUST BIT(0) +#define SSS_TOOL_MSG_QOS_DEV_DFT_COS BIT(1) +#define SSS_TOOL_MSG_QOS_DEV_PCP2COS BIT(2) +#define SSS_TOOL_MSG_QOS_DEV_DSCP2COS BIT(3) + +struct sss_tool_loop_mode { + 
u32 loop_mode; + u32 loop_ctrl; +}; + +struct sss_tool_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct sss_tool_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct sss_tool_sq_info { + u16 q_id; + u16 pi; + u16 ci; /* sw_ci */ + u16 fi; /* hw_ci */ + u32 q_depth; + u16 pi_reverse; /* TODO: what is this? */ + u16 wqebb_size; + u8 priority; + u16 *ci_addr; + u64 cla_addr; + void *slq_handle; + /* TODO: NIC don't use direct wqe */ + struct sss_tool_hw_page direct_wqe; + struct sss_tool_hw_page doorbell; + u32 page_idx; + u32 glb_sq_id; +}; + +struct sss_tool_rq_info { + u16 q_id; + u16 delta; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u8 coalesc_timer_cfg; + u8 pending_limt; + u16 msix_idx; + u32 msix_vector; +}; + +struct sss_tool_msg_head { + u8 status; + u8 rsvd1[3]; +}; + +struct sss_tool_dcb_state { + struct sss_tool_msg_head head; + + u16 op_code; /* 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 0 - disable, 1 - enable dcb */ + u8 rsvd; +}; + +struct sss_tool_qos_dev_cfg { + struct sss_tool_msg_head head; + + u8 op_code; /* 0:get 1: set */ + u8 rsvd0; + u16 cfg_bitmap; /* bit0 - trust, bit1 - dft_cos, bit2 - pcp2cos, bit3 - dscp2cos */ + + u8 trust; /* 0 - pcp, 1 - dscp */ + u8 dft_cos; + u16 rsvd1; + u8 pcp2cos[8]; /* all 8 entries must be configured together */ + + /* When setting dscp2cos, a cos value of 0xFF means */ +/* the driver ignores that dscp priority's mapping; */ +/* multiple dscp-to-cos mappings may be set at once */ + u8 dscp2cos[64]; + u32 rsvd2[4]; +}; + +struct sss_tool_qos_cos_cfg { + struct sss_tool_msg_head head; + + u8 port_id; + u8 func_cos_bitmap; + u8 port_cos_bitmap; + u8 func_max_cos_num; + u32 rsvd2[4]; +}; + +#endif /* SSS_TOOL_NIC_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c new file mode 100644 index 0000000000000..938ac63f1cd9e --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_cfg.h" +#include "sss_nic_dcb.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_netdev_ops_api.h" + +#define SSS_TOOL_DBG_DFLT_DSCP_VAL 0xFF + +static int sss_tool_update_pcp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + u8 valid_cos_bitmap = 0; + u8 cos_num = 0; + int i; + + if (!(qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_PCP2COS)) + return 0; + + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) { + if (!(nic_dev->dft_func_cos_bitmap & BIT(qos_cfg->pcp2cos[i]))) { + tool_err("Invalid pcp cos:%u, func cos valid map is %u", + qos_cfg->pcp2cos[i], nic_dev->dft_func_cos_bitmap); + return -EINVAL; + } + + if ((BIT(qos_cfg->pcp2cos[i]) & valid_cos_bitmap) == 0) { + cos_num++; + valid_cos_bitmap |= (u8)BIT(qos_cfg->pcp2cos[i]); + } + } + + nic_dev->backup_dcb_cfg.pcp_valid_cos_map = valid_cos_bitmap; + nic_dev->backup_dcb_cfg.pcp_user_cos_num = cos_num; + memcpy(nic_dev->backup_dcb_cfg.pcp2cos, qos_cfg->pcp2cos, sizeof(qos_cfg->pcp2cos)); + + return 0; +} + +static int sss_tool_update_dscp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + u8 valid_cos_bitmap = 0; + u8 cos_num = 0; + u8 cos; + int i; + + if (!(qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_DSCP2COS)) + return 0; + + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) { + if (qos_cfg->dscp2cos[i] != SSS_TOOL_DBG_DFLT_DSCP_VAL) + cos = qos_cfg->dscp2cos[i]; + else + cos = nic_dev->backup_dcb_cfg.dscp2cos[i]; + + if (cos >= SSSNIC_DCB_UP_MAX || !(nic_dev->dft_func_cos_bitmap & BIT(cos))) { + tool_err("Invalid dscp cos:%u, func cos valid map is %u", + cos, nic_dev->dft_func_cos_bitmap); + return -EINVAL; + } + + if ((BIT(cos) & valid_cos_bitmap) == 0) { + 
cos_num++; + valid_cos_bitmap |= (u8)BIT(cos); + } + } + + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) { + if (qos_cfg->dscp2cos[i] != SSS_TOOL_DBG_DFLT_DSCP_VAL) + nic_dev->backup_dcb_cfg.dscp2cos[i] = qos_cfg->dscp2cos[i]; + else + nic_dev->backup_dcb_cfg.dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + } + + nic_dev->backup_dcb_cfg.dscp_valid_cos_map = valid_cos_bitmap; + nic_dev->backup_dcb_cfg.dscp_user_cos_num = cos_num; + + return 0; +} + +static int sss_tool_update_pcp_dscp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + int ret; + + ret = sss_tool_update_pcp_cfg(nic_dev, qos_cfg); + if (ret != 0) { + tool_err("Fail to update pcp cfg\n"); + return ret; + } + + ret = sss_tool_update_dscp_cfg(nic_dev, qos_cfg); + if (ret != 0) + tool_err("Fail to update dscp cfg\n"); + + return ret; +} + +static int sss_tool_update_wanted_qos_cfg(struct sss_nic_dev *nic_dev, + const void *in_buf) +{ + const struct sss_tool_qos_dev_cfg *qos_cfg = in_buf; + u8 valid_cos_bitmap; + u8 cos_num; + int ret; + + if (qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_TRUST) { + if (qos_cfg->trust > DCB_DSCP) { + tool_err("Invalid trust:%u of qos cfg\n", qos_cfg->trust); + return -EINVAL; + } + + nic_dev->backup_dcb_cfg.trust = qos_cfg->trust; + } + + if (qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_DFT_COS) { + if (!(BIT(qos_cfg->dft_cos) & nic_dev->dft_func_cos_bitmap)) { + tool_err("Invalid default cos:%u of qos cfg\n", qos_cfg->dft_cos); + return -EINVAL; + } + + nic_dev->backup_dcb_cfg.default_cos = qos_cfg->dft_cos; + } + + ret = sss_tool_update_pcp_dscp_cfg(nic_dev, qos_cfg); + if (ret != 0) + return ret; + + if (nic_dev->backup_dcb_cfg.trust != DCB_PCP) { + valid_cos_bitmap = nic_dev->backup_dcb_cfg.dscp_valid_cos_map; + cos_num = nic_dev->backup_dcb_cfg.dscp_user_cos_num; + } else { + valid_cos_bitmap = nic_dev->backup_dcb_cfg.pcp_valid_cos_map; + cos_num = nic_dev->backup_dcb_cfg.pcp_user_cos_num; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, 
SSSNIC_DCB_ENABLE)) { + if (cos_num > nic_dev->qp_res.qp_num) { + tool_err("Invalid cos num, DCB is on, cos num:%d need less than channel num:%u\n", + cos_num, nic_dev->qp_res.qp_num); + return -EOPNOTSUPP; + } + } + + if (!(BIT(nic_dev->backup_dcb_cfg.default_cos) & valid_cos_bitmap)) { + tool_info("Success to update cos %u to %u\n", + nic_dev->backup_dcb_cfg.default_cos, (u8)fls(valid_cos_bitmap) - 1); + nic_dev->backup_dcb_cfg.default_cos = (u8)fls(valid_cos_bitmap) - 1; + } + + return 0; +} + +static int sss_tool_set_tx_cos_state(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + int ret; + u8 i; + struct sss_nic_dcb_info dcb_info = {0}; + struct sss_nic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + + dcb_info.trust = dcb_cfg->trust; + dcb_info.default_cos = dcb_cfg->default_cos; + dcb_info.dcb_on = dcb_en; + + if (!dcb_en) { + memset(dcb_info.dscp2cos, dcb_cfg->default_cos, sizeof(dcb_info.dscp2cos)); + memset(dcb_info.pcp2cos, dcb_cfg->default_cos, sizeof(dcb_info.pcp2cos)); + + } else { + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) + dcb_info.dscp2cos[i] = dcb_cfg->dscp2cos[i]; + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) + dcb_info.pcp2cos[i] = dcb_cfg->pcp2cos[i]; + } + + ret = sss_nic_set_dcb_info(nic_dev->nic_io, &dcb_info); + if (ret != 0) + tool_err("Fail to set dcb state\n"); + + return ret; +} + +static int sss_tool_configure_dcb_hw(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + ret = sss_nic_set_hw_dcb_state(nic_dev, 1, dcb_en); + if (ret != 0) { + tool_err("Fail to set dcb state\n"); + return ret; + } + + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); + sss_nic_update_sq_cos(nic_dev, dcb_en); + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + /* VF does not support DCB, use the default cos */ + nic_dev->hw_dcb_cfg.default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + + return 0; + } + + ret = sss_tool_set_tx_cos_state(nic_dev, dcb_en); + if (ret != 0) { + tool_err("Fail to set tx cos 
state\n"); + goto set_tx_cos_fail; + } + + ret = sss_nic_update_rx_rss(nic_dev); + if (ret != 0) { + tool_err("Fail to configure rx\n"); + goto update_rx_rss_fail; + } + + if (!dcb_en) + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + else + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + return 0; +update_rx_rss_fail: + sss_tool_set_tx_cos_state(nic_dev, dcb_en ? 0 : 1); + +set_tx_cos_fail: + sss_nic_update_sq_cos(nic_dev, dcb_en ? 0 : 1); + sss_nic_set_hw_dcb_state(nic_dev, 1, dcb_en ? 0 : 1); /* pass nic_dev, matching the forward call above */ + + return ret; +} + +static int sss_tool_setup_cos(struct net_device *netdev, u8 cos) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (cos > nic_dev->max_cos_num) { + tool_err("Invalid num_tc: %u more then max cos: %u\n", cos, nic_dev->max_cos_num); + return -EINVAL; + } + + if (cos && SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) { + tool_err("Fail to enable DCB while Symmetric RSS is enabled\n"); + return -EOPNOTSUPP; + } + + return sss_tool_configure_dcb_hw(nic_dev, cos ?
1 : 0); +} + +static void sss_tool_change_qos_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_cfg) +{ + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + sss_nic_sync_dcb_cfg(nic_dev, dcb_cfg); + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); + + clear_bit(SSSNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); +} + +static int sss_tool_dcbcfg_set_up_bitmap(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + struct sss_nic_dcb_config old_dcb_cfg; + bool netif_run = false; + + memcpy(&old_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(struct sss_nic_dcb_config)); + + if (!memcmp(&nic_dev->backup_dcb_cfg, &old_dcb_cfg, sizeof(struct sss_nic_dcb_config))) { + tool_info("Valid up bitmap is the same, nothing has to change\n"); + return 0; + } + + rtnl_lock(); + if (netif_running(nic_dev->netdev)) { + sss_nic_vport_down(nic_dev); + netif_run = true; + } + + if (test_and_set_bit(SSSNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { + tool_warn("Cos up map setup in inprocess, please try again later\n"); + ret = -EFAULT; + goto set_qos_cfg_fail; + } + + sss_tool_change_qos_cfg(nic_dev, &nic_dev->backup_dcb_cfg); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + ret = sss_tool_setup_cos(nic_dev->netdev, user_cos_num); + if (ret != 0) + goto setup_cos_fail; + } + + if (netif_run) { + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto vport_up_fail; + } + + rtnl_unlock(); + + return 0; + +vport_up_fail: + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) + sss_tool_setup_cos(nic_dev->netdev, user_cos_num ? 
0 : user_cos_num); + +setup_cos_fail: + sss_tool_change_qos_cfg(nic_dev, &old_dcb_cfg); + +set_qos_cfg_fail: + if (netif_run) + sss_nic_vport_up(nic_dev); + + rtnl_unlock(); + + return ret; +} + +int sss_tool_dcb_mt_qos_map(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u8 i; + struct sss_tool_qos_dev_cfg *qos_out = out_buf; + + if (!out_buf || !out_len || !in_buf) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*qos_out) || *out_len != sizeof(*qos_out)) { + tool_err("Invalid in len: %u or outlen: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*qos_out)); + return -EINVAL; + } + + memcpy(qos_out, in_buf, sizeof(*qos_out)); + qos_out->head.status = 0; + if (qos_out->op_code & SSS_TOOL_DCB_OPCODE_WR) { + memcpy(&nic_dev->backup_dcb_cfg, &nic_dev->hw_dcb_cfg, + sizeof(struct sss_nic_dcb_config)); + ret = sss_tool_update_wanted_qos_cfg(nic_dev, in_buf); + if (ret != 0) { + qos_out->head.status = SSS_TOOL_EINVAL; + return 0; + } + + ret = sss_tool_dcbcfg_set_up_bitmap(nic_dev); + if (ret != 0) + qos_out->head.status = SSS_TOOL_EIO; + } else { + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) + qos_out->dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) + qos_out->pcp2cos[i] = nic_dev->hw_dcb_cfg.pcp2cos[i]; + qos_out->trust = nic_dev->hw_dcb_cfg.trust; + qos_out->dft_cos = nic_dev->hw_dcb_cfg.default_cos; + } + + return 0; +} + +int sss_tool_dcb_mt_dcb_state(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + struct sss_tool_dcb_state *dcb_out = out_buf; + const struct sss_tool_dcb_state *dcb_in = in_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*dcb_in) || *out_len != sizeof(*dcb_out)) { + tool_err("Invalid in len: 
%u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*dcb_in)); + return -EINVAL; + } + + memcpy(dcb_out, dcb_in, sizeof(*dcb_in)); + dcb_out->head.status = 0; + + if (!(dcb_in->op_code & SSS_TOOL_DCB_OPCODE_WR)) { + dcb_out->state = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + return 0; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE) == dcb_in->state) + return 0; + + if (dcb_in->state && user_cos_num > nic_dev->qp_res.qp_num) { + tool_err("Fail to mt dcb state, cos num %u larger than channel num %u\n", + user_cos_num, nic_dev->qp_res.qp_num); + return -EOPNOTSUPP; + } + + rtnl_lock(); + if (netif_running(nic_dev->netdev)) { + sss_nic_vport_down(nic_dev); + ret = sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? user_cos_num : 0); + if (ret != 0) { + sss_nic_vport_up(nic_dev); + rtnl_unlock(); + return ret; + } + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) { + sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? 0 : user_cos_num); + sss_nic_vport_up(nic_dev); + } + + rtnl_unlock(); + return ret; + } + + ret = sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? 
user_cos_num : 0); + rtnl_unlock(); + + return ret; +} + +int sss_tool_dcb_mt_hw_qos_get(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_qos_cos_cfg *out_cfg = out_buf; + const struct sss_tool_qos_cos_cfg *in_cfg = in_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*in_cfg) || *out_len != sizeof(*out_cfg)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*in_cfg)); + return -EINVAL; + } + + memcpy(out_cfg, in_cfg, sizeof(*in_cfg)); + out_cfg->func_max_cos_num = nic_dev->max_cos_num; + out_cfg->head.status = 0; + out_cfg->port_cos_bitmap = (u8)nic_dev->dft_port_cos_bitmap; + out_cfg->func_cos_bitmap = (u8)nic_dev->dft_func_cos_bitmap; + out_cfg->port_id = sss_get_phy_port_id(nic_dev->hwdev); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h new file mode 100644 index 0000000000000..1fc71d5a65f8f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_DCB_H +#define SSS_TOOL_NIC_DCB_H + +int sss_tool_dcb_mt_qos_map(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_dcb_mt_dcb_state(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_dcb_mt_hw_qos_get(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c new file mode 100644 index 0000000000000..95f4c99236c8d --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_mag_cfg.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_tool_nic_dcb.h" +#include "sss_tool_nic_qp_info.h" +#include "sss_tool_nic_phy_attr.h" +#include "sss_tool_nic_stats.h" + +typedef int (*sss_tool_cmd_func)(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len); + +struct sss_tool_cmd_handle { + enum sss_tool_driver_cmd_type cmd_type; + sss_tool_cmd_func func; +}; + +static int sss_tool_get_nic_version(void *out_buf, const u32 *out_len) +{ + struct sss_tool_drv_version_info *ver_info = out_buf; + int ret; + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer.\n"); + return -EINVAL; + } + + if (*out_len != sizeof(*ver_info)) { + tool_err("Invalid out len :%u is not equal to %lu\n", + *out_len, sizeof(*ver_info)); + return -EINVAL; + } + + ret = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", + SSSNIC_DRV_VERSION, __TIME_STR__); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static const struct sss_tool_cmd_handle sss_tool_nic_cmd_handle[] = { + {SSS_TOOL_GET_TX_INFO, sss_tool_get_tx_info}, + {SSS_TOOL_GET_RX_INFO, sss_tool_get_rx_info}, + {SSS_TOOL_GET_TX_WQE_INFO, sss_tool_get_tx_wqe_info}, + {SSS_TOOL_GET_RX_WQE_INFO, sss_tool_get_rx_wqe_info}, + {SSS_TOOL_GET_Q_NUM, sss_tool_get_q_num}, + {SSS_TOOL_GET_RX_CQE_INFO, sss_tool_get_rx_cqe_info}, + {SSS_TOOL_GET_INTER_NUM, sss_tool_get_inter_num}, + {SSS_TOOL_SET_PF_BW_LIMIT, sss_tool_set_pf_bw_limit}, + {SSS_TOOL_GET_PF_BW_LIMIT, sss_tool_get_pf_bw_limit}, + {SSS_TOOL_GET_LOOPBACK_MODE, sss_tool_get_loopback_mode}, + {SSS_TOOL_SET_LOOPBACK_MODE, sss_tool_set_loopback_mode}, + {SSS_TOOL_GET_TX_TIMEOUT, sss_tool_get_netdev_tx_timeout}, + {SSS_TOOL_SET_TX_TIMEOUT, 
sss_tool_set_netdev_tx_timeout}, + {SSS_TOOL_GET_SSET_COUNT, sss_tool_get_sset_count}, + {SSS_TOOL_GET_SSET_ITEMS, sss_tool_get_sset_stats}, + {SSS_TOOL_GET_XSFP_PRESENT, sss_tool_get_xsfp_present}, + {SSS_TOOL_GET_XSFP_INFO, sss_tool_get_xsfp_info}, + {SSS_TOOL_GET_ULD_DEV_NAME, sss_tool_get_netdev_name}, + {SSS_TOOL_CLEAR_FUNC_STATS, sss_tool_clear_func_stats}, + {SSS_TOOL_SET_LINK_MODE, sss_tool_set_link_mode}, + {SSS_TOOL_DCB_STATE, sss_tool_dcb_mt_dcb_state}, + {SSS_TOOL_QOS_DEV, sss_tool_dcb_mt_qos_map}, + {SSS_TOOL_GET_QOS_COS, sss_tool_dcb_mt_hw_qos_get}, +}; + +static int sss_tool_cmd_to_nic_driver(struct sss_nic_dev *nic_dev, + u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int idx; + int cmd_num = ARRAY_LEN(sss_tool_nic_cmd_handle); + enum sss_tool_driver_cmd_type cmd_type = (enum sss_tool_driver_cmd_type)cmd; + int ret = -EINVAL; + + mutex_lock(&nic_dev->qp_mutex); + for (idx = 0; idx < cmd_num; idx++) { + if (cmd_type == sss_tool_nic_cmd_handle[idx].cmd_type) { + ret = sss_tool_nic_cmd_handle[idx].func + (nic_dev, in_buf, in_len, out_buf, out_len); + break; + } + } + mutex_unlock(&nic_dev->qp_mutex); + + if (idx == cmd_num) + tool_err("Fail to send to nic driver, cmd %d is not exist\n", cmd_type); + + return ret; +} + +int sss_tool_ioctl(void *uld_dev, u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + if (cmd == SSS_TOOL_GET_DRV_VERSION) + return sss_tool_get_nic_version(out_buf, out_len); + + if (!uld_dev) + return -EINVAL; + + return sss_tool_cmd_to_nic_driver(uld_dev, cmd, in_buf, in_len, out_buf, out_len); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h new file mode 100644 index 0000000000000..64bbd9c3a40c2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ 
+ +#ifndef SSS_TOOL_NIC_FUNC_H +#define SSS_TOOL_NIC_FUNC_H + +int sss_tool_ioctl(void *uld_dev, u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c new file mode 100644 index 0000000000000..af759b829289a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" + +enum sss_tool_link_mode { + SSS_TOOL_LINK_MODE_AUTO = 0, + SSS_TOOL_LINK_MODE_UP, + SSS_TOOL_LINK_MODE_DOWN, + SSS_TOOL_LINK_MODE_MAX, +}; + +typedef void (*sss_tool_set_link_mode_handler_t)(struct sss_nic_dev *nic_dev); + +int sss_tool_get_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_loop_mode *mode = out_buf; + + if (!out_len || !mode) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (*out_len != sizeof(*mode)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(*mode)); + return -EINVAL; + } + + return sss_nic_get_loopback_mode(nic_dev, (u8 *)&mode->loop_mode, + (u8 *)&mode->loop_ctrl); +} + +int sss_tool_set_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + const struct sss_tool_loop_mode *mode = in_buf; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + tool_err("Fail to set lookback mode, netdev is down\n"); + return -EFAULT; + } + + if (!mode || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if 
(in_len != sizeof(*mode) || *out_len != sizeof(*mode)) { + tool_err("Invalid in len %d or out len %u is not equal to %lu\n", + in_len, *out_len, sizeof(*mode)); + return -EINVAL; + } + + ret = sss_nic_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl); + if (ret == 0) + tool_info("succeed to set loopback mode %u en %u\n", + mode->loop_mode, mode->loop_ctrl); + + return ret; +} + +static bool sss_tool_check_param_valid(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + const u32 *out_len) +{ + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + tool_err("Fail to set link mode, netdev is down\n"); + return false; + } + + if (!in_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return false; /* bool function: -EINVAL would convert to true and let NULL pointers pass */ + } + + if (in_len != sizeof(SSS_TOOL_LINK_MODE_MAX) || + *out_len != sizeof(SSS_TOOL_LINK_MODE_MAX)) { + tool_err("Invalid in len %d or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(SSS_TOOL_LINK_MODE_MAX)); + return false; + } + + return true; +} + +static void sss_tool_set_link_status(struct sss_nic_dev *nic_dev, bool status) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST) || + SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return; + + if (!status) { + if (!netif_carrier_ok(netdev)) + return; + + tool_info("Link down\n"); + nic_dev->link_status = status; + netif_carrier_off(netdev); + + } else { + if (netif_carrier_ok(netdev)) + return; + + tool_info("Link up\n"); + nic_dev->link_status = status; + netif_carrier_on(netdev); + } +} + +static void sss_tool_link_mode_auto(struct sss_nic_dev *nic_dev) +{ + u8 link_status; + + if (sss_nic_get_hw_link_state(nic_dev, &link_status)) + link_status = false; + + sss_tool_set_link_status(nic_dev, (bool)link_status); + tool_info("Success to set link mode to auto, the state is link %s\n", + (link_status ?
"up" : "down")); +} + +static void sss_tool_link_mode_up(struct sss_nic_dev *nic_dev) +{ + sss_tool_set_link_status(nic_dev, true); + tool_info("Success to set link mode to up\n"); +} + +static void sss_tool_link_mode_down(struct sss_nic_dev *nic_dev) +{ + sss_tool_set_link_status(nic_dev, false); + tool_info("Success to set link mode to down\n"); +} + +int sss_tool_set_link_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + const enum sss_tool_link_mode *mode = in_buf; + + sss_tool_set_link_mode_handler_t handler[] = { + sss_tool_link_mode_auto, + sss_tool_link_mode_up, + sss_tool_link_mode_down, + }; + + if (!sss_tool_check_param_valid(nic_dev, in_buf, in_len, out_len)) + return -EFAULT; + + if (*mode >= SSS_TOOL_LINK_MODE_MAX) { + tool_err("Fail to set link mode, mode %d\n", *mode); + return -EINVAL; + } + + handler[*mode](nic_dev); + + return 0; +} + +static int sss_tool_update_pf_bw_limit(struct sss_nic_dev *nic_dev, u32 bw_limit) +{ + int ret; + u32 old_bw_limit; + struct sss_nic_port_info port_info = {0}; + struct sss_nic_io *nic_io = nic_dev->nic_io; + + if (!nic_io) + return -EINVAL; + + if (bw_limit > SSSNIC_PF_LIMIT_BW_MAX) { + tool_err("Fail to update pf bw limit, bandwidth: %u large then max limit: %u\n", + bw_limit, SSSNIC_PF_LIMIT_BW_MAX); + return -EINVAL; + } + + old_bw_limit = nic_io->mag_cfg.pf_bw_limit; + nic_io->mag_cfg.pf_bw_limit = bw_limit; + + if (!SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + return 0; + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + tool_err("Fail to get port info\n"); + nic_io->mag_cfg.pf_bw_limit = old_bw_limit; /* roll back: the new limit was never applied to hw */ + return -EIO; + } + + ret = sss_nic_set_pf_rate(nic_dev, port_info.speed); + if (ret != 0) { + tool_err("Fail to set pf bandwidth\n"); + nic_io->mag_cfg.pf_bw_limit = old_bw_limit; /* roll back: the new limit was never applied to hw */ + return ret; + } + + return 0; +} + +static int sss_tool_check_preconditions(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + void
*out_buf, u32 *out_len) +{ + int ret; + u8 link_state = 0; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + tool_err("Fail to set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (in_len != sizeof(in_len)) { + tool_err("Invalid in len %d is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + if (*out_len != sizeof(link_state)) { + tool_err("Invalid out len %d is not equal to %lu\n", + *out_len, sizeof(link_state)); /* report the out len that failed the check, not in_len */ + return -EINVAL; + } + + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (ret != 0) { + tool_err("Fail to get link state\n"); + return -EIO; + } + + if (!link_state) { + tool_err("Fail to set pf rate, must be link up\n"); + return -EINVAL; + } + + return 0; +} + +int sss_tool_set_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u32 pf_bw_limit; + + ret = sss_tool_check_preconditions(nic_dev, in_buf, in_len, out_buf, out_len); + if (ret != 0) + return -EINVAL; + + pf_bw_limit = *((u32 *)in_buf); + + ret = sss_tool_update_pf_bw_limit(nic_dev, pf_bw_limit); + if (ret != 0) { + tool_err("Fail to set pf bandwidth limit to %d%%\n", pf_bw_limit); + if (ret < 0) + return ret; + } + + *((u8 *)out_buf) = (u8)ret; + + return 0; +} + +int sss_tool_get_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_nic_io *nic_io = NULL; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + tool_err("Fail to get VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (*out_len != sizeof(in_len)) { + tool_err("Invalid out len %d is not equal to %lu\n", + *out_len, sizeof(in_len)); + return -EFAULT; + } + + nic_io = nic_dev->nic_io; + if (!nic_io) + return
-EINVAL; + + *((u32 *)out_buf) = nic_io->mag_cfg.pf_bw_limit; + + return 0; +} + +int sss_tool_get_netdev_name(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != IFNAMSIZ) { + tool_err("Invalid out len %u is not equal to %u\n\n", + *out_len, IFNAMSIZ); + return -EINVAL; + } + + strscpy(out_buf, nic_dev->netdev->name, IFNAMSIZ); + + return 0; +} + +int sss_tool_get_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int *tx_timeout = out_buf; + struct net_device *net_dev = nic_dev->netdev; + + if (!out_buf || !out_len) { + tool_err("Fail to get netdev tx timeout, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != sizeof(in_len)) { + tool_err("Fail to get netdev tx timeout, out len %u is not equal to %lu\n", + *out_len, sizeof(in_len)); + return -EINVAL; + } + + *tx_timeout = net_dev->watchdog_timeo; + + return 0; +} + +int sss_tool_set_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + const int *tx_timeout = in_buf; + struct net_device *net_dev = nic_dev->netdev; + + if (!in_buf) { + tool_err("Invalid in buf is null\n"); + return -EFAULT; + } + + if (in_len != sizeof(in_len)) { + tool_err("Invalid in len: %u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + net_dev->watchdog_timeo = *tx_timeout * HZ; + tool_info("Success to set tx timeout check period to %ds\n", *tx_timeout); + + return 0; +} + +int sss_tool_get_xsfp_present(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_nic_mbx_get_xsfp_present *sfp_info = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*sfp_info) || *out_len != 
sizeof(*sfp_info)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*sfp_info)); + return -EINVAL; + } + + sfp_info->abs_status = sss_nic_if_sfp_absent(nic_dev); + sfp_info->head.state = 0; + + return 0; +} + +int sss_tool_get_xsfp_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + struct sss_nic_mbx_get_xsfp_info *xsfp_info = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*xsfp_info) || *out_len != sizeof(*xsfp_info)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*xsfp_info)); + return -EINVAL; + } + + ret = sss_nic_get_sfp_info(nic_dev, xsfp_info); + if (ret != 0) + xsfp_info->head.state = SSS_TOOL_EIO; + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h new file mode 100644 index 0000000000000..cbf4fbdce4f7e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_PHY_ATTR_H +#define SSS_TOOL_NIC_PHY_ATTR_H + +int sss_tool_get_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_link_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int 
sss_tool_get_netdev_name(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_xsfp_present(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_xsfp_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c new file mode 100644 index 0000000000000..6267b7665609b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_kernel.h" +#include "sss_nic_tx.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" + +static int sss_tool_get_wqe_info(struct sss_nic_dev *nic_dev, + u16 q_id, u16 wqe_id, u16 wqebb_cnt, + u8 *out_buff, const u16 *out_len, + enum sss_nic_queue_type q_type) +{ + u32 i; + void *src_wqebb = NULL; + u32 offset; + struct sss_nic_io_queue *queue = NULL; + struct sss_nic_io *nic_io = NULL; + + nic_io = nic_dev->nic_io; + if (!nic_io) { + tool_err("Fail to get wqe info, nic_io is NULL.\n"); + return -EINVAL; + } + + if (q_id >= nic_io->max_qp_num) { + tool_err("Fail to get wqe info, q_id[%u] > num_qps_cfg[%u].\n", + q_id, nic_io->max_qp_num); + return -EINVAL; + } + + if (q_type == SSSNIC_RQ) + queue = &nic_io->rq_group[q_id]; + else + queue = &nic_io->sq_group[q_id]; + + if ((wqe_id + wqebb_cnt) > queue->wq.q_depth) { + tool_err("Fail to get wqe info, (idx[%u] + idx[%u]) > 
q_depth[%u].\n", + wqe_id, wqebb_cnt, queue->wq.q_depth); + return -EINVAL; + } + + if (*out_len != (queue->wq.elem_size * wqebb_cnt)) { + tool_err("Fail to get wqe info, out len :%u is not equal to %d\n", + *out_len, (queue->wq.elem_size * wqebb_cnt)); + return -EINVAL; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqebb = sss_wq_wqebb_addr(&queue->wq, + (u16)SSS_WQ_MASK_ID(&queue->wq, wqe_id + i)); + offset = queue->wq.elem_size * i; + memcpy(out_buff + offset, src_wqebb, queue->wq.elem_size); + } + + return 0; +} + +static void sss_tool_get_sq_info(struct sss_nic_io *nic_io, u16 q_id, + struct sss_tool_sq_info *sq_info) +{ + struct sss_nic_io_queue *sq = NULL; + + sq = &nic_io->sq_group[q_id]; + + sq_info->q_depth = sq->wq.q_depth; + sq_info->q_id = q_id; + sq_info->pi = sss_nic_get_sq_local_pi(sq); + sq_info->doorbell.map_addr = (u64 *)sq->db_addr; + sq_info->fi = sss_nic_get_sq_hw_ci(sq); + sq_info->wqebb_size = sq->wq.elem_size; + sq_info->ci = sss_nic_get_sq_local_ci(sq); + sq_info->ci_addr = sq->tx.ci_addr; + sq_info->slq_handle = sq; + sq_info->cla_addr = sq->wq.block_paddr; +} + +static void sss_tool_get_rq_info(struct sss_nic_io *nic_io, u16 q_id, + struct sss_tool_rq_info *rq_info) +{ + struct sss_nic_io_queue *rq = NULL; + + rq = &nic_io->rq_group[q_id]; + + rq_info->msix_idx = rq->msix_id; + rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_vaddr); + rq_info->buf_len = nic_io->rx_buff_len; + rq_info->wqebb_size = rq->wq.elem_size; + rq_info->slq_handle = rq; + rq_info->q_id = q_id; + rq_info->ci_cla_tbl_addr = rq->wq.block_paddr; + rq_info->q_depth = (u16)rq->wq.q_depth; + rq_info->ci_wqe_page_addr = sss_wq_get_first_wqe_page_addr(&rq->wq); +} + +static int sss_tool_get_queue_info(struct sss_nic_dev *nic_dev, u16 q_id, + void *out_buff, enum sss_nic_queue_type q_type) +{ + struct sss_nic_io *nic_io = NULL; + + nic_io = nic_dev->nic_io; + if (!nic_io) { + tool_err("Fail to get wqe info, nic_io is NULL.\n"); + return -EINVAL; + } + + if (q_id >= 
nic_io->max_qp_num) { + tool_err("Fail to get rq info, input q_id(%u) is larger than max qp num:%u\n", + q_id, nic_io->max_qp_num); + return -EINVAL; + } + + (q_type == SSSNIC_RQ) ? sss_tool_get_rq_info(nic_io, q_id, out_buff) : + sss_tool_get_sq_info(nic_io, q_id, out_buff); + + return 0; +} + +static bool sss_tool_check_input_pointer(struct sss_nic_dev *nic_dev, + const void *in_buf, void *out_buf, u32 *out_len) +{ + if (!SSS_CHANNEL_RES_VALID(nic_dev)) { + tool_err("Invalid input param nic_dev\n"); + return false; + } + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid input param,in_buf/out_buf/out_len\n"); + return false; + } + + return true; +} + +int sss_tool_get_tx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 q_id; + struct sss_tool_sq_info sq_info = {0}; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(in_len)) { + tool_err("Fail to get tx info, in len :%u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + if (*out_len != sizeof(sq_info)) { + tool_err("Fail to get tx info, out len :%u is not equal to %lu\n", + *out_len, sizeof(sq_info)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)in_buf)); + + return sss_tool_get_queue_info(nic_dev, q_id, out_buf, SSSNIC_SQ); +} + +int sss_tool_get_tx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqebb_cnt = 1; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(*info)) { + tool_err("Fail to get tx wqe info, in len %u is not equal to %lu\n", + in_len, sizeof(*info)); + return -EINVAL; + } + + return sss_tool_get_wqe_info(nic_dev, (u16)info->q_id, (u16)info->wqe_id, wqebb_cnt, + out_buf, (u16 *)out_len, SSSNIC_SQ); +} + +int sss_tool_get_rx_info(struct sss_nic_dev *nic_dev, const 
void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u16 q_id; + struct sss_tool_rq_info *rq_info = out_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(u32)) { + tool_err("Invalid in len: %u is not equal to %lu\n", + in_len, sizeof(u32)); + return -EINVAL; + } + + if (*out_len != sizeof(*rq_info)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(*rq_info)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)in_buf)); + + ret = sss_tool_get_queue_info(nic_dev, q_id, out_buf, SSSNIC_RQ); + if (ret != 0) { + tool_err("Fail to get rq info, ret: %d.\n", ret); + return ret; + } + + rq_info->pending_limt = nic_dev->rq_desc_group[q_id].last_pending_limt; + rq_info->msix_vector = nic_dev->rq_desc_group[q_id].irq_id; + rq_info->delta = (u16)nic_dev->rq_desc_group[q_id].delta; + rq_info->sw_pi = nic_dev->rq_desc_group[q_id].pi; + rq_info->coalesc_timer_cfg = nic_dev->rq_desc_group[q_id].last_coal_timer; + rq_info->ci = (u16)(nic_dev->rq_desc_group[q_id].ci & + nic_dev->rq_desc_group[q_id].qid_mask); + + return 0; +} + +int sss_tool_get_rx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqebb_cnt = 1; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(struct sss_tool_wqe_info)) { + tool_err("Fail to get rx wqe info, in len: %u is not equal to %lu\n", + in_len, sizeof(struct sss_tool_wqe_info)); + return -EINVAL; + } + + return sss_tool_get_wqe_info(nic_dev, (u16)info->q_id, (u16)info->wqe_id, wqebb_cnt, + out_buf, (u16 *)out_len, SSSNIC_RQ); +} + +int sss_tool_get_rx_cqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqe_id = 0; + u16 q_id = 0; + const struct sss_tool_wqe_info *info = in_buf; + + if 
(!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(struct sss_tool_wqe_info)) { + tool_err("Fail to get rx cqe info, in len: %u is not equal to %lu\n", + in_len, sizeof(struct sss_tool_wqe_info)); + return -EINVAL; + } + + if (*out_len != sizeof(struct sss_nic_cqe)) { + tool_err("Fail to get rx cqe info, out len: %u is not equal to %lu\n", + *out_len, sizeof(struct sss_nic_cqe)); + return -EINVAL; + } + + wqe_id = (u16)info->wqe_id; + q_id = (u16)info->q_id; + + if (q_id >= nic_dev->qp_res.qp_num || wqe_id >= nic_dev->rq_desc_group[q_id].q_depth) { + tool_err("Fail to get rx cqe info, q_id[%u] >= %u, or wqe idx[%u] >= %u.\n", + q_id, nic_dev->qp_res.qp_num, wqe_id, + nic_dev->rq_desc_group[q_id].q_depth); + return -EFAULT; + } + + memcpy(out_buf, nic_dev->rq_desc_group[q_id].rx_desc_group[wqe_id].cqe, + sizeof(struct sss_nic_cqe)); + + return 0; +} + +int sss_tool_get_q_num(struct sss_nic_dev *nic_dev, const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len) +{ + if (!SSS_CHANNEL_RES_VALID(nic_dev)) { + tool_err("Fail to get queue number, netdev is down\n"); + return -EFAULT; + } + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer.\n"); + return -EINVAL; + } + + if (*out_len != sizeof(nic_dev->qp_res.qp_num)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(nic_dev->qp_res.qp_num)); + return -EINVAL; + } + + *((u16 *)out_buf) = nic_dev->qp_res.qp_num; + + return 0; +} + +int sss_tool_get_inter_num(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 intr_num = sss_nic_intr_num(nic_dev->hwdev); + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != sizeof(intr_num)) { + tool_err("Invalid out len:%u is not equal to %lu\n", + *out_len, sizeof(intr_num)); + return -EFAULT; + } + + *(u16 *)out_buf = intr_num; + + return 0; +} diff --git 
a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h new file mode 100644 index 0000000000000..c7b674751ecd7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_QP_INFO_H +#define SSS_TOOL_NIC_QP_INFO_H + +int sss_tool_get_tx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_tx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_cqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_q_num(struct sss_nic_dev *nic_dev, const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len); + +int sss_tool_get_inter_num(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c new file mode 100644 index 0000000000000..ab06d6eea4248 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_kernel.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_ethtool_stats_api.h" + +enum sss_tool_show_set { + SSS_TOOL_SHOW_SSET_IO_STATS = 1, +}; + 
+static void sss_tool_reset_nicdev_stats(struct sss_nic_dev *nic_dev) +{ + u64_stats_update_begin(&nic_dev->tx_stats.stats_sync); + nic_dev->tx_stats.rsvd1 = 0; + nic_dev->tx_stats.rsvd2 = 0; + nic_dev->tx_stats.tx_drop = 0; + nic_dev->tx_stats.tx_timeout = 0; + nic_dev->tx_stats.tx_invalid_qid = 0; + u64_stats_update_end(&nic_dev->tx_stats.stats_sync); +} + +static void sss_tool_reset_rq_stats(struct sss_nic_rq_stats *rq_stats) +{ + u64_stats_update_begin(&rq_stats->stats_sync); + rq_stats->reset_drop_sge = 0; + rq_stats->rx_packets = 0; + rq_stats->alloc_rx_dma_err = 0; + rq_stats->rx_bytes = 0; + + rq_stats->csum_errors = 0; + rq_stats->rx_dropped = 0; + rq_stats->errors = 0; + rq_stats->large_xdp_pkts = 0; + rq_stats->rx_buf_errors = 0; + rq_stats->alloc_skb_err = 0; + rq_stats->xdp_dropped = 0; + rq_stats->other_errors = 0; + rq_stats->rsvd2 = 0; + u64_stats_update_end(&rq_stats->stats_sync); +} + +static void sss_tool_reset_sq_stats(struct sss_nic_sq_stats *sq_stats) +{ + u64_stats_update_begin(&sq_stats->stats_sync); + sq_stats->unknown_tunnel_proto = 0; + sq_stats->tx_packets = 0; + sq_stats->tx_dropped = 0; + sq_stats->frag_len_overflow = 0; + sq_stats->tx_busy = 0; + sq_stats->wake = 0; + sq_stats->skb_pad_err = 0; + sq_stats->dma_map_err = 0; + sq_stats->frag_size_zero = 0; + sq_stats->tx_bytes = 0; + sq_stats->offload_err = 0; + sq_stats->rsvd1 = 0; + sq_stats->rsvd2 = 0; + u64_stats_update_end(&sq_stats->stats_sync); +} + +int sss_tool_clear_func_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int i; + + if (!out_len) { + tool_err("Invalid out len is null\n"); + return -EINVAL; + } + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + memset(&nic_dev->net_stats, 0, sizeof(nic_dev->net_stats)); +#endif + sss_tool_reset_nicdev_stats(nic_dev); + for (i = 0; i < nic_dev->max_qp_num; i++) { + sss_tool_reset_rq_stats(&nic_dev->rq_desc_group[i].stats); + sss_tool_reset_sq_stats(&nic_dev->sq_desc_group[i].stats); + } + 
+ *out_len = 0; + + return 0; +} + +int sss_tool_get_sset_count(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u32 count = 0; + + if (!in_buf || in_len != sizeof(count) || !out_len || + *out_len != sizeof(count) || !out_buf) { + tool_err("Invalid in_len: %u\n", in_len); + return -EINVAL; + } + + if (*((u32 *)in_buf) == SSS_TOOL_SHOW_SSET_IO_STATS) + count = sss_nic_get_io_stats_size(nic_dev); + + *((u32 *)out_buf) = count; + + return 0; +} + +int sss_tool_get_sset_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_show_item *items = out_buf; + u32 count; + + if (!in_buf || in_len != sizeof(count) || !out_len || !out_buf) { + tool_err("Invalid in_len: %u\n", in_len); + return -EINVAL; + } + + if (*((u32 *)in_buf) != SSS_TOOL_SHOW_SSET_IO_STATS) { + tool_err("Invalid input para %u stats\n", *((u32 *)in_buf)); + return -EINVAL; + } + + count = sss_nic_get_io_stats_size(nic_dev); + + if (count * sizeof(*items) != *out_len) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, count * sizeof(*items)); + return -EINVAL; + } + + sss_nic_get_io_stats(nic_dev, items); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h new file mode 100644 index 0000000000000..1c37214deeea3 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_STATS_H +#define SSS_TOOL_NIC_STATS_H + +int sss_tool_clear_func_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_sset_count(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_sset_stats(struct sss_nic_dev *nic_dev, const 
void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 64aa1e90e2221..95329af450118 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -19,6 +19,7 @@ config SUNGEM_PHY tristate source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/3snic/Kconfig" source "drivers/net/ethernet/actions/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 7724c9095f27c..dc909f86f04b8 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ +obj-$(CONFIG_NET_VENDOR_3SNIC) += 3snic/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ From 619564c5530e205c30638cb93e125b4df5644afb Mon Sep 17 00:00:00 2001 From: WangYuli Date: Tue, 13 Aug 2024 14:41:36 +0800 Subject: [PATCH 2/4] x86: config: add default build configs for 3snic Enable 3snic for x86. 
Signed-off-by: weiwei1 Signed-off-by: WangYuli --- arch/x86/configs/deepin_x86_desktop_defconfig | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index d115b08b1ae04..ff4db0908c224 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -1235,6 +1235,8 @@ CONFIG_PCMCIA_3C574=m CONFIG_PCMCIA_3C589=m CONFIG_VORTEX=m CONFIG_TYPHOON=m +CONFIG_NET_VENDOR_3SNIC=y +CONFIG_SSSNIC=m CONFIG_ADAPTEC_STARFIRE=m CONFIG_ET131X=m CONFIG_SLICOSS=m @@ -1246,7 +1248,10 @@ CONFIG_PCNET32=m CONFIG_PCMCIA_NMCLAN=m CONFIG_AMD_XGBE=m CONFIG_AMD_XGBE_DCB=y +CONFIG_PDS_CORE=m CONFIG_AQTION=m +CONFIG_SPI_AX88796C=m +CONFIG_SPI_AX88796C_COMPRESSION=y CONFIG_ATL2=m CONFIG_ATL1=m CONFIG_ATL1E=m @@ -1284,7 +1289,9 @@ CONFIG_ULI526X=m CONFIG_PCMCIA_XIRCOM=m CONFIG_DL2K=m CONFIG_SUNDANCE=m +CONFIG_TSNEP=m CONFIG_PCMCIA_FMVJ18X=m +CONFIG_FUN_ETH=m CONFIG_GVE=m CONFIG_HINIC=m CONFIG_E100=m @@ -1313,8 +1320,10 @@ CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_MACSEC=y CONFIG_MLX5_EN_IPSEC=y CONFIG_MLX5_EN_TLS=y +CONFIG_MLX5_SF=y CONFIG_MLXSW_CORE=m CONFIG_KS8842=m CONFIG_KS8851=m @@ -1323,7 +1332,9 @@ CONFIG_KSZ884X_PCI=m CONFIG_ENC28J60=m CONFIG_ENCX24J600=m CONFIG_LAN743X=m +CONFIG_VCAP=y CONFIG_YT6801=m +CONFIG_MICROSOFT_MANA=m CONFIG_MYRI10GE=m CONFIG_FEALNX=m CONFIG_NI_XGE_MANAGEMENT_ENET=m @@ -1351,6 +1362,7 @@ CONFIG_R6040=m CONFIG_ATP=m CONFIG_8139CP=m CONFIG_8139TOO=m +CONFIG_8139TOO_TUNE_TWISTER=y CONFIG_8139TOO_8129=y CONFIG_R8169=m CONFIG_ROCKER=m @@ -1360,11 +1372,14 @@ CONFIG_SIS900=m CONFIG_SIS190=m CONFIG_SFC=m CONFIG_SFC_FALCON=m +CONFIG_SFC_SIENA=m +CONFIG_SFC_SIENA_SRIOV=y CONFIG_PCMCIA_SMC91C92=m CONFIG_EPIC100=m CONFIG_SMSC911X=m CONFIG_SMSC9420=m CONFIG_STMMAC_ETH=m +CONFIG_DWMAC_PHYTIUM=m CONFIG_STMMAC_PCI=m CONFIG_HAPPYMEAL=m CONFIG_SUNGEM=m @@ -1374,6 
+1389,7 @@ CONFIG_DWC_XLGMAC=m CONFIG_DWC_XLGMAC_PCI=m CONFIG_TEHUTI=m CONFIG_TLAN=m +CONFIG_MSE102X=m CONFIG_VIA_RHINE=m CONFIG_VIA_RHINE_MMIO=y CONFIG_VIA_VELOCITY=m @@ -1382,9 +1398,12 @@ CONFIG_TXGBE=m CONFIG_WIZNET_W5100=m CONFIG_WIZNET_W5300=m CONFIG_WIZNET_W5100_SPI=m +CONFIG_XILINX_EMACLITE=m CONFIG_XILINX_AXI_EMAC=m CONFIG_XILINX_LL_TEMAC=m CONFIG_PCMCIA_XIRC2PS=m +# CONFIG_NET_VENDOR_PHYTIUM is not set +CONFIG_GRTNIC=m CONFIG_NCE=m CONFIG_NE6X=m CONFIG_NE6XVF=m From 3faacd52db00fd9e403ca8afe05d40933cc7d5d8 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Tue, 13 Aug 2024 15:11:28 +0800 Subject: [PATCH 3/4] LoongArch: config: add default build configs for 3snic Enable 3snic for LoongArch. Signed-off-by: weiwei1 Signed-off-by: WangYuli --- arch/loongarch/configs/deepin_loongarch_desktop_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index e158103ab5259..f0f1d59e7907d 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -1279,6 +1279,7 @@ CONFIG_PCMCIA_3C574=m CONFIG_PCMCIA_3C589=m CONFIG_VORTEX=m CONFIG_TYPHOON=m +CONFIG_NET_VENDOR_3SNIC=y CONFIG_ADAPTEC_STARFIRE=m CONFIG_ET131X=m CONFIG_SLICOSS=m @@ -1452,6 +1453,7 @@ CONFIG_XILINX_EMACLITE=m CONFIG_XILINX_AXI_EMAC=m CONFIG_XILINX_LL_TEMAC=m CONFIG_PCMCIA_XIRC2PS=m +CONFIG_GRTNIC=m CONFIG_NCE=m CONFIG_FDDI=m CONFIG_DEFXX=m From 588ae67355b7e6b52a1918307da79fac74a19f1b Mon Sep 17 00:00:00 2001 From: WangYuli Date: Tue, 13 Aug 2024 15:48:35 +0800 Subject: [PATCH 4/4] arm64: config: add default build configs for 3snic Enable 3snic for arm64. 
Signed-off-by: weiwei1 Signed-off-by: WangYuli --- .../configs/deepin_arm64_desktop_defconfig | 80 +++++++++++++++++-- 1 file changed, 75 insertions(+), 5 deletions(-) diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index 406242be45cf4..6df7139d1e1ae 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -1305,17 +1305,27 @@ CONFIG_NET_DSA_SMSC_LAN9303_I2C=m CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m CONFIG_VORTEX=m CONFIG_TYPHOON=m +CONFIG_NET_VENDOR_3SNIC=y +CONFIG_SSSNIC=m CONFIG_ADAPTEC_STARFIRE=m CONFIG_ET131X=m CONFIG_SLICOSS=m +CONFIG_SUN4I_EMAC=m CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y CONFIG_ALTERA_TSE=m -# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_ENA_ETHERNET=m CONFIG_AMD8111_ETH=m CONFIG_PCNET32=m CONFIG_AMD_XGBE=m CONFIG_AMD_XGBE_DCB=y +CONFIG_PDS_CORE=m +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_AQTION=m +CONFIG_EMAC_ROCKCHIP=m +CONFIG_SPI_AX88796C=m +CONFIG_SPI_AX88796C_COMPRESSION=y CONFIG_ATL2=m CONFIG_ATL1=m CONFIG_ATL1E=m @@ -1326,6 +1336,8 @@ CONFIG_BCMGENET=m CONFIG_TIGON3=m CONFIG_BNX2X=m CONFIG_SYSTEMPORT=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y CONFIG_MACB=m CONFIG_MACB_PCI=m CONFIG_THUNDER_NIC_PF=m @@ -1337,9 +1349,12 @@ CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T4_DCB=y CONFIG_CHELSIO_T4_FCOE=y CONFIG_CHELSIO_T4VF=m +CONFIG_CRYPTO_DEV_CHELSIO_TLS=m CONFIG_CHELSIO_IPSEC_INLINE=m +CONFIG_CHELSIO_TLS_DEVICE=m CONFIG_ENIC=m CONFIG_GEMINI_ETHERNET=m +CONFIG_DM9051=m CONFIG_DNET=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m @@ -1353,10 +1368,22 @@ CONFIG_DM9102=m CONFIG_ULI526X=m CONFIG_DL2K=m CONFIG_SUNDANCE=m +CONFIG_TSNEP=m CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=m +CONFIG_FEC=m +CONFIG_FSL_FMAN=m +CONFIG_FSL_XGMAC_MDIO=m +CONFIG_GIANFAR=m +CONFIG_FSL_DPAA2_SWITCH=m +CONFIG_FSL_ENETC=m +CONFIG_FSL_ENETC_VF=m +CONFIG_FSL_ENETC_QOS=y +CONFIG_FUN_ETH=m +CONFIG_GVE=m CONFIG_HIX5HD2_GMAC=m CONFIG_HISI_FEMAC=m CONFIG_HIP04_ETH=m 
+CONFIG_HI13X1_GMAC=y CONFIG_HNS_DSAF=m CONFIG_HNS_ENET=m CONFIG_HNS3=m @@ -1376,43 +1403,70 @@ CONFIG_I40E_DCB=y CONFIG_I40EVF=m CONFIG_ICE=m CONFIG_FM10K=m +CONFIG_IGC=m CONFIG_JME=m -CONFIG_MVMDIO=m +CONFIG_ADIN1110=m +CONFIG_LITEX_LITEETH=m +CONFIG_MVNETA=m +CONFIG_MVPP2=m +CONFIG_MVPP2_PTP=y +CONFIG_PXA168_ETH=m CONFIG_SKGE=m CONFIG_SKGE_GENESIS=y CONFIG_SKY2=m +CONFIG_OCTEONTX2_AF=m +CONFIG_OCTEONTX2_PF=m +CONFIG_OCTEONTX2_VF=m +CONFIG_OCTEON_EP=m +CONFIG_PRESTERA=m +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NET_MEDIATEK_SOC=m +CONFIG_NET_MEDIATEK_STAR_EMAC=m CONFIG_MLX4_EN=m # CONFIG_MLX4_DEBUG is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_MACSEC=y +CONFIG_MLX5_EN_IPSEC=y +CONFIG_MLX5_EN_TLS=y +CONFIG_MLX5_SF=y CONFIG_MLXSW_CORE=m +CONFIG_MLXBF_GIGE=m +CONFIG_KS8842=m +CONFIG_KS8851=m CONFIG_KS8851_MLL=m CONFIG_KSZ884X_PCI=m CONFIG_ENC28J60=m CONFIG_ENC28J60_WRITEVERIFY=y CONFIG_ENCX24J600=m CONFIG_LAN743X=m +CONFIG_LAN966X_SWITCH=m +CONFIG_SPARX5_SWITCH=m CONFIG_YT6801=m CONFIG_MSCC_OCELOT_SWITCH=m CONFIG_MYRI10GE=m CONFIG_FEALNX=m +CONFIG_NI_XGE_MANAGEMENT_ENET=m CONFIG_NATSEMI=m CONFIG_NS83820=m CONFIG_S2IO=m -# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_NFP=m CONFIG_NE2K_PCI=m CONFIG_FORCEDETH=m CONFIG_ETHOC=m CONFIG_HAMACHI=m CONFIG_YELLOWFIN=m +CONFIG_IONIC=m CONFIG_QLA3XXX=m CONFIG_QLCNIC=m CONFIG_NETXEN_NIC=m CONFIG_QED=m CONFIG_QEDE=m CONFIG_BNA=m +CONFIG_QCA7000_SPI=m +CONFIG_QCA7000_UART=m CONFIG_QCOM_EMAC=m CONFIG_RMNET=m CONFIG_R6040=m @@ -1423,6 +1477,9 @@ CONFIG_8139TOO_TUNE_TWISTER=y CONFIG_8139TOO_8129=y CONFIG_8139_OLD_RX_RESET=y CONFIG_R8169=m +CONFIG_SH_ETH=m +CONFIG_RAVB=m +CONFIG_RENESAS_ETHER_SWITCH=m CONFIG_ROCKER=m CONFIG_SXGBE_ETH=m CONFIG_SC92031=m @@ -1430,10 +1487,13 @@ CONFIG_SIS900=m CONFIG_SIS190=m CONFIG_SFC=m CONFIG_SFC_FALCON=m +CONFIG_SFC_SIENA=m CONFIG_SMC91X=m CONFIG_EPIC100=m CONFIG_SMSC911X=m CONFIG_SMSC9420=m +CONFIG_SNI_AVE=m +CONFIG_SNI_NETSEC=m 
CONFIG_STMMAC_ETH=y CONFIG_DWMAC_DWC_QOS_ETH=m CONFIG_DWMAC_MEDIATEK=m @@ -1448,7 +1508,13 @@ CONFIG_NIU=m CONFIG_DWC_XLGMAC=m CONFIG_DWC_XLGMAC_PCI=m CONFIG_TEHUTI=m +CONFIG_TI_CPSW_PHY_SEL=y +CONFIG_TI_K3_AM65_CPSW_NUSS=m +CONFIG_TI_K3_AM65_CPSW_SWITCHDEV=y +CONFIG_TI_K3_AM65_CPTS=m +CONFIG_TI_AM65_CPSW_TAS=y CONFIG_TLAN=m +CONFIG_MSE102X=m CONFIG_VIA_RHINE=m CONFIG_VIA_RHINE_MMIO=y CONFIG_VIA_VELOCITY=m @@ -1456,10 +1522,14 @@ CONFIG_NGBE=m CONFIG_TXGBE=m CONFIG_WIZNET_W5100=m CONFIG_WIZNET_W5300=m -CONFIG_WIZNET_BUS_DIRECT=y +CONFIG_WIZNET_W5100_SPI=m +CONFIG_XILINX_EMACLITE=m +CONFIG_XILINX_AXI_EMAC=m +CONFIG_XILINX_LL_TEMAC=m CONFIG_PHYTMAC=m CONFIG_PHYTMAC_PLATFORM=m CONFIG_PHYTMAC_PCI=m +CONFIG_GRTNIC=m CONFIG_NCE=m CONFIG_NE6X=m CONFIG_NE6XVF=m @@ -1528,7 +1598,6 @@ CONFIG_CAN_KVASER_USB=m CONFIG_CAN_MCBA_USB=m CONFIG_CAN_PEAK_USB=m CONFIG_CAN_UCAN=m -CONFIG_MDIO_BITBANG=m CONFIG_MDIO_GPIO=m CONFIG_MDIO_HISI_FEMAC=y CONFIG_MDIO_MSCC_MIIM=m @@ -3830,6 +3899,7 @@ CONFIG_PHYTIUM_IXIC=y CONFIG_IPACK_BUS=m CONFIG_BOARD_TPCI200=m CONFIG_SERIAL_IPOCTAL=m +# CONFIG_RESET_MCHP_SPARX5 is not set CONFIG_BCM_KONA_USB2_PHY=m CONFIG_PHY_HI6220_USB=m CONFIG_PHY_HISTB_COMBPHY=m