diff --git a/drivers/net/ethernet/motorcomm/yt6801/Makefile b/drivers/net/ethernet/motorcomm/yt6801/Makefile index 93b5c4510eb05..904db987b7cee 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/Makefile +++ b/drivers/net/ethernet/motorcomm/yt6801/Makefile @@ -12,4 +12,4 @@ yt6801-objs := fuxi-gmac-common.o \ fuxi-gmac-pci.o \ fuxi-gmac-phy.o \ fuxi-efuse.o \ - fuxi-gmac-debugfs.o + fuxi-gmac-ioctl.o diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c index ae4ca3d59ac4c..56cb77d2b453a 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c @@ -7,13 +7,13 @@ /* read patch per index. */ bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, - u8 index, u32 *offset, u32 *value) + u8 index, u32 __far *offset, u32 __far *value) { unsigned int wait, i; u32 regval = 0; bool succeed = false; - if (index >= FUXI_EFUSE_MAX_ENTRY) { + if (index >= FXGMAC_EFUSE_MAX_ENTRY) { FXGMAC_PR("Reading efuse out of range, index %d\n", index); return false; } @@ -105,6 +105,95 @@ bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, return succeed; } + +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, + u32 *subsys, u32 *revid) +{ + u32 offset = 0, value = 0; + u32 machr = 0, maclr = 0; + bool succeed = true; + u8 index = 0; + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, + &offset, &value)) { + succeed = false; + break; /* reach the last item. */ + } + if (0x00 == offset) { + break; /* reach the blank. */ + } + if (MACA0LR_FROM_EFUSE == offset) { + maclr = value; + } + if (MACA0HR_FROM_EFUSE == offset) { + machr = value; + } + + if ((0x08 == offset) && revid) { + *revid = value; + } + if ((0x2C == offset) && subsys) { + *subsys = value; + } + } + if (mac_addr) { + mac_addr[5] = (u8)(maclr & 0xFF); + mac_addr[4] = (u8)((maclr >> 8) & 0xFF); + mac_addr[3] = (u8)((maclr >> 16) & 0xFF); + mac_addr[2] = (u8)((maclr >> 24) & 0xFF); + mac_addr[1] = (u8)(machr & 0xFF); + mac_addr[0] = (u8)((machr >> 8) & 0xFF); + } + + return succeed; +} + + +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 __far *value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val = 0; + + if (value) { + *value = 0; + } + + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, offset); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (succeed) { + if (value) { + *value = FXGMAC_GET_REG_BITS(reg_val, + EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", offset); + } + + return succeed; +} + +#ifndef COMMENT_UNUSED_CODE_TO_REDUCE_SIZE bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 *value) /* read patch per index. 
*/ { @@ -120,7 +209,7 @@ bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, return false; } - for (index = 0; index < FUXI_EFUSE_MAX_ENTRY; index++) { + for (index = 0; index < FXGMAC_EFUSE_MAX_ENTRY; index++) { if (!fxgmac_read_patch_from_efuse_per_index( pdata, index, ®_offset, ®_val)) { succeed = false; @@ -146,7 +235,7 @@ bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 reg_val; bool succeed = false; u32 cur_reg, cur_val; - u8 max_index = FUXI_EFUSE_MAX_ENTRY; + u8 max_index = FXGMAC_EFUSE_MAX_ENTRY; if (offset >> 16) { FXGMAC_PR( @@ -157,7 +246,7 @@ bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®_val); if (EFUSE_LED_COMMON_SOLUTION == reg_val) { - max_index = FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON; + max_index = FXGMAC_EFUSE_MAX_ENTRY_UNDER_LED_COMMON; } if (index >= max_index) { @@ -370,55 +459,16 @@ bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, return succeed; } -bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, - u32 *subsys, u32 *revid) -{ - u32 offset = 0, value = 0; - u32 machr = 0, maclr = 0; - bool succeed = true; - u8 index = 0; - - for (index = 0;; index++) { - if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, - &offset, &value)) { - succeed = false; - break; /* reach the last item. */ - } - if (0x00 == offset) { - break; /* reach the blank. */ - } - if (MACA0LR_FROM_EFUSE == offset) { - maclr = value; - } - if (MACA0HR_FROM_EFUSE == offset) { - machr = value; - } - - if ((0x08 == offset) && revid) { - *revid = value; - } - if ((0x2C == offset) && subsys) { - *subsys = value; - } - } - if (mac_addr) { - mac_addr[5] = (u8)(maclr & 0xFF); - mac_addr[4] = (u8)((maclr >> 8) & 0xFF); - mac_addr[3] = (u8)((maclr >> 16) & 0xFF); - mac_addr[2] = (u8)((maclr >> 24) & 0xFF); - mac_addr[1] = (u8)(machr & 0xFF); - mac_addr[0] = (u8)((machr >> 8) & 0xFF); - } - - return succeed; -} - bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid) { - u32 machr = 0, maclr = 0, pcie_cfg_ctrl = PCIE_CFG_CTRL_DEFAULT_VAL; +#ifdef DBG + u32 machr = 0, maclr = 0; +#endif + u32 pcie_cfg_ctrl = PCIE_CFG_CTRL_DEFAULT_VAL; bool succeed = true; if (mac_addr) { +#ifdef DBG machr = readreg(pdata->pAdapter, pdata->base_mem + MACA0HR_FROM_EFUSE); maclr = readreg(pdata->pAdapter, @@ -427,7 +477,7 @@ bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, maclr & 0xFF); - +#endif if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, (((u32)mac_addr[0]) << 8) | mac_addr[1])) { @@ -473,10 +523,13 @@ bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr) { +#ifdef DBG u32 machr = 0, maclr = 0; +#endif bool succeed = true; if (mac_addr) { +#ifdef DBG machr = readreg(pdata->pAdapter, pdata->base_mem + MACA0HR_FROM_EFUSE); maclr = readreg(pdata->pAdapter, @@ -485,7 +538,7 @@ bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr) (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, maclr & 0xFF); - +#endif if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, (((u32)mac_addr[0]) << 8) | mac_addr[1])) { @@ -585,49 +638,6 @@ bool fxgmac_efuse_load(struct fxgmac_pdata 
*pdata) return succeed; } -bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value) -{ - bool succeed = false; - unsigned int wait; - u32 reg_val = 0; - - if (value) { - *value = 0; - } - - reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, - EFUSE_OP_ADDR_LEN, offset); - reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, - EFUSE_OP_START_LEN, 1); - reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, - EFUSE_OP_MODE_LEN, - EFUSE_OP_MODE_ROW_READ); - writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); - wait = 1000; - while (wait--) { - usleep_range_ex(pdata->pAdapter, 20, 50); - reg_val = readreg(pdata->pAdapter, - pdata->base_mem + EFUSE_OP_CTRL_1); - if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, - EFUSE_OP_DONE_LEN)) { - succeed = true; - break; - } - } - - if (succeed) { - if (value) { - *value = FXGMAC_GET_REG_BITS(reg_val, - EFUSE_OP_RD_DATA_POS, - EFUSE_OP_RD_DATA_LEN); - } - } else { - FXGMAC_PR("Fail to reading efuse Byte%d\n", offset); - } - - return succeed; -} - bool fxgmac_efuse_write_oob(struct fxgmac_pdata *pdata) { bool succeed = false; @@ -1341,4 +1351,5 @@ bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata) } return bsucceed; -} \ No newline at end of file +} +#endif diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h index fa0446958719c..cc99ac0baa2da 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h @@ -1,12 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#ifndef __FUXI_EFUSE_H__ -#define __FUXI_EFUSE_H__ +#ifndef __FXGMAC_EFUSE_H__ +#define __FXGMAC_EFUSE_H__ +bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 __far *offset, u32 __far *value); /* read patch per 0-based index. */ +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 __far *value); +#ifndef COMMENT_UNUSED_CODE_TO_REDUCE_SIZE bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 *value); /* read patch per register offset. */ -bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 *offset, u32 *value); /* read patch per 0-based index. */ bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 value); bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 offset, u32 value); bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); @@ -21,5 +24,6 @@ bool fxgmac_efuse_write_oob(struct fxgmac_pdata *pdata); bool fxgmac_efuse_write_led(struct fxgmac_pdata *pdata, u32 value); bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata); bool fxgmac_write_led_setting_to_efuse(struct fxgmac_pdata *pdata); +#endif -#endif /* __FUXI_EFUSE_H__ */ \ No newline at end of file +#endif /* __FXGMAC_EFUSE_H__ */ \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-errno.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-errno.h new file mode 100644 index 0000000000000..ec085e366af3b --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-errno.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#ifndef __FXGMAC_ERROR_H__ +#define __FXGMAC_ERROR_H__ + +#define EOK 0 + +/* ref linux https://elixir.bootlin.com/linux/v5.18-rc7/source/include/uapi/asm-generic/errno.h#L93 */ +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Argument list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ + + + +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ + +/* + * This error code is special: arch syscall entry code will return + * -ENOSYS if users try to call a syscall that doesn't exist. To keep + * failures of syscalls that really do exist distinguishable from + * failures due to attempts to use a nonexistent syscall, syscall + * implementations should refrain from returning -ENOSYS. 
+ */ +#define ENOSYS 38 /* Invalid system call number */ + +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport 
endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ +#define ECANCELED 125 /* Operation Canceled */ +#define ENOKEY 126 /* Required key not available */ +#define EKEYEXPIRED 127 /* Key has expired */ +#define EKEYREVOKED 128 /* Key has been revoked */ +#define EKEYREJECTED 129 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 130 /* Owner died */ +#define ENOTRECOVERABLE 131 /* State not recoverable */ + +#define ERFKILL 132 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 133 /* Memory page has hardware error */ + +#endif // __FXGMAC_ERROR_H__ \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c index 63cbf948cbfa2..9eeae95d13759 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c @@ -1,14 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#include -#include - -#include "fuxi-os.h" #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" -MODULE_LICENSE("Dual BSD/GPL"); +MODULE_LICENSE("GPL"); static int debug = 16; module_param(debug, int, 0644); @@ -21,7 +17,7 @@ static void fxgmac_read_mac_addr(struct fxgmac_pdata *pdata) struct net_device *netdev = pdata->netdev; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - DPRINTK("read mac from eFuse\n"); + /* DPRINTK("read mac from eFuse\n"); */ /* if efuse have mac addr, use it.if not, use static mac address. */ hw_ops->read_mac_subsys_from_efuse(pdata, pdata->mac_addr, NULL, NULL); @@ -35,10 +31,10 @@ static void fxgmac_default_config(struct fxgmac_pdata *pdata) { pdata->tx_osp_mode = DMA_OSP_ENABLE; pdata->tx_sf_mode = MTL_TSF_ENABLE; - pdata->rx_sf_mode = MTL_RSF_DISABLE; /* MTL_RSF_DISABLE 20210514 */ + pdata->rx_sf_mode = MTL_RSF_ENABLE; /* MTL_RSF_DISABLE 20210514 */ pdata->pblx8 = DMA_PBL_X8_ENABLE; /* DMA_PBL_X8_ENABLE 20210514 */ - pdata->tx_pbl = DMA_PBL_32; - pdata->rx_pbl = DMA_PBL_32; /* DMA_PBL_32 20210514 */ + pdata->tx_pbl = DMA_PBL_16; + pdata->rx_pbl = DMA_PBL_4; /* DMA_PBL_32 20210514 */ pdata->tx_threshold = MTL_TX_THRESHOLD_128; pdata->rx_threshold = MTL_RX_THRESHOLD_128; pdata->tx_pause = 1; @@ -50,7 +46,7 @@ static void fxgmac_default_config(struct fxgmac_pdata *pdata) pdata->rss = 0; #endif /* open interrupt moderation default */ - pdata->intr_mod = 1; + pdata->intr_mod = FXGMAC_INT_MODERATION_ENABLED; pdata->crc_check = 1; /* set based on phy status. 
pdata->phy_speed = SPEED_1000; */ @@ -59,6 +55,9 @@ static void fxgmac_default_config(struct fxgmac_pdata *pdata) pdata->phy_duplex = DUPLEX_FULL; pdata->expansion.phy_link = false; pdata->phy_speed = SPEED_1000; + pdata->expansion.pre_phy_speed = pdata->phy_speed; + pdata->expansion.pre_phy_duplex = pdata->phy_duplex; + pdata->expansion.pre_phy_autoneg = pdata->phy_autoeng; /* default to magic */ pdata->expansion.wol = WAKE_MAGIC; @@ -66,8 +65,8 @@ static void fxgmac_default_config(struct fxgmac_pdata *pdata) strscpy(pdata->drv_name, FXGMAC_DRV_NAME, sizeof(pdata->drv_name)); strscpy(pdata->drv_ver, FXGMAC_DRV_VERSION, sizeof(pdata->drv_ver)); - printk("FXGMAC_DRV_NAME:%s, FXGMAC_DRV_VERSION:%s\n", FXGMAC_DRV_NAME, - FXGMAC_DRV_VERSION); + dev_info(pdata->dev, "FXGMAC_DRV_NAME:%s, FXGMAC_DRV_VERSION:%s\n", + FXGMAC_DRV_NAME, FXGMAC_DRV_VERSION); } static void fxgmac_init_all_ops(struct fxgmac_pdata *pdata) @@ -75,7 +74,7 @@ static void fxgmac_init_all_ops(struct fxgmac_pdata *pdata) fxgmac_init_desc_ops(&pdata->desc_ops); fxgmac_init_hw_ops(&pdata->hw_ops); - DPRINTK("register desc_ops and hw ops\n"); + /* DPRINTK("register desc_ops and hw ops\n"); */ } int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) @@ -112,7 +111,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) /* Set the DMA mask */ #ifdef CONFIG_ARM64 - dma_width = FUXI_DMA_BIT_MASK; + dma_width = FXGMAC_DMA_BIT_MASK; #else dma_width = pdata->hw_feat.dma_width; #endif @@ -152,17 +151,11 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) pdata->hw_feat.tx_q_cnt); pdata->tx_q_count = pdata->tx_ring_count; -#if !(FXGMAC_NUM_OF_TX_Q_USED) ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count); -#else - ret = netif_set_real_num_tx_queues( - netdev, FXGMAC_NUM_OF_TX_Q_USED /*pdata->tx_q_count*/); -#endif DPRINTK("num_online_cpus:%u, tx_ch_cnt:%u, tx_q_cnt:%u, tx_ring_count:%u\n", num_online_cpus(), pdata->hw_feat.tx_ch_cnt, pdata->hw_feat.tx_q_cnt, pdata->tx_ring_count); - if (ret) { dev_err(pdata->dev, "error setting real tx queue count\n"); return ret; @@ -192,7 +185,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt, pdata->hw_feat.rx_q_cnt, pdata->rx_ring_count); DPRINTK("channel_count:%u, netdev tx channel_num=%u\n", - pdata->channel_count, netdev->num_tx_queues); + pdata->channel_count, netdev->real_num_tx_queues); /* Initialize RSS hash key and lookup table */ #if FXGMAC_RSS_HASH_KEY_LINUX @@ -202,7 +195,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) hw_ops->get_rss_hash_key(pdata, (u8 *)pdata->rss_key); #endif -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED for (i = 0; i < FXGMAC_RSS_MAX_TABLE_SIZE; i++) { pdata->rss_table[i] = FXGMAC_SET_REG_BITS( pdata->rss_table[i], MAC_RSSDR_DMCH_POS, @@ -294,7 +287,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) /* Use default watchdog timeout */ netdev->watchdog_timeo = msecs_to_jiffies(5000); /* refer to sunxi-gmac, 5s */ - netdev->gso_max_size = NIC_MAX_TCP_OFFLOAD_SIZE; + netif_set_tso_max_size(netdev, NIC_MAX_TCP_OFFLOAD_SIZE); /* Tx coalesce parameters initialization */ pdata->tx_usecs = FXGMAC_INIT_DMA_TX_USECS; @@ -306,6 +299,8 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) pdata->rx_usecs = FXGMAC_INIT_DMA_RX_USECS; pdata->rx_frames = FXGMAC_INIT_DMA_RX_FRAMES; + mutex_init(&pdata->expansion.mutex); + DPRINTK("fxgmac_init callout, ok.\n"); return 0; @@ 
-320,14 +315,14 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) * otherwise, just roll back to legacy */ vectors = num_online_cpus(); - DPRINTK("num of cpu=%d\n", vectors); if (vectors >= FXGMAC_MAX_DMA_CHANNELS) { - /* 0-3 for rx, 4 for tx, 5 for phy */ + /* 0-3 for rx, 4 for tx, 5 for misc */ req_vectors = FXGMAC_MSIX_INT_NUMS; pdata->expansion.msix_entries = kcalloc( req_vectors, sizeof(struct msix_entry), GFP_KERNEL); if (!pdata->expansion.msix_entries) { - DPRINTK("MSIx, kcalloc err for msix entries, rollback to MSI..\n"); + dev_err(pdata->dev, "MSIx, kcalloc err for msix entries, \ + rollback to MSI\n"); goto enable_msi_interrupt; } else { for (i = 0; i < req_vectors; i++) @@ -337,14 +332,14 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) pdata->pdev, pdata->expansion.msix_entries, req_vectors, req_vectors); if (rc < 0) { - DPRINTK("enable MSIx failed,%d.\n", rc); + dev_err(pdata->dev, "enable MSIx failed,%d.\n", rc); req_vectors = 0; /* indicate failure */ } else { req_vectors = rc; } if (req_vectors >= FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX) { - DPRINTK("enable MSIx ok, cpu=%d, vectors=%d.\n", + dev_info(pdata->dev, "enable MSIx ok, cpu=%d,vectors=%d.\n", vectors, req_vectors); pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( @@ -353,21 +348,22 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_MSIX_ENABLED); pdata->per_channel_irq = 1; - pdata->expansion.phy_irq = + pdata->expansion.misc_irq = pdata->expansion .msix_entries[MSI_ID_PHY_OTHER] .vector; return; } else if (req_vectors) { - DPRINTK("enable MSIx with only %d vector, while we need %d, rollback to MSI.\n", - req_vectors, vectors); + dev_err(pdata->dev, "enable MSIx with only %d vector, \ ++ while we need %d, rollback to MSI.\n", ++ req_vectors, vectors); /* roll back to msi */ pci_disable_msix(pdata->pdev); kfree(pdata->expansion.msix_entries); pdata->expansion.msix_entries = NULL; req_vectors = 0; } else { - DPRINTK("enable MSIx failure and clear msix entries.\n"); + dev_err(pdata->dev, "enable MSIx failure and clear msix entries.\n"); /* roll back to msi */ kfree(pdata->expansion.msix_entries); pdata->expansion.msix_entries = NULL; @@ -382,13 +378,13 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); - DPRINTK("enable MSI failure, rollback to LEGACY.\n"); + dev_err(pdata->dev, "dev_err MSI failure, rollback to LEGACY.\n"); } else { pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_MSI_ENABLED); pdata->dev_irq = pdata->pdev->irq; - DPRINTK("enable MSI ok, irq=%d.\n", pdata->pdev->irq); + dev_info(pdata->dev, "enable MSI ok, cpu=%d, irq=%d.\n", vectors, pdata->pdev->irq); } #else pdata = pdata; @@ -417,23 +413,19 @@ int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res) pdata->netdev = netdev; pdata->dev_irq = res->irq; - + pdata->msg_enable = NETIF_MSG_DRV; + pdata->expansion.dev_state = FXGMAC_DEV_PROBE; /* default to legacy interrupt */ pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); - pdata->expansion.phy_irq = pdata->dev_irq; + pdata->expansion.misc_irq = pdata->dev_irq; fxgmac_init_interrupt_scheme(pdata); - 
pdata->expansion.current_state = CURRENT_STATE_INIT; - - pdata->msg_enable = NETIF_MSG_DRV; - DPRINTK("netif msg_enable init to %08x\n", pdata->msg_enable); - pdata->mac_regs = res->addr; pdata->base_mem = res->addr; - pdata->mac_regs = pdata->mac_regs + FUXI_MAC_REGS_OFFSET; + pdata->mac_regs = pdata->mac_regs + FXGMAC_MAC_REGS_OFFSET; ret = fxgmac_init(pdata, true); if (ret) { @@ -451,12 +443,7 @@ int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res) } if (netif_msg_drv(pdata)) DPRINTK("fxgamc_drv_prob callout, netdev num_tx_q=%u\n", - netdev->num_tx_queues); - -#ifdef HAVE_FXGMAC_DEBUG_FS - fxgmac_dbg_init(pdata); - fxgmac_dbg_adapter_init(pdata); -#endif /* HAVE_FXGMAC_DEBUG_FS */ + netdev->real_num_tx_queues); return 0; @@ -473,9 +460,6 @@ int fxgmac_drv_remove(struct device *dev) struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; -#ifdef HAVE_FXGMAC_DEBUG_FS - fxgmac_dbg_adapter_exit(pdata); -#endif /*HAVE_FXGMAC_DEBUG_FS */ hw_ops->led_under_shutdown(pdata); unregister_netdev(netdev); @@ -583,8 +567,7 @@ void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata) hw_feat->version = readl(pdata->mac_regs + MAC_VR); if (netif_msg_drv(pdata)) - DPRINTK("get offset 0x110, ver=%#x\n", - readl(pdata->mac_regs + 0x110)); + DPRINTK("Mac ver=%#x\n", hw_feat->version); /* Hardware feature register 0 */ hw_feat->phyifsel = FXGMAC_GET_REG_BITS( @@ -715,7 +698,7 @@ void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata) hw_feat->tc_cnt++; hw_feat->hwfr3 = mac_hfr3; - DPRINTK("HWFR3: %u\n", mac_hfr3); + /* DPRINTK("HWFR3: %u\n", mac_hfr3); */ } void fxgmac_print_all_hw_features(struct fxgmac_pdata *pdata) diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c deleted file mode 100644 index 4596d91b6e282..0000000000000 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c +++ /dev/null @@ -1,787 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2021 Motorcomm Corporation. 
*/ - -#include "fuxi-gmac.h" -#include "fuxi-gmac-reg.h" -#ifdef HAVE_FXGMAC_DEBUG_FS -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define TEST_MAC_HEAD 14 -#define TEST_TCP_HEAD_LEN_OFFSET 12 -#define TEST_TCP_OFFLOAD_LEN_OFFSET 48 -#define TEST_TCP_FIX_HEAD_LEN 24 -#define TEST_TCP_MSS_OFFSET 56 - -#define DF_MAX_NIC_NUM 16 - -#ifdef HAVE_FXGMAC_DEBUG_FS - -/** - * fxgmac_dbg_netdev_ops_read - read for netdev_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t fxgmac_dbg_netdev_ops_read(struct file *filp, - char __user *buffer, size_t count, - loff_t *ppos) -{ - struct fxgmac_pdata *pdata = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", pdata->netdev->name, - pdata->expansion.fxgmac_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; -} - -/** - * fxgmac_dbg_netdev_ops_write - write into netdev_ops datum - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - **/ -static ssize_t fxgmac_dbg_netdev_ops_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct fxgmac_pdata *pdata = filp->private_data; - int len; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - if (count >= sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf)) - return -ENOSPC; - - len = simple_write_to_buffer( - pdata->expansion.fxgmac_dbg_netdev_ops_buf, - sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf) - 1, ppos, - buffer, count); - if (len < 0) - return len; - - pdata->expansion.fxgmac_dbg_netdev_ops_buf[len] = '\0'; - - if (strncmp(pdata->expansion.fxgmac_dbg_netdev_ops_buf, "tx_timeout", - 10) == 0) { - DPRINTK("tx_timeout called\n"); - } else { - FXGMAC_PR("Unknown command: %s\n", - pdata->expansion.fxgmac_dbg_netdev_ops_buf); - FXGMAC_PR("Available commands:\n"); - FXGMAC_PR(" tx_timeout\n"); - } - return count; -} -#endif - -static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) -{ - unsigned int pktLen = 0; - struct sk_buff *skb; - pfxgmac_test_packet pPkt; - u8 *pTx_data = NULL; - u8 *pSkb_data = NULL; - u32 offload_len = 0; - u8 ipHeadLen, tcpHeadLen, headTotalLen; - static u32 lastGsoSize = 806; /* initial default value */ - - /* get fxgmac_test_packet */ - pPkt = (pfxgmac_test_packet)(pcmd_data + sizeof(struct ext_ioctl_data)); - pktLen = pPkt->length; - - /* get pkt data */ - pTx_data = (u8 *)pPkt + sizeof(fxgmac_test_packet); - - /* alloc sk_buff */ - skb = alloc_skb(pktLen, GFP_ATOMIC); - if (!skb) { - DPRINTK("alloc skb fail\n"); - return; - } - - /* copy data to skb */ - pSkb_data = skb_put(skb, pktLen); - memset(pSkb_data, 0, pktLen); - memcpy(pSkb_data, pTx_data, pktLen); - - /* set skb parameters */ - skb->dev = pdata->netdev; - skb->pkt_type = PACKET_OUTGOING; - skb->protocol = ntohs(ETH_P_IP); - skb->no_fcs = 1; - skb->ip_summed = CHECKSUM_PARTIAL; - if (skb->len > 1514) { - /* TSO packet */ - /* set tso test flag */ - pdata->expansion.fxgmac_test_tso_flag = true; - 
- /* get protocol head length */ - ipHeadLen = (pSkb_data[TEST_MAC_HEAD] & 0xF) * 4; - tcpHeadLen = (pSkb_data[TEST_MAC_HEAD + ipHeadLen + - TEST_TCP_HEAD_LEN_OFFSET] >> - 4 & - 0xF) * - 4; - headTotalLen = TEST_MAC_HEAD + ipHeadLen + tcpHeadLen; - offload_len = (pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET] << 8 | - pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET + 1]) & - 0xFFFF; - /* set tso skb parameters */ - skb->transport_header = ipHeadLen + TEST_MAC_HEAD; - skb->network_header = TEST_MAC_HEAD; - skb->inner_network_header = TEST_MAC_HEAD; - skb->mac_len = TEST_MAC_HEAD; - - /* set skb_shinfo parameters */ - if (tcpHeadLen > TEST_TCP_FIX_HEAD_LEN) { - skb_shinfo(skb)->gso_size = - (pSkb_data[TEST_TCP_MSS_OFFSET] << 8 | - pSkb_data[TEST_TCP_MSS_OFFSET + 1]) & - 0xFFFF; - } else { - skb_shinfo(skb)->gso_size = 0; - } - if (skb_shinfo(skb)->gso_size != 0) { - lastGsoSize = skb_shinfo(skb)->gso_size; - } else { - skb_shinfo(skb)->gso_size = lastGsoSize; - } - /* get segment size */ - if (offload_len % skb_shinfo(skb)->gso_size == 0) { - skb_shinfo(skb)->gso_segs = - offload_len / skb_shinfo(skb)->gso_size; - pdata->expansion.fxgmac_test_last_tso_len = - skb_shinfo(skb)->gso_size + headTotalLen; - } else { - skb_shinfo(skb)->gso_segs = - offload_len / skb_shinfo(skb)->gso_size + 1; - pdata->expansion.fxgmac_test_last_tso_len = - offload_len % skb_shinfo(skb)->gso_size + - headTotalLen; - } - pdata->expansion.fxgmac_test_tso_seg_num = - skb_shinfo(skb)->gso_segs; - - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - skb_shinfo(skb)->frag_list = NULL; - skb->csum_start = skb_headroom(skb) + TEST_MAC_HEAD + ipHeadLen; - skb->csum_offset = skb->len - TEST_MAC_HEAD - ipHeadLen; - - pdata->expansion.fxgmac_test_packet_len = - skb_shinfo(skb)->gso_size + headTotalLen; - } else { - /* set non-TSO packet parameters */ - pdata->expansion.fxgmac_test_packet_len = skb->len; - } - - /* send data */ - if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS) { - DPRINTK("xmit data fail \n"); - } -} - -static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) -{ - unsigned int totalLen = 0; - struct sk_buff *rx_skb; - struct ext_ioctl_data *pcmd; - fxgmac_test_packet pkt; - void *addr = 0; - u8 *rx_data = (u8 *)kzalloc(FXGMAC_MAX_DBG_RX_DATA, GFP_KERNEL); - if (!rx_data) - return; - - /* initial dest data region */ - pcmd = (struct ext_ioctl_data *)pcmd_data; - addr = pcmd->cmd_buf.buf; - while (pdata->expansion.fxgmac_test_skb_arr_in_index != - pdata->expansion.fxgmac_test_skb_arr_out_index) { - /* get received skb data */ - rx_skb = - pdata->expansion.fxgmac_test_skb_array - [pdata->expansion.fxgmac_test_skb_arr_out_index]; - - if (rx_skb->len + sizeof(fxgmac_test_packet) + totalLen < - 64000) { - pkt.length = rx_skb->len; - pkt.type = 0x80; - pkt.buf[0].offset = - totalLen + sizeof(fxgmac_test_packet); - pkt.buf[0].length = rx_skb->len; - - /* get data from skb */ - memcpy(rx_data, rx_skb->data, rx_skb->len); - - /* update next pointer */ - if ((pdata->expansion.fxgmac_test_skb_arr_out_index + - 1) % FXGMAC_MAX_DBG_TEST_PKT == - pdata->expansion.fxgmac_test_skb_arr_in_index) { - pkt.next = NULL; - } else { - pkt.next = - (pfxgmac_test_packet)(addr + totalLen + - sizeof(fxgmac_test_packet) + - pkt.length); - } - - /* copy data to user space */ - if (copy_to_user((void *)(addr + totalLen), - (void *)(&pkt), - sizeof(fxgmac_test_packet))) { - DPRINTK("cppy pkt data to user fail..."); - } - if (copy_to_user((void *)(addr + totalLen + - sizeof(fxgmac_test_packet)), - (void *)rx_data, rx_skb->len)) { - DPRINTK("cppy data to 
user fail..."); - } - - /* update total length */ - totalLen += (sizeof(fxgmac_test_packet) + rx_skb->len); - - /* free skb */ - kfree_skb(rx_skb); - pdata->expansion.fxgmac_test_skb_array - [pdata->expansion.fxgmac_test_skb_arr_out_index] = - NULL; - - /* update gCurSkbOutIndex */ - pdata->expansion.fxgmac_test_skb_arr_out_index = - (pdata->expansion.fxgmac_test_skb_arr_out_index + - 1) % - FXGMAC_MAX_DBG_TEST_PKT; - } else { - DPRINTK("receive data more receive buffer... \n"); - break; - } - } - - if (rx_data) - kfree(rx_data); -} - -/* Based on the current application scenario, we only use CMD_DATA for data. - * if you use other struct, you should recalculate in_total_size - */ -long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - bool ret = true; - int regval = 0; - struct fxgmac_pdata *pdata = file->private_data; - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - FXGMAC_PDATA_OF_PLATFORM *ex = &pdata->expansion; - CMD_DATA ex_data; - struct ext_ioctl_data pcmd; - u8 *data = NULL; - u8 *buf = NULL; - int in_total_size, in_data_size, out_total_size; - int ioctl_cmd_size = sizeof(struct ext_ioctl_data); - u8 mac[ETH_ALEN] = { 0 }; - struct sk_buff *tmpskb; - - if (!arg) { - DPRINTK("[%s] command arg is %lx !\n", __func__, arg); - goto err; - } - - /* check device type */ - if (_IOC_TYPE(cmd) != IOC_MAGIC) { - DPRINTK("[%s] command type [%c] error!\n", __func__, - _IOC_TYPE(cmd)); - goto err; - } - - /* check command number*/ - if (_IOC_NR(cmd) > IOC_MAXNR) { - DPRINTK("[%s] command numer [%d] exceeded!\n", __func__, - _IOC_NR(cmd)); - goto err; - } - - if (copy_from_user(&pcmd, (void *)arg, ioctl_cmd_size)) { - DPRINTK("copy data from user fail... \n"); - goto err; - } - - in_total_size = pcmd.cmd_buf.size_in; - in_data_size = in_total_size - ioctl_cmd_size; - out_total_size = pcmd.cmd_buf.size_out; - - buf = (u8 *)kzalloc(in_total_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, (void *)arg, in_total_size)) { - DPRINTK("copy data from user fail... 
\n"); - goto err; - } - data = buf + ioctl_cmd_size; - - if (arg != 0) { - switch (pcmd.cmd_type) { - /* ioctl diag begin */ - case FUXI_DFS_IOCTL_DIAG_BEGIN: - DPRINTK("Debugfs received diag begin command.\n"); - if (netif_running(pdata->netdev)) { - fxgmac_restart_dev(pdata); - } - - /* release last loopback test abnormal exit buffer */ - while (ex->fxgmac_test_skb_arr_in_index != - ex->fxgmac_test_skb_arr_out_index) { - tmpskb = - ex->fxgmac_test_skb_array - [ex->fxgmac_test_skb_arr_out_index]; - if (tmpskb) { - kfree_skb(tmpskb); - ex->fxgmac_test_skb_array - [ex->fxgmac_test_skb_arr_out_index] = - NULL; - } - - ex->fxgmac_test_skb_arr_out_index = - (ex->fxgmac_test_skb_arr_out_index + - 1) % - FXGMAC_MAX_DBG_TEST_PKT; - } - - /* init loopback test parameters */ - ex->fxgmac_test_skb_arr_in_index = 0; - ex->fxgmac_test_skb_arr_out_index = 0; - ex->fxgmac_test_tso_flag = false; - ex->fxgmac_test_tso_seg_num = 0; - ex->fxgmac_test_last_tso_len = 0; - ex->fxgmac_test_packet_len = 0; - break; - - /* ioctl diag end */ - case FUXI_DFS_IOCTL_DIAG_END: - DPRINTK("Debugfs received diag end command.\n"); - if (netif_running(pdata->netdev)) { - fxgmac_restart_dev(pdata); - } - break; - - /* ioctl diag tx pkt */ - case FUXI_DFS_IOCTL_DIAG_TX_PKT: - fxgmac_dbg_tx_pkt(pdata, buf); - break; - - /* ioctl diag rx pkt */ - case FUXI_DFS_IOCTL_DIAG_RX_PKT: - fxgmac_dbg_rx_pkt(pdata, buf); - break; - - /* ioctl device reset */ - case FUXI_DFS_IOCTL_DEVICE_RESET: - DPRINTK("Debugfs received device reset command.\n"); - if (netif_running(pdata->netdev)) { - fxgmac_restart_dev(pdata); - } - break; - - case FXGMAC_EFUSE_LED_TEST: - DPRINTK("Debugfs received device led test command.\n"); - memcpy(&pdata->led, data, sizeof(struct led_setting)); - fxgmac_restart_dev(pdata); - break; - - case FXGMAC_EFUSE_UPDATE_LED_CFG: - DPRINTK("Debugfs received device led update command.\n"); - memcpy(&pdata->ledconfig, data, - sizeof(struct led_setting)); - ret = hw_ops->write_led_config(pdata); - hw_ops->read_led_config(pdata); - hw_ops->led_under_active(pdata); - break; - - case FXGMAC_EFUSE_WRITE_LED: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - DPRINTK("FXGMAC_EFUSE_WRITE_LED, val = 0x%x\n", - ex_data.val0); - ret = hw_ops->write_led(pdata, ex_data.val0); - break; - - case FXGMAC_EFUSE_WRITE_OOB: - DPRINTK("FXGMAC_EFUSE_WRITE_OOB.\n"); - ret = hw_ops->write_oob(pdata); - break; - - case FXGMAC_EFUSE_READ_REGIONABC: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ret = hw_ops->read_efuse_data(pdata, ex_data.val0, - &ex_data.val1); - DPRINTK("FXGMAC_EFUSE_READ_REGIONABC, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1); - if (ret) { - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = - ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - } - break; - - case FXGMAC_EFUSE_WRITE_PATCH_REG: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_REG, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1); - ret = hw_ops->write_patch_to_efuse(pdata, ex_data.val0, - ex_data.val1); - break; - - case FXGMAC_EFUSE_READ_PATCH_REG: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ret = hw_ops->read_patch_from_efuse(pdata, ex_data.val0, - &ex_data.val1); - DPRINTK("FXGMAC_EFUSE_READ_PATCH_REG, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1); - if (ret) { - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = - ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) 
- goto err; - } - break; - - case FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ret = hw_ops->write_patch_to_efuse_per_index( - pdata, ex_data.val0, ex_data.val1, - ex_data.val2); - DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, index = %d, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1, ex_data.val2); - break; - - case FXGMAC_EFUSE_READ_PATCH_PER_INDEX: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ret = hw_ops->read_patch_from_efuse_per_index( - pdata, ex_data.val0, &ex_data.val1, - &ex_data.val2); - DPRINTK("FXGMAC_EFUSE_READ_PATCH_PER_INDEX, address = 0x%x, val = 0x%x\n", - ex_data.val1, ex_data.val2); - if (ret) { - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = - ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - } - break; - - case FXGMAC_EFUSE_LOAD: - DPRINTK("FXGMAC_EFUSE_LOAD.\n"); - ret = hw_ops->efuse_load(pdata); - break; - - case FXGMAC_GET_MAC_DATA: - ret = hw_ops->read_mac_subsys_from_efuse(pdata, mac, - NULL, NULL); - if (ret) { - memcpy(data, mac, ETH_ALEN); - out_total_size = ioctl_cmd_size + ETH_ALEN; - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - } - break; - - case FXGMAC_SET_MAC_DATA: - if (in_data_size != ETH_ALEN) - goto err; - memcpy(mac, data, ETH_ALEN); - ret = hw_ops->write_mac_subsys_to_efuse(pdata, mac, - NULL, NULL); - if (ret) { - eth_hw_addr_set(pdata->netdev, mac); - memcpy(pdata->mac_addr, mac, ETH_ALEN); - hw_ops->set_mac_address(pdata, mac); - hw_ops->set_mac_hash(pdata); - } - break; - - case FXGMAC_GET_SUBSYS_ID: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ret = hw_ops->read_mac_subsys_from_efuse( - pdata, NULL, &ex_data.val0, NULL); - if (ret) { - ex_data.val1 = 0xFFFF; /* invalid value */ - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = - ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - } - break; - - case FXGMAC_SET_SUBSYS_ID: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ret = hw_ops->write_mac_subsys_to_efuse( - pdata, NULL, &ex_data.val0, NULL); - break; - - case FXGMAC_GET_GMAC_REG: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - ex_data.val1 = hw_ops->get_gmac_register( - pdata, (u8 *)(pdata->mac_regs + ex_data.val0)); - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - break; - - case FXGMAC_SET_GMAC_REG: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - regval = hw_ops->set_gmac_register( - pdata, (u8 *)(pdata->mac_regs + ex_data.val0), - ex_data.val1); - ret = (regval == 0 ? true : false); - break; - - case FXGMAC_GET_PHY_REG: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - regval = hw_ops->read_ephy_reg(pdata, ex_data.val0, - &ex_data.val1); - if (regval != -1) { - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = - ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - } - ret = (regval == -1 ? false : true); - break; - - case FXGMAC_SET_PHY_REG: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - regval = hw_ops->write_ephy_reg(pdata, ex_data.val0, - ex_data.val1); - ret = (regval == 0 ? 
true : false); - break; - - case FXGMAC_GET_PCIE_LOCATION: - ex_data.val0 = pdata->pdev->bus->number; - ex_data.val1 = PCI_SLOT(pdata->pdev->devfn); - ex_data.val2 = PCI_FUNC(pdata->pdev->devfn); - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - break; - - case FXGMAC_GET_GSO_SIZE: - ex_data.val0 = pdata->netdev->gso_max_size; - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - break; - - case FXGMAC_SET_GSO_SIZE: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - pdata->netdev->gso_max_size = ex_data.val0; - break; - - case FXGMAC_SET_RX_MODERATION: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - regval = readreg(pdata->pAdapter, - pdata->base_mem + INT_MOD); - regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_RX_POS, - INT_MOD_RX_LEN, - ex_data.val0); - writereg(pdata->pAdapter, regval, - pdata->base_mem + INT_MOD); - break; - - case FXGMAC_SET_TX_MODERATION: - memcpy(&ex_data, data, sizeof(CMD_DATA)); - regval = readreg(pdata->pAdapter, - pdata->base_mem + INT_MOD); - regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_TX_POS, - INT_MOD_TX_LEN, - ex_data.val0); - writereg(pdata->pAdapter, regval, - pdata->base_mem + INT_MOD); - break; - - case FXGMAC_GET_TXRX_MODERATION: - regval = readreg(pdata->pAdapter, - pdata->base_mem + INT_MOD); - ex_data.val0 = FXGMAC_GET_REG_BITS( - regval, INT_MOD_RX_POS, INT_MOD_RX_LEN); - ex_data.val1 = FXGMAC_GET_REG_BITS( - regval, INT_MOD_TX_POS, INT_MOD_TX_LEN); - memcpy(data, &ex_data, sizeof(CMD_DATA)); - out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); - if (copy_to_user((void *)arg, (void *)buf, - out_total_size)) - goto err; - break; - - default: - DPRINTK("Debugfs received invalid command: %x.\n", - pcmd.cmd_type); - ret = false; - break; - } - } - - if (buf) - kfree(buf); - return ret ? 
FXGMAC_SUCCESS : FXGMAC_FAIL; - -err: - if (buf) - kfree(buf); - return FXGMAC_FAIL; -} - -#ifdef HAVE_FXGMAC_DEBUG_FS - -static struct file_operations fxgmac_dbg_netdev_ops_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = fxgmac_dbg_netdev_ops_read, - .write = fxgmac_dbg_netdev_ops_write, - .unlocked_ioctl = fxgmac_dbg_netdev_ops_ioctl, -}; - -/** - * fxgmac_dbg_adapter_init - setup the debugfs directory for the adapter - * @adapter: the adapter that is starting up - **/ -void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata) -{ - const char *name = pdata->drv_name; - struct dentry *pfile; - - pdata->expansion.dbg_adapter = - debugfs_create_dir(name, pdata->expansion.fxgmac_dbg_root); - if (pdata->expansion.dbg_adapter) { - pfile = debugfs_create_file("netdev_ops", 0600, - pdata->expansion.dbg_adapter, pdata, - &fxgmac_dbg_netdev_ops_fops); - if (!pfile) - DPRINTK("debugfs netdev_ops for %s failed\n", name); - } else { - DPRINTK("debugfs entry for %s failed\n", name); - } -} - -/** - * fxgmac_dbg_adapter_exit - clear out the adapter's debugfs entries - * @adapter: board private structure - **/ -void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata) -{ - if (pdata->expansion.dbg_adapter) - debugfs_remove_recursive(pdata->expansion.dbg_adapter); - pdata->expansion.dbg_adapter = NULL; -} - -/** - * fxgmac_dbg_init - start up debugfs for the driver - **/ -void fxgmac_dbg_init(struct fxgmac_pdata *pdata) -{ - unsigned int i; - char num[3]; - const char debug_path[] = "/sys/kernel/debug/"; - const char file_prefix[] = "fuxi_"; - char file_path[50]; - char file_name[8]; - - /* init file_path */ - memset(file_path, '\0', sizeof(file_path)); - memcpy(file_path, debug_path, sizeof(debug_path)); - - for (i = 0; i < DF_MAX_NIC_NUM; i++) { - /* init num and filename */ - memset(num, '\0', sizeof(num)); - memset(file_name, '\0', sizeof(file_name)); - - /* int to string */ - sprintf(num, "%d", i); - - /* file name */ - memcpy(file_name, file_prefix, sizeof(file_prefix)); - memcpy(file_name + strlen(file_prefix), num, sizeof(num)); - - /* file path */ - memcpy(file_path + sizeof(debug_path) - 1, file_name, - sizeof(file_name)); - - /* whether file exist */ - pdata->expansion.fxgmac_dbg_root = - debugfs_lookup(file_name, NULL); - if (!pdata->expansion.fxgmac_dbg_root) { - /* create file */ - pdata->expansion.fxgmac_dbg_root = - debugfs_create_dir(file_name, NULL); - if (IS_ERR(pdata->expansion.fxgmac_dbg_root)) - DPRINTK("fxgmac init of debugfs failed\n"); - - break; - } - } -} - -/** - * fxgmac_dbg_exit - clean out the driver's debugfs entries - **/ -void fxgmac_dbg_exit(struct fxgmac_pdata *pdata) -{ - if (pdata->expansion.fxgmac_dbg_root) - debugfs_remove_recursive(pdata->expansion.fxgmac_dbg_root); -} - -#endif /* HAVE_XLGMAC_DEBUG_FS */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c index 969d84eb44e2a..933e91e1408dd 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c @@ -19,17 +19,31 @@ static void fxgmac_unmap_desc_data(struct fxgmac_pdata *pdata, desc_data->skb_dma_len = 0; } - if (desc_data->rx.buf.dma_base) { - dma_unmap_single(pdata->dev, desc_data->rx.buf.dma_base, - pdata->rx_buf_size, DMA_FROM_DEVICE); - desc_data->rx.buf.dma_base = 0; - } - if (desc_data->skb) { dev_kfree_skb_any(desc_data->skb); desc_data->skb = NULL; } + if (desc_data->rx.hdr.pa.pages) + put_page(desc_data->rx.hdr.pa.pages); + + if 
(desc_data->rx.hdr.pa_unmap.pages) { + dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma, + desc_data->rx.hdr.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(desc_data->rx.hdr.pa_unmap.pages); + } + + if (desc_data->rx.buf.pa.pages) + put_page(desc_data->rx.buf.pa.pages); + + if (desc_data->rx.buf.pa_unmap.pages) { + dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma, + desc_data->rx.buf.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(desc_data->rx.buf.pa_unmap.pages); + } + memset(&desc_data->tx, 0, sizeof(desc_data->tx)); memset(&desc_data->rx, 0, sizeof(desc_data->rx)); @@ -62,6 +76,28 @@ static void fxgmac_free_ring(struct fxgmac_pdata *pdata, ring->desc_data_head = NULL; } + if (ring->rx_hdr_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, + ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_hdr_pa.pages); + + ring->rx_hdr_pa.pages = NULL; + ring->rx_hdr_pa.pages_len = 0; + ring->rx_hdr_pa.pages_offset = 0; + ring->rx_hdr_pa.pages_dma = 0; + } + + if (ring->rx_buf_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, + ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_buf_pa.pages); + + ring->rx_buf_pa.pages = NULL; + ring->rx_buf_pa.pages_len = 0; + ring->rx_buf_pa.pages_offset = 0; + ring->rx_buf_pa.pages_dma = 0; + } + if (ring->dma_desc_head) { dma_free_coherent( pdata->dev, @@ -147,12 +183,16 @@ static int fxgmac_alloc_rings(struct fxgmac_pdata *pdata) "error initializing Rx ring\n"); goto err_init_ring; } - if (netif_msg_drv(pdata)) - DPRINTK("fxgmac_alloc_ring..ch=%u, tx_desc_cnt=%u, rx_desc_cnt=%u\n", - i, pdata->tx_desc_count, pdata->rx_desc_count); + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac_alloc_ring..ch=%u,", i); + if (i < pdata->tx_ring_count) + DPRINTK(" tx_desc_cnt=%u,", pdata->tx_desc_count); + + DPRINTK(" rx_desc_cnt=%u.\n", pdata->rx_desc_count); + } } if (netif_msg_drv(pdata)) - DPRINTK("alloc_rings callout ok\n"); + DPRINTK("alloc_rings callout ok ch=%u\n", i); return 0; @@ -168,19 +208,19 @@ static void fxgmac_free_channels(struct fxgmac_pdata *pdata) if (!pdata->channel_head) return; if (netif_msg_drv(pdata)) - DPRINTK("free_channels, tx_ring=%p\n", + DPRINTK("free_channels, tx_ring=%p", pdata->channel_head->tx_ring); kfree(pdata->channel_head->tx_ring); pdata->channel_head->tx_ring = NULL; if (netif_msg_drv(pdata)) - DPRINTK("free_channels, rx_ring=%p\n", + DPRINTK(" , rx_ring=%p", pdata->channel_head->rx_ring); kfree(pdata->channel_head->rx_ring); pdata->channel_head->rx_ring = NULL; if (netif_msg_drv(pdata)) - DPRINTK("free_channels, channel=%p\n", pdata->channel_head); + DPRINTK(" , channel=%p\n", pdata->channel_head); kfree(pdata->channel_head); pdata->channel_head = NULL; @@ -202,22 +242,20 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) channel_head = kcalloc(pdata->channel_count, sizeof(struct fxgmac_channel), GFP_KERNEL); if (netif_msg_drv(pdata)) - DPRINTK("alloc_channels, channel_head=%p, size=%d*%ld\n", + DPRINTK("alloc_channels, channel_head=%p, size=%d*%d\n", channel_head, pdata->channel_count, sizeof(struct fxgmac_channel)); if (!channel_head) return ret; - netif_dbg(pdata, drv, pdata->netdev, "channel_head=%p\n", channel_head); - tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct fxgmac_ring), GFP_KERNEL); if (!tx_ring) goto err_tx_ring; if (netif_msg_drv(pdata)) - DPRINTK("alloc_channels, tx_ring=%p, size=%d*%ld\n", tx_ring, + DPRINTK("alloc_channels, tx_ring=%p, size=%d*%d\n", tx_ring, pdata->tx_ring_count, sizeof(struct 
fxgmac_ring)); rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct fxgmac_ring), GFP_KERNEL); @@ -225,7 +263,7 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) goto err_rx_ring; if (netif_msg_drv(pdata)) - DPRINTK("alloc_channels, rx_ring=%p, size=%d*%ld\n", rx_ring, + DPRINTK("alloc_channels, rx_ring=%p, size=%d*%d\n", rx_ring, pdata->rx_ring_count, sizeof(struct fxgmac_ring)); for (i = 0, channel = channel_head; i < pdata->channel_count; @@ -285,11 +323,6 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) if (i < pdata->rx_ring_count) channel->rx_ring = rx_ring++; - - netif_dbg(pdata, drv, pdata->netdev, - "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n", - channel->name, channel->dma_regs, channel->tx_ring, - channel->rx_ring); } pdata->channel_head = channel_head; @@ -338,34 +371,135 @@ static int fxgmac_alloc_channels_and_rings(struct fxgmac_pdata *pdata) return ret; } +static void fxgmac_set_buffer_data(struct fxgmac_buffer_data *bd, + struct fxgmac_page_alloc *pa, + unsigned int len) +{ + get_page(pa->pages); + bd->pa = *pa; + + bd->dma_base = pa->pages_dma; + bd->dma_off = pa->pages_offset; + bd->dma_len = len; + + pa->pages_offset += len; + if ((pa->pages_offset + len) > pa->pages_len) { + /* This data descriptor is responsible for unmapping page(s) */ + bd->pa_unmap = *pa; + + /* Get a new allocation next time */ + pa->pages = NULL; + pa->pages_len = 0; + pa->pages_offset = 0; + pa->pages_dma = 0; + } +} + +static int fxgmac_alloc_pages(struct fxgmac_pdata *pdata, + struct fxgmac_page_alloc *pa, + gfp_t gfp, int order) +{ + struct page *pages = NULL; + dma_addr_t pages_dma; + + /* Try to obtain pages, decreasing order if necessary */ + gfp |= __GFP_COMP | __GFP_NOWARN; + while (order >= 0) { + pages = alloc_pages(gfp, order); + if (pages) + break; + + order--; + } + if (!pages) + return -ENOMEM; + + /* Map the pages */ + pages_dma = dma_map_page(pdata->dev, pages, 0, + PAGE_SIZE << order, DMA_FROM_DEVICE); + if (dma_mapping_error(pdata->dev, pages_dma)) { + put_page(pages); + return -ENOMEM; + } + + pa->pages = pages; + pa->pages_len = PAGE_SIZE << order; + pa->pages_offset = 0; + pa->pages_dma = pages_dma; + + return 0; +} + static int fxgmac_map_rx_buffer(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, struct fxgmac_desc_data *desc_data) { - struct sk_buff *skb; - skb = __netdev_alloc_skb_ip_align(pdata->netdev, pdata->rx_buf_size, - GFP_ATOMIC); - if (!skb) { - netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n", - __func__); - return -ENOMEM; - } + int ret; - desc_data->skb = skb; - desc_data->rx.buf.dma_base = dma_map_single( - pdata->dev, skb->data, pdata->rx_buf_size, DMA_FROM_DEVICE); - if (dma_mapping_error(pdata->dev, desc_data->rx.buf.dma_base)) { - netdev_err(pdata->netdev, "%s: DMA mapping error\n", __func__); - dev_kfree_skb_any(skb); - return -EINVAL; + if (!ring->rx_hdr_pa.pages) { + ret = fxgmac_alloc_pages(pdata, &ring->rx_hdr_pa, + GFP_ATOMIC, 0); + if (ret) + return ret; } + /* Set up the header page info */ + fxgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa, + pdata->rx_buf_size); + return 0; } +static void fxgmac_tx_desc_reset(struct fxgmac_desc_data *desc_data) +{ + struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; + + /* Reset the Tx descriptor + * Set buffer 1 (lo) address to zero + * Set buffer 1 (hi) address to zero + * Reset all other control bits (IC, TTSE, B2L & B1L) + * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) + */ + dma_desc->desc0 = 0; + dma_desc->desc1 = 0; + 
dma_desc->desc2 = 0; + dma_desc->desc3 = 0; + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void fxgmac_tx_desc_init_channel(struct fxgmac_channel *channel) +{ + struct fxgmac_ring *ring = channel->tx_ring; + struct fxgmac_desc_data *desc_data; + int start_index = ring->cur; + unsigned int i; + start_index = start_index; + /* Initialize all descriptors */ + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Tx descriptor */ + fxgmac_tx_desc_reset(desc_data); + } + + ///* Update the total number of Tx descriptors */ + //writereg(ring->dma_desc_count - 1, FXGMAC_DMA_REG(channel, DMA_CH_TDRLR)); + + writereg(channel->pdata->pAdapter, channel->pdata->tx_desc_count - 1, FXGMAC_DMA_REG(channel, DMA_CH_TDRLR)); + + /* Update the starting address of descriptor ring */ + + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + writereg(channel->pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); + writereg(channel->pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); +} + static void fxgmac_tx_desc_init(struct fxgmac_pdata *pdata) { - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; struct fxgmac_desc_data *desc_data; struct fxgmac_dma_desc *dma_desc; struct fxgmac_channel *channel; @@ -399,13 +533,98 @@ static void fxgmac_tx_desc_init(struct fxgmac_pdata *pdata) ring->dirty = 0; memset(&ring->tx, 0, sizeof(ring->tx)); - hw_ops->tx_desc_init(channel); + fxgmac_tx_desc_init_channel(channel); + } +} + +static void fxgmac_rx_desc_reset(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index) +{ + struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; + dma_addr_t hdr_dma; + + /* Reset the Rx descriptor + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE + */ + hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off; + //buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off; + dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); + dma_desc->desc2 = 0;//cpu_to_le32(lower_32_bits(buf_dma)); + dma_desc->desc3 = 0;//cpu_to_le32(upper_32_bits(buf_dma)); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + RX_NORMAL_DESC3_INTE_POS, + RX_NORMAL_DESC3_INTE_LEN, + 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + RX_NORMAL_DESC3_BUF2V_POS, + RX_NORMAL_DESC3_BUF2V_LEN, + 0); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + RX_NORMAL_DESC3_BUF1V_POS, + RX_NORMAL_DESC3_BUF1V_LEN, + 1); + + /* Since the Rx DMA engine is likely running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the descriptor + */ + dma_wmb(); + + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN, + 1); + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void fxgmac_rx_desc_init_channel(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + unsigned int start_index = ring->cur; + struct fxgmac_desc_data *desc_data; + unsigned int i; + + + /* Initialize all descriptors */ + for 
(i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Rx descriptor */ + fxgmac_rx_desc_reset(pdata, desc_data, i); } + + /* Update the total number of Rx descriptors */ + writereg(pdata->pAdapter, ring->dma_desc_count - 1, FXGMAC_DMA_REG(channel, DMA_CH_RDRLR)); + + /* Update the starting address of descriptor ring */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + writereg(pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); + + /* Update the Rx Descriptor Tail Pointer */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index + + ring->dma_desc_count - 1); + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); } static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) { - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; struct fxgmac_desc_data *desc_data; struct fxgmac_dma_desc *dma_desc; struct fxgmac_channel *channel; @@ -438,7 +657,7 @@ static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) ring->cur = 0; ring->dirty = 0; - hw_ops->rx_desc_init(channel); + fxgmac_rx_desc_init_channel(channel); } } @@ -591,7 +810,7 @@ static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops) { - desc_ops->alloc_channles_and_rings = fxgmac_alloc_channels_and_rings; + desc_ops->alloc_channels_and_rings = fxgmac_alloc_channels_and_rings; desc_ops->free_channels_and_rings = fxgmac_free_channels_and_rings; desc_ops->map_tx_skb = fxgmac_map_tx_skb; desc_ops->map_rx_buffer = fxgmac_map_rx_buffer; diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c index 05aa42f90ad83..c053fc0d0b185 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c @@ -1,10 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#include -#include -#include - #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" @@ -115,7 +111,6 @@ static void fxgmac_ethtool_get_drvinfo(struct net_device *netdev, devid = FXGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS, MAC_VR_DEVID_LEN); userver = FXGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS, MAC_VR_USERVER_LEN); - /*DPRINTK("xlgma: No userver (%x) here, sver (%x) should be 0x51\n", userver, sver);*/ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "S.D.U: %x.%x.%x", sver, devid, userver); } @@ -251,7 +246,7 @@ static void fxgmac_get_reta(struct fxgmac_pdata *pdata, u32 *indir) { int i, reta_size = FXGMAC_RSS_MAX_TABLE_SIZE; u16 rss_m; -#ifdef FXGMAC_ONE_CHANNLE +#ifdef FXGMAC_ONE_CHANNEL rss_m = FXGMAC_MAX_DMA_CHANNELS; #else rss_m = FXGMAC_MAX_DMA_CHANNELS - @@ -267,10 +262,6 @@ static int fxgmac_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, { struct fxgmac_pdata *pdata = netdev_priv(netdev); - /* ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) - * ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) - * ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) - */ if (hfunc) { *hfunc = ETH_RSS_HASH_TOP; DPRINTK("fxmac, get_rxfh for hash function\n"); @@ -306,7 +297,7 @@ static int fxgmac_set_rxfh(struct net_device *netdev, const u32 *indir, /* Fill out the redirection table */ if (indir) { -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED max_queues = max_queues; reta_entries = reta_entries; i = i; @@ -444,9 +435,6 @@ static int fxgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: cmd->rule_cnt = 0; ret = 0; - /*ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, - (u32 *)rule_locs); - */ DPRINTK("fxmac, get_rxnfc for classify both cnt and rules\n"); break; case ETHTOOL_GRXFH: @@ -460,7 +448,6 @@ static int fxgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -#define UDP_RSS_FLAGS (BIT(MAC_RSSCR_UDP4TE_POS) | BIT(MAC_RSSCR_UDP6TE_POS)) static int fxgmac_set_rss_hash_opt(struct fxgmac_pdata *pdata, struct ethtool_rxnfc *nfc) { @@ -635,7 +622,10 @@ static int fxgmac_set_ringparam(struct net_device *netdev, { struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + if (pdata->expansion.dev_state != FXGMAC_DEV_START) + return 0; + fxgmac_lock(pdata); DPRINTK("fxmac, set_ringparam callin\n"); pdata->tx_desc_count = ring->tx_pending; @@ -644,8 +634,9 @@ static int fxgmac_set_ringparam(struct net_device *netdev, fxgmac_stop(pdata); fxgmac_free_tx_data(pdata); fxgmac_free_rx_data(pdata); - desc_ops->alloc_channles_and_rings(pdata); + desc_ops->alloc_channels_and_rings(pdata); fxgmac_start(pdata); + fxgmac_unlock(pdata); return 0; } @@ -656,11 +647,6 @@ static void fxgmac_get_wol(struct net_device *netdev, { struct fxgmac_pdata *pdata = netdev_priv(netdev); - /* for further feature implementation - * wol->supported = WAKE_PHY | WAKE_UCAST | WAKE_MCAST | - * WAKE_BCAST | WAKE_MAGIC; - */ - wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC | WAKE_ARP; #if FXGMAC_WOL_UPON_EPHY_LINK @@ -674,15 +660,130 @@ static void fxgmac_get_wol(struct net_device *netdev, return; } wol->wolopts = pdata->expansion.wol; - DPRINTK("fxmac, get_wol, 0x%x, 0x%x\n", wol->wolopts, - pdata->expansion.wol); +} + +// only supports four patterns, and patterns will be cleared on every call +static void fxgmac_set_pattern_data(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 ip_addr, i = 0; + u8 type_offset, op_offset, tip_offset; + struct pattern_packet packet; + 
struct wol_bitmap_pattern pattern[4]; // for WAKE_UCAST, WAKE_BCAST, WAKE_MCAST, WAKE_ARP. + + memset(pattern, 0, sizeof(struct wol_bitmap_pattern) * 4); + + // config ucast + if (pdata->expansion.wol & WAKE_UCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memcpy(pattern[i].pattern_info, pdata->mac_addr, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + // config bcast + if (pdata->expansion.wol & WAKE_BCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memset(pattern[i].pattern_info, 0xFF, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + // config mcast + if (pdata->expansion.wol & WAKE_MCAST) { + pattern[i].mask_info[0] = 0x7; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_info[0] = 0x1; + pattern[i].pattern_info[1] = 0x0; + pattern[i].pattern_info[2] = 0x5E; + pattern[i].pattern_offset = 0; + i++; + } + + // config arp + if (pdata->expansion.wol & WAKE_ARP) { + memset(pattern[i].mask_info, 0, sizeof(pattern[0].mask_info)); + type_offset = offsetof(struct pattern_packet, ar_pro); + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + type_offset++; + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + op_offset = offsetof(struct pattern_packet, ar_op); + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + op_offset++; + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + tip_offset = offsetof(struct pattern_packet, ar_tip); + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + + packet.ar_pro = 0x0 << 8 | 0x08; // ARP type is 0x0800; note that ar_pro and ar_op are big endian + packet.ar_op = 0x1 << 8; // 1 is ARP request, 2 is ARP reply, 3 is RARP request, 4 is RARP reply + ip_addr = fxgmac_get_netdev_ip4addr(pdata); + packet.ar_tip[0] = ip_addr & 0xFF; + packet.ar_tip[1] = (ip_addr >> 8) & 0xFF; + packet.ar_tip[2] = (ip_addr >> 16) & 0xFF; + packet.ar_tip[3] = (ip_addr >> 24) & 0xFF; + memcpy(pattern[i].pattern_info, &packet, MAX_PATTERN_SIZE); + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_offset = 0; + i++; + } + + hw_ops->set_wake_pattern(pdata, pattern, i); +} + +void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en) +{ + /* Enable or disable WOL. This function only sets the wake-up type; power-related + * configuration is handled elsewhere, see power management. 
+ */ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + if (!pdata->hw_feat.rwk) { + netdev_err(pdata->netdev, "error configuring WOL - not supported.\n"); + return; + } + + hw_ops->disable_wake_magic_pattern(pdata); + hw_ops->disable_wake_pattern(pdata); + hw_ops->disable_wake_link_change(pdata); + + if (en) { + /* config mac address for rx of magic or ucast */ + hw_ops->set_mac_address(pdata, (u8 *)(pdata->netdev->dev_addr)); + + /* Enable Magic packet */ + if (pdata->expansion.wol & WAKE_MAGIC) { + hw_ops->enable_wake_magic_pattern(pdata); + } + + /* Enable global unicast packet */ + if (pdata->expansion.wol & WAKE_UCAST + || pdata->expansion.wol & WAKE_MCAST + || pdata->expansion.wol & WAKE_BCAST + || pdata->expansion.wol & WAKE_ARP) { + hw_ops->enable_wake_pattern(pdata); + } + + /* Enable ephy link change */ + if ((FXGMAC_WOL_UPON_EPHY_LINK) && (pdata->expansion.wol & WAKE_PHY)) { + hw_ops->enable_wake_link_change(pdata); + } + } + device_set_wakeup_enable(/*pci_dev_to_dev*/(pdata->dev), en); + + DPRINTK("config_wol callout\n"); } static int fxgmac_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; int ret; /* currently, we do not support these options */ @@ -729,9 +830,9 @@ static int fxgmac_set_wol(struct net_device *netdev, if (wol->wolopts & WAKE_ARP) pdata->expansion.wol |= WAKE_ARP; - hw_ops->set_pattern_data(pdata); + fxgmac_set_pattern_data(pdata); - hw_ops->config_wol(pdata, (!!(pdata->expansion.wol))); + fxgmac_config_wol(pdata, (!!(pdata->expansion.wol))); DPRINTK("fxmac, set_wol, opt=0x%x, 0x%x\n", wol->wolopts, pdata->expansion.wol); @@ -762,7 +863,6 @@ static void fxgmac_get_regs(struct net_device *netdev, regs_buff[REG_MII_PHYSID2]; } -#if FXGMAC_PAUSE_FEATURE_ENABLED static int fxgmac_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { @@ -770,9 +870,10 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 duplex, regval, link_status; u32 adv = 0xFFFFFFFF; + int ret; - regval = fxgmac_ephy_autoneg_ability_get(pdata, &adv); - if (regval) + ret = fxgmac_ephy_autoneg_ability_get(pdata, &adv); + if (ret < 0) return -ETIMEDOUT; ethtool_link_ksettings_zero_link_mode(cmd, supported); @@ -788,14 +889,22 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, /* Indicate pause support */ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); + ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + if (ret < 0) + return ret; + + if (FXGMAC_GET_REG_BITS(regval, PHY_MII_ADVERTISE_PAUSE_POS, PHY_MII_ADVERTISE_PAUSE_LEN)) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if (FXGMAC_GET_REG_BITS(regval, PHY_MII_ADVERTISE_ASYPAUSE_POS, PHY_MII_ADVERTISE_ASYPAUSE_LEN)) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, MII); cmd->base.port = PORT_MII; ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + if (ret < 0) + return ret; regval = FXGMAC_GET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN); if (regval) { @@ -855,8 +964,12 @@ static 
int fxgmac_get_link_ksettings(struct net_device *netdev, } cmd->base.autoneg = pdata->phy_autoeng ? regval : 0; - hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, ®val); - link_status = regval & (BIT(FUXI_EPHY_LINK_STATUS_BIT)); + regval = 0; + ret = hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, ®val); + if (ret < 0) + return ret; + + link_status = regval & (BIT(FXGMAC_EPHY_LINK_STATUS_BIT)); if (link_status) { duplex = FXGMAC_GET_REG_BITS(regval, PHY_MII_SPEC_DUPLEX_POS, PHY_MII_SPEC_DUPLEX_LEN); @@ -894,61 +1007,59 @@ static int fxgmac_set_link_ksettings(struct net_device *netdev, (!pdata->phy_autoeng && cmd->base.speed == SPEED_1000)) { ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv); if (ret < 0) - return -ETIMEDOUT; + return ret; adv &= ~REG_BIT_ADVERTISE_100_10_CAP; adv |= ethtool_adv_to_mii_adv_t(advertising); ret = hw_ops->write_ephy_reg(pdata, REG_MII_ADVERTISE, adv); if (ret < 0) - return -ETIMEDOUT; + return ret; ret = hw_ops->read_ephy_reg(pdata, REG_MII_CTRL1000, &adv); if (ret < 0) - return -ETIMEDOUT; + return ret; adv &= ~REG_BIT_ADVERTISE_1000_CAP; adv |= ethtool_adv_to_mii_ctrl1000_t(advertising); ret = hw_ops->write_ephy_reg(pdata, REG_MII_CTRL1000, adv); if (ret < 0) - return -ETIMEDOUT; + return ret; ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); if (ret < 0) - return -ETIMEDOUT; + return ret; adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, 1); ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); if (ret < 0) - return -ETIMEDOUT; + return ret; ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); if (ret < 0) - return -ETIMEDOUT; + return ret; adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_RE_AUTOENG_POS, PHY_CR_RE_AUTOENG_LEN, 1); ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); if (ret < 0) - return -ETIMEDOUT; + return ret; } else { pdata->phy_duplex = cmd->base.duplex; pdata->phy_speed = cmd->base.speed; - fxgmac_phy_force_speed(pdata, pdata->phy_speed); - fxgmac_phy_force_duplex(pdata, pdata->phy_duplex); - fxgmac_phy_force_autoneg(pdata, pdata->phy_autoeng); + fxgmac_phy_force_mode(pdata); } - ret = fxgmac_ephy_soft_reset(pdata); - if (ret) { - printk("%s: ephy soft reset timeout.\n", __func__); - return -ETIMEDOUT; - } + /* Save speed is used to restore it when resuming */ + pdata->expansion.pre_phy_speed = cmd->base.speed; + pdata->expansion.pre_phy_autoneg = cmd->base.autoneg; + pdata->expansion.pre_phy_duplex = cmd->base.duplex; return 0; } +#if FXGMAC_PAUSE_FEATURE_ENABLED static void fxgmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - pause->autoneg = 1; + pause->autoneg = pdata->phy_autoeng; pause->rx_pause = pdata->rx_pause; pause->tx_pause = pdata->tx_pause; @@ -963,10 +1074,16 @@ static int fxgmac_set_pauseparam(struct net_device *netdev, struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; unsigned int pre_rx_pause = pdata->rx_pause; unsigned int pre_tx_pause = pdata->tx_pause; + u32 adv; + int ret; + int enable_pause = 0; pdata->rx_pause = pause->rx_pause; pdata->tx_pause = pause->tx_pause; + if (pdata->rx_pause || pdata->tx_pause) + enable_pause = 1; + if (pre_rx_pause != pdata->rx_pause) { hw_ops->config_rx_flow_control(pdata); DPRINTK("fxgmac set pause parameter, rx from %d to %d\n", @@ -978,6 +1095,32 @@ static int fxgmac_set_pauseparam(struct net_device *netdev, pre_tx_pause, pdata->tx_pause); } + if (pause->autoneg) { + ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv); + if (ret < 0) + return ret; + adv = 
FXGMAC_SET_REG_BITS(adv, PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN, + enable_pause); + adv = FXGMAC_SET_REG_BITS(adv, PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN, + enable_pause); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_ADVERTISE, adv); + if (ret < 0) { + return ret; + } + + ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); + if (ret < 0) + return ret; + adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_RE_AUTOENG_POS, PHY_CR_RE_AUTOENG_LEN, 1); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); + if (ret < 0) + return ret; + } else { + DPRINTK("Can't set phy pause because autoneg is off.\n"); + } + DPRINTK("fxgmac set pause parameter, autoneg=%d, rx=%d, tx=%d\n", pause->autoneg, pause->rx_pause, pause->tx_pause); @@ -985,14 +1128,6 @@ static int fxgmac_set_pauseparam(struct net_device *netdev, } #endif /*FXGMAC_PAUSE_FEATURE_ENABLED*/ -/* yzhang added for debug sake. descriptors status checking - * 2021.03.29 - */ -#define FXGMAC_ETH_GSTRING_LEN 32 - -#define FXGMAC_TEST_LEN (sizeof(fxgmac_gstrings_test) / FXGMAC_ETH_GSTRING_LEN) -#define DBG_ETHTOOL_CHECK_NUM_OF_DESC 5 - static void fxgmac_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { @@ -1038,7 +1173,6 @@ static void fxgmac_ethtool_get_ethtool_stats(struct net_device *netdev, int i; #if FXGMAC_PM_FEATURE_ENABLED - /* 20210709 for net power down */ if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) #endif { @@ -1057,6 +1191,53 @@ static inline bool fxgmac_removed(void __iomem *addr) } #define FXGMAC_REMOVED(a) fxgmac_removed(a) +static int fxgmac_ethtool_reset(struct net_device *netdev, u32 *flag) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 val; + int ret = 0; + + val = (*flag & ETH_RESET_ALL) || (*flag & ETH_RESET_PHY); + if (!val) { + DPRINTK("Operation not support.\n"); + return -EINVAL; + } + + switch (*flag) { + case ETH_RESET_ALL: + fxgmac_restart_dev(pdata); + *flag = 0; + break; + case ETH_RESET_PHY: + /* + * power off and on the phy in order to properly + * configure the MAC timing + */ + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &val); + val = FXGMAC_SET_REG_BITS(val, PHY_CR_POWER_POS, + PHY_CR_POWER_LEN, + PHY_POWER_DOWN); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, val); + if (ret < 0) + return ret; + usleep_range_ex(pdata->pAdapter, 9000, 10000); + val = FXGMAC_SET_REG_BITS(val, PHY_CR_POWER_POS, + PHY_CR_POWER_LEN, + PHY_POWER_UP); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, val); + if (ret < 0) + return ret; + + *flag = 0; + break; + default: + break; + } + + return 0; +} + static const struct ethtool_ops fxgmac_ethtool_ops = { .get_drvinfo = fxgmac_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1065,6 +1246,7 @@ static const struct ethtool_ops fxgmac_ethtool_ops = { .get_channels = fxgmac_ethtool_get_channels, .get_coalesce = fxgmac_ethtool_get_coalesce, .set_coalesce = fxgmac_ethtool_set_coalesce, + .reset = fxgmac_ethtool_reset, #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) /* The process of set is to get first and then set, @@ -1099,10 +1281,8 @@ static const struct ethtool_ops fxgmac_ethtool_ops = { .set_wol = fxgmac_set_wol, #endif #if (FXGMAC_PAUSE_FEATURE_ENABLED) -#ifdef ETHTOOL_GLINKSETTINGS .get_link_ksettings = fxgmac_get_link_ksettings, .set_link_ksettings = fxgmac_set_link_ksettings, -#endif /* ETHTOOL_GLINKSETTINGS */ .get_pauseparam = fxgmac_get_pauseparam, .set_pauseparam = fxgmac_set_pauseparam, #endif diff --git 
a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c index 0517968365d74..4c2191e3f3759 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c @@ -12,9 +12,6 @@ static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata); static int fxgmac_tx_complete(struct fxgmac_dma_desc *dma_desc) { -#if (FXGMAC_DUMMY_TX_DEBUG) - return 1; -#endif return !FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN); } @@ -27,7 +24,7 @@ static int fxgmac_disable_rx_csum(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 0); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); - DPRINTK("fxgmac disable rx checksum.\n"); + DPRINTK("fxgmac disable rx checksum, set val = %x.\n", regval); return 0; } @@ -39,17 +36,17 @@ static int fxgmac_enable_rx_csum(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 1); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); - DPRINTK("fxgmac enable rx checksum.\n"); + DPRINTK("fxgmac enable rx checksum, set val = %x.\n", regval); return 0; } static int fxgmac_set_mac_address(struct fxgmac_pdata *pdata, u8 *addr) { - unsigned int mac_addr_hi, mac_addr_lo; + u32 mac_addr_hi, mac_addr_lo; - mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); - mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | - (addr[0] << 0); + mac_addr_hi = (((u32)(addr[5]) << 8) | ((u32)(addr[4]) << 0)); + mac_addr_lo = (((u32)(addr[3]) << 24) | ((u32)(addr[2]) << 16) | + ((u32)(addr[1]) << 8) | ((u32)(addr[0]) << 0)); writereg(pdata->pAdapter, mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); writereg(pdata->pAdapter, mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); @@ -59,9 +56,9 @@ static int fxgmac_set_mac_address(struct fxgmac_pdata *pdata, u8 *addr) #if !defined(DPDK) static void fxgmac_set_mac_reg(struct fxgmac_pdata *pdata, - struct netdev_hw_addr *ha, unsigned int *mac_reg) + struct netdev_hw_addr *ha, unsigned int __far *mac_reg) { - unsigned int mac_addr_hi, mac_addr_lo; + u32 mac_addr_hi, mac_addr_lo; u8 *mac_addr; mac_addr_lo = 0; @@ -78,8 +75,11 @@ static void fxgmac_set_mac_reg(struct fxgmac_pdata *pdata, mac_addr[1] = ha->addr[5]; netif_dbg(pdata, drv, pdata->netdev, - "adding mac address %pM at %#x\n", ha->addr, - *mac_reg); + "adding mac address %pM\n", + ha->addr); + netif_dbg(pdata, drv, pdata->netdev, + "adding mac address at %#x\n", + *mac_reg); mac_addr_hi = FXGMAC_SET_REG_BITS( mac_addr_hi, MAC_MACA1HR_AE_POS, MAC_MACA1HR_AE_LEN, 1); @@ -162,7 +162,7 @@ static int fxgmac_enable_rx_vlan_stripping(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0x3); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); - DPRINTK("fxgmac enable MAC rx vlan stripping.\n"); + DPRINTK("fxgmac enable MAC rx vlan stripping, set val = %x\n", regval); return 0; } @@ -175,7 +175,7 @@ static int fxgmac_disable_rx_vlan_stripping(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); - DPRINTK("fxgmac disable MAC rx vlan stripping.\n"); + DPRINTK("fxgmac disable MAC rx vlan stripping, set val = %x\n", regval); return 0; } @@ -305,19 +305,15 @@ static int fxgmac_set_promiscuous_mode(struct fxgmac_pdata *pdata, val); writereg(pdata->pAdapter, regval, 
pdata->mac_regs + MAC_PFR); - DbgPrintF(MP_TRACE, "" STR_FORMAT " - promiscuous mode=%d, reg=%x.", - __FUNCTION__, enable, regval); - DbgPrintF( - MP_TRACE, - "" STR_FORMAT - " - note, vlan filter is called when set promiscuous mode=%d.", - __FUNCTION__, enable); + DbgPrintF(MP_TRACE, "promiscuous mode=%d", enable); + DbgPrintF(MP_TRACE, "set val = %x", regval); + DbgPrintF(MP_TRACE, "note, vlan filter is called when set promiscuous mode=%d", enable); /* Hardware will still perform VLAN filtering in promiscuous mode */ if (enable) { fxgmac_disable_rx_vlan_filtering(pdata); } else { - if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (FXGMAC_RX_VLAN_FILTERING_ENABLED) { fxgmac_enable_rx_vlan_filtering(pdata); } } @@ -344,8 +340,9 @@ static int fxgmac_enable_rx_broadcast(struct fxgmac_pdata *pdata, val); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); - DbgPrintF(MP_TRACE, "%s - bcast en=%d, bit-val=%d, reg=%x.", - __FUNCTION__, enable, val, regval); + DbgPrintF(MP_TRACE, "bcast en=%d", enable); + DbgPrintF(MP_TRACE, "bit-val=%d", val); + DbgPrintF(MP_TRACE, "reg=%x", regval); return 0; } @@ -367,9 +364,8 @@ static int fxgmac_set_all_multicast_mode(struct fxgmac_pdata *pdata, val); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); - DbgPrintF(MP_TRACE, - "" STR_FORMAT " - Enable all Multicast=%d, regval=%#x.", - __FUNCTION__, enable, regval); + DbgPrintF(MP_TRACE, "Enable all Multicast=%d", enable); + DbgPrintF(MP_TRACE, "set val = %#x.", regval); return 0; } @@ -381,7 +377,7 @@ static void fxgmac_set_mac_addn_addrs(struct fxgmac_pdata *pdata) struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; #endif - unsigned int addn_macs; + u32 addn_macs; unsigned int mac_reg; mac_reg = MAC_MACA1HR; @@ -451,7 +447,7 @@ static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, unsigned char *pmc_mac, int b_add) { - unsigned int hash_reg, reg_bit; + u32 hash_reg, reg_bit; unsigned int j; u32 crc, reversal_crc, regval; @@ -464,7 +460,7 @@ static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, } DBGPRINT( MP_TRACE, - ("> 24), hash_reg, reg_bit); @@ -489,10 +485,11 @@ static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, static void fxgmac_set_mac_hash_table(struct fxgmac_pdata *pdata) { #ifndef DPDK -#if FUXI_MAC_HASH_TABLE +#if FXGMAC_MAC_HASH_TABLE struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; + fxgmac_config_multicast_mac_hash_table(pdata, (unsigned char *)0, 1); netdev_for_each_mc_addr(ha, netdev) { fxgmac_config_multicast_mac_hash_table(pdata, ha->addr, 1); } @@ -504,7 +501,7 @@ static void fxgmac_set_mac_hash_table(struct fxgmac_pdata *pdata) #endif } -static int fxgmac_add_mac_addresses(struct fxgmac_pdata *pdata) +static int fxgmac_set_mc_addresses(struct fxgmac_pdata *pdata) { if (pdata->hw_feat.hash_table_size) fxgmac_set_mac_hash_table(pdata); @@ -514,6 +511,15 @@ static int fxgmac_add_mac_addresses(struct fxgmac_pdata *pdata) return 0; } +static void fxgmac_set_multicast_mode(struct fxgmac_pdata *pdata, + unsigned int enable) +{ + if (enable) + fxgmac_set_mc_addresses(pdata); + else + fxgmac_config_multicast_mac_hash_table(pdata, (unsigned char *)0, 1); +} + static void fxgmac_config_mac_address(struct fxgmac_pdata *pdata) { u32 regval; @@ -524,7 +530,7 @@ static void fxgmac_config_mac_address(struct fxgmac_pdata *pdata) regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, 
MAC_PFR_HPF_LEN, 1); -#if FUXI_MAC_HASH_TABLE +#if FXGMAC_MAC_HASH_TABLE regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, MAC_PFR_HUC_LEN, 1); #endif @@ -560,7 +566,7 @@ static int fxgmac_config_jumbo(struct fxgmac_pdata *pdata) static void fxgmac_config_checksum_offload(struct fxgmac_pdata *pdata) { - if (pdata->netdev->features & NETIF_F_RXCSUM) + if (FXGMAC_RX_CHECKSUM_ENABLED) fxgmac_enable_rx_csum(pdata); else fxgmac_disable_rx_csum(pdata); @@ -587,16 +593,21 @@ static void fxgmac_config_vlan_support(struct fxgmac_pdata *pdata) static int fxgmac_config_rx_mode(struct fxgmac_pdata *pdata) { - struct net_device *netdev = pdata->netdev; - unsigned int pr_mode, am_mode; + unsigned int pr_mode, am_mode, mu_mode, bd_mode; + +#ifndef FXGMAC_NETDEV_MU_MODE_ENABLED +#define FXGMAC_NETDEV_MU_MODE_ENABLED 0 +#endif - pr_mode = ((netdev->flags & IFF_PROMISC) != 0); - am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); + pr_mode = FXGMAC_NETDEV_PR_MODE_ENABLED; + am_mode = FXGMAC_NETDEV_AM_MODE_ENABLED; + mu_mode = FXGMAC_NETDEV_MU_MODE_ENABLED; + bd_mode = FXGMAC_NETDEV_BD_MODE_ENABLED; + fxgmac_enable_rx_broadcast(pdata, bd_mode); fxgmac_set_promiscuous_mode(pdata, pr_mode); fxgmac_set_all_multicast_mode(pdata, am_mode); - - fxgmac_add_mac_addresses(pdata); + fxgmac_set_multicast_mode(pdata, mu_mode); return 0; } @@ -604,6 +615,7 @@ static int fxgmac_config_rx_mode(struct fxgmac_pdata *pdata) static void fxgmac_prepare_tx_stop(struct fxgmac_pdata *pdata, struct fxgmac_channel *channel) { +#ifdef FXGMAC_WAIT_TX_STOP unsigned int tx_dsr, tx_pos, tx_qidx; unsigned long tx_timeout; unsigned int tx_status; @@ -623,7 +635,7 @@ static void fxgmac_prepare_tx_stop(struct fxgmac_pdata *pdata, DMA_DSRX_TPS_START; } -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED tx_timeout = jiffies + msecs_to_jiffies(100); /* 100ms */ #else tx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ); @@ -643,6 +655,10 @@ static void fxgmac_prepare_tx_stop(struct fxgmac_pdata *pdata, netdev_info(pdata->netdev, "timed out waiting for Tx DMA channel %u to stop\n", channel->queue_index); +#else + pdata = pdata; + channel = channel; +#endif } static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) @@ -653,7 +669,7 @@ static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) unsigned int i; u32 regval; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED pdata->tx_hang_restart_queuing = 0; #endif @@ -675,6 +691,11 @@ static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } /* Enable Tx DMA channel */ FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1); } @@ -682,6 +703,12 @@ static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) /* Enable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { + +#if FXGMAC_FAKE_4_TX_QUEUE_ENABLED + if (i > 0) + break; +#endif + regval = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, @@ -715,7 +742,7 @@ static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) fxgmac_prepare_tx_stop(pdata, channel); -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED pdata->tx_hang_restart_queuing = 0; #endif } @@ -728,6 +755,11 @@ static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->tx_q_count; i++) { txq = dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for 
port %d\n", + dev->data->port_id); + return; + } fxgmac_txq_prepare_tx_stop(pdata, i); } #endif @@ -766,6 +798,11 @@ static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) #else for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + dev->data->port_id); + return; + } FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); } #endif @@ -774,14 +811,15 @@ static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) static void fxgmac_prepare_rx_stop(struct fxgmac_pdata *pdata, unsigned int queue) { - unsigned int rx_status, prxq; + u32 rx_status, prxq; +#if defined(FXGMAC_WAIT_RX_STOP_BY_PRXQ_RXQSTS) unsigned int rxqsts; unsigned long rx_timeout; /* The Rx engine cannot be stopped if it is actively processing * packets. Wait for the Rx queue to empty the Rx fifo. Don't * wait forever though... */ -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED rx_timeout = jiffies + msecs_to_jiffies(500); /* 500ms, larger is better */ #else @@ -804,6 +842,19 @@ static void fxgmac_prepare_rx_stop(struct fxgmac_pdata *pdata, netdev_info(pdata->netdev, "timed out waiting for Rx queue %u to empty\n", queue); +#else + unsigned int busy = 100; + do { + rx_status = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); + prxq = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, MTL_Q_RQDR_PRXQ_LEN); + busy--; + usleep_range_ex(pdata->pAdapter, 500, 1000); + } while ((prxq) && (busy)); + if (0 == busy) { + rx_status = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); + DbgPrintF(MP_WARN, "warning !!!timed out waiting for Rx queue %u to empty\n", queue); + } +#endif } static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) @@ -811,7 +862,8 @@ static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) #ifndef DPDK struct fxgmac_channel *channel; #endif - unsigned int regval, i; + unsigned int i; + u32 regval; /* Enable each Rx DMA channel */ #ifndef DPDK @@ -832,6 +884,11 @@ static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + dev->data->port_id); + return; + } /* Enable Rx DMA channel */ FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); } @@ -866,7 +923,7 @@ static void fxgmac_enable_channel_rx(struct fxgmac_pdata *pdata, unsigned int queue) { struct fxgmac_channel *channel; - unsigned int regval; + u32 regval; /* Enable Rx DMA channel */ channel = pdata->channel_head + queue; @@ -922,6 +979,11 @@ static void fxgmac_disable_rx(struct fxgmac_pdata *pdata) for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + dev->data->port_id); + return; + } fxgmac_prepare_rx_stop(pdata, i); } #endif @@ -948,495 +1010,42 @@ static void fxgmac_disable_rx(struct fxgmac_pdata *pdata) #else for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + dev->data->port_id); + return; + } FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); } #endif } -static void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, - struct fxgmac_ring *ring) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_desc_data *desc_data; - - /* Make sure everything is written before the register write */ - wmb(); - - /* Issue a poll command to Tx DMA by writing address - * of next immediate free descriptor - */ - desc_data = 
FXGMAC_GET_DESC_DATA(ring, ring->cur); - -#if !(FXGMAC_DUMMY_TX_DEBUG) - writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); -#else - DPRINTK("dummy tx, fxgmac_tx_start_xmit, tail reg=0x%lx, val=%08x\n", - FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO) - pdata->mac_regs, - (u32)lower_32_bits(desc_data->dma_desc_addr)); -#endif - if (netif_msg_tx_done(pdata)) - DPRINTK("tx_start_xmit: dump before wr reg, dma base=0x%016llx, reg=0x%08x, tx timer usecs=%u, tx_timer_active=%u\n", - desc_data->dma_desc_addr, - readreg(pdata->pAdapter, - FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)), - pdata->tx_usecs, channel->tx_timer_active); - - ring->tx.xmit_more = 0; -} - -static void fxgmac_dev_xmit(struct fxgmac_channel *channel) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_ring *ring = channel->tx_ring; - unsigned int tso_context, vlan_context; - struct fxgmac_desc_data *desc_data; - struct fxgmac_dma_desc *dma_desc; - struct fxgmac_pkt_info *pkt_info; - unsigned int csum, tso, vlan; - int start_index = ring->cur; - int cur_index = ring->cur; - int i; - - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit callin, desc cur=%d\n", cur_index); - - pkt_info = &ring->pkt_info; - csum = FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, - TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); - tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, - TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); - vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, - TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); - - if (tso && (pkt_info->mss != ring->tx.cur_mss)) - tso_context = 1; - else - tso_context = 0; - - if ((tso_context) && (netif_msg_tx_done(pdata))) { - /* tso is initialized to start... */ - DPRINTK("fxgmac_dev_xmit, tso_%s tso=0x%x, pkt_mss=%d, cur_mss=%d\n", - (pkt_info->mss) ? 
"start" : "stop", tso, pkt_info->mss, - ring->tx.cur_mss); - } - - if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) - vlan_context = 1; - else - vlan_context = 0; - - if (vlan && (netif_msg_tx_done(pdata))) - DPRINTK("fxgmac_dev_xmi:pkt vlan=%d, ring vlan=%d, vlan_context=%d\n", - pkt_info->vlan_ctag, ring->tx.cur_vlan_ctag, - vlan_context); - - desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); - dma_desc = desc_data->dma_desc; - - /* Create a context descriptor if this is a TSO pkt_info */ - if (tso_context || vlan_context) { - if (tso_context) { - if (netif_msg_tx_done(pdata)) - DPRINTK("xlgamc dev xmit, construct tso context descriptor, mss=%u\n", - pkt_info->mss); - - /* Set the MSS size */ - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, - TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); - - /* Mark it as a CONTEXT descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, - TX_CONTEXT_DESC3_CTXT_LEN, 1); - - /* Indicate this descriptor contains the MSS */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, - TX_CONTEXT_DESC3_TCMSSV_LEN, 1); - - ring->tx.cur_mss = pkt_info->mss; - } - - if (vlan_context) { - netif_dbg(pdata, tx_queued, pdata->netdev, - "VLAN context descriptor, ctag=%u\n", - pkt_info->vlan_ctag); - - /* Mark it as a CONTEXT descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, - TX_CONTEXT_DESC3_CTXT_LEN, 1); - - /* Set the VLAN tag */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, - TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); - - /* Indicate this descriptor contains the VLAN tag */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, - TX_CONTEXT_DESC3_VLTV_LEN, 1); - - ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; - } - - cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); - desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); - dma_desc = desc_data->dma_desc; - } - - /* Update buffer address (for TSO this is the header) */ - dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); - dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); - - /* Update the buffer length */ - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, - TX_NORMAL_DESC2_HL_B1L_POS, - TX_NORMAL_DESC2_HL_B1L_LEN, - desc_data->skb_dma_len); - - /* VLAN tag insertion check */ - if (vlan) { - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, - TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); - pdata->stats.tx_vlan_packets++; - } - - /* Timestamp enablement check */ - if (FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_PTP_POS, - TX_PACKET_ATTRIBUTES_PTP_LEN)) - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, - TX_NORMAL_DESC2_TTSE_LEN, 1); - - /* Mark it as First Descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_FD_POS, - TX_NORMAL_DESC3_FD_LEN, 1); - - /* Mark it as a NORMAL descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_CTXT_POS, - TX_NORMAL_DESC3_CTXT_LEN, 0); - - /* Set OWN bit if not the first descriptor */ - if (cur_index != start_index) - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, - TX_NORMAL_DESC3_OWN_LEN, 1); - - if (tso) { - /* Enable TSO */ - dma_desc->desc3 = 
FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, - TX_NORMAL_DESC3_TSE_LEN, 1); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, - TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, - TX_NORMAL_DESC3_TCPHDRLEN_LEN, - pkt_info->tcp_header_len / 4); - - pdata->stats.tx_tso_packets++; - } else { - /* Enable CRC and Pad Insertion */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, - TX_NORMAL_DESC3_CPC_LEN, 0); - - /* Enable HW CSUM */ - if (csum) - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, - TX_NORMAL_DESC3_CIC_LEN, 0x3); - - /* Set the total length to be transmitted */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_FL_POS, - TX_NORMAL_DESC3_FL_LEN, - pkt_info->length); - } - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit before more descs, desc cur=%d, start=%d, desc=%#x,%#x,%#x,%#x\n", - cur_index, start_index, dma_desc->desc0, - dma_desc->desc1, dma_desc->desc2, dma_desc->desc3); - - if (start_index <= cur_index) - i = cur_index - start_index + 1; - else - i = ring->dma_desc_count - start_index + cur_index; - - for (; i < pkt_info->desc_count; i++) { - cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); - - desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); - dma_desc = desc_data->dma_desc; - - /* Update buffer address */ - dma_desc->desc0 = - cpu_to_le32(lower_32_bits(desc_data->skb_dma)); - dma_desc->desc1 = - cpu_to_le32(upper_32_bits(desc_data->skb_dma)); - - /* Update the buffer length */ - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, - TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); - - /* Set OWN bit */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, - TX_NORMAL_DESC3_OWN_LEN, 1); - - /* Mark it as NORMAL descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, - TX_NORMAL_DESC3_CTXT_LEN, 0); - - /* Enable HW CSUM */ - if (csum) - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, - TX_NORMAL_DESC3_CIC_LEN, 0x3); - } - - /* Set LAST bit for the last descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_LD_POS, - TX_NORMAL_DESC3_LD_LEN, 1); - - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, - TX_NORMAL_DESC2_IC_POS, - TX_NORMAL_DESC2_IC_LEN, 1); - - /* Save the Tx info to report back during cleanup */ - desc_data->tx.packets = pkt_info->tx_packets; - desc_data->tx.bytes = pkt_info->tx_bytes; - - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit last descs, desc cur=%d, desc=%#x,%#x,%#x,%#x\n", - cur_index, dma_desc->desc0, dma_desc->desc1, - dma_desc->desc2, dma_desc->desc3); - - /* In case the Tx DMA engine is running, make sure everything - * is written to the descriptor(s) before setting the OWN bit - * for the first descriptor - */ - dma_wmb(); - - /* Set OWN bit for the first descriptor */ - desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); - dma_desc = desc_data->dma_desc; - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_OWN_POS, - TX_NORMAL_DESC3_OWN_LEN, 1); - - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit first descs, start=%d, desc=%#x,%#x,%#x,%#x\n", - start_index, dma_desc->desc0, dma_desc->desc1, - dma_desc->desc2, dma_desc->desc3); - - if 
(netif_msg_tx_queued(pdata)) - fxgmac_dump_tx_desc(pdata, ring, start_index, - pkt_info->desc_count, 1); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit about to call tx_start_xmit, ring xmit_more=%d, txq_stopped=%x\n", - ring->tx.xmit_more, - netif_xmit_stopped(netdev_get_tx_queue( - pdata->netdev, channel->queue_index))); -#else /* ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,165))*/ - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit about to call tx_start_xmit, pkt xmit_more=%d, txq_stopped=%x\n", - pkt_info->skb->xmit_more, - netif_xmit_stopped(netdev_get_tx_queue( - pdata->netdev, channel->queue_index))); -#endif - - /* Make sure ownership is written to the descriptor */ - smp_wmb(); - - ring->cur = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); - - fxgmac_tx_start_xmit(channel, ring); - - /* yzhang for reduce debug output */ - if (netif_msg_tx_done(pdata)) { - DPRINTK("dev_xmit callout %s: descriptors %u to %u written\n", - channel->name, start_index & (ring->dma_desc_count - 1), - (ring->cur - 1) & (ring->dma_desc_count - 1)); - } -} - -static void fxgmac_get_rx_tstamp(struct fxgmac_pkt_info *pkt_info, - struct fxgmac_dma_desc *dma_desc) -{ - u64 nsec; - - nsec = le32_to_cpu(dma_desc->desc1); - nsec <<= 32; - nsec |= le32_to_cpu(dma_desc->desc0); - if (nsec != 0xffffffffffffffffULL) { - pkt_info->rx_tstamp = nsec; - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, - RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); - } -} - -static void fxgmac_tx_desc_reset(struct fxgmac_desc_data *desc_data) -{ - struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; - - /* Reset the Tx descriptor - * Set buffer 1 (lo) address to zero - * Set buffer 1 (hi) address to zero - * Reset all other control bits (IC, TTSE, B2L & B1L) - * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) - */ - dma_desc->desc0 = 0; - dma_desc->desc1 = 0; - dma_desc->desc2 = 0; - dma_desc->desc3 = 0; - - /* Make sure ownership is written to the descriptor */ - dma_wmb(); -} - -static void fxgmac_tx_desc_init(struct fxgmac_channel *channel) -{ - struct fxgmac_ring *ring = channel->tx_ring; - struct fxgmac_desc_data *desc_data; - int start_index = ring->cur; - unsigned int i; - start_index = start_index; - - /* Initialize all descriptors */ - for (i = 0; i < ring->dma_desc_count; i++) { - desc_data = FXGMAC_GET_DESC_DATA(ring, i); - - /* Initialize Tx descriptor */ - fxgmac_tx_desc_reset(desc_data); - } - - writereg(channel->pdata->pAdapter, channel->pdata->tx_desc_count - 1, - FXGMAC_DMA_REG(channel, DMA_CH_TDRLR)); - - /* Update the starting address of descriptor ring */ - desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); - writereg(channel->pdata->pAdapter, - upper_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); - writereg(channel->pdata->pAdapter, - lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); -} - -static void fxgmac_rx_desc_reset(struct fxgmac_pdata *pdata, - struct fxgmac_desc_data *desc_data, - unsigned int index) -{ - struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; - - /* Reset the Rx descriptor - * Set buffer 1 (lo) address to header dma address (lo) - * Set buffer 1 (hi) address to header dma address (hi) - * Set buffer 2 (lo) address to buffer dma address (lo) - * Set buffer 2 (hi) address to buffer dma address (hi) and - * set control bits OWN and INTE - */ - dma_desc->desc0 = - 
cpu_to_le32(lower_32_bits(desc_data->rx.buf.dma_base)); - dma_desc->desc1 = - cpu_to_le32(upper_32_bits(desc_data->rx.buf.dma_base)); - dma_desc->desc2 = 0; - dma_desc->desc3 = 0; - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_INTE_POS, - RX_NORMAL_DESC3_INTE_LEN, 1); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_BUF2V_POS, - RX_NORMAL_DESC3_BUF2V_LEN, 0); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_BUF1V_POS, - RX_NORMAL_DESC3_BUF1V_LEN, 1); - - /* Since the Rx DMA engine is likely running, make sure everything - * is written to the descriptor(s) before setting the OWN bit - * for the descriptor - */ - dma_wmb(); - - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_OWN_POS, - RX_NORMAL_DESC3_OWN_LEN, 1); - - /* Make sure ownership is written to the descriptor */ - dma_wmb(); -} - -static void fxgmac_rx_desc_init(struct fxgmac_channel *channel) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_ring *ring = channel->rx_ring; - unsigned int start_index = ring->cur; - struct fxgmac_desc_data *desc_data; - unsigned int i; - - /* Initialize all descriptors */ - for (i = 0; i < ring->dma_desc_count; i++) { - desc_data = FXGMAC_GET_DESC_DATA(ring, i); - - /* Initialize Rx descriptor */ - fxgmac_rx_desc_reset(pdata, desc_data, i); - } - - /* Update the total number of Rx descriptors */ - writereg(pdata->pAdapter, ring->dma_desc_count - 1, - FXGMAC_DMA_REG(channel, DMA_CH_RDRLR)); - - /* Update the starting address of descriptor ring */ - desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); - writereg(pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); - writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); - - /* Update the Rx Descriptor Tail Pointer */ - desc_data = FXGMAC_GET_DESC_DATA( - ring, start_index + ring->dma_desc_count - 1); - writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); -} - static int fxgmac_is_context_desc(struct fxgmac_dma_desc *dma_desc) { /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ - return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, - TX_NORMAL_DESC3_CTXT_LEN); + int regval; + regval = (int)FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN); + return regval; } static int fxgmac_is_last_desc(struct fxgmac_dma_desc *dma_desc) { /* Rx and Tx share LD bit, so check TDES3.LD bit */ - return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_LD_POS, - TX_NORMAL_DESC3_LD_LEN); + int regval; + regval = (int)FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN); + return regval; } static int fxgmac_disable_tx_flow_control(struct fxgmac_pdata *pdata) { unsigned int max_q_count, q_count; - unsigned int reg, regval; + unsigned int reg; unsigned int i; + u32 regval; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { @@ -1467,8 +1076,9 @@ static int fxgmac_disable_tx_flow_control(struct fxgmac_pdata *pdata) static int fxgmac_enable_tx_flow_control(struct fxgmac_pdata *pdata) { unsigned int max_q_count, q_count; - unsigned int reg, regval; + unsigned int reg; unsigned int i; + u32 regval; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { @@ -1572,6 +1182,10 @@ static int fxgmac_config_rx_coalesce(struct fxgmac_pdata *pdata) for 
(i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RIWT, RWT, pdata->rx_riwt); } #endif @@ -1645,6 +1259,11 @@ static void fxgmac_config_rx_buffer_size(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; @@ -1661,31 +1280,35 @@ static void fxgmac_config_rx_buffer_size(struct fxgmac_pdata *pdata) static void fxgmac_config_tso_mode(struct fxgmac_pdata *pdata) { + u32 tso; #ifndef DPDK struct fxgmac_channel *channel; unsigned int i; u32 regval; + tso = pdata->hw_feat.tso; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; - if (pdata->hw_feat.tso) { - regval = readreg(pdata->pAdapter, - FXGMAC_DMA_REG(channel, DMA_CH_TCR)); - regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, - DMA_CH_TCR_TSE_LEN, 1); - writereg(pdata->pAdapter, regval, - FXGMAC_DMA_REG(channel, DMA_CH_TCR)); - } + regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, + DMA_CH_TCR_TSE_LEN, tso); + writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_TCR)); } #else struct fxgmac_tx_queue *txq; unsigned int i; + tso = pdata->hw_feat.tso; for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; - FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, pdata->tx_pbl); + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, tso); } #endif } @@ -1714,7 +1337,12 @@ static void fxgmac_config_sph_mode(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; - FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_CR, SPH, pdata->rx_pbl); + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_CR, SPH, 0); } #endif @@ -1724,11 +1352,11 @@ static void fxgmac_config_sph_mode(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_ECR); } -static unsigned int fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata, +static unsigned long fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata, unsigned int usec) { unsigned long rate; - unsigned int ret; + unsigned long ret; rate = pdata->sysclk_rate; @@ -1742,11 +1370,11 @@ static unsigned int fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata, return ret; } -static unsigned int fxgmac_riwt_to_usec(struct fxgmac_pdata *pdata, +static unsigned long fxgmac_riwt_to_usec(struct fxgmac_pdata *pdata, unsigned int riwt) { unsigned long rate; - unsigned int ret; + unsigned long ret; rate = pdata->sysclk_rate; @@ -1809,16 +1437,16 @@ static void fxgmac_config_mtl_mode(struct fxgmac_pdata *pdata) static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) { unsigned int ppq, ppq_extra, prio, prio_queues; - unsigned int queue; - unsigned int reg, regval; + unsigned int reg; unsigned int mask; 
unsigned int i, j; + u32 regval; /* Map the MTL Tx Queues to Traffic Classes * Note: Tx Queues >= Traffic Classes */ - queue = 0; - DPRINTK("need to map TXq(%u) to TC\n", queue); + // queue = 0; + // DPRINTK("need to map TXq(%u) to TC\n", queue); /* Map the 8 VLAN priority values to available MTL Rx queues */ prio_queues = @@ -1832,14 +1460,18 @@ static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) mask = 0; for (j = 0; j < ppq; j++) { netif_dbg(pdata, drv, pdata->netdev, - "PRIO%u mapped to RXq%u\n", prio, i); + "PRIO%u,", prio); + netif_dbg(pdata, drv, pdata->netdev, + " mapped to RXq%u\n", i); mask |= (1 << prio); prio++; } if (i < ppq_extra) { netif_dbg(pdata, drv, pdata->netdev, - "PRIO%u mapped to RXq%u\n", prio, i); + "PRIO%u,", prio); + netif_dbg(pdata, drv, pdata->netdev, + " mapped to RXq%u\n", i); mask |= (1 << prio); prio++; } @@ -1879,11 +1511,11 @@ static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); } -static unsigned int fxgmac_calculate_per_queue_fifo(unsigned int fifo_size, +static u32 fxgmac_calculate_per_queue_fifo(unsigned long fifo_size, unsigned int queue_count) { - unsigned int q_fifo_size; - unsigned int p_fifo; + unsigned long q_fifo_size; + unsigned long p_fifo; /* Calculate the configured fifo size */ q_fifo_size = 1 << (fifo_size + 7); @@ -1904,16 +1536,51 @@ static unsigned int fxgmac_calculate_per_queue_fifo(unsigned int fifo_size, return p_fifo; } +static u32 fxgmac_calculate_max_checksum_size(struct fxgmac_pdata *pdata) +{ + u32 fifo_size; + + fifo_size = fxgmac_calculate_per_queue_fifo( + pdata->hw_feat.tx_fifo_size, + pdata->tx_q_count); + + /* Each increment in the queue fifo size represents 256 bytes of + * fifo, with 0 representing 256 bytes. Distribute the fifo equally + * between the queues. + */ + fifo_size = (fifo_size + 1) * 256; + + /* Packet size < TxQSize - (PBL + N)*(DATAWIDTH/8), + * Datawidth = 128 + * If Datawidth = 32, N = 7; otherwise N = 5. + * TxQSize is indicated by TQS field of MTL_TxQ#_Operation_Mode register + * PBL = TxPBL field in the DMA_CH#_TX_Control register in all DMA configurations. + */ + fifo_size -= (pdata->tx_pbl * (pdata->pblx8 ? 
8 : 1) + 5) * (FXGMAC_DATA_WIDTH / 8); + fifo_size -= 256; + + return fifo_size; +} + static void fxgmac_config_tx_fifo_size(struct fxgmac_pdata *pdata) { - unsigned int fifo_size; + u32 fifo_size; unsigned int i; u32 regval; fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, - pdata->tx_q_count); +#if FXGMAC_FAKE_4_TX_QUEUE_ENABLED + 1);//force to 1 queue +#else + pdata->tx_q_count); +#endif for (i = 0; i < pdata->tx_q_count; i++) { +#if FXGMAC_FAKE_4_TX_QUEUE_ENABLED + //DPRINTK("Tx idx > 0,break\n"); + if (i > 0) + break; +#endif regval = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS, @@ -1923,13 +1590,16 @@ static void fxgmac_config_tx_fifo_size(struct fxgmac_pdata *pdata) } netif_info(pdata, drv, pdata->netdev, - "%d Tx hardware queues, %d byte fifo per queue\n", - pdata->tx_q_count, ((fifo_size + 1) * 256)); + "%d Tx hardware queues,", + pdata->tx_q_count); + netif_info(pdata, drv, pdata->netdev, + " %d byte fifo per queue\n", + ((fifo_size + 1) * 256)); } static void fxgmac_config_rx_fifo_size(struct fxgmac_pdata *pdata) { - unsigned int fifo_size; + u32 fifo_size; unsigned int i; u32 regval; @@ -1946,8 +1616,11 @@ static void fxgmac_config_rx_fifo_size(struct fxgmac_pdata *pdata) } netif_info(pdata, drv, pdata->netdev, - "%d Rx hardware queues, %d byte fifo per queue\n", - pdata->rx_q_count, ((fifo_size + 1) * 256)); + "%d Rx hardware queues,", + pdata->rx_q_count); + netif_info(pdata, drv, pdata->netdev, + " %d byte fifo per queue\n", + ((fifo_size + 1) * 256)); } static void fxgmac_config_flow_control_threshold(struct fxgmac_pdata *pdata) @@ -2050,6 +1723,11 @@ static int fxgmac_config_osp_mode(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP, pdata->tx_osp_mode); } @@ -2079,6 +1757,11 @@ static int fxgmac_config_pblx8(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8, pdata->pblx8); } #endif @@ -2086,7 +1769,7 @@ static int fxgmac_config_pblx8(struct fxgmac_pdata *pdata) return 0; } -static int fxgmac_get_tx_pbl_val(struct fxgmac_pdata *pdata) +static u32 fxgmac_get_tx_pbl_val(struct fxgmac_pdata *pdata) { u32 regval; @@ -2122,6 +1805,11 @@ static int fxgmac_config_tx_pbl_val(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL, pdata->tx_pbl); } #endif @@ -2129,7 +1817,7 @@ static int fxgmac_config_tx_pbl_val(struct fxgmac_pdata *pdata) return 0; } -static int fxgmac_get_rx_pbl_val(struct fxgmac_pdata *pdata) +static u32 fxgmac_get_rx_pbl_val(struct fxgmac_pdata *pdata) { u32 regval; @@ -2165,6 +1853,11 @@ static int fxgmac_config_rx_pbl_val(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + if (!rxq) { +
DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL, pdata->rx_pbl); } #endif @@ -2183,7 +1876,7 @@ static u64 fxgmac_mmc_read(struct fxgmac_pdata *pdata, unsigned int reg_lo) static void fxgmac_tx_mmc_int(struct fxgmac_pdata *pdata) { - unsigned int mmc_isr = + u32 mmc_isr = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TISR); struct fxgmac_stats *stats = &pdata->stats; @@ -2321,7 +2014,7 @@ static void fxgmac_tx_mmc_int(struct fxgmac_pdata *pdata) static void fxgmac_rx_mmc_int(struct fxgmac_pdata *pdata) { - unsigned int mmc_isr = + u32 mmc_isr = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RISR); struct fxgmac_stats *stats = &pdata->stats; @@ -2615,14 +2308,14 @@ static void fxgmac_config_mmc(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, MMC_CR_CR_LEN, 1); writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); -#if defined(FUXI_MISC_INT_HANDLE_FEATURE_EN) && FUXI_MISC_INT_HANDLE_FEATURE_EN +#if FXGMAC_MISC_INT_HANDLE_FEATURE_ENABLED writereg(pdata->pAdapter, 0xffffffff, pdata->mac_regs + MMC_IPCRXINTMASK); #endif } static int fxgmac_write_rss_reg(struct fxgmac_pdata *pdata, unsigned int type, - unsigned int index, unsigned int val) + unsigned int index, u32 val) { int ret = 0; type = type; @@ -2660,7 +2353,6 @@ static int fxgmac_write_rss_options(struct fxgmac_pdata *pdata) return 0; } -#if !defined(DPDK) static int fxgmac_read_rss_hash_key(struct fxgmac_pdata *pdata, u8 *key_buf) { unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); @@ -2683,7 +2375,6 @@ static int fxgmac_read_rss_hash_key(struct fxgmac_pdata *pdata, u8 *key_buf) return 0; } -#endif static int fxgmac_write_rss_hash_key(struct fxgmac_pdata *pdata) { @@ -2692,10 +2383,9 @@ static int fxgmac_write_rss_hash_key(struct fxgmac_pdata *pdata) int ret; while (key_regs--) { - ret = fxgmac_write_rss_reg( - pdata, FXGMAC_RSS_HASH_KEY_TYPE, - MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC, - cpu_to_be32(*key)); + ret = fxgmac_write_rss_reg(pdata, (unsigned int)FXGMAC_RSS_HASH_KEY_TYPE, + (unsigned int)(MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC), + (unsigned int)(cpu_to_be32 (*key))); if (ret) return ret; key++; @@ -2743,7 +2433,7 @@ static int fxgmac_write_rss_lookup_table(struct fxgmac_pdata *pdata) static int fxgmac_set_rss_hash_key(struct fxgmac_pdata *pdata, const u8 *key) { - memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + memcpy(pdata->rss_key, (void *)key, sizeof(pdata->rss_key)); return fxgmac_write_rss_hash_key(pdata); } @@ -2754,7 +2444,7 @@ static int fxgmac_set_rss_lookup_table(struct fxgmac_pdata *pdata, unsigned int i; u32 tval; -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED DPRINTK("Set_rss_table, rss ctrl eth=0x%08x\n", 0); return 0; @@ -2789,6 +2479,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) u32 regval; u32 size = 0; +#ifdef FXGMAC_USE_DEFAULT_RSS_KEY_TBALE int ret; if (!pdata->hw_feat.rss) { @@ -2806,6 +2497,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) if (ret) { return ret; } +#endif regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); @@ -2814,7 +2506,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_TBL_SIZE_POS, MGMT_RSS_CTRL_TBL_SIZE_LEN, size); -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED /* set default cpu id to 1 */ regval = FXGMAC_SET_REG_BITS(regval, 8, 2, 1); #endif @@ -2827,7 +2519,7 @@ static
int fxgmac_enable_rss(struct fxgmac_pdata *pdata) MGMT_RSS_CTRL_OPT_LEN, pdata->rss_options); writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); - DPRINTK("enable_rss callout, rss ctrl reg=0x%08x\n", regval); + DPRINTK("enable_rss callout, set val = 0x%08x\n", regval); return 0; } @@ -2839,7 +2531,7 @@ static int fxgmac_disable_rss(struct fxgmac_pdata *pdata) if (!pdata->hw_feat.rss) return -EOPNOTSUPP; -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED DPRINTK("Disable_rss, rss ctrl eth=0x%08x\n", 0); return 0; @@ -2850,7 +2542,7 @@ static int fxgmac_disable_rss(struct fxgmac_pdata *pdata) MAC_RSSCR_RSSE_LEN, 0); writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); - DPRINTK("disable_rss, rss ctrl reg=0x%08x\n", regval); + DPRINTK("disable_rss, set val = 0x%08x\n", regval); return 0; } @@ -2868,11 +2560,11 @@ static void fxgmac_config_rss(struct fxgmac_pdata *pdata) ret = fxgmac_disable_rss(pdata); if (ret) { - DBGPRINT(MP_ERROR, - ("fxgmac_config_rss: error configuring RSS\n")); + DPRINTK("fxgmac_config_rss: error configuring RSS\n"); } } +#if defined(FXGMAC_POWER_MANAGEMENT) static void fxgmac_update_aoe_ipv4addr(struct fxgmac_pdata *pdata, u8 *ip_addr) { unsigned int regval, ipval = 0; @@ -2891,12 +2583,14 @@ static void fxgmac_update_aoe_ipv4addr(struct fxgmac_pdata *pdata, u8 *ip_addr) DPRINTK("%s, covert IP dotted-addr %s to binary 0x%08x ok.\n", __FUNCTION__, ip_addr, cpu_to_be32(ipval)); } else { +#ifdef FXGMAC_AOE_FEATURE_ENABLED /* get ipv4 addr from net device */ ipval = fxgmac_get_netdev_ip4addr(pdata); DPRINTK("%s, Get net device binary IP ok, 0x%08x\n", __FUNCTION__, cpu_to_be32(ipval)); ipval = cpu_to_be32(ipval); +#endif } regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ARP_PROTO_ADDR); @@ -3043,8 +2737,8 @@ static int fxgmac_set_ns_offload(struct fxgmac_pdata *pdata, unsigned int index, return 0; } -static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata, - unsigned int param) +#ifdef FXGMAC_NS_OFFLOAD_ENABLED +static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata, unsigned int param) { struct net_device *netdev = pdata->netdev; unsigned char addr_buf[5][16]; @@ -3084,6 +2778,7 @@ static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata, if (pdata->expansion.ns_offload_tab_idx >= 2) pdata->expansion.ns_offload_tab_idx = 0; } +#endif static int fxgmac_enable_ns_offload(struct fxgmac_pdata *pdata) { @@ -3529,7 +3224,7 @@ static int fxgmac_disable_wake_magic_pattern(struct fxgmac_pdata *pdata) return 0; } -#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED /* * enable Wake packet indication. called to enable before sleep/hibernation * and no needed to call disable for that, fxgmac_get_wake_packet_indication will clear to normal once done. @@ -3608,7 +3303,7 @@ static void fxgmac_get_wake_packet_indication(struct fxgmac_pdata *pdata, * wake_pattern_number, HW should tell, tbd */ for (i = 0; i < MAX_PATTERN_COUNT; i++) { - if (regval & (MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER << i)) { + if (regval & ((u32)MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER << i)) { *wake_pattern_number = i; break; } @@ -3734,51 +3429,9 @@ static int fxgmac_disable_wake_link_change(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); return 0; } +#endif // FXGMAC_POWER_MANAGEMENT -static void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en) -{ - /* enable or disable WOL. 
this function only set wake-up type, and power related configure - * will be in other place, see power management. - */ - if (!pdata->hw_feat.rwk) { - netdev_err(pdata->netdev, - "error configuring WOL - not supported.\n"); - return; - } - - fxgmac_disable_wake_magic_pattern(pdata); - fxgmac_disable_wake_pattern(pdata); - fxgmac_disable_wake_link_change(pdata); - - if (en) { - /* config mac address for rx of magic or ucast */ - fxgmac_set_mac_address(pdata, (u8 *)(pdata->netdev->dev_addr)); - - /* Enable Magic packet */ - if (pdata->expansion.wol & WAKE_MAGIC) { - fxgmac_enable_wake_magic_pattern(pdata); - } - - /* Enable global unicast packet */ - if (pdata->expansion.wol & WAKE_UCAST || - pdata->expansion.wol & WAKE_MCAST || - pdata->expansion.wol & WAKE_BCAST || - pdata->expansion.wol & WAKE_ARP) { - fxgmac_enable_wake_pattern(pdata); - } - - /* Enable ephy link change */ - if ((FXGMAC_WOL_UPON_EPHY_LINK) && - (pdata->expansion.wol & WAKE_PHY)) { - fxgmac_enable_wake_link_change(pdata); - } - } - device_set_wakeup_enable(/*pci_dev_to_dev*/ (pdata->dev), en); - - DPRINTK("config_wol callout\n"); -} - -static int fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) +static u32 fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) { u32 value; value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); @@ -3788,7 +3441,7 @@ static int fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) { #ifndef DPDK - unsigned int dma_ch_isr, dma_ch_ier; + u32 dma_ch_isr, dma_ch_ier; struct fxgmac_channel *channel; unsigned int i; @@ -3870,6 +3523,11 @@ static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } /* Clear all the interrupts which are set */ dma_ch_isr = FXGMAC_DMA_IOREAD(txq, DMA_CH_SR); @@ -3903,8 +3561,8 @@ static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) static void fxgmac_enable_mtl_interrupts(struct fxgmac_pdata *pdata) { - unsigned int q_count, i; - unsigned int mtl_q_isr; + unsigned int i; + u32 mtl_q_isr, q_count; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { @@ -3922,7 +3580,7 @@ static void fxgmac_enable_mtl_interrupts(struct fxgmac_pdata *pdata) static void fxgmac_enable_mac_interrupts(struct fxgmac_pdata *pdata) { - unsigned int mac_ier = 0; + u32 mac_ier = 0; u32 regval; /* Enable Timestamp interrupt */ @@ -3931,14 +3589,13 @@ static void fxgmac_enable_mac_interrupts(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, mac_ier, pdata->mac_regs + MAC_IER); - /* Enable all counter interrupts */ regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RIER); regval = FXGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, - MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); + MMC_RIER_ALL_INTERRUPTS_LEN, FXGMAC_MMC_IER_ALL_DEFAULT); writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_RIER); regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TIER); regval = FXGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, - MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); + MMC_TIER_ALL_INTERRUPTS_LEN, FXGMAC_MMC_IER_ALL_DEFAULT); writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_TIER); } @@ -4012,10 +3669,11 @@ static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed, { u16 link_reg = 0; - 
struct net_device *netdev = pdata->netdev; - if (netdev->base_addr) { - link_reg = - (u16)(*((u32 *)(netdev->base_addr + MGMT_EPHY_CTRL))); + (void) link_up_wait_to_complete; + if (pdata->base_mem) { + link_reg = (u16)readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); + + pdata->phy_duplex = !!(link_reg&0x4);//need check /* * check register address 0x1004 @@ -4025,6 +3683,7 @@ static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed, * b[1] ephy_link * b[0] ephy_reset. should be set to 1 before use phy. */ + *link_up = false; if (link_reg & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) { if (link_up) { @@ -4038,11 +3697,11 @@ static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed, MGMT_EPHY_CTRL_STA_SPEED_MASK) >> MGMT_EPHY_CTRL_STA_SPEED_POS; } else { - DPRINTK("fxgmac_check_phy_link ethernet PHY not released.\n"); + DPRINTK("fxgmac_check_phy_link ethernet PHY not released link reg %d.\n", link_reg); return -1; } } else { - DPRINTK("fxgmac_check_phy_link null base addr err\n"); + DPRINTK("fxgmac_check_phy_link null base addr err link reg %d\n", link_reg); return -1; } @@ -4083,14 +3742,16 @@ static int fxgmac_write_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, busy--; } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); - DPRINTK("fxgmac_write_ephy_reg id %d %s, ctrl=0x%08x, data=0x%08x\n", - reg_id, (regval & 0x1) ? "err" : "ok", regval, data); + DPRINTK("fxgmac_write_ephy_reg id %d,", reg_id); + DPRINTK(" %s,", (regval & 0x1)?"err" : "ok"); + DPRINTK(" ctrl=0x%08x,", regval); + DPRINTK(" data=0x%08x\n", data); - return (regval & MAC_MDIO_ADDRESS_BUSY) ? -1 : 0; /* -1 indicates err */ + return (regval & MAC_MDIO_ADDRESS_BUSY) ? -ETIMEDOUT : 0; //-1 indicates err } static int fxgmac_read_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, - u32 *data) + u32 __far *data) { u32 regval = 0, regret; u32 mdioctrl = reg_id * 0x10000 + 0x800020d; @@ -4106,14 +3767,20 @@ static int fxgmac_read_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, if (0 == (regval & MAC_MDIO_ADDRESS_BUSY)) { regret = readreg(pdata->pAdapter, pdata->mac_regs + MAC_MDIO_DATA); - if (data) - *data = regret; - return regret; + if (data) { + *data = regret; + //DPRINTK("fxgmac_read_ephy_reg ok, reg=0x%02x, ctrl=0x%08x, data=0x%08x\n", reg_id, regval, *data); + return 0; + } else { + return -ENOBUFS; + } } - DPRINTK("fxgmac_read_ephy_reg id=0x%02x err, busy=%d, ctrl=0x%08x.\n", - reg_id, busy, regval); - return -1; + DPRINTK("fxgmac_read_ephy_reg id=0x%02x err,", reg_id); + DPRINTK(" busy=%d,", busy); + DPRINTK(" ctrl=0x%08x\n", regval); + + return -ETIMEDOUT; } static int fxgmac_write_ephy_mmd_reg(struct fxgmac_pdata *pdata, u32 reg_id, @@ -4133,19 +3800,25 @@ static int fxgmac_write_ephy_mmd_reg(struct fxgmac_pdata *pdata, u32 reg_id, busy--; } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); - DPRINTK("fxgmac_write_ephy_mmd_reg id %d mmd %d %s, ctrl=0x%08x, data=0x%08x\n", - reg_id, mmd, (regval & 0x1) ? "err" : "ok", regval, data); + DPRINTK("fxgmac_write_ephy_mmd_reg id %d,", reg_id); + DPRINTK(" mmd %d,", mmd); + DPRINTK(" %s,", (regval & 0x1) ? "err" : "ok"); + DPRINTK(" ctrl=0x%08x,", regval); + DPRINTK(" data=0x%08x\n", data); return (regval & MAC_MDIO_ADDRESS_BUSY) ? 
-1 : 0; /* -1 indicates err */ } static void fxgmac_config_flow_control(struct fxgmac_pdata *pdata) { +#ifndef FXGMAC_NOT_REPORT_PHY_FC_CAPABILITY u32 regval = 0; +#endif fxgmac_config_tx_flow_control(pdata); fxgmac_config_rx_flow_control(pdata); +#ifndef FXGMAC_NOT_REPORT_PHY_FC_CAPABILITY fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); /* set auto negotiation advertisement pause ability */ if (pdata->tx_pause || pdata->rx_pause) { @@ -4169,24 +3842,27 @@ static void fxgmac_config_flow_control(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, 1); fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +#endif } static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, struct fxphy_ag_adv phy_ag_adv) { - u32 regval = 0, ret = 0; + u32 regval = 0; + int ret = 0; if (phy_ag_adv.auto_neg_en) { fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, 1); - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); } else { fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, 0); - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); } + ret = fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + if (ret < 0) + return ret; fxgmac_read_ephy_reg(pdata, REG_MII_CTRL1000, ®val); if (phy_ag_adv.full_1000m) { @@ -4207,7 +3883,9 @@ static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, PHY_MII_CTRL1000_1000HALF_POS, PHY_MII_CTRL1000_1000HALF_LEN, 0); } - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_CTRL1000, regval); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_CTRL1000, regval); + if (ret < 0) + return ret; fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); @@ -4248,12 +3926,15 @@ static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, PHY_MII_ADVERTISE_10HALF_LEN, 0); } - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + if (ret < 0) + return ret; + /* after change the auto negotiation advertisement need to soft reset */ fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, 1); - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); return ret; } @@ -4420,11 +4101,16 @@ void fxgmac_release_phy(struct fxgmac_pdata *pdata) /* led index use bit0~bit5 */ value = FXGMAC_GET_REG_BITS(value, EFUSE_LED_POS, EFUSE_LED_LEN); fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG2); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ANALOG_CFG2_LED_VALUE); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG8); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ANALOG_CFG8_LED_VALUE); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, REG_MII_EXT_ANALOG_CFG2_VALUE); + + cfg_r32(pdata, REG_PCI_SUB_VENDOR_ID, &value); + if (AISTONEID_137D1D05_ADJUST_SI == value) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG8); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, REG_MII_EXT_ANALOG_CFG8_137D1D05_VALUE); + } else { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG8); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, REG_MII_EXT_ANALOG_CFG8_VALUE); + } if (EFUSE_LED_COMMON_SOLUTION != value) { fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, @@ -4748,149 
+4434,10 @@ static void fxgmac_config_led_under_disable(struct fxgmac_pdata *pdata) } } -extern void fxgmac_diag_get_rx_info(struct fxgmac_channel *channel); - -static int fxgmac_dev_read(struct fxgmac_channel *channel) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_ring *ring = channel->rx_ring; - struct net_device *netdev = pdata->netdev; - struct fxgmac_desc_data *desc_data; - struct fxgmac_dma_desc *dma_desc; - struct fxgmac_pkt_info *pkt_info; - unsigned int err, etlt, l34t; - - static unsigned int cnt_incomplete; - - desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); - dma_desc = desc_data->dma_desc; - pkt_info = &ring->pkt_info; - - /* Check for data availability */ - if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, - RX_NORMAL_DESC3_OWN_LEN)) { - return 1; - } - - /* Make sure descriptor fields are read after reading the OWN bit */ - dma_rmb(); - - if (netif_msg_rx_status(pdata)) - fxgmac_dump_rx_desc(pdata, ring, ring->cur); - - if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, - RX_NORMAL_DESC3_CTXT_LEN)) { - /* Timestamp Context Descriptor */ - fxgmac_get_rx_tstamp(pkt_info, dma_desc); - - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, - RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, - RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); - if (netif_msg_rx_status(pdata)) - DPRINTK("dev_read context desc, ch=%s\n", channel->name); - return 0; - } - - /* Normal Descriptor, be sure Context Descriptor bit is off */ - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, - RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0); - - /* Get the header length */ - if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, - RX_NORMAL_DESC3_FD_LEN)) { - desc_data->rx.hdr_len = FXGMAC_GET_REG_BITS_LE( - dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, - RX_NORMAL_DESC2_HL_LEN); - if (desc_data->rx.hdr_len) - pdata->stats.rx_split_header_packets++; - } - l34t = 0; - - /* Get the pkt_info length */ - desc_data->rx.len = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_PL_POS, - RX_NORMAL_DESC3_PL_LEN); - - if (!FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS, - RX_NORMAL_DESC3_LD_LEN)) { - /* Not all the data has been transferred for this pkt_info */ - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, - RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); - cnt_incomplete++; - if ((cnt_incomplete < 2) && netif_msg_rx_status(pdata)) - DPRINTK("dev_read NOT last desc, pkt incomplete yet,%u\n", - cnt_incomplete); - - return 0; - } - if ((cnt_incomplete) && netif_msg_rx_status(pdata)) - DPRINTK("dev_read rx back to normal and incomplete cnt=%u\n", - cnt_incomplete); - cnt_incomplete = 0; /* when back to normal, reset cnt */ - - /* This is the last of the data for this pkt_info */ - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, - RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); - - /* Set checksum done indicator as appropriate */ - if (netdev->features & NETIF_F_RXCSUM) - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, - RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1); - - /* Check for errors (only valid in last descriptor) */ - err = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, 
- RX_NORMAL_DESC3_ES_LEN); - etlt = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, - RX_NORMAL_DESC3_ETLT_LEN); - if ((err) && netif_msg_rx_status(pdata)) { - DPRINTK("dev_read:head_len=%u, pkt_len=%u, err=%u, etlt=%#x, desc2=0x%08x, desc3=0x%08x\n", - desc_data->rx.hdr_len, desc_data->rx.len, err, etlt, - dma_desc->desc2, dma_desc->desc3); - } - - if (!err || !etlt) { - /* No error if err is 0 or etlt is 0 */ - if ((etlt == 0x4 /*yzhang changed to 0x4, 0x09*/) && - (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, - RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); - pkt_info->vlan_ctag = FXGMAC_GET_REG_BITS_LE( - dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, - RX_NORMAL_DESC0_OVT_LEN); - netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", - pkt_info->vlan_ctag); - } - } else { - if (etlt == 0x05 || etlt == 0x06) - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, - RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 0); - else - pkt_info->errors = FXGMAC_SET_REG_BITS( - pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS, - RX_PACKET_ERRORS_FRAME_LEN, 1); - } - - return 0; -} - static int fxgmac_enable_int(struct fxgmac_channel *channel, enum fxgmac_int int_id) { - unsigned int dma_ch_ier; + u32 dma_ch_ier; dma_ch_ier = readreg(channel->pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_IER)); @@ -4952,7 +4499,7 @@ static int fxgmac_enable_int(struct fxgmac_channel *channel, static int fxgmac_disable_int(struct fxgmac_channel *channel, enum fxgmac_int int_id) { - unsigned int dma_ch_ier; + u32 dma_ch_ier; dma_ch_ier = readreg(channel->pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_IER)); @@ -5014,7 +4561,7 @@ static int fxgmac_disable_int(struct fxgmac_channel *channel, static int fxgmac_dismiss_DMA_int(struct fxgmac_channel *channel, int int_id) { - unsigned int dma_ch_ier; + u32 dma_ch_ier; int_id = int_id; dma_ch_ier = readreg(channel->pdata->pAdapter, @@ -5027,8 +4574,8 @@ static int fxgmac_dismiss_DMA_int(struct fxgmac_channel *channel, int int_id) static void fxgmac_dismiss_MTL_Q_int(struct fxgmac_pdata *pdata) { - unsigned int q_count, i; - unsigned int mtl_q_isr; + unsigned int i; + u32 mtl_q_isr, q_count; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { @@ -5085,12 +4632,7 @@ static int fxgmac_dismiss_MAC_DBG_int(struct fxgmac_pdata *pdata) int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) { struct fxgmac_channel *channel; - unsigned int i, regval; - struct net_device *netdev = pdata->netdev; - - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac_dismiss_all_int callin\n"); - } + unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -5102,11 +4644,10 @@ int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) fxgmac_dismiss_MAC_LPI_int(pdata); fxgmac_dismiss_MAC_DBG_int(pdata); - /* control module int to PCIe slot */ - if (netdev->base_addr) { - regval = (unsigned int)(*( - (u32 *)(netdev->base_addr + MGMT_INT_CTRL0))); + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac_dismiss_all_int callin %d\n", i); } + return 0; } @@ -5116,11 +4657,16 @@ static void fxgmac_set_interrupt_moderation(struct fxgmac_pdata *pdata) pdata->intr_mod_timer = INT_MOD_IN_US; - time = (pdata->intr_mod) ? pdata->intr_mod_timer : 0; +#if defined(FXGMAC_INTERRUPT_TX_INTERVAL) time = (pdata->intr_mod) ? pdata->tx_usecs : 0; +#else + time = (pdata->intr_mod) ? 
pdata->intr_mod_timer : 0; +#endif value = FXGMAC_SET_REG_BITS(value, INT_MOD_TX_POS, INT_MOD_TX_LEN, time); +#if defined(FXGMAC_INTERRUPT_RX_INTERVAL) time = (pdata->intr_mod) ? pdata->rx_usecs : 0; +#endif value = FXGMAC_SET_REG_BITS(value, INT_MOD_RX_POS, INT_MOD_RX_LEN, time); writereg(pdata->pAdapter, value, pdata->base_mem + INT_MOD); @@ -5145,10 +4691,11 @@ static void fxgmac_disable_msix_interrupt(struct fxgmac_pdata *pdata) MSIX_TBL_MASK_OFFSET + intid * 16); } } -static void fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) +static int fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) { u32 intid, regval = 0; -#if !(FUXI_EPHY_INTERRUPT_D0_OFF) + int ret = 0; +#if !(FXGMAC_EPHY_INTERRUPT_D0_OFF) struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; #endif @@ -5160,17 +4707,21 @@ static void fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, 0, pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + MSI_ID_PHY_OTHER * 16); -#if !(FUXI_EPHY_INTERRUPT_D0_OFF) +#if !(FXGMAC_EPHY_INTERRUPT_D0_OFF) hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); /* clear phy interrupt */ regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, PHY_INT_MASK_LINK_UP_LEN, 1); regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, PHY_INT_MASK_LINK_DOWN_LEN, 1); - hw_ops->write_ephy_reg( + ret = hw_ops->write_ephy_reg( pdata, REG_MII_INT_MASK, regval); /* enable phy interrupt ASIC bit10 linkup bit11 linkdown */ + return ret; +#else + return 0; #endif + } static void fxgmac_enable_msix_one_interrupt(struct fxgmac_pdata *pdata, u32 intid) @@ -5212,8 +4763,8 @@ static int fxgmac_flush_tx_queues(struct fxgmac_pdata *pdata) MTL_Q_TQOMR_FTQ_LEN, 1); writereg(pdata->pAdapter, regval, FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); - DPRINTK("fxgmac_flush_tx_queues, reg=0x%p, val=0x%08x\n", - FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + DPRINTK("fxgmac_flush_tx_queues, reg=0x%p,", FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + DPRINTK(" val=0x%08x\n", regval); } for (i = 0; i < pdata->tx_q_count; i++) { @@ -5227,8 +4778,8 @@ static int fxgmac_flush_tx_queues(struct fxgmac_pdata *pdata) MTL_Q_TQOMR_FTQ_LEN); } while (--count && regval); - DPRINTK("fxgmac_flush_tx_queues wait... reg=0x%p, val=0x%08x\n", - FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + DPRINTK("fxgmac_flush_tx_queues wait... reg=0x%p,", FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + DPRINTK(" ... 
val=0x%08x\n", regval); if (regval) { /*(!count)*/ return -EBUSY; } @@ -5241,10 +4792,18 @@ static void fxgmac_config_dma_bus(struct fxgmac_pdata *pdata) { u32 regval; + //set no fix burst length regval = readreg(pdata->pAdapter, pdata->mac_regs + DMA_SBMR); /* Set enhanced addressing mode */ regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, DMA_SBMR_EAME_LEN, 1); + + /* Out standing read/write requests*/ + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_RD_OSR_LMT_POS, + DMA_SBMR_RD_OSR_LMT_LEN, 0x7); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_WR_OSR_LMT_POS, + DMA_SBMR_WR_OSR_LMT_LEN, 0x7); + /* Set the System Bus mode */ regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_FB_POS, DMA_SBMR_FB_LEN, 0); @@ -5261,7 +4820,8 @@ static void fxgmac_config_dma_bus(struct fxgmac_pdata *pdata) static void fxgmac_legacy_link_speed_setting(struct fxgmac_pdata *pdata) { - unsigned int i = 0, regval = 0; + unsigned int i = 0; + u32 regval = 0; fxgmac_phy_config(pdata); for (i = 0, regval = fxgmac_get_ephy_state(pdata); @@ -5275,121 +4835,111 @@ static void fxgmac_legacy_link_speed_setting(struct fxgmac_pdata *pdata) NULL); /* clear phy interrupt. */ } -static void fxgmac_pre_powerdown(struct fxgmac_pdata *pdata, bool phyloopback) +#if defined(FXGMAC_FIX_SHUT_DOWN_ISSUE) +static void fxgmac_link_speed_down_fix_shutdown_issue(struct fxgmac_pdata *pdata) { + LONGLONG tick_interval; + ULONG tick_inc; + LARGE_INTEGER tick_count; + unsigned int i = 0; unsigned int regval = 0; + if ((ULONG)pdata->phy_speed != ((PMP_ADAPTER)pdata->pAdapter)->usLinkSpeed) { + DbgPrintF(MP_TRACE, "%s change phy speed", __FUNCTION__); + pdata->phy_speed = ((PMP_ADAPTER)pdata->pAdapter)->usLinkSpeed; + + if (((PMP_ADAPTER)pdata->pAdapter)->RegParameter.LinkChgWol) { + fxgmac_phy_config(pdata); + //sleep fixed value(6s) + for (i = 0; i < PHY_LINK_TIMEOUT; i++) { + usleep_range_ex(pdata->pAdapter, 2000, 2000); + } + + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); // clear phy interrupt. 
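The else branch below turns the elapsed tick count into microseconds before comparing it against RESUME_MAX_TIME: KeQueryTimeIncrement() reports the tick length in 100 ns units, so multiplying the tick delta by that increment and dividing by 10 yields microseconds. A self-contained sketch of the same conversion with made-up example values (none of these numbers come from the driver):

/* Illustrative sketch of the tick -> microseconds conversion used below. */
#include <stdio.h>

int main(void)
{
	long long tick_delta = 320;    /* example: ticks elapsed since D0 entry */
	long long tick_inc = 156250;   /* example: 15.625 ms tick, in 100 ns units */
	long long interval_us = tick_delta * tick_inc / 10;

	printf("%lld us elapsed\n", interval_us);  /* 5000000 us, i.e. 5 s */
	return 0;
}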
+ } else { + regval = fxgmac_get_ephy_state(pdata); + KeQueryTickCount(&tick_count); + tick_inc = KeQueryTimeIncrement(); + tick_interval = tick_count.QuadPart - ((PMP_ADAPTER)pdata->pAdapter)->D0_entry_tick_count.QuadPart; + tick_interval *= tick_inc; + tick_interval /= 10; + + /*DbgPrintF(MP_TRACE, "base tick %lld", ((PMP_ADAPTER)pdata->pAdapter)->D0_entry_tick_count.QuadPart); + DbgPrintF(MP_TRACE, "current tick %lld", tick_count.QuadPart); + DbgPrintF(MP_TRACE, "tick inc is %u", tick_inc); + DbgPrintF(MP_TRACE, "tick_interval is %lld", tick_interval); + DbgPrintF(MP_TRACE, "regval is 0x%x", regval);*/ + if (((regval & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) && (regval & MGMT_EPHY_CTRL_STA_EPHY_LINKUP)) + || ((regval & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) && !(regval & MGMT_EPHY_CTRL_STA_EPHY_LINKUP) && (tick_interval < RESUME_MAX_TIME)) + ) { + fxgmac_legacy_link_speed_setting(pdata); + } + } + } +} +#endif + +static void fxgmac_pre_powerdown(struct fxgmac_pdata *pdata, bool phyloopback) +{ + u32 regval = 0; + int speed = SPEED_10; +#ifdef FXGMAC_LINK_SPEED_CHECK_PHY_LINK + int link; +#endif + speed = speed; fxgmac_disable_rx(pdata); /* HERE, WE NEED TO CONSIDER PHY CONFIG...TBD */ - DPRINTK("fxgmac_config_powerdown, phy and mac status update\n"); - /* for phy cable loopback, it can't configure phy speed, it will cause os resume again by link change although it has finished speed setting, */ + DPRINTK("fxgmac_config_powerdown, phy and mac status update speed %d\n", speed); + //2022-11-09 xiaojiang comment + //for phy cable loopback,it can't configure phy speed, it will cause os resume again by link change although it has finished speed setting, if (!phyloopback) { - /* When the Linux platform enters the s4 state, it goes through - * the suspend->resume->suspend process. The process of - * suspending again after resume is fast, and PHY - * auto-negotiation is not yet complete, so the - * auto-negotiation of PHY must be carried out again. When the - * Linux platform enters the s4 state, force speed to 10M. - */ - pdata->phy_speed = SPEED_10; + fxgmac_read_ephy_reg(pdata, REG_MII_LPA, ®val); + if (!FXGMAC_GET_REG_BITS(regval, PHY_MII_LINK_PARNTNER_10FULL_POS, PHY_MII_LINK_PARNTNER_10FULL_LEN) + && !FXGMAC_GET_REG_BITS(regval, PHY_MII_LINK_PARNTNER_10HALF_POS, PHY_MII_LINK_PARNTNER_10HALF_LEN)) { + #if defined(FXGMAC_LINK_SPEED_NOT_USE_LOCAL_VARIABLE) + if (SPEED_10 == ((PMP_ADAPTER)pdata->pAdapter)->usLinkSpeed) { + ((PMP_ADAPTER)pdata->pAdapter)->usLinkSpeed = SPEED_100; + } + #else + speed = SPEED_100; + #endif + } + + #if defined(FXGMAC_FIX_SHUT_DOWN_ISSUE) + fxgmac_link_speed_down_fix_shutdown_issue(pdata); + #elif defined(FXGMAC_LINK_SPEED_CHECK_PHY_LINK) + /* + When the Linux platform enters the s4 state, it goes through the suspend->resume->suspend process. + The process of suspending again after resume is fast, and PHY auto-negotiation is not yet complete, + so the auto-negotiation of PHY must be carried out again.Windows platforms and UEFI platforms do + not need to auto-negotiate again, as they will not have such a process. + When the Linux platform enters the s4 state, force speed to 10M. 
+ */ + regval = fxgmac_get_ephy_state(pdata); + link = FXGMAC_GET_REG_BITS(regval, MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); + if (link && (speed != pdata->phy_speed)) { + pdata->phy_speed = speed; + fxgmac_legacy_link_speed_setting(pdata); + } + #else fxgmac_legacy_link_speed_setting(pdata); + #endif } fxgmac_config_mac_speed(pdata); - /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and - * lead to panic sometimes. So we should disable it from powerup, - * enable it from power down. - */ + /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and lead to panic sometimes. + So we should disable it from powerup, enable it from power down.*/ regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + OOB_WOL_CTRL); - regval = FXGMAC_SET_REG_BITS(regval, OOB_WOL_CTRL_DIS_POS, - OOB_WOL_CTRL_DIS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, OOB_WOL_CTRL_DIS_POS, OOB_WOL_CTRL_DIS_LEN, 0); writereg(pdata->pAdapter, regval, pdata->base_mem + OOB_WOL_CTRL); usleep_range_ex(pdata->pAdapter, 2000, 2000); /* after enable OOB_WOL, recofigure mac addr again */ fxgmac_set_mac_address(pdata, pdata->mac_addr); -} - -/* only supports four patterns, and patterns will be cleared on every call */ -static void fxgmac_set_pattern_data(struct fxgmac_pdata *pdata) -{ - u32 ip_addr, i = 0; - u8 type_offset, op_offset, tip_offset; - struct pattern_packet packet; - struct wol_bitmap_pattern - pattern[4]; /* for WAKE_UCAST, WAKE_BCAST, WAKE_MCAST, WAKE_ARP. */ - - memset(pattern, 0, sizeof(struct wol_bitmap_pattern) * 4); - - /* config ucast */ - if (pdata->expansion.wol & WAKE_UCAST) { - pattern[i].mask_info[0] = 0x3F; - pattern[i].mask_size = sizeof(pattern[0].mask_info); - memcpy(pattern[i].pattern_info, pdata->mac_addr, ETH_ALEN); - pattern[i].pattern_offset = 0; - i++; - } - - /* config bcast */ - if (pdata->expansion.wol & WAKE_BCAST) { - pattern[i].mask_info[0] = 0x3F; - pattern[i].mask_size = sizeof(pattern[0].mask_info); - memset(pattern[i].pattern_info, 0xFF, ETH_ALEN); - pattern[i].pattern_offset = 0; - i++; - } - - /* config mcast */ - if (pdata->expansion.wol & WAKE_MCAST) { - pattern[i].mask_info[0] = 0x7; - pattern[i].mask_size = sizeof(pattern[0].mask_info); - pattern[i].pattern_info[0] = 0x1; - pattern[i].pattern_info[1] = 0x0; - pattern[i].pattern_info[2] = 0x5E; - pattern[i].pattern_offset = 0; - i++; - } - - /* config arp */ - if (pdata->expansion.wol & WAKE_ARP) { - memset(pattern[i].mask_info, 0, sizeof(pattern[0].mask_info)); - type_offset = offsetof(struct pattern_packet, ar_pro); - pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; - type_offset++; - pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; - op_offset = offsetof(struct pattern_packet, ar_op); - pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; - op_offset++; - pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; - tip_offset = offsetof(struct pattern_packet, ar_tip); - pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; - tip_offset++; - pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; - tip_offset++; - pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; - tip_offset++; - pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; - - packet.ar_pro = - 0x0 << 8 | - 0x08; /* arp type is 0x0800, notice that ar_pro and ar_op is big endian */ - packet.ar_op = - 0x1 - << 8; /* 1 is arp request,2 is arp replay, 3 is rarp request, 4 is rarp replay */ - ip_addr = fxgmac_get_netdev_ip4addr(pdata); - 
packet.ar_tip[0] = ip_addr & 0xFF; - packet.ar_tip[1] = (ip_addr >> 8) & 0xFF; - packet.ar_tip[2] = (ip_addr >> 16) & 0xFF; - packet.ar_tip[3] = (ip_addr >> 24) & 0xFF; - memcpy(pattern[i].pattern_info, &packet, MAX_PATTERN_SIZE); - pattern[i].mask_size = sizeof(pattern[0].mask_info); - pattern[i].pattern_offset = 0; - i++; - } - - fxgmac_set_wake_pattern(pdata, pattern, i); + //fxgmac_suspend_clock_gate(pdata); } static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, @@ -5432,13 +4982,6 @@ static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); } - if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { - netdev_err( - pdata->netdev, - "fxgmac powerstate is %lu when config power to down.\n", - pdata->expansion.powerstate); - } - #if FXGMAC_WOL_FEATURE_ENABLED fxgmac_config_wol(pdata, wol); #endif @@ -5455,12 +4998,24 @@ static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, fxgmac_enable_ns_offload(pdata); #endif +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED + fxgmac_enable_wake_packet_indication(pdata, 1); +#endif + /* Enable MAC Rx TX */ +#ifdef FXGMAC_WOL_INTEGRATED_WOL_PARAMETER if (1) { +#else + if (magic_en || remote_pattern_en || offloadcount) { +#endif regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 1); +#if defined(FXGMAC_AOE_FEATURE_ENABLED) || defined(FXGMAC_NS_OFFLOAD_ENABLED) if (pdata->hw_feat.aoe) { +#else + if (offloadcount) { +#endif regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 1); } @@ -5480,16 +5035,10 @@ static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, MAC_PMT_STA_PWRDWN_LEN, 1); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); - /* adjust sigdet threshold - * redmine.motor-comm.com/issues/5093 - * fix issue can not wake up os on some FT-D2000 platform, y - * this modification is only temporarif it is 55mv, wol maybe failed. - */ - regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET); - regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, MGMT_SIGDET_LEN, - MGMT_SIGDET_40MV); + regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, MGMT_SIGDET_LEN, MGMT_SIGDET_55MV); writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_SIGDET); + DPRINTK("fxgmac_config_powerdown callout, reg=0x%08x\n", regval); } @@ -5497,13 +5046,6 @@ static void fxgmac_config_powerup(struct fxgmac_pdata *pdata) { u32 regval = 0; - if (test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { - netdev_err( - pdata->netdev, - "fxgmac powerstate is %lu when config power to up.\n", - pdata->expansion.powerstate); - } - /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and lead to panic sometimes. * So we should disable it from powerup, enable it from power down. 
*/ @@ -5611,7 +5153,7 @@ static unsigned char fxgmac_suspend_int(void *context) { /* ULONG_PTR addr; */ u32 intid; -#if FUXI_EPHY_INTERRUPT_D0_OFF +#if FXGMAC_EPHY_INTERRUPT_D0_OFF u32 regval = 0; #endif u32 val_mgmt_intcrtl0; @@ -5644,7 +5186,7 @@ static unsigned char fxgmac_suspend_int(void *context) /* since Msix interrupt masked now, enable EPHY interrupt for case of link change wakeup */ fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); /* clear phy interrupt */ -#if FUXI_EPHY_INTERRUPT_D0_OFF +#if FXGMAC_EPHY_INTERRUPT_D0_OFF regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, PHY_INT_MASK_LINK_UP_LEN, 1); regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, @@ -5683,7 +5225,7 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) DMA_CH_TCR_ST_LEN, 0); writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_TCR)); - DBGPRINT(MP_TRACE, (" %s disable tx dma", __FUNCTION__)); + DBGPRINT(MP_TRACE, (" disable channel %d tx dma", i)); } do { @@ -5695,8 +5237,7 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) if (0 != (regval & MAC_DBG_STA_TX_BUSY)) { regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); - DbgPrintF(MP_WARN, - "warning !!!timed out waiting for Tx MAC to stop\n"); + DbgPrintF(MP_WARN, "warning !!!timed out waiting for Tx MAC to stop regval %x\n", regval); return -1; } /* wait empty Tx queue */ @@ -5740,7 +5281,7 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) DMA_CH_RCR_SR_LEN, 0); writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); - DBGPRINT(MP_TRACE, (" %s disable rx dma", __FUNCTION__)); + DBGPRINT(MP_TRACE, (" disable channel %d rx dma", i)); } return 0; } @@ -5771,7 +5312,7 @@ static void fxgmac_resume_int(struct fxgmac_pdata *pdata) MSIX_TBL_MASK_OFFSET + intid * 16); } -#if FUXI_EPHY_INTERRUPT_D0_OFF +#if FXGMAC_EPHY_INTERRUPT_D0_OFF fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, 0x0); /* disable phy interrupt */ fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, @@ -5786,6 +5327,16 @@ static void fxgmac_resume_int(struct fxgmac_pdata *pdata) #endif } +static void fxgmac_config_wol_wait_time(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_WAIT_TIME_POS, WOL_WAIT_TIME_LEN, + FXGMAC_WOL_WAIT_TIME); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); +} + static int fxgmac_hw_init(struct fxgmac_pdata *pdata) { struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; @@ -5793,16 +5344,16 @@ static int fxgmac_hw_init(struct fxgmac_pdata *pdata) u32 regval = 0; if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac hw init call in\n"); + DPRINTK("fxgmac hw init call in regval %x\n", regval); } /* Flush Tx queues */ ret = fxgmac_flush_tx_queues(pdata); if (ret) { - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac_hw_init call flush tx queue err.\n"); - } - return ret; +#ifdef FXGMAC_FLUSH_TX_CHECK_ENABLED + dev_err(pdata->dev, "fxgmac_hw_init call flush tx queue err.\n"); + return ret; +#endif } /* Initialize DMA related features */ @@ -5817,7 +5368,6 @@ static int fxgmac_hw_init(struct fxgmac_pdata *pdata) fxgmac_config_tso_mode(pdata); fxgmac_config_sph_mode(pdata); fxgmac_config_rss(pdata); - fxgmac_config_wol(pdata, pdata->expansion.wol); desc_ops->tx_desc_init(pdata); desc_ops->rx_desc_init(pdata); @@ -5849,6 +5399,8 @@ static int fxgmac_hw_init(struct fxgmac_pdata *pdata) fxgmac_config_mmc(pdata); fxgmac_enable_mac_interrupts(pdata); + fxgmac_config_wol_wait_time(pdata); + 
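fxgmac_config_wol_wait_time() added above follows the driver's usual read-modify-write idiom: read WOL_CTL, replace only the WOL_WAIT_TIME field, write the register back. The helper below is an assumed, illustration-only equivalent of what a FXGMAC_SET_REG_BITS-style field update does; the real macro lives in the driver headers and may differ in detail:

/* Assumed equivalent of a FXGMAC_SET_REG_BITS field update, for illustration. */
#include <stdio.h>

static unsigned int set_reg_bits(unsigned int var, unsigned int pos,
				 unsigned int len, unsigned int val)
{
	unsigned int mask = ((1u << len) - 1) << pos;

	return (var & ~mask) | ((val << pos) & mask);
}

int main(void)
{
	unsigned int reg = 0xA5A5A5A5;

	/* example only: write 0x7 into a 4-bit field at bit position 8 */
	reg = set_reg_bits(reg, 8, 4, 0x7);
	printf("0x%08X\n", reg);   /* prints 0xA5A5A7A5 */
	return 0;
}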
/* enable EPhy link change interrupt */ fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); /* clear phy interrupt */ @@ -5860,7 +5412,7 @@ static int fxgmac_hw_init(struct fxgmac_pdata *pdata) regval); /* enable phy interrupt */ if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac hw init callout\n"); + DPRINTK("fxgmac hw init callout %x\n", regval); } return 0; } @@ -5872,6 +5424,29 @@ static void fxgmac_save_nonstick_reg(struct fxgmac_pdata *pdata) pdata->reg_nonstick[(i - REG_PCIE_TRIGGER) >> 2] = readreg(pdata->pAdapter, pdata->base_mem + i); } + cfg_r32(pdata, REG_PCI_COMMAND, &pdata->expansion.cfg_pci_cmd); + cfg_r32(pdata, REG_CACHE_LINE_SIZE, &pdata->expansion.cfg_cache_line_size); + cfg_r32(pdata, REG_MEM_BASE, &pdata->expansion.cfg_mem_base); + cfg_r32(pdata, REG_MEM_BASE_HI, &pdata->expansion.cfg_mem_base_hi); + cfg_r32(pdata, REG_IO_BASE, &pdata->expansion.cfg_io_base); + cfg_r32(pdata, REG_INT_LINE, &pdata->expansion.cfg_int_line); + cfg_r32(pdata, REG_DEVICE_CTRL1, &pdata->expansion.cfg_device_ctrl1); + cfg_r32(pdata, REG_PCI_LINK_CTRL, &pdata->expansion.cfg_pci_link_ctrl); + cfg_r32(pdata, REG_DEVICE_CTRL2, &pdata->expansion.cfg_device_ctrl2); + cfg_r32(pdata, REG_MSIX_CAPABILITY, &pdata->expansion.cfg_msix_capability); + + DbgPrintF(MP_TRACE, "%s:\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\n", + __FUNCTION__, + REG_PCI_COMMAND, pdata->expansion.cfg_pci_cmd, + REG_CACHE_LINE_SIZE, pdata->expansion.cfg_cache_line_size, + REG_MEM_BASE, pdata->expansion.cfg_mem_base, + REG_MEM_BASE_HI, pdata->expansion.cfg_mem_base_hi, + REG_IO_BASE, pdata->expansion.cfg_io_base, + REG_INT_LINE, pdata->expansion.cfg_int_line, + REG_DEVICE_CTRL1, pdata->expansion.cfg_device_ctrl1, + REG_PCI_LINK_CTRL, pdata->expansion.cfg_pci_link_ctrl, + REG_DEVICE_CTRL2, pdata->expansion.cfg_device_ctrl2, + REG_MSIX_CAPABILITY, pdata->expansion.cfg_msix_capability); } static void fxgmac_restore_nonstick_reg(struct fxgmac_pdata *pdata) @@ -5884,6 +5459,7 @@ static void fxgmac_restore_nonstick_reg(struct fxgmac_pdata *pdata) } } +#if defined(FXGMAC_ESD_RESTORE_PCIE_CFG) static void fxgmac_esd_restore_pcie_cfg(struct fxgmac_pdata *pdata) { cfg_w32(pdata, REG_PCI_COMMAND, pdata->expansion.cfg_pci_cmd); @@ -5899,6 +5475,7 @@ static void fxgmac_esd_restore_pcie_cfg(struct fxgmac_pdata *pdata) cfg_w32(pdata, REG_MSIX_CAPABILITY, pdata->expansion.cfg_msix_capability); } +#endif static int fxgmac_hw_exit(struct fxgmac_pdata *pdata) { @@ -5938,21 +5515,21 @@ static int fxgmac_hw_exit(struct fxgmac_pdata *pdata) return 0; } -static int fxgmac_set_gmac_register(struct fxgmac_pdata *pdata, u8 *address, +static int fxgmac_set_gmac_register(struct fxgmac_pdata *pdata, IOMEM address, unsigned int data) { - if (address < (u8 *)(pdata->base_mem)) { + if (address < pdata->base_mem) { return -1; } writereg(pdata->pAdapter, data, address); return 0; } -static u32 fxgmac_get_gmac_register(struct fxgmac_pdata *pdata, u8 *address) +static u32 fxgmac_get_gmac_register(struct fxgmac_pdata *pdata, IOMEM address) { u32 regval = 0; - if (address > (u8 *)(pdata->base_mem)) { + if (address > pdata->base_mem) { regval = readreg(pdata->pAdapter, address); } return regval; @@ -6052,6 +5629,71 @@ static int fxgmac_pcie_init(struct fxgmac_pdata *pdata, bool ltr_en, return 0; } +static void fxgmac_clear_misc_int_status(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval, i, q_count; + + /* clear phy interrupt 
status */ + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + /* clear other interrupt status of misc interrupt */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_ISR); + if (regval) { + if (regval & (1 << MGMT_MAC_PHYIF_STA_POS)) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_PHYIF_STA); + + if ((regval & (1 << MGMT_MAC_AN_SR0_POS)) || + (regval & (1 << MGMT_MAC_AN_SR0_POS)) || + (regval & (1 << MGMT_MAC_AN_SR0_POS))) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_AN_SR); + + if (regval & (1 << MGMT_MAC_PMT_STA_POS)) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_PMT_STA); + + if (regval & (1 << MGMT_MAC_LPI_STA_POS)) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_LPI_STA); + + if (regval & (1 << MGMT_MAC_MMC_STA_POS)) { + if (regval & (1 << MGMT_MAC_RX_MMC_STA_POS)) + hw_ops->rx_mmc_int(pdata); + + if (regval & (1 << MGMT_MAC_TX_MMC_STA_POS)) + hw_ops->tx_mmc_int(pdata); + + if (regval & (1 << MGMT_MMC_IPCRXINT_POS)) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MMC_IPCRXINT); + + } + + if ((regval & (1 << MGMT_MAC_TX_RX_STA0_POS)) || (regval & (1 << MGMT_MAC_TX_RX_STA1_POS))) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_TX_RX_STA); + + if (regval & (1 << MGMT_MAC_GPIO_SR_POS)) + pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MAC_GPIO_SR); + } + + /* MTL_Interrupt_Status, write 1 clear */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MTL_INT_SR); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + MTL_INT_SR, regval); + + /* MTL_Q(#i)_Interrupt_Control_Status, write 1 clear */ + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MTL_Q_INT_CTL_SR + i * MTL_Q_INC); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + MTL_Q_INT_CTL_SR + i * MTL_Q_INC, regval); + } + + /* MTL_ECC_Interrupt_Status, write 1 clear */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + MTL_ECC_INT_SR); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + MTL_ECC_INT_SR, regval); + + /* DMA_ECC_Interrupt_Status, write 1 clear */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + DMA_ECC_INT_SR); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + DMA_ECC_INT_SR, regval); +} + static void fxgmac_trigger_pcie(struct fxgmac_pdata *pdata, u32 code) { writereg(pdata->pAdapter, code, pdata->base_mem + REG_PCIE_TRIGGER); @@ -6063,7 +5705,9 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->exit = fxgmac_hw_exit; hw_ops->save_nonstick_reg = fxgmac_save_nonstick_reg; hw_ops->restore_nonstick_reg = fxgmac_restore_nonstick_reg; +#if defined(FXGMAC_ESD_RESTORE_PCIE_CFG) hw_ops->esd_restore_pcie_cfg = fxgmac_esd_restore_pcie_cfg; +#endif hw_ops->set_gmac_register = fxgmac_set_gmac_register; hw_ops->get_gmac_register = fxgmac_get_gmac_register; @@ -6074,9 +5718,6 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->enable_rx = fxgmac_enable_rx; hw_ops->disable_rx = fxgmac_disable_rx; hw_ops->enable_channel_rx = fxgmac_enable_channel_rx; - hw_ops->dev_xmit = fxgmac_dev_xmit; - hw_ops->dev_read = fxgmac_dev_read; - hw_ops->config_tso = fxgmac_config_tso_mode; hw_ops->enable_int = fxgmac_enable_int; hw_ops->disable_int = fxgmac_disable_int; hw_ops->set_interrupt_moderation 
= fxgmac_set_interrupt_moderation; @@ -6088,9 +5729,11 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->disable_msix_one_interrupt = fxgmac_disable_msix_one_interrupt; hw_ops->enable_mgm_interrupt = fxgmac_enable_mgm_interrupt; hw_ops->disable_mgm_interrupt = fxgmac_disable_mgm_interrupt; + hw_ops->dismiss_all_int = fxgmac_dismiss_all_int; + hw_ops->clear_misc_int_status = fxgmac_clear_misc_int_status; hw_ops->set_mac_address = fxgmac_set_mac_address; - hw_ops->set_mac_hash = fxgmac_add_mac_addresses; + hw_ops->set_mac_hash = fxgmac_set_mc_addresses; hw_ops->config_rx_mode = fxgmac_config_rx_mode; hw_ops->enable_rx_csum = fxgmac_enable_rx_csum; hw_ops->disable_rx_csum = fxgmac_disable_rx_csum; @@ -6100,17 +5743,10 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->get_xlgmii_phy_status = fxgmac_check_phy_link; /* For descriptor related operation */ - hw_ops->tx_desc_init = fxgmac_tx_desc_init; - hw_ops->rx_desc_init = fxgmac_rx_desc_init; - hw_ops->tx_desc_reset = fxgmac_tx_desc_reset; - hw_ops->rx_desc_reset = fxgmac_rx_desc_reset; hw_ops->is_last_desc = fxgmac_is_last_desc; hw_ops->is_context_desc = fxgmac_is_context_desc; - hw_ops->tx_start_xmit = fxgmac_tx_start_xmit; - hw_ops->set_pattern_data = fxgmac_set_pattern_data; - hw_ops->config_wol = fxgmac_config_wol; - hw_ops->get_rss_hash_key = fxgmac_read_rss_hash_key; - hw_ops->write_rss_lookup_table = fxgmac_write_rss_lookup_table; + + hw_ops->config_tso = fxgmac_config_tso_mode; #if FXGMAC_SANITY_CHECK_ENABLED hw_ops->diag_sanity_check = fxgmac_diag_sanity_check; #endif @@ -6154,6 +5790,7 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->config_tx_pbl_val = fxgmac_config_tx_pbl_val; hw_ops->get_tx_pbl_val = fxgmac_get_tx_pbl_val; hw_ops->config_pblx8 = fxgmac_config_pblx8; + hw_ops->calculate_max_checksum_size = fxgmac_calculate_max_checksum_size; /* For MMC statistics support */ hw_ops->tx_mmc_int = fxgmac_tx_mmc_int; @@ -6167,8 +5804,11 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->set_rss_options = fxgmac_write_rss_options; hw_ops->set_rss_hash_key = fxgmac_set_rss_hash_key; hw_ops->set_rss_lookup_table = fxgmac_set_rss_lookup_table; + hw_ops->get_rss_hash_key = fxgmac_read_rss_hash_key; + hw_ops->write_rss_lookup_table = fxgmac_write_rss_lookup_table; - /*For Offload*/ + /*For Power Management*/ +#if defined(FXGMAC_POWER_MANAGEMENT) hw_ops->set_arp_offload = fxgmac_update_aoe_ipv4addr; hw_ops->enable_arp_offload = fxgmac_enable_arp_offload; hw_ops->disable_arp_offload = fxgmac_disable_arp_offload; @@ -6189,10 +5829,11 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->enable_wake_pattern = fxgmac_enable_wake_pattern; hw_ops->disable_wake_pattern = fxgmac_disable_wake_pattern; hw_ops->set_wake_pattern_mask = fxgmac_set_wake_pattern_mask; -#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED hw_ops->enable_wake_packet_indication = fxgmac_enable_wake_packet_indication; hw_ops->get_wake_packet_indication = fxgmac_get_wake_packet_indication; +#endif #endif /*For phy write /read*/ @@ -6234,6 +5875,10 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->enable_rx_broadcast = fxgmac_enable_rx_broadcast; /* efuse relevant operation. */ + hw_ops->read_patch_from_efuse_per_index = fxgmac_read_patch_from_efuse_per_index; /* read patch per index. 
*/ + hw_ops->read_mac_subsys_from_efuse = fxgmac_read_mac_subsys_from_efuse; + hw_ops->read_efuse_data = fxgmac_efuse_read_data; +#ifndef COMMENT_UNUSED_CODE_TO_REDUCE_SIZE hw_ops->read_patch_from_efuse = fxgmac_read_patch_from_efuse; /* read patch per register. */ hw_ops->read_patch_from_efuse_per_index = @@ -6241,14 +5886,13 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->write_patch_to_efuse = fxgmac_write_patch_to_efuse; hw_ops->write_patch_to_efuse_per_index = fxgmac_write_patch_to_efuse_per_index; - hw_ops->read_mac_subsys_from_efuse = fxgmac_read_mac_subsys_from_efuse; hw_ops->write_mac_subsys_to_efuse = fxgmac_write_mac_subsys_to_efuse; hw_ops->efuse_load = fxgmac_efuse_load; - hw_ops->read_efuse_data = fxgmac_efuse_read_data; hw_ops->write_oob = fxgmac_efuse_write_oob; hw_ops->write_led = fxgmac_efuse_write_led; hw_ops->write_led_config = fxgmac_write_led_setting_to_efuse; hw_ops->read_led_config = fxgmac_read_led_setting_from_efuse; +#endif /* */ hw_ops->pcie_init = fxgmac_pcie_init; diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ioctl.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ioctl.c new file mode 100644 index 0000000000000..e679d8abffbc3 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ioctl.c @@ -0,0 +1,521 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) +{ + unsigned int pktLen = 0; + struct sk_buff *skb; + pfxgmac_test_packet pPkt; + u8 *pTx_data = NULL; + u8 *pSkb_data = NULL; + u32 offload_len = 0; + u8 ipHeadLen, tcpHeadLen, headTotalLen; + static u32 lastGsoSize = 806;//initial default value + //int i = 0; + + /* get fxgmac_test_packet */ + pPkt = (pfxgmac_test_packet)(pcmd_data + sizeof(struct ext_ioctl_data)); + pktLen = pPkt->length; + + /* get pkt data */ + pTx_data = (u8 *)pPkt + sizeof(fxgmac_test_packet); + + /* alloc sk_buff */ + skb = alloc_skb(pktLen, GFP_ATOMIC); + if (!skb) { + DPRINTK("alloc skb fail\n"); + return; + } + + /* copy data to skb */ + pSkb_data = skb_put(skb, pktLen); + memset(pSkb_data, 0, pktLen); + memcpy(pSkb_data, pTx_data, pktLen); + + /* set skb parameters */ + skb->dev = pdata->netdev; + skb->pkt_type = PACKET_OUTGOING; + skb->protocol = ntohs(ETH_P_IP); + skb->no_fcs = 1; + skb->ip_summed = CHECKSUM_PARTIAL; + if (skb->len > 1514) { + /* TSO packet */ + /* set tso test flag */ + pdata->expansion.fxgmac_test_tso_flag = true; + + /* get protocol head length */ + ipHeadLen = (pSkb_data[TEST_MAC_HEAD] & 0xF) * 4; + tcpHeadLen = (pSkb_data[TEST_MAC_HEAD + ipHeadLen + TEST_TCP_HEAD_LEN_OFFSET] >> 4 & 0xF) * 4; + headTotalLen = TEST_MAC_HEAD + ipHeadLen + tcpHeadLen; + offload_len = (pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET] << 8 | + pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET + 1]) & 0xFFFF; + /* set tso skb parameters */ + //skb->ip_summed = CHECKSUM_PARTIAL; + skb->transport_header = ipHeadLen + TEST_MAC_HEAD; + skb->network_header = TEST_MAC_HEAD; + skb->inner_network_header = TEST_MAC_HEAD; + skb->mac_len = TEST_MAC_HEAD; + + /* set skb_shinfo parameters */ + if (tcpHeadLen > TEST_TCP_FIX_HEAD_LEN) { + skb_shinfo(skb)->gso_size = (pSkb_data[TEST_TCP_MSS_OFFSET] << 8 | + pSkb_data[TEST_TCP_MSS_OFFSET + 1]) & 0xFFFF; + } else { + skb_shinfo(skb)->gso_size = 0; + } + if (skb_shinfo(skb)->gso_size != 0) { + lastGsoSize = skb_shinfo(skb)->gso_size; + } else { + skb_shinfo(skb)->gso_size = lastGsoSize; + } + 
//DPRINTK("offload_len is %d, skb_shinfo(skb)->gso_size is %d", offload_len, skb_shinfo(skb)->gso_size); + /* get segment size */ + if (offload_len % skb_shinfo(skb)->gso_size == 0) { + skb_shinfo(skb)->gso_segs = offload_len / skb_shinfo(skb)->gso_size; + pdata->expansion.fxgmac_test_last_tso_len = skb_shinfo(skb)->gso_size + headTotalLen; + } else { + skb_shinfo(skb)->gso_segs = offload_len / skb_shinfo(skb)->gso_size + 1; + pdata->expansion.fxgmac_test_last_tso_len = offload_len % skb_shinfo(skb)->gso_size + headTotalLen; + } + pdata->expansion.fxgmac_test_tso_seg_num = skb_shinfo(skb)->gso_segs; + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + skb_shinfo(skb)->frag_list = NULL; + skb->csum_start = skb_headroom(skb) + TEST_MAC_HEAD + ipHeadLen; + skb->csum_offset = skb->len - TEST_MAC_HEAD - ipHeadLen; + + pdata->expansion.fxgmac_test_packet_len = skb_shinfo(skb)->gso_size + headTotalLen; + } else { + /* set non-TSO packet parameters */ + pdata->expansion.fxgmac_test_packet_len = skb->len; + } + + /* send data */ + if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS) + DPRINTK("xmit data fail \n"); +} + +static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) +{ + unsigned int totalLen = 0; + struct sk_buff *rx_skb; + struct ext_ioctl_data *pcmd; + fxgmac_test_packet pkt; + void *addr = 0; + u8 *rx_data = (u8 *)kzalloc(FXGMAC_MAX_DBG_RX_DATA, GFP_KERNEL); + if (!rx_data) + return; + + /* initial dest data region */ + pcmd = (struct ext_ioctl_data *)pcmd_data; + addr = pcmd->cmd_buf.buf; + while (pdata->expansion.fxgmac_test_skb_arr_in_index != pdata->expansion.fxgmac_test_skb_arr_out_index) { + /* get received skb data */ + rx_skb = pdata->expansion.fxgmac_test_skb_array[pdata->expansion.fxgmac_test_skb_arr_out_index]; + + if (rx_skb->len + sizeof(fxgmac_test_packet) + totalLen < 64000) { + pkt.length = rx_skb->len; + pkt.type = 0x80; + pkt.buf[0].offset = totalLen + sizeof(fxgmac_test_packet); + pkt.buf[0].length = rx_skb->len; + + /* get data from skb */ + //DPRINTK("FXG:rx_skb->len=%d", rx_skb->len); + memcpy(rx_data, rx_skb->data, rx_skb->len); + + /* update next pointer */ + if ((pdata->expansion.fxgmac_test_skb_arr_out_index + 1) % FXGMAC_MAX_DBG_TEST_PKT == pdata->expansion.fxgmac_test_skb_arr_in_index) { + pkt.next = NULL; + } else { + pkt.next = (pfxgmac_test_packet)(addr + totalLen + sizeof(fxgmac_test_packet) + pkt.length); + } + + /* copy data to user space */ + if (copy_to_user((void *)(addr + totalLen), (void *)(&pkt), sizeof(fxgmac_test_packet))) { + DPRINTK("cppy pkt data to user fail..."); + } + //FXGMAC_PR("FXG:rx_skb->len=%d", rx_skb->len); + if (copy_to_user((void *)(addr + totalLen + sizeof(fxgmac_test_packet)), (void *)rx_data, rx_skb->len)) { + DPRINTK("cppy data to user fail..."); + } + + /* update total length */ + totalLen += (sizeof(fxgmac_test_packet) + rx_skb->len); + + /* free skb */ + kfree_skb(rx_skb); + pdata->expansion.fxgmac_test_skb_array[pdata->expansion.fxgmac_test_skb_arr_out_index] = NULL; + + /* update gCurSkbOutIndex */ + pdata->expansion.fxgmac_test_skb_arr_out_index = (pdata->expansion.fxgmac_test_skb_arr_out_index + 1) % FXGMAC_MAX_DBG_TEST_PKT; + } else { + DPRINTK("receive data more receive buffer... \n"); + break; + } + } + + if (rx_data) + kfree(rx_data); +} + +// Based on the current application scenario,we only use CMD_DATA for data. 
+// if you use other struct, you should recalculate in_total_size +long fxgmac_netdev_ops_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + bool ret = true; + int regval = 0; + struct fxgmac_pdata *pdata = file->private_data; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + FXGMAC_PDATA_OF_PLATFORM *ex = &pdata->expansion; + CMD_DATA ex_data; + struct ext_ioctl_data pcmd; + u8 *data = NULL; + u8 *buf = NULL; + int in_total_size, in_data_size, out_total_size; + int ioctl_cmd_size = sizeof(struct ext_ioctl_data); + u8 mac[ETH_ALEN] = {0}; + struct sk_buff *tmpskb; + + if (!arg) { + DPRINTK("[%s] command arg is %lx !\n", __func__, arg); + goto err; + } + + /* check device type */ + if (_IOC_TYPE(cmd) != IOC_MAGIC) { + DPRINTK("[%s] command type [%c] error!\n", __func__, _IOC_TYPE(cmd)); + goto err; + } + + /* check command number*/ + if (_IOC_NR(cmd) > IOC_MAXNR) { + DPRINTK("[%s] command number [%d] exceeded!\n", __func__, _IOC_NR(cmd)); + goto err; + } + + //buf = (u8*)kzalloc(FXGMAC_MAX_DBG_BUF_LEN, GFP_KERNEL); + if (copy_from_user(&pcmd, (void *)arg, ioctl_cmd_size)) { + DPRINTK("copy data from user fail... \n"); + goto err; + } + + in_total_size = pcmd.cmd_buf.size_in; + in_data_size = in_total_size - ioctl_cmd_size; + out_total_size = pcmd.cmd_buf.size_out; + + buf = (u8 *)kzalloc(in_total_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, (void *)arg, in_total_size)) { + DPRINTK("copy data from user fail... \n"); + goto err; + } + data = buf + ioctl_cmd_size; + + if (arg != 0) { + switch (pcmd.cmd_type) { + /* ioctl diag begin */ + case FXGMAC_DFS_IOCTL_DIAG_BEGIN: + DPRINTK("Debugfs received diag begin command.\n"); + if (netif_running(pdata->netdev)) + fxgmac_restart_dev(pdata); + + /* release last loopback test abnormal exit buffer */ + while (ex->fxgmac_test_skb_arr_in_index != + ex->fxgmac_test_skb_arr_out_index) { + tmpskb = ex->fxgmac_test_skb_array[ex->fxgmac_test_skb_arr_out_index]; + if (tmpskb) { + kfree_skb(tmpskb); + ex->fxgmac_test_skb_array[ex->fxgmac_test_skb_arr_out_index] = NULL; + } + + ex->fxgmac_test_skb_arr_out_index = (ex->fxgmac_test_skb_arr_out_index + 1) % FXGMAC_MAX_DBG_TEST_PKT; + } + + /* init loopback test parameters */ + ex->fxgmac_test_skb_arr_in_index = 0; + ex->fxgmac_test_skb_arr_out_index = 0; + ex->fxgmac_test_tso_flag = false; + ex->fxgmac_test_tso_seg_num = 0; + ex->fxgmac_test_last_tso_len = 0; + ex->fxgmac_test_packet_len = 0; + break; + + /* ioctl diag end */ + case FXGMAC_DFS_IOCTL_DIAG_END: + DPRINTK("Debugfs received diag end command.\n"); + if (netif_running(pdata->netdev)) + fxgmac_restart_dev(pdata); + + break; + + /* ioctl diag tx pkt */ + case FXGMAC_DFS_IOCTL_DIAG_TX_PKT: + fxgmac_dbg_tx_pkt(pdata, buf); + break; + + /* ioctl diag rx pkt */ + case FXGMAC_DFS_IOCTL_DIAG_RX_PKT: + fxgmac_dbg_rx_pkt(pdata, buf); + break; + + /* ioctl device reset */ + case FXGMAC_DFS_IOCTL_DEVICE_RESET: + DPRINTK("Debugfs received device reset command.\n"); + if (netif_running(pdata->netdev)) + fxgmac_restart_dev(pdata); + + break; + + case FXGMAC_EFUSE_LED_TEST: + DPRINTK("Debugfs received device led test command.\n"); + memcpy(&pdata->led, data, sizeof(struct led_setting)); + fxgmac_restart_dev(pdata); + break; + + case FXGMAC_EFUSE_UPDATE_LED_CFG: + DPRINTK("Debugfs received device led update command.\n"); + memcpy(&pdata->ledconfig, data, sizeof(struct led_setting)); + ret = hw_ops->write_led_config(pdata); + hw_ops->read_led_config(pdata); + hw_ops->led_under_active(pdata); + break; + + case 
FXGMAC_EFUSE_WRITE_LED: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + DPRINTK("FXGMAC_EFUSE_WRITE_LED, val = 0x%x\n", ex_data.val0); + ret = hw_ops->write_led(pdata, ex_data.val0); + break; + + case FXGMAC_EFUSE_WRITE_OOB: + DPRINTK("FXGMAC_EFUSE_WRITE_OOB.\n"); + ret = hw_ops->write_oob(pdata); + break; + + case FXGMAC_EFUSE_READ_REGIONABC: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_efuse_data(pdata, ex_data.val0, &ex_data.val1); + /* + * DPRINTK("FXGMAC_EFUSE_READ_REGIONABC, address = 0x%x, val = 0x%x\n", + * ex_data.val0, + * ex_data.val1); + */ + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_WRITE_PATCH_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + /* + * DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_REG, address = 0x%x, val = 0x%x\n", + * ex_data.val0, + * ex_data.val1); + */ + ret = hw_ops->write_patch_to_efuse(pdata, ex_data.val0, ex_data.val1); + break; + + case FXGMAC_EFUSE_READ_PATCH_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_patch_from_efuse(pdata, ex_data.val0, &ex_data.val1); + /* + * DPRINTK("FXGMAC_EFUSE_READ_PATCH_REG, address = 0x%x, val = 0x%x\n", + * ex_data.val0, ex_data.val1); + */ + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->write_patch_to_efuse_per_index(pdata, ex_data.val0, + ex_data.val1, + ex_data.val2); + /* DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, index = %d, address = 0x%x, val = 0x%x\n", + * ex_data.val0, ex_data.val1, ex_data.val2); + */ + break; + + case FXGMAC_EFUSE_READ_PATCH_PER_INDEX: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_patch_from_efuse_per_index(pdata, ex_data.val0, + &ex_data.val1, + &ex_data.val2); + /* DPRINTK("FXGMAC_EFUSE_READ_PATCH_PER_INDEX, address = 0x%x, val = 0x%x\n", + * ex_data.val1, ex_data.val2); + */ + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_LOAD: + DPRINTK("FXGMAC_EFUSE_LOAD.\n"); + ret = hw_ops->efuse_load(pdata); + break; + + case FXGMAC_GET_MAC_DATA: + ret = hw_ops->read_mac_subsys_from_efuse(pdata, mac, NULL, NULL); + if (ret) { + memcpy(data, mac, ETH_ALEN); + out_total_size = ioctl_cmd_size + ETH_ALEN; + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + } + break; + + case FXGMAC_SET_MAC_DATA: + if (in_data_size != ETH_ALEN) + goto err; + memcpy(mac, data, ETH_ALEN); + ret = hw_ops->write_mac_subsys_to_efuse(pdata, mac, NULL, NULL); + if (ret) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) + eth_hw_addr_set(pdata->netdev, mac); +#else + memcpy(pdata->netdev->dev_addr, mac, ETH_ALEN); +#endif + + memcpy(pdata->mac_addr, mac, ETH_ALEN); + + hw_ops->set_mac_address(pdata, mac); + hw_ops->set_mac_hash(pdata); + } + break; + + case FXGMAC_GET_SUBSYS_ID: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_mac_subsys_from_efuse(pdata, + NULL, + &ex_data.val0, + NULL); + if (ret) { + ex_data.val1 = 0xFFFF; // invalid value + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if 
(copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + } + break; + + case FXGMAC_SET_SUBSYS_ID: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->write_mac_subsys_to_efuse(pdata, + NULL, + &ex_data.val0, + NULL); + break; + + case FXGMAC_GET_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ex_data.val1 = hw_ops->get_gmac_register(pdata, + (u8 *)(pdata->base_mem + ex_data.val0)); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + break; + + case FXGMAC_SET_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->set_gmac_register(pdata, + (u8 *)(pdata->base_mem + ex_data.val0), + ex_data.val1); + ret = (regval == 0 ? true : false); + break; + + case FXGMAC_GET_PHY_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->read_ephy_reg(pdata, ex_data.val0, &ex_data.val1); + if (regval != -1) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + } + ret = (regval == -1 ? false : true); + break; + + case FXGMAC_SET_PHY_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->write_ephy_reg(pdata, ex_data.val0, ex_data.val1); + ret = (regval == 0 ? true : false); + break; + + case FXGMAC_GET_PCIE_LOCATION: + ex_data.val0 = pdata->pdev->bus->number; + ex_data.val1 = PCI_SLOT(pdata->pdev->devfn); + ex_data.val2 = PCI_FUNC(pdata->pdev->devfn); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + break; + + case FXGMAC_GET_GSO_SIZE: + ex_data.val0 = pdata->netdev->gso_max_size; + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + break; + + case FXGMAC_SET_GSO_SIZE: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + pdata->netdev->gso_max_size = ex_data.val0; + break; + + case FXGMAC_SET_RX_MODERATION: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = readreg(pdata->pAdapter, pdata->base_mem + INT_MOD); + regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_RX_POS, INT_MOD_RX_LEN, ex_data.val0); + writereg(pdata->pAdapter, regval, pdata->base_mem + INT_MOD); + break; + + case FXGMAC_SET_TX_MODERATION: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = readreg(pdata->pAdapter, pdata->base_mem + INT_MOD); + regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_TX_POS, INT_MOD_TX_LEN, ex_data.val0); + writereg(pdata->pAdapter, regval, pdata->base_mem + INT_MOD); + break; + + case FXGMAC_GET_TXRX_MODERATION: + regval = readreg(pdata->pAdapter, pdata->base_mem + INT_MOD); + ex_data.val0 = FXGMAC_GET_REG_BITS(regval, INT_MOD_RX_POS, INT_MOD_RX_LEN); + ex_data.val1 = FXGMAC_GET_REG_BITS(regval, INT_MOD_TX_POS, INT_MOD_TX_LEN); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, out_total_size)) + goto err; + break; + + default: + DPRINTK("Debugfs received invalid command: %x.\n", pcmd.cmd_type); + ret = false; + break; + } + } + + if (buf) + kfree(buf); + return ret ? 
FXGMAC_SUCCESS : FXGMAC_FAIL; + +err: + if (buf) + kfree(buf); + return FXGMAC_FAIL; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c index b8734efb36426..3fc29f1b40bc4 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c @@ -1,13 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#include -#include -#include -#include -#include -#include - #include "fuxi-os.h" #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" @@ -15,6 +8,145 @@ static int fxgmac_one_poll_rx(struct napi_struct *, int); static int fxgmac_one_poll_tx(struct napi_struct *, int); static int fxgmac_all_poll(struct napi_struct *, int); +static int fxgmac_dev_read(struct fxgmac_channel *channel); + + +void fxgmac_lock(struct fxgmac_pdata *pdata) +{ + mutex_lock(&pdata->expansion.mutex); +} + +void fxgmac_unlock(struct fxgmac_pdata *pdata) +{ + mutex_unlock(&pdata->expansion.mutex); +} + +#ifdef FXGMAC_ESD_CHECK_ENABLED +static void fxgmac_schedule_esd_work(struct fxgmac_pdata *pdata) +{ + set_bit(FXGMAC_FLAG_TASK_ESD_CHECK_PENDING, pdata->expansion.task_flags); + schedule_delayed_work(&pdata->expansion.esd_work, FXGMAC_ESD_INTERVAL); +} + +static void fxgmac_update_esd_stats(struct fxgmac_pdata *pdata) +{ + u32 value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXEXCESSIVECOLLSIONFRAMES); + pdata->expansion.esd_stats.tx_abort_excess_collisions += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXUNDERFLOWERROR_LO); + pdata->expansion.esd_stats.tx_dma_underrun += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXCARRIERERRORFRAMES); + pdata->expansion.esd_stats.tx_lost_crs += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXLATECOLLISIONFRAMES); + pdata->expansion.esd_stats.tx_late_collisions += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RXCRCERROR_LO); + pdata->expansion.esd_stats.rx_crc_errors += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RXALIGNERROR); + pdata->expansion.esd_stats.rx_align_errors += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RXRUNTERROR); + pdata->expansion.esd_stats.rx_runt_errors += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXSINGLECOLLISION_G); + pdata->expansion.esd_stats.single_collisions += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXMULTIPLECOLLISION_G); + pdata->expansion.esd_stats.multi_collisions += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TXDEFERREDFRAMES); + pdata->expansion.esd_stats.tx_deferred_frames += value; +} + +static void fxgmac_check_esd_work(struct fxgmac_pdata *pdata) +{ + FXGMAC_ESD_STATS *stats = &pdata->expansion.esd_stats; + int i = 0; + u32 regval; + + /* ESD test will make recv crc errors more than 4,294,967,xxx in one second. 
*/ + if (stats->rx_crc_errors > FXGMAC_ESD_ERROR_THRESHOLD || + stats->rx_align_errors > FXGMAC_ESD_ERROR_THRESHOLD || + stats->rx_runt_errors > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_abort_excess_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_dma_underrun > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_lost_crs > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_late_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->single_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->multi_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_deferred_frames > FXGMAC_ESD_ERROR_THRESHOLD) { + dev_info(pdata->dev, "%s - Error:\n", __func__); + dev_info(pdata->dev, "rx_crc_errors %ul.\n", stats->rx_crc_errors); + dev_info(pdata->dev, "rx_align_errors %ul.\n", stats->rx_align_errors); + dev_info(pdata->dev, "rx_runt_errors %ul.\n", stats->rx_runt_errors); + dev_info(pdata->dev, "tx_abort_excess_collisions %ul.\n", stats->tx_abort_excess_collisions); + dev_info(pdata->dev, "tx_dma_underrun %ul.\n", stats->tx_dma_underrun); + dev_info(pdata->dev, "tx_lost_crs %ul.\n", stats->tx_lost_crs); + dev_info(pdata->dev, "tx_late_collisions %ul.\n", stats->tx_late_collisions); + dev_info(pdata->dev, "single_collisions %ul.\n", stats->single_collisions); + dev_info(pdata->dev, "multi_collisions %ul.\n", stats->multi_collisions); + dev_info(pdata->dev, "tx_deferred_frames %ul.\n", stats->tx_deferred_frames); + + dev_info(pdata->dev, "esd error triggered, restart NIC...\n"); + cfg_r32(pdata, REG_PCI_COMMAND, ®val); + while ((regval == FXGMAC_PCIE_LINK_DOWN) && (i++ < FXGMAC_PCIE_RECOVER_TIMES)) { + usleep_range_ex(pdata->pAdapter, 200, 200); + cfg_r32(pdata, REG_PCI_COMMAND, ®val); + dev_info(pdata->dev, "pcie recovery link cost %d(200us)\n", i); + } + + if (regval == FXGMAC_PCIE_LINK_DOWN) { + dev_info(pdata->dev, "pcie link down, recovery failed.\n"); + return; + } + + if (regval & FXGMAC_PCIE_IO_MEM_MASTER_ENABLE) { + pdata->hw_ops.esd_restore_pcie_cfg(pdata); + cfg_r32(pdata, REG_PCI_COMMAND, ®val); + dev_info(pdata->dev, "pci command reg is %x after restoration.\n", regval); + fxgmac_restart_dev(pdata); + } + } + + memset(stats, 0, sizeof(FXGMAC_ESD_STATS)); +} + +static void fxgmac_esd_work(struct work_struct *work) +{ + struct fxgmac_pdata *pdata = container_of(work, + struct fxgmac_pdata, + expansion.esd_work.work); + + rtnl_lock(); + if (!netif_running(pdata->netdev) || + !test_and_clear_bit(FXGMAC_FLAG_TASK_ESD_CHECK_PENDING, pdata->expansion.task_flags)) + goto out_unlock; + + fxgmac_update_esd_stats(pdata); + fxgmac_check_esd_work(pdata); + fxgmac_schedule_esd_work(pdata); + +out_unlock: + rtnl_unlock(); +} + +static void fxgmac_cancel_esd_work(struct fxgmac_pdata *pdata) +{ + struct work_struct *work = &pdata->expansion.esd_work.work; + + if (!work->func) { + dev_info(pdata->dev, "work func is NULL.\n"); + return; + } + + cancel_delayed_work_sync(&pdata->expansion.esd_work); +} +#endif unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata) { @@ -127,31 +259,51 @@ inline unsigned int fxgmac_rx_dirty_desc(struct fxgmac_ring *ring) return dirty; } -static int fxgmac_maybe_stop_tx_queue(struct fxgmac_channel *channel, +static netdev_tx_t fxgmac_maybe_stop_tx_queue(struct fxgmac_channel *channel, struct fxgmac_ring *ring, unsigned int count) { struct fxgmac_pdata *pdata = channel->pdata; if (count > fxgmac_tx_avail_desc(ring)) { - netif_info( - pdata, drv, pdata->netdev, - "Tx queue stopped, not enough descriptors available\n"); + if (netif_msg_tx_done(pdata)) { + netif_info( + pdata, drv, 
pdata->netdev, + "Tx queue stopped, not enough descriptors available\n"); + } + + /* Avoid wrongly optimistic queue wake-up: tx poll thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); netif_stop_subqueue(pdata->netdev, channel->queue_index); ring->tx.queue_stopped = 1; - /* If we haven't notified the hardware because of xmit_more - * support, tell it now - */ - if (ring->tx.xmit_more) - pdata->hw_ops.tx_start_xmit(channel, ring); - if (netif_msg_tx_done(pdata)) - DPRINTK("about stop tx q, ret BUSY\n"); - - return NETDEV_TX_BUSY; + /* Sync with tx poll: + * - publish queue status and cur ring index (write barrier) + * - refresh dirty ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing tx poll thread + * can't. + */ + smp_mb(); + if (count <= fxgmac_tx_avail_desc(ring)) { + ring->tx.queue_stopped = 0; + netif_start_subqueue(pdata->netdev, channel->queue_index); + fxgmac_tx_start_xmit(channel, ring); + } else { + /* If we haven't notified the hardware because of xmit_more + * support, tell it now + */ + if (ring->tx.xmit_more) + fxgmac_tx_start_xmit(channel, ring); + if (netif_msg_tx_done(pdata)) + DPRINTK("about stop tx q, ret BUSY\n"); + return NETDEV_TX_BUSY; + } } - return 0; + return NETDEV_TX_OK; } static void fxgmac_prep_vlan(struct sk_buff *skb, @@ -303,8 +455,13 @@ static void fxgmac_prep_tx_pkt(struct fxgmac_pdata *pdata, static int fxgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) { unsigned int rx_buf_size; + unsigned int max_mtu; - if (mtu > FXGMAC_JUMBO_PACKET_MTU) { + /* On the Linux platform, the MTU size does not include the length + * of the MAC address and the length of the Type, but FXGMAC_JUMBO_PACKET_MTU include them. + */ + max_mtu = FXGMAC_JUMBO_PACKET_MTU - ETH_HLEN; + if (mtu > max_mtu) { netdev_alert(netdev, "MTU exceeds maximum supported value\n"); return -EINVAL; } @@ -342,103 +499,52 @@ static void fxgmac_enable_rx_tx_ints(struct fxgmac_pdata *pdata) } } -static void fxgmac_phy_process(struct fxgmac_pdata *pdata) -{ - int cur_link = 0; - int regval = 0; - int cur_speed = 0; - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - - regval = hw_ops->get_ephy_state(pdata); - - /* We should make sure that PHY is done with the reset */ - if (regval & MGMT_EPHY_CTRL_STA_EPHY_RESET) { - pdata->expansion.phy_link = false; - return; - } - - cur_link = FXGMAC_GET_REG_BITS(regval, - MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, - MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); - if (pdata->expansion.phy_link != cur_link) { - pdata->expansion.phy_link = cur_link; - if (pdata->expansion.phy_link) { - cur_speed = FXGMAC_GET_REG_BITS( - regval, MGMT_EPHY_CTRL_STA_SPEED_POS, - MGMT_EPHY_CTRL_STA_SPEED_LEN); - pdata->phy_speed = (cur_speed == 2) ? SPEED_1000 : - (cur_speed == 1) ? 
SPEED_100 : - SPEED_10; - pdata->phy_duplex = FXGMAC_GET_REG_BITS( - regval, MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS, - MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN); - hw_ops->config_mac_speed(pdata); - - hw_ops->enable_rx(pdata); - hw_ops->enable_tx(pdata); - netif_carrier_on(pdata->netdev); - if (netif_running(pdata->netdev)) { - netif_tx_wake_all_queues(pdata->netdev); - DPRINTK("%s now is link up, mac_speed=%d.\n", - FXGMAC_DRV_NAME, pdata->phy_speed); - } - } else { - netif_carrier_off(pdata->netdev); - netif_tx_stop_all_queues(pdata->netdev); - pdata->phy_speed = SPEED_UNKNOWN; - pdata->phy_duplex = DUPLEX_UNKNOWN; - hw_ops->disable_rx(pdata); - hw_ops->disable_tx(pdata); - DPRINTK("%s now is link down\n", FXGMAC_DRV_NAME); - } - } -} - -static int fxgmac_phy_poll(struct napi_struct *napi, int budget) +static int fxgmac_misc_poll(struct napi_struct *napi, int budget) { struct fxgmac_pdata *pdata = - container_of(napi, struct fxgmac_pdata, expansion.napi_phy); + container_of(napi, struct fxgmac_pdata, expansion.napi_misc); struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - fxgmac_phy_process(pdata); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) if (napi_complete_done(napi, 0)) hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); +#else + napi_complete(napi); + hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); +#endif return 0; } -static irqreturn_t fxgmac_phy_isr(int irq, void *data) +static irqreturn_t fxgmac_misc_isr(int irq, void *data) { struct fxgmac_pdata *pdata = data; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval; regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); - if (!(regval & MGMT_INT_CTRL0_INT_STATUS_PHY)) + if (!(regval & MGMT_INT_CTRL0_INT_STATUS_MISC)) return IRQ_HANDLED; hw_ops->disable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); - hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); - if (napi_schedule_prep(&pdata->expansion.napi_phy)) { - __napi_schedule_irqoff(&pdata->expansion.napi_phy); - } + hw_ops->clear_misc_int_status(pdata); + + napi_schedule_irqoff(&pdata->expansion.napi_misc); return IRQ_HANDLED; } static irqreturn_t fxgmac_isr(int irq, void *data) { - unsigned int dma_isr, dma_ch_isr, mac_isr; + unsigned int dma_ch_isr; struct fxgmac_pdata *pdata = data; struct fxgmac_channel *channel; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - unsigned int i, ti, ri; + unsigned int i; u32 val; - dma_isr = readreg(pdata->pAdapter, pdata->mac_regs + DMA_ISR); - val = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); - if (!(val & MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK)) + if (!(val & MGMT_INT_CTRL0_INT_STATUS_RXTXMISC_MASK)) return IRQ_HANDLED; hw_ops->disable_mgm_interrupt(pdata); @@ -450,24 +556,6 @@ static irqreturn_t fxgmac_isr(int irq, void *data) channel = pdata->channel_head + i; dma_ch_isr = readl(FXGMAC_DMA_REG(channel, DMA_CH_SR)); - netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", - i, dma_ch_isr); - - /* The TI or RI interrupt bits may still be set even if using - * per channel DMA interrupts. Check to be sure those are not - * enabled before using the private data napi structure. 
- */ - ti = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, - DMA_CH_SR_TI_LEN); - ri = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, - DMA_CH_SR_RI_LEN); - if (!pdata->per_channel_irq && (ti || ri)) { - if (napi_schedule_prep(&pdata->expansion.napi)) { - pdata->stats.napi_poll_isr++; - /* Turn on polling */ - __napi_schedule_irqoff(&pdata->expansion.napi); - } - } if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, DMA_CH_SR_TPS_LEN)) @@ -496,29 +584,14 @@ static irqreturn_t fxgmac_isr(int irq, void *data) writel(dma_ch_isr, FXGMAC_DMA_REG(channel, DMA_CH_SR)); } - if (FXGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS, - DMA_ISR_MACIS_LEN)) { - mac_isr = readl(pdata->mac_regs + MAC_ISR); + if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_MISC) + hw_ops->clear_misc_int_status(pdata); - if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS, - MAC_ISR_MMCTXIS_LEN)) - hw_ops->tx_mmc_int(pdata); - if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS, - MAC_ISR_MMCRXIS_LEN)) - hw_ops->rx_mmc_int(pdata); - - /* Clear all interrupt signals */ - writel(mac_isr, (pdata->mac_regs + MAC_ISR)); - } - - if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) { - hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, &val); - if (napi_schedule_prep(&pdata->expansion.napi)) { - pdata->stats.napi_poll_isr++; - /* Turn on polling */ - __napi_schedule_irqoff(&pdata->expansion.napi); - } + if (napi_schedule_prep(&pdata->expansion.napi)) { + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->expansion.napi); } return IRQ_HANDLED; @@ -537,31 +610,27 @@ static irqreturn_t fxgmac_dma_isr(int irq, void *data) hw_ops->disable_msix_one_interrupt(pdata, message_id); regval = 0; regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_SR_TI_POS, - DMA_CH_SR_TI_LEN, 1); + DMA_CH_SR_TI_LEN, 1); writereg(pdata->pAdapter, regval, - FXGMAC_DMA_REG(channel, DMA_CH_SR)); - if (napi_schedule_prep(&channel->expansion.napi_tx)) { - __napi_schedule_irqoff(&channel->expansion.napi_tx); - } + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + napi_schedule_irqoff(&channel->expansion.napi_tx); } else { message_id = channel->queue_index; hw_ops->disable_msix_one_interrupt(pdata, message_id); regval = 0; regval = readreg(pdata->pAdapter, - FXGMAC_DMA_REG(channel, DMA_CH_SR)); + FXGMAC_DMA_REG(channel, DMA_CH_SR)); regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_SR_RI_POS, - DMA_CH_SR_RI_LEN, 1); + DMA_CH_SR_RI_LEN, 1); writereg(pdata->pAdapter, regval, - FXGMAC_DMA_REG(channel, DMA_CH_SR)); - if (napi_schedule_prep(&channel->expansion.napi_rx)) { - __napi_schedule_irqoff(&channel->expansion.napi_rx); - } + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + napi_schedule_irqoff(&channel->expansion.napi_rx); } return IRQ_HANDLED; } -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) static void fxgmac_tx_hang_timer_handler(struct timer_list *t) #else @@ -636,12 +705,24 @@ static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int add) { struct fxgmac_channel *channel; unsigned int i; + u32 tx_napi = 0, rx_napi = 0, misc_napi = 0; + + misc_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN); + tx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN); + rx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS, + FXGMAC_FLAG_RX_NAPI_FREE_LEN); if (pdata->per_channel_irq) { 
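The NAPI/IRQ bookkeeping in the hunks below packs per-channel state into int_flags via FXGMAC_GET_REG_BITS/FXGMAC_SET_REG_BITS. A minimal sketch of how such shift/mask helpers are commonly defined (the driver's own definitions live in its register header and may differ):

    #define SKETCH_GET_BITS(var, pos, len) \
            (((var) >> (pos)) & (u32)GENMASK((len) - 1, 0))
    #define SKETCH_SET_BITS(var, pos, len, val) \
            (((var) & ~(u32)GENMASK((pos) + (len) - 1, (pos))) | \
             (((u32)(val) << (pos)) & (u32)GENMASK((pos) + (len) - 1, (pos))))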
channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { - if (add) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + if (!FXGMAC_GET_REG_BITS(rx_napi, + i, FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) netif_napi_add_weight( pdata->netdev, &channel->expansion.napi_rx, @@ -654,9 +735,14 @@ static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int add) #endif } napi_enable(&channel->expansion.napi_rx); - - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); + + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && !tx_napi) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) netif_napi_add_weight( pdata->netdev, &channel->expansion.napi_tx, @@ -668,28 +754,40 @@ static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int add) NAPI_POLL_WEIGHT); #endif napi_enable(&channel->expansion.napi_tx); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); } if (netif_msg_drv(pdata)) DPRINTK("napi_enable, msix ch%d napi enabled done, add=%d\n", i, add); } - /* for phy */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) - netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi_phy, - fxgmac_phy_poll, NAPI_POLL_WEIGHT); + /* for misc */ + if (!misc_napi) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) + netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi_misc, + fxgmac_misc_poll, NAPI_POLL_WEIGHT); #else - netif_napi_add(pdata->netdev, &pdata->expansion.napi_phy, - fxgmac_phy_poll, NAPI_POLL_WEIGHT); + netif_napi_add(pdata->netdev, &pdata->expansion.napi_misc, + fxgmac_misc_poll, NAPI_POLL_WEIGHT); #endif - napi_enable(&pdata->expansion.napi_phy); + napi_enable(&pdata->expansion.napi_misc); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); + } } else { i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN); if (!i) { if (add) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi, fxgmac_all_poll, @@ -706,7 +804,7 @@ static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int add) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, - FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 1); + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, FXGMAC_NAPI_ENABLE); } } } @@ -715,31 +813,59 @@ static void fxgmac_napi_disable(struct fxgmac_pdata *pdata, unsigned int del) { struct fxgmac_channel *channel; unsigned int i; + u32 tx_napi = 0, rx_napi = 0, misc_napi = 0; if (pdata->per_channel_irq) { + misc_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN); + tx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN); + rx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS, + FXGMAC_FLAG_RX_NAPI_FREE_LEN); channel = pdata->channel_head; if (channel != NULL) { 
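For context on why int_flags is consulted before every add/enable and del/disable: the kernel NAPI API must be paired exactly once per instance, and these bits keep the calls idempotent across suspend/resume. A condensed sketch of the guarded registration the flags implement (the rx_napi_added flag is hypothetical; netif_napi_add_weight() is the >= 5.19 form already used above):

    if (!rx_napi_added) {                   /* tracked via int_flags in the driver */
            netif_napi_add_weight(pdata->netdev, &channel->expansion.napi_rx,
                                  fxgmac_one_poll_rx, NAPI_POLL_WEIGHT);
            napi_enable(&channel->expansion.napi_rx);
            rx_napi_added = true;
    }
    /* ... teardown mirrors it ... */
    if (rx_napi_added) {
            napi_disable(&channel->expansion.napi_rx);
            netif_napi_del(&channel->expansion.napi_rx);
            rx_napi_added = false;
    }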
for (i = 0; i < pdata->channel_count; i++, channel++) { - napi_disable(&channel->expansion.napi_rx); + if (FXGMAC_GET_REG_BITS(rx_napi, + i, FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN)) { + napi_disable(&channel->expansion.napi_rx); if (del) { netif_napi_del( &channel->expansion.napi_rx); } + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); + } - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && tx_napi) { napi_disable( &channel->expansion.napi_tx); netif_napi_del( &channel->expansion.napi_tx); - } - if (netif_msg_drv(pdata)) - DPRINTK("napi_disable, msix ch%d napi disabled done, del=%d\n", - i, del); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); + } + if (netif_msg_drv(pdata)) + DPRINTK("napi_disable, msix ch%d napi disabled done, del=%d\n", + i, del); } - napi_disable(&pdata->expansion.napi_phy); - netif_napi_del(&pdata->expansion.napi_phy); + napi_disable(&pdata->expansion.napi_misc); + netif_napi_del(&pdata->expansion.napi_misc); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); } } else { i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, @@ -753,7 +879,7 @@ static void fxgmac_napi_disable(struct fxgmac_pdata *pdata, unsigned int del) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, - FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 0); + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, FXGMAC_NAPI_DISABLE); } } } @@ -765,6 +891,7 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) unsigned int i; int ret; u32 msi, msix, need_free; + u32 misc = 0, tx = 0, rx = 0; msi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, FXGMAC_FLAG_MSI_POS, FXGMAC_FLAG_MSI_LEN); @@ -793,20 +920,19 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, - FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 1); + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, FXGMAC_IRQ_ENABLE); } } if (!pdata->per_channel_irq) return 0; - ret = devm_request_irq(pdata->dev, pdata->expansion.phy_irq, - fxgmac_phy_isr, 0, netdev->name, pdata); - if (ret) { - netdev_alert(netdev, "error requesting phy irq %d, ret = %d\n", - pdata->expansion.phy_irq, ret); - return ret; - } + tx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN); + rx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS, + FXGMAC_FLAG_RX_IRQ_FREE_LEN); channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -814,7 +940,7 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) sizeof(channel->expansion.dma_irq_name) - 1, "%s-ch%d-Rx-%u", netdev_name(netdev), i, channel->queue_index); - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && !tx) { snprintf(channel->expansion.dma_irq_name_tx, sizeof(channel->expansion.dma_irq_name_tx) - 1, "%s-ch%d-Tx-%u", netdev_name(netdev), i, @@ -826,52 +952,102 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) channel->expansion.dma_irq_name_tx, channel); if (ret) { - DPRINTK("fxgmac_req_irqs, err with MSIx irq request for ch %d tx, 
ret=%d\n", - i, ret); - /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ - devm_free_irq(pdata->dev, - channel->expansion.dma_irq_tx, - channel); - return ret; + netdev_alert(netdev, "fxgmac_req_irqs, err with MSIx irq \ + request for ch %d tx, ret=%d\n", i, ret); + goto err_irq; } + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN, + FXGMAC_IRQ_ENABLE); + if (netif_msg_drv(pdata)) DPRINTK("fxgmac_req_irqs, MSIx irq_tx request ok, ch=%d, irq=%d,%s\n", - i, channel->expansion.dma_irq_tx, - channel->expansion.dma_irq_name_tx); + i, channel->expansion.dma_irq_tx, + channel->expansion.dma_irq_name_tx); } - ret = devm_request_irq(pdata->dev, channel->dma_irq, - fxgmac_dma_isr, 0, - channel->expansion.dma_irq_name, - channel); + + if (!FXGMAC_GET_REG_BITS(rx, i, FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN)) { + ret = devm_request_irq(pdata->dev, channel->dma_irq, + fxgmac_dma_isr, 0, + channel->expansion.dma_irq_name, + channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN, + FXGMAC_IRQ_ENABLE); + } + } + + misc = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN); + if (!misc) { + snprintf(pdata->expansion.misc_irq_name, + sizeof(pdata->expansion.misc_irq_name) - 1, + "%s-misc", netdev_name(netdev)); + ret = devm_request_irq(pdata->dev, + pdata->expansion.misc_irq, + fxgmac_misc_isr, + 0, + pdata->expansion.misc_irq_name, + pdata); if (ret) { - netdev_alert(netdev, "error requesting irq %d\n", - channel->dma_irq); + netdev_alert(netdev, + "error requesting misc irq %d, ret = %d\n", + pdata->expansion.misc_irq, + ret); goto err_irq; } + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN, + FXGMAC_IRQ_ENABLE); } - if (netif_msg_drv(pdata)) DPRINTK("fxgmac_req_irqs, MSIx irq request ok, total=%d,%d~%d\n", - i, (pdata->channel_head)[0].dma_irq, - (pdata->channel_head)[i - 1].dma_irq); + i, (pdata->channel_head)[0].dma_irq, (pdata->channel_head)[i-1].dma_irq); return 0; err_irq: - DPRINTK("fxgmac_req_irqs, err with MSIx irq request at %d, ret=%d\n", i, - ret); + netdev_alert(netdev, "fxgmac_req_irqs, err with MSIx irq request at %d, \ + ret=%d\n", i, ret); if (pdata->per_channel_irq) { for (i--, channel--; i < pdata->channel_count; i--, channel--) { - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { - devm_free_irq(pdata->dev, - channel->expansion.dma_irq_tx, - channel); + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && tx) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, channel->expansion.dma_irq_tx, channel); + } + + if (FXGMAC_GET_REG_BITS(rx, i, FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN)) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, channel->dma_irq, channel); } - devm_free_irq(pdata->dev, channel->dma_irq, channel); } - devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); + if (misc) { + pdata->expansion.int_flags = + 
FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, pdata->expansion.misc_irq, pdata); + } } return ret; } @@ -881,42 +1057,72 @@ static void fxgmac_free_irqs(struct fxgmac_pdata *pdata) struct fxgmac_channel *channel; unsigned int i = 0; u32 need_free, msix; + u32 misc = 0, tx = 0, rx = 0; msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, - FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); + FXGMAC_FLAG_MSIX_POS, + FXGMAC_FLAG_MSIX_LEN); need_free = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, - FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, - FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN); + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN); if (!msix) { if (need_free) { devm_free_irq(pdata->dev, pdata->dev_irq, pdata); - pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( - pdata->expansion.int_flags, - FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, - FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 0); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); } } if (!pdata->per_channel_irq) return; + misc = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN); + tx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN); + rx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS, + FXGMAC_FLAG_RX_IRQ_FREE_LEN); + channel = pdata->channel_head; if (channel != NULL) { for (i = 0; i < pdata->channel_count; i++, channel++) { - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { - devm_free_irq(pdata->dev, - channel->expansion.dma_irq_tx, - channel); + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && tx) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, channel->expansion.dma_irq_tx, channel); if (netif_msg_drv(pdata)) DPRINTK("fxgmac_free_irqs, MSIx irq_tx clear done, ch=%d\n", i); } - devm_free_irq(pdata->dev, channel->dma_irq, channel); + + if (FXGMAC_GET_REG_BITS(rx, i, FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN)) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } } - devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); + if (misc) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, pdata->expansion.misc_irq, pdata); + } } if (netif_msg_drv(pdata)) DPRINTK("fxgmac_free_irqs, MSIx rx irq clear done, total=%d\n", @@ -976,7 +1182,7 @@ void fxgmac_free_rx_data(struct fxgmac_pdata *pdata) static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) { u16 pcie_cap_offset; - u32 pcie_msi_mask_bits; + u32 pcie_msi_mask_bits = 0; int ret = 0; pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_MSI); @@ -984,7 +1190,7 @@ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) ret = pci_read_config_dword(pdev, pcie_cap_offset, &pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR + DPRINTK(KERN_ERR "read pci config space MSI cap. 
failed, %d\n", ret); ret = -EFAULT; @@ -996,7 +1202,7 @@ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) PCI_CAP_ID_MSI_ENABLE_LEN, 0); ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR "write pci config space MSI mask failed, %d\n", + DPRINTK(KERN_ERR "write pci config space MSI mask failed, %d\n", ret); ret = -EFAULT; } @@ -1007,7 +1213,7 @@ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) { u16 pcie_cap_offset; - u32 pcie_msi_mask_bits; + u32 pcie_msi_mask_bits = 0; int ret = 0; pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_MSIX); @@ -1015,7 +1221,7 @@ static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) ret = pci_read_config_dword(pdev, pcie_cap_offset, &pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR + DPRINTK(KERN_ERR "read pci config space MSIX cap. failed, %d\n", ret); ret = -EFAULT; @@ -1027,7 +1233,7 @@ static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) PCI_CAP_ID_MSIX_ENABLE_LEN, 0); ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR "write pci config space MSIX mask failed, %d\n", + DPRINTK(KERN_ERR "write pci config space MSIX mask failed, %d\n", ret); ret = -EFAULT; } @@ -1039,21 +1245,27 @@ int fxgmac_start(struct fxgmac_pdata *pdata) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; struct net_device *netdev = pdata->netdev; - int ret; unsigned int pcie_low_power = 0; u32 regval; + int ret; if (netif_msg_drv(pdata)) DPRINTK("fxgmac start callin here.\n"); - /* must reset software again here, to avoid flushing tx queue error caused by the system only run probe - * when installing driver on the arm platform. - */ + if (pdata->expansion.dev_state != FXGMAC_DEV_OPEN && + pdata->expansion.dev_state != FXGMAC_DEV_STOP && + pdata->expansion.dev_state != FXGMAC_DEV_RESUME) + return 0; + + /* must reset software again here, to avoid flushing tx queue error + * caused by the system only run probe + * when installing driver on the arm platform. + */ hw_ops->exit(pdata); if (FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, - FXGMAC_FLAG_LEGACY_POS, - FXGMAC_FLAG_LEGACY_LEN)) { + FXGMAC_FLAG_LEGACY_POS, + FXGMAC_FLAG_LEGACY_LEN)) { /* * we should disable msi and msix here when we use legacy interrupt, for two reasons: * 1. Exit will restore msi and msix config regisiter, that may enable them. 
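fxgmac_disable_pci_msi_config()/fxgmac_disable_pci_msix_config() above clear the enable bit with a raw dword read-modify-write at the capability offset. An equivalent sketch using the generic definitions from <uapi/linux/pci_regs.h> (not the driver's code, just the standard shape of the operation):

    static void sketch_clear_msi_enable(struct pci_dev *pdev)
    {
            u8 off = pci_find_capability(pdev, PCI_CAP_ID_MSI);
            u16 ctrl;

            if (!off)
                    return;
            pci_read_config_word(pdev, off + PCI_MSI_FLAGS, &ctrl);
            ctrl &= ~PCI_MSI_FLAGS_ENABLE;          /* MSI-X uses PCI_MSIX_FLAGS_ENABLE */
            pci_write_config_word(pdev, off + PCI_MSI_FLAGS, ctrl);
    }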
@@ -1075,13 +1287,21 @@ int fxgmac_start(struct fxgmac_pdata *pdata) pcie_low_power & PCIE_LP_ASPM_L1SS, pcie_low_power & PCIE_LP_ASPM_L1, pcie_low_power & PCIE_LP_ASPM_L0S); + if (test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err(pdata->netdev, "fxgmac powerstate is %lu when config power to up.\n", pdata->expansion.powerstate); + } hw_ops->config_power_up(pdata); - fxgmac_dismiss_all_int(pdata); + hw_ops->dismiss_all_int(pdata); + + //control module int to PCIe slot + if (netdev->base_addr) { + regval = (unsigned int)(*((u32 *)(netdev->base_addr + MGMT_INT_CTRL0))); + } ret = hw_ops->init(pdata); if (ret) { - printk("fxgmac hw init error.\n"); + DPRINTK("fxgmac hw init error.\n"); return ret; } fxgmac_napi_enable(pdata, 1); @@ -1104,21 +1324,31 @@ int fxgmac_start(struct fxgmac_pdata *pdata) writel(0xF0000000, (volatile void *)(netdev->base_addr + MGMT_INT_CTRL0)); +#if FXGMAC_INT_MODERATION_ENABLED hw_ops->set_interrupt_moderation(pdata); +#endif if (pdata->per_channel_irq) hw_ops->enable_msix_rxtxphyinterrupt(pdata); fxgmac_enable_rx_tx_ints(pdata); +#ifdef FXGMAC_ESD_CHECK_ENABLED + fxgmac_schedule_esd_work(pdata); +#endif + + fxgmac_set_phy_link_ksettings(pdata); hw_ops->led_under_active(pdata); + pdata->expansion.dev_state = FXGMAC_DEV_START; + fxgmac_phy_timer_init(pdata); return 0; err_napi: + fxgmac_phy_timer_destroy(pdata); fxgmac_napi_disable(pdata, 1); hw_ops->exit(pdata); - DPRINTK("fxgmac start callout with irq err.\n"); + dev_err(pdata->dev, "fxgmac start callout with irq err.\n"); return ret; } @@ -1130,6 +1360,11 @@ void fxgmac_stop(struct fxgmac_pdata *pdata) struct netdev_queue *txq; unsigned int i; + if (pdata->expansion.dev_state != FXGMAC_DEV_START) + return; + + pdata->expansion.dev_state = FXGMAC_DEV_STOP; + if (pdata->per_channel_irq) { hw_ops->disable_msix_interrupt(pdata); } else { @@ -1157,39 +1392,32 @@ void fxgmac_stop(struct fxgmac_pdata *pdata) } } - switch (pdata->expansion.current_state) { - case CURRENT_STATE_SUSPEND: - hw_ops->led_under_sleep(pdata); - break; - case CURRENT_STATE_SHUTDOWN: - case CURRENT_STATE_RESTART: - hw_ops->led_under_shutdown(pdata); - break; - case CURRENT_STATE_CLOSE: - break; - default: - break; - } + fxgmac_phy_timer_destroy(pdata); } void fxgmac_restart_dev(struct fxgmac_pdata *pdata) { + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; int ret; /* If not running, "restart" will happen on open */ - if (!netif_running(pdata->netdev)) + if (!netif_running(pdata->netdev) && + pdata->expansion.dev_state != FXGMAC_DEV_START) return; - pdata->expansion.current_state = CURRENT_STATE_RESTART; + fxgmac_lock(pdata); fxgmac_stop(pdata); + hw_ops->led_under_shutdown(pdata); fxgmac_free_tx_data(pdata); fxgmac_free_rx_data(pdata); ret = fxgmac_start(pdata); if (ret) { - printk("fxgmac_restart_dev: fxgmac_start failed.\n"); + DPRINTK("fxgmac_restart_dev: fxgmac_start failed.\n"); } + + fxgmac_unlock(pdata); } static void fxgmac_restart(struct work_struct *work) @@ -1221,7 +1449,7 @@ void fxgmac_net_powerup(struct fxgmac_pdata *pdata) ret = fxgmac_start(pdata); if (ret) { - printk("fxgmac_net_powerup: fxgmac_start error\n"); + DPRINTK("fxgmac_net_powerup: fxgmac_start error\n"); return; } @@ -1266,11 +1494,20 @@ void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol) /* synchronize_rcu() needed for pending XDP buffers to drain */ synchronize_rcu(); - fxgmac_stop(pdata); /* some works are redundent in this call */ +#ifdef FXGMAC_ESD_CHECK_ENABLED + fxgmac_cancel_esd_work(pdata); +#endif + + 
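The dev_state checks introduced in fxgmac_start()/fxgmac_stop() form a small state machine; a condensed view of the transitions as they appear in this patch (open/stop/close hunks are nearby in fuxi-gmac-net.c):

    /*  .ndo_open        -> dev_state = FXGMAC_DEV_OPEN
     *  fxgmac_start()    : runs only from OPEN, STOP or RESUME; ends with FXGMAC_DEV_START
     *  fxgmac_stop()     : runs only from START; sets FXGMAC_DEV_STOP
     *  .ndo_stop (close)-> after fxgmac_stop(), sets FXGMAC_DEV_CLOSE
     */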
fxgmac_stop(pdata); //some works are redundent in this call /* must call it after software reset */ hw_ops->pre_power_down(pdata, false); + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err(pdata->netdev, + "fxgmac powerstate is %lu when config power to down.\n", pdata->expansion.powerstate); + } + /* set mac to lowpower mode and enable wol accordingly */ hw_ops->config_power_down(pdata, wol); @@ -1294,23 +1531,27 @@ static int fxgmac_open(struct net_device *netdev) if (netif_msg_drv(pdata)) DPRINTK("fxgmac_open callin\n"); + fxgmac_lock(pdata); + pdata->expansion.dev_state = FXGMAC_DEV_OPEN; desc_ops = &pdata->desc_ops; - /* TODO: Initialize the phy */ - /* Calculate the Rx buffer size before allocating rings */ ret = fxgmac_calc_rx_buf_size(netdev, netdev->mtu); if (ret < 0) - return ret; + goto unlock; pdata->rx_buf_size = ret; /* Allocate the channels and rings */ - ret = desc_ops->alloc_channles_and_rings(pdata); + ret = desc_ops->alloc_channels_and_rings(pdata); if (ret) - return ret; + goto unlock; INIT_WORK(&pdata->expansion.restart_work, fxgmac_restart); +#ifdef FXGMAC_ESD_CHECK_ENABLED + INIT_DELAYED_WORK(&pdata->expansion.esd_work, fxgmac_esd_work); +#endif + ret = fxgmac_start(pdata); if (ret) goto err_channels_and_rings; @@ -1318,31 +1559,35 @@ static int fxgmac_open(struct net_device *netdev) if (netif_msg_drv(pdata)) DPRINTK("fxgmac_open callout\n"); + fxgmac_unlock(pdata); + return 0; err_channels_and_rings: desc_ops->free_channels_and_rings(pdata); DPRINTK("fxgmac_open callout with channel alloc err\n"); +unlock: + fxgmac_unlock(pdata); return ret; } static int fxgmac_close(struct net_device *netdev) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - struct fxgmac_desc_ops *desc_ops; + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; if (netif_msg_drv(pdata)) DPRINTK("fxgmac_close callin\n"); - desc_ops = &pdata->desc_ops; - - pdata->expansion.current_state = - (pdata->expansion.current_state == CURRENT_STATE_SHUTDOWN) ? 
- pdata->expansion.current_state : - CURRENT_STATE_CLOSE; + fxgmac_lock(pdata); /* Stop the device */ fxgmac_stop(pdata); + pdata->expansion.dev_state = FXGMAC_DEV_CLOSE; + +#ifdef FXGMAC_ESD_CHECK_ENABLED + fxgmac_cancel_esd_work(pdata); +#endif /* Free the channels and rings */ desc_ops->free_channels_and_rings(pdata); @@ -1352,6 +1597,7 @@ static int fxgmac_close(struct net_device *netdev) if (netif_msg_drv(pdata)) DPRINTK("fxgmac_close callout\n"); + fxgmac_unlock(pdata); return 0; } @@ -1365,7 +1611,7 @@ static void fxgmac_tx_timeout(struct net_device *netdev, unsigned int unused) struct fxgmac_pdata *pdata = netdev_priv(netdev); netdev_warn(netdev, "tx timeout, device restarting\n"); -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED if (!pdata->tx_hang_restart_queuing) schedule_work(&pdata->expansion.restart_work); #else @@ -1373,19 +1619,17 @@ static void fxgmac_tx_timeout(struct net_device *netdev, unsigned int unused) #endif } -static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) { struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_pkt_info *tx_pkt_info; struct fxgmac_desc_ops *desc_ops; struct fxgmac_channel *channel; - struct fxgmac_hw_ops *hw_ops; struct netdev_queue *txq; struct fxgmac_ring *ring; int ret; desc_ops = &pdata->desc_ops; - hw_ops = &pdata->hw_ops; if (netif_msg_tx_done(pdata)) DPRINTK("xmit callin, skb->len=%d, q=%d\n", skb->len, @@ -1418,7 +1662,6 @@ static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) if (ret) { netif_err(pdata, tx_err, netdev, "error processing TSO packet\n"); - DPRINTK("dev_xmit, tx err for TSO\n"); dev_kfree_skb_any(skb); return ret; } @@ -1426,7 +1669,7 @@ static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) if (!desc_ops->map_tx_skb(channel, skb)) { dev_kfree_skb_any(skb); - DPRINTK("xmit, map tx skb err\n"); + netif_err(pdata, tx_err, netdev, "xmit, map tx skb err\n"); return NETDEV_TX_OK; } @@ -1437,10 +1680,8 @@ static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pkt_info->tx_bytes); /* Configure required descriptor fields for transmission */ - hw_ops->dev_xmit(channel); -#if FXGMAC_DUMMY_TX_DEBUG - DPRINTK("tx hw_ops->dev_xmit ok\n"); -#endif + fxgmac_dev_xmit(channel); + if (netif_msg_pktdata(pdata)) fxgmac_dbg_pkt(netdev, skb, true); @@ -1512,7 +1753,12 @@ static int fxgmac_set_mac_address(struct net_device *netdev, void *addr) return 0; } -/* cmd = [0x89F0, 0x89FF] */ +/* + * cmd = [0x89F0, 0x89FF] + * When using it, we must pay attention to the thread synchronization + * of this interface. Because it's an external call that isn't + * initiated by the OS. 
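The external caller referred to here is a user-space tool issuing the driver's private ioctl range (0x89F0 is SIOCDEVPRIVATE). A hypothetical caller sketch; the payload must follow the ext_ioctl_data layout noted earlier, and FXGMAC_DEV_CMD is assumed to map to the first private number:

    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>
    #include <unistd.h>

    static int send_private_cmd(const char *ifname, void *cmd_buf)
    {
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int ret;

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = cmd_buf;                 /* ext_ioctl_data + payload */
            ret = ioctl(fd, SIOCDEVPRIVATE, &ifr);  /* 0x89F0 */
            close(fd);
            return ret;
    }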
+ */ static int fxgmac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct file f; @@ -1526,7 +1772,7 @@ static int fxgmac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) switch (cmd) { case FXGMAC_DEV_CMD: - ret = fxgmac_dbg_netdev_ops_ioctl( + ret = fxgmac_netdev_ops_ioctl( &f, FXGMAC_IOCTL_DFS_COMMAND, (unsigned long)(ifr->ifr_data)); break; @@ -1549,11 +1795,21 @@ static int fxgmac_siocdevprivate(struct net_device *dev, struct ifreq *ifr, static int fxgmac_change_mtu(struct net_device *netdev, int mtu) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - int ret; + int ret, max_mtu; #ifdef FXGMAC_DEBUG int old_mtu = netdev->mtu; #endif + /* On the Linux platform, the MTU size does not include the length + * of the MAC address and the length of the Type, but FXGMAC_JUMBO_PACKET_MTU include them. + */ + max_mtu = FXGMAC_JUMBO_PACKET_MTU - ETH_HLEN; + if (mtu > max_mtu) { + netdev_alert(netdev, "MTU exceeds maximum supported value\n"); + return -EINVAL; + } + + fxgmac_lock(pdata); fxgmac_stop(pdata); fxgmac_free_tx_data(pdata); @@ -1573,6 +1829,8 @@ static int fxgmac_change_mtu(struct net_device *netdev, int mtu) if (netif_running(netdev)) fxgmac_start(pdata); + netdev_update_features(netdev); + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) DPRINTK("fxgmac, set MTU from %d to %d. min, max=(%d,%d)\n", old_mtu, netdev->mtu, netdev->min_mtu, netdev->max_mtu); @@ -1580,6 +1838,8 @@ static int fxgmac_change_mtu(struct net_device *netdev, int mtu) DPRINTK("fxgmac, set MTU from %d to %d.\n", old_mtu, netdev->mtu); #endif + fxgmac_unlock(pdata); + return 0; } @@ -1637,8 +1897,25 @@ static void fxgmac_poll_controller(struct net_device *netdev) } #endif /* CONFIG_NET_POLL_CONTROLLER */ +static netdev_features_t fxgmac_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + u32 fifo_size; + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + fifo_size = hw_ops->calculate_max_checksum_size(pdata); + + if (netdev->mtu > fifo_size) { + features &= ~NETIF_F_IP_CSUM; + features &= ~NETIF_F_IPV6_CSUM; + } + + return features; +} + static int fxgmac_set_features(struct net_device *netdev, - netdev_features_t features) + netdev_features_t features) { netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter, tso; struct fxgmac_pdata *pdata = netdev_priv(netdev); @@ -1653,11 +1930,11 @@ static int fxgmac_set_features(struct net_device *netdev, tso = pdata->expansion.netdev_features & (NETIF_F_TSO | NETIF_F_TSO6); if ((features & (NETIF_F_TSO | NETIF_F_TSO6)) && !tso) { - printk("enable tso.\n"); + DPRINTK("enable tso.\n"); pdata->hw_feat.tso = 1; hw_ops->config_tso(pdata); } else if (!(features & (NETIF_F_TSO | NETIF_F_TSO6)) && tso) { - printk("disable tso.\n"); + DPRINTK("disable tso.\n"); pdata->hw_feat.tso = 0; hw_ops->config_tso(pdata); } @@ -1717,6 +1994,7 @@ static const struct net_device_ops fxgmac_netdev_ops = { .ndo_poll_controller = fxgmac_poll_controller, #endif .ndo_set_features = fxgmac_set_features, + .ndo_fix_features = fxgmac_fix_features, .ndo_set_rx_mode = fxgmac_set_rx_mode, }; @@ -1730,11 +2008,16 @@ static void fxgmac_rx_refresh(struct fxgmac_channel *channel) struct fxgmac_pdata *pdata = channel->pdata; struct fxgmac_ring *ring = channel->rx_ring; struct fxgmac_desc_data *desc_data; - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; while (ring->dirty != ring->cur) { desc_data = FXGMAC_GET_DESC_DATA(ring, ring->dirty); - 
hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty); + /* Reset desc_data values */ + desc_ops->unmap_desc_data(pdata, desc_data); + + if (desc_ops->map_rx_buffer(pdata, ring, desc_data)) + break; + desc_ops->rx_desc_reset(pdata, desc_data, ring->dirty); ring->dirty = FXGMAC_GET_ENTRY(ring->dirty, ring->dma_desc_count); } @@ -1757,19 +2040,27 @@ static struct sk_buff *fxgmac_create_skb(struct fxgmac_pdata *pdata, unsigned int len) { struct sk_buff *skb; - skb = __netdev_alloc_skb_ip_align(pdata->netdev, len, GFP_ATOMIC); - if (!skb) { - netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n", - __func__); + unsigned int copy_len; + u8 *packet; + + skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len); + if (!skb) return NULL; - } - dma_sync_single_for_cpu(pdata->dev, desc_data->rx.buf.dma_base, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, desc_data->skb->data, len); - skb_put(skb, len); - dma_sync_single_for_device(pdata->dev, desc_data->rx.buf.dma_base, len, - DMA_FROM_DEVICE); + /* Start with the header buffer which may contain just the header + * or the header plus data + */ + dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base, + desc_data->rx.hdr.dma_off, + desc_data->rx.hdr.dma_len, + DMA_FROM_DEVICE); + + packet = page_address(desc_data->rx.hdr.pa.pages) + + desc_data->rx.hdr.pa.pages_offset; + copy_len = len; + copy_len = min(desc_data->rx.hdr.dma_len, copy_len); + skb_copy_to_linear_data(skb, packet, copy_len); + skb_put(skb, copy_len); return skb; } @@ -1791,8 +2082,11 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) static int fxgmac_restart_need; static u32 change_cnt; static u32 reg_cur_pre = 0xffffffff; + (void) reg_cur_pre; + (void) change_cnt; + (void) fxgmac_restart_need; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED static u32 reg_cur; #endif @@ -1878,7 +2172,7 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) } } #endif -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED if ((!pdata->tx_hang_restart_queuing) && (!channel->expansion.tx_hang_timer_active)) { reg_cur = ring->dirty; @@ -1938,7 +2232,7 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) /* Free the SKB and reset the descriptor for re-use */ desc_ops->unmap_desc_data(pdata, desc_data); - hw_ops->tx_desc_reset(desc_data); + desc_ops->tx_desc_reset(desc_data); processed++; ring->dirty = @@ -1950,6 +2244,7 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + smp_wmb(); if ((ring->tx.queue_stopped == 1) && (fxgmac_tx_avail_desc(ring) > FXGMAC_TX_DESC_MIN_FREE)) { ring->tx.queue_stopped = 0; @@ -1968,18 +2263,14 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) struct fxgmac_pdata *pdata = channel->pdata; struct fxgmac_ring *ring = channel->rx_ring; struct net_device *netdev = pdata->netdev; - unsigned int len; + unsigned int len, max_len; unsigned int context_next, context; struct fxgmac_desc_data *desc_data; struct fxgmac_pkt_info *pkt_info; unsigned int incomplete; - struct fxgmac_hw_ops *hw_ops; struct napi_struct *napi; struct sk_buff *skb; int packet_count = 0; - u32 ipce, iphe; - - hw_ops = &pdata->hw_ops; /* Nothing to do if there isn't a Rx ring for this channel */ if (!ring) @@ -2005,7 +2296,7 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) if (fxgmac_rx_dirty_desc(ring) > FXGMAC_RX_DESC_MAX_DIRTY) fxgmac_rx_refresh(channel); - if (hw_ops->dev_read(channel)) + if (fxgmac_dev_read(channel)) break; 
ring->cur = FXGMAC_GET_ENTRY(ring->cur, ring->dma_desc_count); @@ -2029,27 +2320,19 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) netif_err(pdata, rx_err, netdev, "error in received packet\n"); dev_kfree_skb(skb); + pdata->netdev->stats.rx_dropped++; goto next_packet; } if (!context) { len = desc_data->rx.len; - if (len > pdata->rx_buf_size) { - if (net_ratelimit()) - netdev_err( - pdata->netdev, - "len %d larger than size (%d)\n", - len, pdata->rx_buf_size); - pdata->netdev->stats.rx_dropped++; - goto next_packet; - } if (len == 0) { if (net_ratelimit()) - netdev_err( - pdata->netdev, + netif_err(pdata, rx_err, netdev, "A packet of length 0 was received\n"); pdata->netdev->stats.rx_length_errors++; + pdata->netdev->stats.rx_dropped++; goto next_packet; } @@ -2058,38 +2341,39 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) len); if (unlikely(!skb)) { if (net_ratelimit()) - netdev_warn( - pdata->netdev, - "create skb failed\n"); + netif_err(pdata, rx_err, netdev, "create skb failed\n"); + pdata->netdev->stats.rx_dropped++; goto next_packet; } } + + max_len = netdev->mtu + ETH_HLEN; + if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + skb->protocol == htons(ETH_P_8021Q)) + max_len += VLAN_HLEN; + if (len > max_len) { + if (net_ratelimit()) + netif_err(pdata, rx_err, netdev, + "len %d larger than max size %d\n", + len, max_len); + pdata->netdev->stats.rx_length_errors++; + pdata->netdev->stats.rx_dropped++; + dev_kfree_skb(skb); + goto next_packet; + } } - if (!skb) + if (!skb) { + pdata->netdev->stats.rx_dropped++; goto next_packet; + } if (netif_msg_pktdata(pdata)) fxgmac_print_pkt(netdev, skb, false); skb_checksum_none_assert(skb); - if (netdev->features & NETIF_F_RXCSUM) { - ipce = FXGMAC_GET_REG_BITS_LE( - desc_data->dma_desc->desc1, - RX_NORMAL_DESC1_WB_IPCE_POS, - RX_NORMAL_DESC1_WB_IPCE_LEN); - iphe = FXGMAC_GET_REG_BITS_LE( - desc_data->dma_desc->desc1, - RX_NORMAL_DESC1_WB_IPHE_POS, - RX_NORMAL_DESC1_WB_IPHE_LEN); - /* if csum error, let the stack verify checksum errors.otherwise don't verify */ - if (!ipce && !iphe && - FXGMAC_GET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, - RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - } + if (netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; if (FXGMAC_GET_REG_BITS(pkt_info->attributes, RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, @@ -2228,8 +2512,6 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) pdata->netdev->stats.rx_bytes += len; } - fxgmac_rx_refresh(channel); - return packet_count; } @@ -2237,11 +2519,10 @@ static int fxgmac_one_poll_tx(struct napi_struct *napi, int budget) { struct fxgmac_channel *channel = container_of(napi, struct fxgmac_channel, expansion.napi_tx); - int ret = 0; struct fxgmac_pdata *pdata = channel->pdata; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - ret = fxgmac_tx_poll(channel); + fxgmac_tx_poll(channel); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) if (napi_complete_done(napi, 0)) { hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0); @@ -2264,9 +2545,9 @@ static int fxgmac_one_poll_rx(struct napi_struct *napi, int budget) processed = fxgmac_rx_poll(channel, budget); if (processed < budget) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) - /* if there no interrupt occured when this interrupt running, struct napi's state is NAPIF_STATE_SCHED, + /* if there no interrupt occurred when this interrupt running, struct napi's state is 
NAPIF_STATE_SCHED, * napi_complete_done return true and we can enable irq, it will not cause unbalanced iqr issure. - * if there more interrupt occured when this interrupt running, struct napi's state is NAPIF_STATE_SCHED | NAPIF_STATE_MISSED + * if there more interrupt occurred when this interrupt running, struct napi's state is NAPIF_STATE_SCHED | NAPIF_STATE_MISSED * because napi_schedule_prep will make it. At this time napi_complete_done will return false and * schedule poll again because of NAPIF_STATE_MISSED, it will cause unbalanced irq issure. */ @@ -2307,18 +2588,16 @@ static int fxgmac_all_poll(struct napi_struct *napi, int budget) } } while (false); - /* for phy, we needn't to process any packet, so processed will be 0 */ - if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) { - fxgmac_phy_process(pdata); - pdata->expansion.mgm_intctrl_val &= - ~MGMT_INT_CTRL0_INT_STATUS_PHY; - } - /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) if (napi_complete_done(napi, processed)) hw_ops->enable_mgm_interrupt(pdata); +#else + napi_complete(napi); + hw_ops->enable_mgm_interrupt(pdata); +#endif } if ((processed) && (netif_msg_rx_status(pdata))) { @@ -2327,3 +2606,472 @@ static int fxgmac_all_poll(struct napi_struct *napi, int budget) return processed; } + +void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, + struct fxgmac_ring *ring) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_desc_data *desc_data; + + /* Make sure everything is written before the register write */ + wmb(); + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor + */ + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); + + if (netif_msg_tx_done(pdata)) { + DPRINTK("tx_start_xmit: dump before wr reg, \ + dma base=0x%016llx,reg=0x%08x, \ + tx timer usecs=%u,tx_timer_active=%u\n", + desc_data->dma_desc_addr, + readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)), + pdata->tx_usecs, channel->tx_timer_active); + } + + ring->tx.xmit_more = 0; +} + +void fxgmac_dev_xmit(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + unsigned int tso_context, vlan_context; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + unsigned int csum, tso, vlan; + int start_index = ring->cur; + int cur_index = ring->cur; + int i; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit callin, desc cur=%d\n", cur_index); + + pkt_info = &ring->pkt_info; + csum = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); + tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + if (tso && (pkt_info->mss != ring->tx.cur_mss)) + tso_context = 1; + else + tso_context = 0; + + if ((tso_context) && (netif_msg_tx_done(pdata))) { + /* tso is initialized to start... */ + DPRINTK("fxgmac_dev_xmit, tso_%s tso=0x%x, pkt_mss=%d, cur_mss=%d\n", + (pkt_info->mss) ? 
"start" : "stop", tso, pkt_info->mss, + ring->tx.cur_mss); + } + + if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) + vlan_context = 1; + else + vlan_context = 0; + + if (vlan && (netif_msg_tx_done(pdata))) + DPRINTK("fxgmac_dev_xmi:pkt vlan=%d, ring vlan=%d, vlan_context=%d\n", + pkt_info->vlan_ctag, ring->tx.cur_vlan_ctag, + vlan_context); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Create a context descriptor if this is a TSO pkt_info */ + if (tso_context || vlan_context) { + if (tso_context) { + if (netif_msg_tx_done(pdata)) + DPRINTK("xlgamc dev xmit, construct tso context descriptor, mss=%u\n", + pkt_info->mss); + + /* Set the MSS size */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, + TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, 1); + + /* Indicate this descriptor contains the MSS */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, + TX_CONTEXT_DESC3_TCMSSV_LEN, 1); + + ring->tx.cur_mss = pkt_info->mss; + } + + if (vlan_context) { + netif_dbg(pdata, tx_queued, pdata->netdev, + "VLAN context descriptor, ctag=%u\n", + pkt_info->vlan_ctag); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, 1); + + /* Set the VLAN tag */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, + TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); + + /* Indicate this descriptor contains the VLAN tag */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, + TX_CONTEXT_DESC3_VLTV_LEN, 1); + + ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; + } + + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + } + + /* Update buffer address (for TSO this is the header) */ + dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, + desc_data->skb_dma_len); + + /* VLAN tag insertion check */ + if (vlan) { + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, + TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); + pdata->stats.tx_vlan_packets++; + } + + /* Timestamp enablement check */ + if (FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_PTP_POS, + TX_PACKET_ATTRIBUTES_PTP_LEN)) + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, + TX_NORMAL_DESC2_TTSE_LEN, 1); + + /* Mark it as First Descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FD_POS, + TX_NORMAL_DESC3_FD_LEN, 1); + + /* Mark it as a NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Set OWN bit if not the first descriptor */ + if (cur_index != start_index) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (tso) { + /* Enable TSO */ + dma_desc->desc3 = 
FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, + TX_NORMAL_DESC3_TSE_LEN, 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, + TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, + TX_NORMAL_DESC3_TCPHDRLEN_LEN, + pkt_info->tcp_header_len / 4); + + pdata->stats.tx_tso_packets++; + } else { + /* Enable CRC and Pad Insertion */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, + TX_NORMAL_DESC3_CPC_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + + /* Set the total length to be transmitted */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FL_POS, + TX_NORMAL_DESC3_FL_LEN, + pkt_info->length); + } + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit before more descs, desc cur=%d, start=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, start_index, dma_desc->desc0, + dma_desc->desc1, dma_desc->desc2, dma_desc->desc3); + + if (start_index <= cur_index) + i = cur_index - start_index + 1; + else + i = ring->dma_desc_count - start_index + cur_index; + + for (; i < pkt_info->desc_count; i++) { + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Update buffer address */ + dma_desc->desc0 = + cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = + cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); + + /* Set OWN bit */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + /* Mark it as NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + } + + /* Set LAST bit for the last descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN, 1); + + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_IC_POS, + TX_NORMAL_DESC2_IC_LEN, 1); + + /* Save the Tx info to report back during cleanup */ + desc_data->tx.packets = pkt_info->tx_packets; + desc_data->tx.bytes = pkt_info->tx_bytes; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit last descs, desc cur=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + /* In case the Tx DMA engine is running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the first descriptor + */ + dma_wmb(); + + /* Set OWN bit for the first descriptor */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + dma_desc = desc_data->dma_desc; + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit first descs, start=%d, desc=%#x,%#x,%#x,%#x\n", + start_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + if 
(netif_msg_tx_queued(pdata)) + fxgmac_dump_tx_desc(pdata, ring, start_index, + pkt_info->desc_count, 1); + + /* Make sure ownership is written to the descriptor */ + smp_wmb(); + + ring->cur = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + fxgmac_tx_start_xmit(channel, ring); + + /* yzhang for reduce debug output */ + if (netif_msg_tx_done(pdata)) { + DPRINTK("dev_xmit callout %s: descriptors %u to %u written\n", + channel->name, start_index & (ring->dma_desc_count - 1), + (ring->cur - 1) & (ring->dma_desc_count - 1)); + } +} + +extern void fxgmac_diag_get_rx_info(struct fxgmac_channel *channel); + +static void fxgmac_get_rx_tstamp(struct fxgmac_pkt_info *pkt_info, + struct fxgmac_dma_desc *dma_desc) +{ + u64 nsec; + + nsec = le32_to_cpu(dma_desc->desc1); + nsec <<= 32; + nsec |= le32_to_cpu(dma_desc->desc0); + if (nsec != 0xffffffffffffffffULL) { + pkt_info->rx_tstamp = nsec; + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, + RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); + } +} + +static int fxgmac_dev_read(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + struct net_device *netdev = pdata->netdev; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + u32 ipce, iphe, rxparser; + unsigned int err, etlt; + + static unsigned int cnt_incomplete; + + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + dma_desc = desc_data->dma_desc; + pkt_info = &ring->pkt_info; + + /* Check for data availability */ + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN)) + return 1; + + /* Make sure descriptor fields are read after reading the OWN bit */ + dma_rmb(); + + if (netif_msg_rx_status(pdata)) + fxgmac_dump_rx_desc(pdata, ring, ring->cur); + + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, + RX_NORMAL_DESC3_CTXT_LEN)) { + /* Timestamp Context Descriptor */ + fxgmac_get_rx_tstamp(pkt_info, dma_desc); + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); + if (netif_msg_rx_status(pdata)) + DPRINTK("dev_read context desc, ch=%s\n", channel->name); + return 0; + } + + /* Normal Descriptor, be sure Context Descriptor bit is off */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, + 0); + + /* Get the header length */ + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, + RX_NORMAL_DESC3_FD_LEN)) { + desc_data->rx.hdr_len = FXGMAC_GET_REG_BITS_LE( + dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, + RX_NORMAL_DESC2_HL_LEN); + if (desc_data->rx.hdr_len) + pdata->stats.rx_split_header_packets++; + } + + /* Get the pkt_info length */ + desc_data->rx.len = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_PL_POS, + RX_NORMAL_DESC3_PL_LEN); + + if (!FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_LD_POS, + RX_NORMAL_DESC3_LD_LEN)) { + /* Not all the data has been transferred for this pkt_info */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); + cnt_incomplete++; + if ((cnt_incomplete < 2) 
&& netif_msg_rx_status(pdata)) + DPRINTK("dev_read NOT last desc, pkt incomplete yet,%u\n", + cnt_incomplete); + + return 0; + } + if ((cnt_incomplete) && netif_msg_rx_status(pdata)) + DPRINTK("dev_read rx back to normal and incomplete cnt=%u\n", cnt_incomplete); + cnt_incomplete = 0; /* when back to normal, reset cnt */ + + /* This is the last of the data for this pkt_info */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); + + /* Set checksum done indicator as appropriate */ + if (netdev->features & NETIF_F_RXCSUM) { + ipce = FXGMAC_GET_REG_BITS_LE(desc_data->dma_desc->desc1, + RX_NORMAL_DESC1_WB_IPCE_POS, + RX_NORMAL_DESC1_WB_IPCE_LEN); + iphe = FXGMAC_GET_REG_BITS_LE(desc_data->dma_desc->desc1, + RX_NORMAL_DESC1_WB_IPHE_POS, + RX_NORMAL_DESC1_WB_IPHE_LEN); + if (!ipce && !iphe) + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, + 1); + else + return 0; + } + + /* Check for errors (only valid in last descriptor) */ + err = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, + RX_NORMAL_DESC3_ES_LEN); + /* b111: Incomplete parsing due to ECC error */ + rxparser = FXGMAC_GET_REG_BITS_LE(desc_data->dma_desc->desc2, + RX_NORMAL_DESC2_WB_RAPARSER_POS, + RX_NORMAL_DESC2_WB_RAPARSER_LEN); + if (err || rxparser == 0x7) { + pkt_info->errors = FXGMAC_SET_REG_BITS(pkt_info->errors, + RX_PACKET_ERRORS_FRAME_POS, + RX_PACKET_ERRORS_FRAME_LEN, 1); + return 0; + } + + etlt = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, + RX_NORMAL_DESC3_ETLT_LEN); + + /* No error if err is 0 or etlt is 0 */ + if ((etlt == 0x4 /*yzhang changed to 0x4, 0x09*/) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); + pkt_info->vlan_ctag = FXGMAC_GET_REG_BITS_LE( + dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, + RX_NORMAL_DESC0_OVT_LEN); + netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", + pkt_info->vlan_ctag); + } + + return 0; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c index f6f8f4f6a5e9b..d1d008396a71f 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c @@ -1,13 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#include -#include -#include - -/* for file operation */ -#include - #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" @@ -56,14 +49,15 @@ static int fxgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id) static void fxgmac_remove(struct pci_dev *pcidev) { - struct net_device *netdev = dev_get_drvdata(&pcidev->dev); - struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct net_device *netdev; + struct fxgmac_pdata *pdata; + u32 msix; -#ifdef CONFIG_PCI_MSI - u32 msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + netdev = dev_get_drvdata(&pcidev->dev); + pdata = netdev_priv(netdev); + msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); -#endif fxgmac_drv_remove(&pcidev->dev); #ifdef CONFIG_PCI_MSI @@ -74,9 +68,7 @@ static void fxgmac_remove(struct pci_dev *pcidev) } #endif -#ifdef HAVE_FXGMAC_DEBUG_FS - fxgmac_dbg_exit(pdata); -#endif /* HAVE_FXGMAC_DEBUG_FS */ + DPRINTK("%s has been removed\n", netdev->name); } /* for Power management, 20210628 */ @@ -123,14 +115,16 @@ static int __fxgmac_shutdown(struct pci_dev *pdev, bool *enable_wake) static void fxgmac_shutdown(struct pci_dev *pdev) { - bool wake; struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + bool wake; DPRINTK("fxpm, fxgmac_shutdown callin\n"); - pdata->expansion.current_state = CURRENT_STATE_SHUTDOWN; + fxgmac_lock(pdata); __fxgmac_shutdown(pdev, &wake); + hw_ops->led_under_shutdown(pdata); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); @@ -138,6 +132,7 @@ static void fxgmac_shutdown(struct pci_dev *pdev) } DPRINTK("fxpm, fxgmac_shutdown callout, system power off=%d\n", (system_state == SYSTEM_POWER_OFF) ? 1 : 0); + fxgmac_unlock(pdata); } #ifdef CONFIG_PM @@ -145,22 +140,26 @@ static void fxgmac_shutdown(struct pci_dev *pdev) static int fxgmac_suspend(struct pci_dev *pdev, pm_message_t __always_unused state) { - int retval; - bool wake; struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int retval = 0; + bool wake; DPRINTK("fxpm, fxgmac_suspend callin\n"); - pdata->expansion.current_state = CURRENT_STATE_SUSPEND; + fxgmac_lock(pdata); + if (pdata->expansion.dev_state != FXGMAC_DEV_START) + goto unlock; if (netif_running(netdev)) { retval = __fxgmac_shutdown(pdev, &wake); if (retval) - return retval; + goto unlock; } else { wake = !!(pdata->expansion.wol); } + hw_ops->led_under_sleep(pdata); if (wake) { pci_prepare_to_sleep(pdev); @@ -169,24 +168,28 @@ static int fxgmac_suspend(struct pci_dev *pdev, pci_set_power_state(pdev, PCI_D3hot); } + pdata->expansion.dev_state = FXGMAC_DEV_SUSPEND; DPRINTK("fxpm, fxgmac_suspend callout to %s\n", wake ? 
"sleep" : "D3hot"); - return 0; +unlock: + fxgmac_unlock(pdata); + return retval; } static int fxgmac_resume(struct pci_dev *pdev) { - struct fxgmac_pdata *pdata; - struct net_device *netdev; - u32 err; + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct fxgmac_pdata *pdata = netdev_priv(netdev); + u32 err = 0; DPRINTK("fxpm, fxgmac_resume callin\n"); - netdev = dev_get_drvdata(&pdev->dev); - pdata = netdev_priv(netdev); + fxgmac_lock(pdata); + if (pdata->expansion.dev_state != FXGMAC_DEV_SUSPEND) + goto unlock; - pdata->expansion.current_state = CURRENT_STATE_RESUME; + pdata->expansion.dev_state = FXGMAC_DEV_RESUME; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -198,9 +201,8 @@ static int fxgmac_resume(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - dev_err(pdata->dev, - "fxgmac_resume, failed to enable PCI device from suspend\n"); - return err; + dev_err(pdata->dev, "fxgmac_resume, failed to enable PCI device from suspend\n"); + goto unlock; } smp_mb__before_atomic(); __clear_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate); @@ -219,7 +221,8 @@ static int fxgmac_resume(struct pci_dev *pdev) rtnl_unlock(); DPRINTK("fxpm, fxgmac_resume callout\n"); - +unlock: + fxgmac_unlock(pdata); return err; } #endif @@ -246,5 +249,5 @@ module_pci_driver(fxgmac_pci_driver); MODULE_DESCRIPTION(FXGMAC_DRV_DESC); MODULE_VERSION(FXGMAC_DRV_VERSION); -MODULE_AUTHOR("Frank "); -MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Motorcomm Electronic Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c index 88066a110f410..7419f6207c8ec 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c @@ -1,17 +1,50 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#include -#include - #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" -void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed) +/* + * When in forced mode, set the speed, duplex, and auto-negotiation of the PHY + * all at once to avoid the problems caused by individual settings + * on some machines + */ +int fxgmac_phy_force_mode(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval = 0; + unsigned int high_bit = 0, low_bit = 0; + int ret = 0; + + switch (pdata->phy_speed) { + case SPEED_1000: + high_bit = 1, low_bit = 0; + break; + case SPEED_100: + high_bit = 0, low_bit = 1; + break; + case SPEED_10: + high_bit = 0, low_bit = 0; + break; + default: + break; + } + + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, pdata->phy_autoeng); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_H_POS, PHY_CR_SPEED_SEL_H_LEN, high_bit); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_L_POS, PHY_CR_SPEED_SEL_L_LEN, low_bit); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS, PHY_CR_DUPLEX_LEN, pdata->phy_duplex); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + return ret; +} + +int fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval = 0; unsigned int high_bit = 0, low_bit = 0; + int ret = 0; switch (speed) { case SPEED_1000: @@ -27,35 +60,55 @@ void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed) break; } - /* disable autoneg */ hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); - regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, - PHY_CR_AUTOENG_LEN, 0); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_H_POS, PHY_CR_SPEED_SEL_H_LEN, high_bit); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_L_POS, PHY_CR_SPEED_SEL_L_LEN, low_bit); - hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + return ret; } -void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex) +int fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval = 0; + int ret = 0; + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS, PHY_CR_DUPLEX_LEN, (duplex ? 1 : 0)); - hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + + return ret; } -void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg) +int fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval = 0; + int ret = 0; + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, (autoneg ? 
1 : 0)); - hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + + return ret; +} + +void fxgmac_set_phy_link_ksettings(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + pdata->phy_speed = pdata->expansion.pre_phy_speed; + pdata->phy_duplex = pdata->expansion.pre_phy_duplex; + pdata->phy_autoeng = pdata->expansion.pre_phy_autoneg; + + if (pdata->phy_autoeng) + hw_ops->phy_config(pdata); + else + fxgmac_phy_force_mode(pdata); } /* @@ -158,7 +211,7 @@ int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata) ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, (unsigned int *)&val); busy--; - } while ((ret >= 0) && (0 != (val & 0x8000)) && (busy)); + } while ((ret == 0) && (0 != (val & 0x8000)) && (busy)); if (0 == (val & 0x8000)) return 0; @@ -179,8 +232,8 @@ static int fxgmac_ephy_adjust_status(u32 lport, int val, int is_utp, int *speed, int speed_mode; *speed = -1; - *duplex = (val & BIT(FUXI_EPHY_DUPLEX_BIT)) >> FUXI_EPHY_DUPLEX_BIT; - speed_mode = (val & FUXI_EPHY_SPEED_MODE) >> FUXI_EPHY_SPEED_MODE_BIT; + *duplex = (val & BIT(FXGMAC_EPHY_DUPLEX_BIT)) >> FXGMAC_EPHY_DUPLEX_BIT; + speed_mode = (val & FXGMAC_EPHY_SPEED_MODE) >> FXGMAC_EPHY_SPEED_MODE_BIT; switch (speed_mode) { case 0: if (is_utp) @@ -224,7 +277,7 @@ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, if (0 > ret) goto busy_exit; - link = val & (BIT(FUXI_EPHY_LINK_STATUS_BIT)); + link = val & (BIT(FXGMAC_EPHY_LINK_STATUS_BIT)); if (link) { link_utp = 1; fxgmac_ephy_adjust_status(0, val, 1, speed, duplex); @@ -235,9 +288,9 @@ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, if (link_utp || link_fiber) { /* case of fiber of priority */ if (link_utp) - *media = (FUXI_EPHY_SMI_SEL_PHY + 1); + *media = (FXGMAC_EPHY_SMI_SEL_PHY + 1); if (link_fiber) - *media = (FUXI_EPHY_SMI_SEL_SDS_SGMII + 1); + *media = (FXGMAC_EPHY_SMI_SEL_SDS_SGMII + 1); *ret_link = 1; } else { @@ -254,3 +307,99 @@ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, return ret; } + +/* + * fxgmac_phy_update_link - update the phy link status + * @adapter: pointer to the device adapter structure + */ +static void fxgmac_phy_update_link(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval, cur_link, cur_speed; + + regval = hw_ops->get_ephy_state(pdata); + // We should make sure that PHY is done with the reset + if (regval & MGMT_EPHY_CTRL_STA_EPHY_RESET) { + pdata->expansion.phy_link = false; + return; + } + + cur_link = FXGMAC_GET_REG_BITS(regval, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); + if (pdata->expansion.phy_link != cur_link) { + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + + pdata->expansion.phy_link = cur_link; + if (pdata->expansion.phy_link) { + cur_speed = FXGMAC_GET_REG_BITS(regval, + MGMT_EPHY_CTRL_STA_SPEED_POS, + MGMT_EPHY_CTRL_STA_SPEED_LEN); + pdata->phy_speed = (cur_speed == 2) ? SPEED_1000 : + (cur_speed == 1) ? 
SPEED_100 : SPEED_10; + pdata->phy_duplex = FXGMAC_GET_REG_BITS(regval, + MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS, + MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN); + hw_ops->config_mac_speed(pdata); + + hw_ops->enable_rx(pdata); + hw_ops->enable_tx(pdata); + netif_carrier_on(pdata->netdev); + if (netif_running(pdata->netdev)) { + netif_tx_wake_all_queues(pdata->netdev); + dev_info(pdata->dev, "%s now is link up, mac_speed=%d.\n", + netdev_name(pdata->netdev), + pdata->phy_speed); + } + } else { + netif_carrier_off(pdata->netdev); + netif_tx_stop_all_queues(pdata->netdev); + pdata->phy_speed = SPEED_UNKNOWN; + pdata->phy_duplex = DUPLEX_UNKNOWN; + hw_ops->disable_rx(pdata); + hw_ops->disable_tx(pdata); + dev_info(pdata->dev, "%s now is link down\n", netdev_name(pdata->netdev)); + } + } +} + +static void fxgmac_phy_link_poll(struct timer_list *t) +{ + struct fxgmac_pdata *pdata = from_timer(pdata, t, expansion.phy_poll_tm); + + if (NULL == pdata->netdev) { + DPRINTK("fxgmac_phy_timer polling with NULL netdev %lx\n", (unsigned long)(pdata->netdev)); + return; + } + + pdata->stats.ephy_poll_timer_cnt++; + +#if FXGMAC_PM_FEATURE_ENABLED + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) +#endif + { + mod_timer(&pdata->expansion.phy_poll_tm, jiffies + HZ / 2); + fxgmac_phy_update_link(pdata->netdev); + } else { + DPRINTK("fxgmac_phy_timer polling, powerstate changed, %ld, netdev=%lx, tm=%lx\n", pdata->expansion.powerstate, (unsigned long)(pdata->netdev), (unsigned long)&pdata->expansion.phy_poll_tm); + } +} + +int fxgmac_phy_timer_init(struct fxgmac_pdata *pdata) +{ + init_timer_key(&pdata->expansion.phy_poll_tm, NULL, 0, "fuxi_phy_link_update_timer", NULL); + pdata->expansion.phy_poll_tm.expires = jiffies + HZ / 2; + pdata->expansion.phy_poll_tm.function = (void *)(fxgmac_phy_link_poll); + add_timer(&pdata->expansion.phy_poll_tm); + + DPRINTK("fxgmac_phy_timer started, %lx\n", jiffies); + return 0; +} + +void fxgmac_phy_timer_destroy(struct fxgmac_pdata *pdata) +{ + del_timer_sync(&pdata->expansion.phy_poll_tm); + DPRINTK("fxgmac_phy_timer removed\n"); +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h index 65d6288e6869a..26dbe9b2e2352 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#ifndef __FUXI_GMAC_REG_H__ -#define __FUXI_GMAC_REG_H__ +#ifndef __FXGMAC_GMAC_REG_H__ +#define __FXGMAC_GMAC_REG_H__ #define AISC_MODE @@ -14,10 +14,12 @@ #define MAC_CR 0x0000 /* The MAC Configuration Register */ #define MAC_ECR 0x0004 #define MAC_PFR 0x0008 +#define MAC_WTR 0x000c #define MAC_HTR0 0x0010 #define MAC_VLANTR 0x0050 #define MAC_VLANHTR 0x0058 #define MAC_VLANIR 0x0060 +#define MAC_IVLANIR 0x0064 #define MAC_Q0TFCR 0x0070 #define MAC_RFCR 0x0090 #define MAC_RQC0R 0x00a0 @@ -36,8 +38,12 @@ #define MAC_LPI_CONTROL 0x00d4 #define MAC_LPI_TIMER 0x00d8 #define MAC_MS_TIC_COUNTER 0x00dc -#define MAC_AN_SR 0x00E4 -#define MAC_PHYIF_STA 0x00F8 +#define MAC_AN_CR 0x00e0 +#define MAC_AN_SR 0x00e4 +#define MAC_AN_ADV 0x00e8 +#define MAC_AN_LPA 0x00ec +#define MAC_AN_EXP 0x00f0 +#define MAC_PHYIF_STA 0x00f8 #define MAC_VR 0x0110 #define MAC_DBG_STA 0x0114 #define MAC_HWF0R 0x011c @@ -46,6 +52,7 @@ #define MAC_HWF3R 0x0128 #define MAC_MDIO_ADDRESS 0x0200 #define MAC_MDIO_DATA 0x0204 +#define MAC_GPIOCR 0x0208 #define MAC_GPIO_SR 0x020c #define MAC_ARP_PROTO_ADDR 0x0210 #define MAC_CSR_SW_CTRL 0x0230 @@ -406,7 +413,7 @@ #define MMC_CR_MCF_POS 3 #define MMC_CR_MCF_LEN 1 #define MMC_RIER_ALL_INTERRUPTS_POS 0 -#define MMC_RIER_ALL_INTERRUPTS_LEN 26 +#define MMC_RIER_ALL_INTERRUPTS_LEN 28 #define MMC_RISR_RXFRAMECOUNT_GB_POS 0 #define MMC_RISR_RXFRAMECOUNT_GB_LEN 1 #define MMC_RISR_RXOCTETCOUNT_GB_POS 1 @@ -465,7 +472,7 @@ #define MMC_RISR_RXLPITRANSITION_LEN 1 #define MMC_TIER_ALL_INTERRUPTS_POS 0 -#define MMC_TIER_ALL_INTERRUPTS_LEN 26 +#define MMC_TIER_ALL_INTERRUPTS_LEN 28 #define MMC_TISR_TXOCTETCOUNT_GB_POS 0 #define MMC_TISR_TXOCTETCOUNT_GB_LEN 1 #define MMC_TISR_TXFRAMECOUNT_GB_POS 1 @@ -525,6 +532,8 @@ /* MTL register offsets */ #define MTL_OMR 0x0c00 +#define MTL_FDCR 0x0c08 +#define MTL_FDSR 0x0c0c #define MTL_FDDR 0x0c10 #define MTL_INT_SR 0x0c20 #define MTL_RQDCM0R 0x0c30 @@ -549,8 +558,11 @@ #define MTL_Q_INT_CTL_SR 0x0d2c #define MTL_Q_TQOMR 0x00 +#define MTL_Q_TQUR 0x04 #define MTL_Q_RQOMR 0x30 +#define MTL_Q_RQMPOCR 0x34 #define MTL_Q_RQDR 0x38 +#define MTL_Q_RQCR 0x3c #define MTL_Q_IER 0x2c #define MTL_Q_ISR 0x2c /* no isr register */ #define MTL_TXQ_DEG 0x08 /* transmit debug */ @@ -652,10 +664,19 @@ #define MTL_TC_BASE MTL_Q_BASE #define MTL_TC_INC MTL_Q_INC +#define MTL_TC_TQDR 0x08 #define MTL_TC_ETSCR 0x10 #define MTL_TC_ETSSR 0x14 #define MTL_TC_QWR 0x18 +/* The Queue 0 Transmit Debug register gives the debug status of various blocks + * related to the Transmit queue + */ +#define MTL_TC_TQDR_TRCSTS_POS 1 +#define MTL_TC_TQDR_TRCSTS_LEN 2 +#define MTL_TC_TQDR_TXQSTS_POS 4 +#define MTL_TC_TQDR_TXQSTS_LEN 1 + /* MTL traffic class register entry bit positions and sizes */ #define MTL_TC_ETSCR_TSA_POS 0 #define MTL_TC_ETSCR_TSA_LEN 2 @@ -673,6 +694,11 @@ #define DMA_DSR0 0x100c #define DMA_DSR1 0x1010 #define DMA_DSR2 0x1014 +#define DMA_AXIARCR 0x1020 +#define DMA_AXIAWCR 0x1024 +#define DMA_AXIAWRCR 0x1028 +#define DMA_SAFE_ISR 0x1080 +#define DMA_ECC_IE 0x1084 #define DMA_ECC_INT_SR 0x1088 /* DMA register entry bit positions and sizes */ @@ -682,10 +708,16 @@ #define DMA_ISR_MTLIS_LEN 1 #define DMA_MR_SWR_POS 0 #define DMA_MR_SWR_LEN 1 +#define DMA_MR_TXPR_POS 11 +#define DMA_MR_TXPR_LEN 1 #define DMA_MR_INTM_POS 16 #define DMA_MR_INTM_LEN 2 -#define DMA_MR_QUREAD_POS 19 -#define DMA_MR_QUREAD_LEN 1 +#define DMA_MR_QUREAD_POS 19 +#define DMA_MR_QUREAD_LEN 1 +#define DMA_MR_TNDF_POS 20 +#define DMA_MR_TNDF_LEN 2 +#define DMA_MR_RNDF_POS 22 +#define DMA_MR_RNDF_LEN 
2 #define DMA_SBMR_EN_LPI_POS 31 #define DMA_SBMR_EN_LPI_LEN 1 @@ -695,6 +727,8 @@ #define DMA_SBMR_WR_OSR_LMT_LEN 6 #define DMA_SBMR_RD_OSR_LMT_POS 16 #define DMA_SBMR_RD_OSR_LMT_LEN 8 +#define DMA_SBMR_AAL_POS 12 +#define DMA_SBMR_AAL_LEN 1 #define DMA_SBMR_EAME_POS 11 #define DMA_SBMR_EAME_LEN 1 #define DMA_SBMR_AALE_POS 10 @@ -749,6 +783,12 @@ #define DMA_CH_RDRLR 0x30 #define DMA_CH_IER 0x34 #define DMA_CH_RIWT 0x38 +#define DMA_CH_CATDR_LO 0x44 +#define DMA_CH_CARDR_LO 0x4c +#define DMA_CH_CATBR_HI 0x50 +#define DMA_CH_CATBR_LO 0x54 +#define DMA_CH_CARBR_HI 0x58 +#define DMA_CH_CARBR_LO 0x5c #define DMA_CH_SR 0x60 /* DMA channel register entry bit positions and sizes */ @@ -861,6 +901,8 @@ #define RX_NORMAL_DESC3_INTE_POS 30 #define RX_NORMAL_DESC3_INTE_LEN 1 #define RX_NORMAL_DESC3_L34T_LEN 4 +#define RX_NORMAL_DESC3_RSV_POS 26 +#define RX_NORMAL_DESC3_RSV_LEN 1 #define RX_NORMAL_DESC3_LD_POS 28 #define RX_NORMAL_DESC3_LD_LEN 1 #define RX_NORMAL_DESC3_OWN_POS 31 @@ -871,7 +913,6 @@ #define RX_NORMAL_DESC3_BUF1V_LEN 1 #define RX_NORMAL_DESC3_PL_POS 0 #define RX_NORMAL_DESC3_PL_LEN 15 -#define RX_NORMAL_DESC3_RSV_LEN 1 /* Inner VLAN Tag. Valid only when Double VLAN tag processing * and VLAN tag stripping are enabled. @@ -909,6 +950,8 @@ */ #define RX_NORMAL_DESC2_WB_DAF_POS 17 #define RX_NORMAL_DESC2_WB_DAF_LEN 1 +#define RX_NORMAL_DESC2_WB_RAPARSER_POS 11 +#define RX_NORMAL_DESC2_WB_RAPARSER_LEN 3 #define RX_NORMAL_DESC3_WB_LD_POS 28 #define RX_NORMAL_DESC3_WB_LD_LEN 1 @@ -1037,6 +1080,8 @@ #define PHY_CR_SPEED_SEL_L_LEN 1 #define PHY_CR_AUTOENG_POS 12 #define PHY_CR_AUTOENG_LEN 1 +#define PHY_CR_POWER_POS 11 +#define PHY_CR_POWER_LEN 1 #define PHY_CR_RE_AUTOENG_POS 9 #define PHY_CR_RE_AUTOENG_LEN 1 #define PHY_CR_DUPLEX_POS 8 @@ -1060,6 +1105,10 @@ #define PHY_MII_ADVERTISE_10HALF_POS 5 #define PHY_MII_ADVERTISE_10HALF_LEN 1 #define REG_MII_LPA 0x05 /* Link partner ability reg */ +#define PHY_MII_LINK_PARNTNER_10FULL_POS 6 +#define PHY_MII_LINK_PARNTNER_10FULL_LEN 1 +#define PHY_MII_LINK_PARNTNER_10HALF_POS 5 +#define PHY_MII_LINK_PARNTNER_10HALF_LEN 1 #define REG_MII_EXPANSION 0x06 /* Expansion register */ #define REG_MII_NEXT_PAGE 0x07 /* Next page register */ #define REG_MII_LPR_NEXT_PAGE 0x08 /* LPR next page register */ @@ -1068,14 +1117,14 @@ #define PHY_MII_CTRL1000_1000FULL_LEN 1 #define PHY_MII_CTRL1000_1000HALF_POS 8 #define PHY_MII_CTRL1000_1000HALF_LEN 1 -#define REG_MII_STAT1000 0x0A /* 1000BASE-T status */ +#define REG_MII_STAT1000 0x0a /* 1000BASE-T status */ #define PHY_MII_STAT1000_CFG_ERROR_POS 15 #define PHY_MII_STAT1000_CFG_ERROR_LEN 1 -#define REG_MII_MMD_CTRL 0x0D /* MMD access control register */ -#define REG_MII_MMD_DATA 0x0E /* MMD access data register */ +#define REG_MII_MMD_CTRL 0x0d /* MMD access control register */ +#define REG_MII_MMD_DATA 0x0e /* MMD access data register */ -#define REG_MII_ESTATUS 0x0F /* Extended Status */ +#define REG_MII_ESTATUS 0x0f /* Extended Status */ #define REG_MII_SPEC_CTRL 0x10 /* PHY specific func control */ #define PHY_MII_SPEC_CTRL_CRS_ON_POS 3 @@ -1104,8 +1153,8 @@ #define REG_MII_DOWNG_CTRL 0x14 /* Speed auto downgrade control*/ #define REG_MII_RERRCOUNTER 0x15 /* Receive error counter */ -#define REG_MII_EXT_ADDR 0x1E /* Extended reg's address */ -#define REG_MII_EXT_DATA 0x1F /* Extended reg's date */ +#define REG_MII_EXT_ADDR 0x1e /* Extended reg's address */ +#define REG_MII_EXT_DATA 0x1f /* Extended reg's date */ #define FXGMAC_EPHY_ID_MASK 0x0000ffff @@ -1161,18 +1210,18 @@ #define BIT(n) (0x1<<(n)) #endif 
-#ifndef FUXI_EPHY_SPEED_MODE_BIT -#define FUXI_EPHY_SPEED_MODE 0xc000 -#define FUXI_EPHY_DUPLEX 0x2000 -#define FUXI_EPHY_SPEED_MODE_BIT 14 -#define FUXI_EPHY_DUPLEX_BIT 13 -#define FUXI_EPHY_LINK_STATUS_BIT 10 +#ifndef FXGMAC_EPHY_SPEED_MODE_BIT +#define FXGMAC_EPHY_SPEED_MODE 0xc000 +#define FXGMAC_EPHY_DUPLEX 0x2000 +#define FXGMAC_EPHY_SPEED_MODE_BIT 14 +#define FXGMAC_EPHY_DUPLEX_BIT 13 +#define FXGMAC_EPHY_LINK_STATUS_BIT 10 #endif -#define FUXI_EPHY_SMI_SEL_PHY 0x0 -#define FUXI_EPHY_SMI_SEL_SDS_QSGMII 0x02 -#define FUXI_EPHY_SMI_SEL_SDS_SGMII 0x03 +#define FXGMAC_EPHY_SMI_SEL_PHY 0x0 +#define FXGMAC_EPHY_SMI_SEL_SDS_QSGMII 0x02 +#define FXGMAC_EPHY_SMI_SEL_SDS_SGMII 0x03 #define REG_MII_EXT_ANALOG_CFG3 0x52 #define MII_EXT_ANALOG_CFG3_ADC_START_CFG_POS 14 @@ -1210,31 +1259,32 @@ #define REG_MII_EXT_SLEEP_REG_CLEAN_LOOPBACK 0xe812 #define REG_MII_EXT_ANALOG_CFG2 0x51 -#define REG_MII_EXT_ANALOG_CFG2_LED_VALUE 0x4a9 +#define REG_MII_EXT_ANALOG_CFG2_VALUE 0x4a9 #define REG_MII_EXT_ANALOG_CFG8 0x57 -#define REG_MII_EXT_ANALOG_CFG8_LED_VALUE 0x274c +#define REG_MII_EXT_ANALOG_CFG8_VALUE 0x274c +#define REG_MII_EXT_ANALOG_CFG8_137D1D05_VALUE 0x264c -#define REG_MII_EXT_COMMON_LED_CFG 0xA00B -#define REG_MII_EXT_COMMON_LED0_CFG 0xA00C +#define REG_MII_EXT_COMMON_LED_CFG 0xa00b +#define REG_MII_EXT_COMMON_LED0_CFG 0xa00c #define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0 0x2600 #define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1 0x00 #define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2 0x20 #define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3 0x2600 -#define REG_MII_EXT_COMMON_LED1_CFG 0xA00D +#define REG_MII_EXT_COMMON_LED1_CFG 0xa00d #define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0 0x1800 #define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1 0x00 #define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2 0x40 -#define REG_MII_EXT_COMMON_LED2_CFG 0xA00E +#define REG_MII_EXT_COMMON_LED2_CFG 0xa00e #define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0 0x00 #define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2 0x07 #define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3 0x20 #define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4 0x1800 -#define REG_MII_EXT_COMMON_LED_BLINK_CFG 0xA00F -#define REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2 0x0F +#define REG_MII_EXT_COMMON_LED_BLINK_CFG 0xa00f +#define REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2 0x0f #define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3 0x2600 -#define REG_MII_EXT_PKG_CFG0 0xA0 +#define REG_MII_EXT_PKG_CFG0 0xa0 #define REG_MII_EXT_PKG_CHECK_POS 14 #define REG_MII_EXT_PKG_CHECK_LEN 2 #define REG_MII_EXT_PKG_ENABLE_CHECK 0x2 @@ -1244,33 +1294,33 @@ #define MII_EXT_SLEEP_CONTROL1_EN_LEN 1 #define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS 14 #define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN 1 -#define REG_MII_EXT_PKG_RX_VALID0 0xA3 -#define REG_MII_EXT_REG_RX_VALID1 0xA4 -#define REG_MII_EXT_REG_RX_OS0 0xA5 -#define REG_MII_EXT_REG_RX_OS1 0xA6 -#define REG_MII_EXT_REG_RX_US0 0xA7 -#define REG_MII_EXT_REG_RX_US1 0xA8 -#define REG_MII_EXT_REG_RX_ERR 0xA9 -#define REG_MII_EXT_REG_RX_0S_BAD 0xAA -#define REG_MII_EXT_REG_RX_FRAGMENT 0xAB -#define REG_MII_EXT_REG_RX_NOSFD 0xAC -#define REG_MII_EXT_REG_TX_VALID0 0xAD -#define REG_MII_EXT_REG_TX_VALID1 0xAE -#define REG_MII_EXT_REG_TX_OS0 0xAF -#define REG_MII_EXT_REG_TX_OS1 0xB0 -#define REG_MII_EXT_REG_TX_US0 0xB1 -#define REG_MII_EXT_REG_TX_US1 0xB2 -#define REG_MII_EXT_REG_TX_ERR 0xB3 -#define REG_MII_EXT_REG_TX_OS_BAD 0xB4 -#define REG_MII_EXT_REG_TX_FRAGMENT 0xB5 -#define REG_MII_EXT_REG_TX_NOSFD 0xB6 
+#define REG_MII_EXT_PKG_RX_VALID0 0xa3 +#define REG_MII_EXT_REG_RX_VALID1 0xa4 +#define REG_MII_EXT_REG_RX_OS0 0xa5 +#define REG_MII_EXT_REG_RX_OS1 0xa6 +#define REG_MII_EXT_REG_RX_US0 0xa7 +#define REG_MII_EXT_REG_RX_US1 0xa8 +#define REG_MII_EXT_REG_RX_ERR 0xa9 +#define REG_MII_EXT_REG_RX_0S_BAD 0xaa +#define REG_MII_EXT_REG_RX_FRAGMENT 0xab +#define REG_MII_EXT_REG_RX_NOSFD 0xac +#define REG_MII_EXT_REG_TX_VALID0 0xad +#define REG_MII_EXT_REG_TX_VALID1 0xae +#define REG_MII_EXT_REG_TX_OS0 0xaf +#define REG_MII_EXT_REG_TX_OS1 0xb0 +#define REG_MII_EXT_REG_TX_US0 0xb1 +#define REG_MII_EXT_REG_TX_US1 0xb2 +#define REG_MII_EXT_REG_TX_ERR 0xb3 +#define REG_MII_EXT_REG_TX_OS_BAD 0xb4 +#define REG_MII_EXT_REG_TX_FRAGMENT 0xb5 +#define REG_MII_EXT_REG_TX_NOSFD 0xb6 #define REG_MII_EXT_REG_PMA_DBG0_ADC 0x13 #define REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE 0x3538 -#define REG_MII_EXT_REG_CLD_REG0 0x3A0 -#define REG_MII_EXT_ENABLE_CLD_NP_WP 0xEB24 -#define REG_MII_EXT_REG_CLD_REG1 0x3CC +#define REG_MII_EXT_REG_CLD_REG0 0x3a0 +#define REG_MII_EXT_ENABLE_CLD_NP_WP 0xeb24 +#define REG_MII_EXT_REG_CLD_REG1 0x3cc #define REG_MII_EXT_ENABLE_CLD_GT_HT_BT 0x7001 -#define REG_MMD_EEE_ABILITY_REG 0x3C +#define REG_MMD_EEE_ABILITY_REG 0x3c #define REG_MMD_EEE_ABILITY_VALUE 0x06 /* Below registers don't belong to GMAC, it has zero offset, not 0x2000 offset. mem_base + REG_XXX. */ @@ -1302,27 +1352,29 @@ #define MGMT_EPHY_CTRL_STA_SPEED_LEN 2 #define MGMT_EPHY_CTRL_STA_SPEED_MASK 0x18 -#define MGMT_EPHY_CTRL_ERROR_VAULE 0xFFFFFFFF +#define MGMT_EPHY_CTRL_ERROR_VALUE 0xffffffff #define MGMT_PCIE_EP_CTRL 0x1008 #define MGMT_PCIE_EP_CTRL_DBI_CS_EN_POS 0 #define MGMT_PCIE_EP_CTRL_DBI_CS_EN_LEN 1 -#define MGMT_PCIE_CFG_CTRL 0x8BC +#define MGMT_PCIE_CFG_CTRL 0x8bc #define PCIE_CFG_CTRL_DEFAULT_VAL 0x7ff40 #define MGMT_PCIE_CFG_CTRL_CS_EN_POS 0 #define MGMT_PCIE_CFG_CTRL_CS_EN_LEN 1 /***power management ***/ -#define WOL_CTL 0x100C +#define WOL_CTL 0x100c /* set means magic and remote packet wakeup enable */ #define WOL_PKT_EN_POS 1 #define WOL_PKT_EN_LEN 1 /* set means link change wakeup enable */ #define WOL_LINKCHG_EN_POS 0 #define WOL_LINKCHG_EN_LEN 1 +#define WOL_WAIT_TIME_POS 2 +#define WOL_WAIT_TIME_LEN 13 #define OOB_WOL_CTRL 0x1010 #define OOB_WOL_CTRL_DIS_POS 0 @@ -1341,34 +1393,47 @@ /* MAC management registers bit positions and sizes */ #define MGMT_INT_CTRL0_INT_MASK_POS 16 #define MGMT_INT_CTRL0_INT_MASK_LEN 16 -#define MGMT_INT_CTRL0_INT_MASK_MASK 0xFFFF -#define MGMT_INT_CTRL0_INT_MASK_RXCH 0xF +#define MGMT_INT_CTRL0_INT_MASK_MASK 0xffff +#define MGMT_INT_CTRL0_INT_MASK_RXCH 0xf #define MGMT_INT_CTRL0_INT_MASK_TXCH 0x10 -#define MGMT_INT_CTRL0_INT_MASK_EX_PMT 0xF7FF -#define MGMT_INT_CTRL0_INT_MASK_DISABLE 0xF000 +#define MGMT_INT_CTRL0_INT_MASK_EX_PMT 0xf7ff +#define MGMT_INT_CTRL0_INT_MASK_DISABLE 0xf000 #define MGMT_INT_CTRL0_INT_STATUS_POS 0 #define MGMT_INT_CTRL0_INT_STATUS_LEN 16 -#define MGMT_INT_CTRL0_INT_STATUS_MASK 0xFFFF +#define MGMT_INT_CTRL0_INT_STATUS_MASK 0xffff #define MGMT_INT_CTRL0_INT_STATUS_RX 0x0001 #define MGMT_INT_CTRL0_INT_STATUS_TX 0x0010 -#define MGMT_INI_CTRL0_INT_STATUS_TX_INVERSE 0xFFEF -#define MGMG_INT_CTRL0_INT_STATUS_PHY_INVERSE 0xFFDF -#define MGMT_INT_CTRL0_INT_STATUS_PHY 0x0020 +#define MGMT_INI_CTRL0_INT_STATUS_TX_INVERSE 0xffff +#define MGMT_INT_CTRL0_INT_STATUS_MISC_INVERSE 0xffdf +#define MGMT_INT_CTRL0_INT_STATUS_MISC 0x0020 #define MGMT_INT_CTRL0_INT_MASK_RXCH_POS 16 #define MGMT_INT_CTRL0_INT_STATUS_RXCH_POS 0 #define 
MGMT_INT_CTRL0_INT_STATUS_RXCH_LEN 4 -#define MGMT_INT_CTRL0_INT_STATUS_RXCH_MASK 0xF +#define MGMT_INT_CTRL0_INT_STATUS_RXCH_MASK 0xf #define MGMT_INT_CTRL0_INT_STATUS_RXTX_LEN 5 -#define MGMT_INT_CTRL0_INT_STATUS_RXTX_MASK 0x1F -#define MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK 0x3F +#define MGMT_INT_CTRL0_INT_STATUS_RXTX_MASK 0x1f +#define MGMT_INT_CTRL0_INT_STATUS_RXTXMISC_MASK 0x3f #define MGMT_INT_CTRL0_INT_MASK_TXCH_POS 20 #define MGMT_INT_CTRL0_INT_STATUS_TXCH_POS 4 #define MGMT_INT_CTRL0_INT_STATUS_TXCH_LEN 1 #define MGMT_INT_CTRL0_INT_STATUS_TXCH_MASK 0x1 +#define MGMT_MAC_PHYIF_STA_POS 0 +#define MGMT_MAC_AN_SR0_POS 1 +#define MGMT_MAC_AN_SR1_POS 2 +#define MGMT_MAC_AN_SR2_POS 3 +#define MGMT_MAC_PMT_STA_POS 4 +#define MGMT_MAC_LPI_STA_POS 5 +#define MGMT_MAC_MMC_STA_POS 8 +#define MGMT_MAC_RX_MMC_STA_POS 9 +#define MGMT_MAC_TX_MMC_STA_POS 10 +#define MGMT_MMC_IPCRXINT_POS 11 +#define MGMT_MAC_TX_RX_STA0_POS 13 +#define MGMT_MAC_TX_RX_STA1_POS 14 +#define MGMT_MAC_GPIO_SR_POS 15 /* Interrupt Ctrl1 */ #define INT_CTRL1 0x1104 @@ -1387,19 +1452,19 @@ #define INT_MOD_IN_US 200 /*in us*/ /* PCIE LTR 2 working modes: -Two working mode: -1. SW trigger -LTR idle threshold timer set as 0, enable LTR enable will trigger one LTR message -Note: PCIe cfg enable should set in initilization before enable LTR. -2. HW auto trigger -LTR idle threshold timer set as one non-zero value, HW monitor system status, -when system idle timer over threshold, HW send out LTR message -system exit idle state, send out one LTR exit message. + * Two working modes: + * 1. SW trigger + * The LTR idle threshold timer is set to 0; setting the LTR enable bit triggers one LTR message. + * Note: the PCIe cfg enable bit must be set during initialization, before enabling LTR. + * 2. HW auto trigger + * The LTR idle threshold timer is set to a non-zero value and HW monitors the system status; + * when the system idle timer exceeds the threshold, HW sends out an LTR message, and when the + * system exits the idle state, it sends out one LTR exit message. */ #define LTR_CTRL 0x1130 #define LTR_CTRL_IDLE_THRE_TIMER_POS 16 #define LTR_CTRL_IDLE_THRE_TIMER_LEN 14 /* in 8ns units*/ -#define LTR_CTRL_IDLE_THRE_TIMER_VAL 0x3FFF +#define LTR_CTRL_IDLE_THRE_TIMER_VAL 0x3fff #define LTR_CTRL_EN_POS 0 #define LTR_CTRL_EN_LEN 1 @@ -1411,7 +1476,7 @@ system exit idle state, send out one LTR exit message. #define LTR_CTRL2_DBG_DATA_POS 0 #define LTR_CTRL2_DBG_DATA_LEN 32 -#define LTR_IDLE_ENTER 0x113C /* LTR_CTRL3, LTR latency message, only for System IDLE Start. */ +#define LTR_IDLE_ENTER 0x113c /* LTR_CTRL3, LTR latency message, only for System IDLE Start. */ #define LTR_IDLE_ENTER_POS 0 #define LTR_IDLE_ENTER_LEN 10 #define LTR_IDLE_ENTER_USVAL 900 @@ -1425,7 +1490,7 @@ system exit idle state, send out one LTR exit message. #define LTR_IDLE_EXIT 0x1140 /* LTR_CTRL4, LTR latency message, only for System IDLE End. */ #define LTR_IDLE_EXIT_POS 0 #define LTR_IDLE_EXIT_LEN 10 -#define LTR_IDLE_EXIT_USVAL 2 +#define LTR_IDLE_EXIT_USVAL 171 #define LTR_IDLE_EXIT_SCALE_POS 10 #define LTR_IDLE_EXIT_SCALE_LEN 5 #define LTR_IDLE_EXIT_SCALE 2 @@ -1463,7 +1528,7 @@ system exit idle state, send out one LTR exit message. #define LPW_CTRL_OTP_CLK_ON_LEN 1 #define MSI_PBA_REG 0x1300 -#define SYS_RESET_REG 0x152C +#define SYS_RESET_REG 0x152c #define SYS_RESET_POS 31 #define SYS_RESET_LEN 1 @@ -1504,7 +1569,7 @@ system exit idle state, send out one LTR exit message.
#define PCIE_SERDES_STATUS_HW_BIAS_ON_POS 0 #define PCIE_SERDES_STATUS_HW_BIAS_ON_LEN 1 -#define REG_PCIE_SERDES_PLL 0x199C +#define REG_PCIE_SERDES_PLL 0x199c #define PCIE_SERDES_PLL_AUTOOFF_POS 0 #define PCIE_SERDES_PLL_AUTOOFF_LEN 1 @@ -1595,6 +1660,8 @@ system exit idle state, send out one LTR exit message. #define MGMT_RSS_CTRL_TBL_SIZE_LEN 3 #define MGMT_RSS_CTRL_TBL_SIZE_MASK 0x7 +#define MAC_RSSCR_IP2TE_POS 1 +#define MAC_RSSCR_IP2TE_LEN 1 #define MAC_RSSCR_RSSE_POS 31 #define MAC_RSSCR_RSSE_LEN 1 @@ -1653,7 +1720,17 @@ system exit idle state, send out one LTR exit message. #define MGMT_RMK_CTRL 0x1400 -#define MGMT_SIGDET 0x17F8 +#define MGMT_SIGDET_DEGLITCH 0x17f0 +#define MGMT_SIGDET_DEGLITCH_DISABLE_POS 2 /* sigdet deglitch disable, active low */ +#define MGMT_SIGDET_DEGLITCH_DISABLE_LEN 1 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_POS 3 /* sigdet deglitch time window filter selection */ +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_LEN 2 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_10ns 0 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_20ns 1 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_30ns 2 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_40ns 3 + +#define MGMT_SIGDET 0x17f8 #define MGMT_SIGDET_POS 13 #define MGMT_SIGDET_LEN 3 #define MGMT_SIGDET_55MV 7 @@ -1696,7 +1773,7 @@ system exit idle state, send out one LTR exit message. #define MSIX_TBL_RXTX_NUM 8 #endif #define MSIX_TBL_BASE_ADDR 0x1200 -#define MSIX_TBL_MASK_OFFSET 0xC +#define MSIX_TBL_MASK_OFFSET 0xc #define MSIX_TBL_DATA_OFFSET 0x8 #define MSIX_TBL_ADDR_OFFSET 0x0 @@ -1738,15 +1815,15 @@ system exit idle state, send out one LTR exit message. /* efuse layout refer to http://redmine.motor-comm.com/issues/3856 */ #define EFUSE_FISRT_UPDATE_ADDR 255 #define EFUSE_SECOND_UPDATE_ADDR 209 -#define FUXI_EFUSE_MAX_ENTRY 39 -#define FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON 24 +#define FXGMAC_EFUSE_MAX_ENTRY 39 +#define FXGMAC_EFUSE_MAX_ENTRY_UNDER_LED_COMMON 24 #define EFUSE_PATCH_ADDR_START_BYTE 0 #define EFUSE_PATCH_DATA_START_BYTE 2 #define EFUSE_REGION_A_B_LENGTH 18 #define EFUSE_EACH_PATH_SIZE 6 #define EFUSE_REVID_REGISTER 0x0008 -#define EFUSE_SUBSYS_REGISTER 0x002C +#define EFUSE_SUBSYS_REGISTER 0x002c /* mac[5]->bit7:0, mac[4]->bit15:8, mac[3]->bit23:16, mac[2]->bit31:24. */ #define MACA0LR_FROM_EFUSE 0x1520 /* mac[1]->bit7:0, mac[0]->bit15:8. mac[6] = @@ -1765,7 +1842,7 @@ system exit idle state, send out one LTR exit message. #define EFUSE_LED_SOLUTION2 2 #define EFUSE_LED_SOLUTION3 3 #define EFUSE_LED_SOLUTION4 4 -#define EFUSE_LED_COMMON_SOLUTION 0x1F +#define EFUSE_LED_COMMON_SOLUTION 0x1f /******************** Below for pcie configuration register. *********************/ #define REG_PCI_VENDOR_ID 0x0 /* WORD reg */ @@ -1786,9 +1863,9 @@ system exit idle state, send out one LTR exit message. #define REG_PCI_REVID 0x8 /* BYTE reg */ #define REG_PCI_PROGRAM_INTF 0x9 /* BYTE reg */ /* PCI Class Program Interface */ -#define REG_PCI_SUB_CLASS 0xA /* BYTE reg */ -#define REG_PCI_BASE_CLASS 0xB /* BYTE reg */ -#define REG_CACHE_LINE_SIZE 0xC +#define REG_PCI_SUB_CLASS 0xa /* BYTE reg */ +#define REG_PCI_BASE_CLASS 0xb /* BYTE reg */ +#define REG_CACHE_LINE_SIZE 0xc #define REG_MEM_BASE 0x10 /* DWORD or QWORD reg */ @@ -1796,10 +1873,10 @@ system exit idle state, send out one LTR exit message.
#define REG_IO_BASE 0x20 /* DWORD reg */ -#define REG_PCI_SUB_VENDOR_ID 0x2C /* WORD reg */ -#define REG_PCI_SUB_DEVICE_ID 0x2E /* WORD reg */ +#define REG_PCI_SUB_VENDOR_ID 0x2c /* WORD reg */ +#define REG_PCI_SUB_DEVICE_ID 0x2e /* WORD reg */ -#define REG_INT_LINE 0x3C /* BYTE reg */ +#define REG_INT_LINE 0x3c /* BYTE reg */ #define REG_PM_STATCTRL 0x44 /* WORD reg */ #define PM_STATCTRL_PWR_STAT_POS 0 @@ -1836,7 +1913,7 @@ system exit idle state, send out one LTR exit message. #define DEVICE_CTRL2_LTR_EN_POS 10 /* Enable from BIOS side. */ #define DEVICE_CTRL2_LTR_EN_LEN 1 -#define REG_MSIX_CAPABILITY 0xB0 +#define REG_MSIX_CAPABILITY 0xb0 /* ASPM L1ss PM Substates */ #define REG_ASPM_L1SS_CAP 0x154 /* Capabilities Register */ @@ -1876,7 +1953,7 @@ system exit idle state, send out one LTR exit message. #define REG_ASPM_L1SS_CTL2 0x15c /* Control 2 Register */ -#define REG_ASPM_CONTROL 0x70C +#define REG_ASPM_CONTROL 0x70c #define ASPM_L1_IDLE_THRESHOLD_POS 27 #define ASPM_L1_IDLE_THRESHOLD_LEN 3 #define ASPM_L1_IDLE_THRESHOLD_1US 0 @@ -1891,4 +1968,6 @@ system exit idle state, send out one LTR exit message. #define POWER_EIOS_POS 7 #define POWER_EIOS_LEN 1 +#define AISTONEID_137D1D05_ADJUST_SI 0x137d1d05 + #endif /* __FUXI_GMAC_REG_H__ */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h index ea01ebdadc4e3..32b4fc0f0c429 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h @@ -1,9 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#ifndef __FUXI_GMAC_H__ -#define __FUXI_GMAC_H__ +#ifndef __FXGMAC_GMAC_H__ +#define __FXGMAC_GMAC_H__ +#include "fuxi-errno.h" #include "fuxi-os.h" /* For fpga before 20210507 */ @@ -12,17 +13,17 @@ #define FXGMAC_DRV_NAME "yt6801" -#define FXGMAC_DRV_DESC "Motorcomm FUXI GMAC Driver" +#define FXGMAC_DRV_DESC "Motorcomm YT6801 Gigabit Ethernet Driver" -#define FUXI_MAC_REGS_OFFSET 0x2000 +#define FXGMAC_MAC_REGS_OFFSET 0x2000 /* 1: in normal D0 state, turn off ephy link change interrupt. 
*/ -#define FUXI_EPHY_INTERRUPT_D0_OFF 0 +#define FXGMAC_EPHY_INTERRUPT_D0_OFF 0 /* 1:when rec buffer is not enough, to create rbd and rec buffer, * but the rdb need to be continus with the intialized rdb, so * close the feature */ -#define FUXI_ALLOC_NEW_RECBUFFER 0 +#define FXGMAC_ALLOC_NEW_RECBUFFER 0 #define RESUME_MAX_TIME 3000000 #define PHY_LINK_TIMEOUT 3000 @@ -41,11 +42,11 @@ #define FXGMAX_ASPM_WAR_EN /* Descriptor related parameters */ -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED #define FXGMAC_TX_DESC_CNT 1024 #else /* 256 to make sure the tx ring is in the 4k range when - * FXGMAC_TX_HANG_TIMER_EN is 0 + * FXGMAC_TX_HANG_TIMER_ENABLED is 0 */ #define FXGMAC_TX_DESC_CNT 256 #endif @@ -73,9 +74,6 @@ #define FXGMAC_SPH_HDSMS_SIZE 3 #define FXGMAC_SKB_ALLOC_SIZE 512 -/* In Linux Driver, it set MAX_FIFO size 131072, here it uses - * the same value as windows driver - */ #define FXGMAC_MAX_FIFO 81920 #define FXGMAC_MAX_DMA_CHANNELS FXGMAC_MSIX_Q_VECTORS @@ -126,6 +124,10 @@ #define FXGMAC_POWER_STATE_DOWN 0 #define FXGMAC_POWER_STATE_UP 1 +#define FXGMAC_DATA_WIDTH 128 + +#define FXGMAC_WOL_WAIT_TIME 2 // unit 1ms + struct wol_bitmap_pattern { u32 flags; u32 pattern_size; @@ -419,7 +421,7 @@ struct fxgmac_channel { u32 dma_irq; FXGMAC_CHANNEL_OF_PLATFORM expansion; - unsigned int saved_ier; + u32 saved_ier; unsigned int tx_timer_active; @@ -438,7 +440,7 @@ struct fxphy_ag_adv { }; struct fxgmac_desc_ops { - int (*alloc_channles_and_rings)(struct fxgmac_pdata *pdata); + int (*alloc_channels_and_rings)(struct fxgmac_pdata *pdata); void (*free_channels_and_rings)(struct fxgmac_pdata *pdata); int (*map_tx_skb)(struct fxgmac_channel *channel, struct sk_buff *skb); int (*map_rx_buffer)(struct fxgmac_pdata *pdata, @@ -448,6 +450,13 @@ struct fxgmac_desc_ops { struct fxgmac_desc_data *desc_data); void (*tx_desc_init)(struct fxgmac_pdata *pdata); void (*rx_desc_init)(struct fxgmac_pdata *pdata); + /* For descriptor related operation */ + void (*tx_desc_init_channel)(struct fxgmac_channel *channel); + void (*rx_desc_init_channel)(struct fxgmac_channel *channel); + void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); + void (*rx_desc_reset)(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index); }; struct fxgmac_hw_ops { @@ -455,9 +464,9 @@ struct fxgmac_hw_ops { int (*exit)(struct fxgmac_pdata *pdata); void (*save_nonstick_reg)(struct fxgmac_pdata *pdata); void (*restore_nonstick_reg)(struct fxgmac_pdata *pdata); - int (*set_gmac_register)(struct fxgmac_pdata *pdata, u8 *address, + int (*set_gmac_register)(struct fxgmac_pdata *pdata, IOMEM address, unsigned int data); - u32 (*get_gmac_register)(struct fxgmac_pdata *pdata, u8 *address); + u32 (*get_gmac_register)(struct fxgmac_pdata *pdata, IOMEM address); void (*esd_restore_pcie_cfg)(struct fxgmac_pdata *pdata); int (*tx_complete)(struct fxgmac_dma_desc *dma_desc); @@ -476,13 +485,15 @@ struct fxgmac_hw_ops { void (*set_interrupt_moderation)(struct fxgmac_pdata *pdata); void (*enable_msix_rxtxinterrupt)(struct fxgmac_pdata *pdata); void (*disable_msix_interrupt)(struct fxgmac_pdata *pdata); - void (*enable_msix_rxtxphyinterrupt)(struct fxgmac_pdata *pdata); + int (*enable_msix_rxtxphyinterrupt)(struct fxgmac_pdata *pdata); void (*enable_msix_one_interrupt)(struct fxgmac_pdata *pdata, u32 intid); void (*disable_msix_one_interrupt)(struct fxgmac_pdata *pdata, u32 intid); bool (*enable_mgm_interrupt)(struct fxgmac_pdata *pdata); bool (*disable_mgm_interrupt)(struct fxgmac_pdata *pdata); + 
int (*dismiss_all_int)(struct fxgmac_pdata *pdata); + void (*clear_misc_int_status)(struct fxgmac_pdata *pdata); void (*dev_xmit)(struct fxgmac_channel *channel); int (*dev_read)(struct fxgmac_channel *channel); @@ -496,26 +507,19 @@ struct fxgmac_hw_ops { /* For MII speed configuration */ int (*config_mac_speed)(struct fxgmac_pdata *pdata); - int (*set_xlgmii_2500_speed)(struct fxgmac_pdata *pdata); - int (*set_xlgmii_1000_speed)(struct fxgmac_pdata *pdata); - int (*set_xlgmii_100_speed)(struct fxgmac_pdata *pdata); int (*get_xlgmii_phy_status)(struct fxgmac_pdata *pdata, u32 *speed, bool *link_up, bool link_up_wait_to_complete); /* For descriptor related operation */ - void (*tx_desc_init)(struct fxgmac_channel *channel); - void (*rx_desc_init)(struct fxgmac_channel *channel); - void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); - void (*rx_desc_reset)(struct fxgmac_pdata *pdata, - struct fxgmac_desc_data *desc_data, - unsigned int index); + // void (*tx_desc_init)(struct fxgmac_channel *channel); + // void (*rx_desc_init)(struct fxgmac_channel *channel); + // void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); + // void (*rx_desc_reset)(struct fxgmac_pdata *pdata, + // struct fxgmac_desc_data *desc_data, + // unsigned int index); int (*is_last_desc)(struct fxgmac_dma_desc *dma_desc); int (*is_context_desc)(struct fxgmac_dma_desc *dma_desc); - void (*tx_start_xmit)(struct fxgmac_channel *channel, - struct fxgmac_ring *ring); - void (*set_pattern_data)(struct fxgmac_pdata *pdata); - void (*config_wol)(struct fxgmac_pdata *pdata, int en); /* For Flow Control */ int (*config_tx_flow_control)(struct fxgmac_pdata *pdata); @@ -537,9 +541,9 @@ struct fxgmac_hw_ops { /* For RX coalescing */ int (*config_rx_coalesce)(struct fxgmac_pdata *pdata); int (*config_tx_coalesce)(struct fxgmac_pdata *pdata); - unsigned int (*usec_to_riwt)(struct fxgmac_pdata *pdata, + unsigned long (*usec_to_riwt)(struct fxgmac_pdata *pdata, unsigned int usec); - unsigned int (*riwt_to_usec)(struct fxgmac_pdata *pdata, + unsigned long (*riwt_to_usec)(struct fxgmac_pdata *pdata, unsigned int riwt); /* For RX and TX threshold config */ @@ -556,10 +560,11 @@ struct fxgmac_hw_ops { int (*config_osp_mode)(struct fxgmac_pdata *pdata); /* For RX and TX PBL config */ + u32 (*calculate_max_checksum_size)(struct fxgmac_pdata *pdata); int (*config_rx_pbl_val)(struct fxgmac_pdata *pdata); - int (*get_rx_pbl_val)(struct fxgmac_pdata *pdata); + u32 (*get_rx_pbl_val)(struct fxgmac_pdata *pdata); int (*config_tx_pbl_val)(struct fxgmac_pdata *pdata); - int (*get_tx_pbl_val)(struct fxgmac_pdata *pdata); + u32 (*get_tx_pbl_val)(struct fxgmac_pdata *pdata); int (*config_pblx8)(struct fxgmac_pdata *pdata); /* For MMC statistics */ @@ -579,6 +584,7 @@ struct fxgmac_hw_ops { const u32 *table); /*For Offload*/ +#ifdef FXGMAC_POWER_MANAGEMENT void (*set_arp_offload)(struct fxgmac_pdata *pdata, unsigned char *ip_addr); int (*enable_arp_offload)(struct fxgmac_pdata *pdata); @@ -609,7 +615,7 @@ struct fxgmac_hw_ops { int (*set_wake_pattern_mask)(struct fxgmac_pdata *pdata, u32 filter_index, u8 register_index, u32 Data); -#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED void (*get_wake_packet_indication)(struct fxgmac_pdata *pdata, int *wake_reason, u32 *wake_pattern_number, @@ -617,6 +623,7 @@ struct fxgmac_hw_ops { u32 *packet_size); void (*enable_wake_packet_indication)(struct fxgmac_pdata *pdata, int en); +#endif #endif void (*reset_phy)(struct fxgmac_pdata *pdata); @@ 
-630,9 +637,9 @@ struct fxgmac_hw_ops { void (*enable_phy_sleep)(struct fxgmac_pdata *pdata); void (*phy_green_ethernet)(struct fxgmac_pdata *pdata); void (*phy_eee_feature)(struct fxgmac_pdata *pdata); - int (*get_ephy_state)(struct fxgmac_pdata *pdata); + u32 (*get_ephy_state)(struct fxgmac_pdata *pdata); int (*write_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 data); - int (*read_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 *data); + int (*read_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 __far *data); int (*set_ephy_autoneg_advertise)(struct fxgmac_pdata *pdata, struct fxphy_ag_adv phy_ag_adv); int (*phy_config)(struct fxgmac_pdata *pdata); @@ -669,18 +676,21 @@ struct fxgmac_hw_ops { unsigned int enable); /* efuse relevant operation. */ + bool (*read_patch_from_efuse_per_index)( + struct fxgmac_pdata *pdata, u8 index, u32 __far *offset, + u32 __far *value); /* read patch per index. */ + bool (*read_mac_subsys_from_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr, u32 *subsys, + u32 *revid); + bool (*read_efuse_data)(struct fxgmac_pdata *pdata, u32 offset, + u32 __far *value); +#ifndef COMMENT_UNUSED_CODE_TO_REDUCE_SIZE bool (*read_patch_from_efuse)(struct fxgmac_pdata *pdata, u32 offset, u32 *value); /* read patch per index. */ - bool (*read_patch_from_efuse_per_index)( - struct fxgmac_pdata *pdata, u8 index, u32 *offset, - u32 *value); /* read patch per index. */ bool (*write_patch_to_efuse)(struct fxgmac_pdata *pdata, u32 offset, u32 value); bool (*write_patch_to_efuse_per_index)(struct fxgmac_pdata *pdata, u8 index, u32 offset, u32 value); - bool (*read_mac_subsys_from_efuse)(struct fxgmac_pdata *pdata, - u8 *mac_addr, u32 *subsys, - u32 *revid); bool (*write_mac_subsys_to_efuse)(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); @@ -689,27 +699,17 @@ struct fxgmac_hw_ops { bool (*write_mac_addr_to_efuse)(struct fxgmac_pdata *pdata, u8 *mac_addr); bool (*efuse_load)(struct fxgmac_pdata *pdata); - bool (*read_efuse_data)(struct fxgmac_pdata *pdata, u32 offset, - u32 *value); bool (*write_oob)(struct fxgmac_pdata *pdata); bool (*write_led)(struct fxgmac_pdata *pdata, u32 value); bool (*read_led_config)(struct fxgmac_pdata *pdata); bool (*write_led_config)(struct fxgmac_pdata *pdata); +#endif int (*pcie_init)(struct fxgmac_pdata *pdata, bool ltr_en, bool aspm_l1ss_en, bool aspm_l1_en, bool aspm_l0s_en); void (*trigger_pcie)( struct fxgmac_pdata *pdata, u32 code); /* To trigger pcie sniffer for analysis. 
*/ -#ifdef DPDK - int (*phy_init)(struct fxgmac_pdata *); - int (*phy_start)(struct fxgmac_pdata *); - void (*phy_stop)(struct fxgmac_pdata *); - void (*phy_status)(struct fxgmac_pdata *); - void (*an_isr)( - struct fxgmac_pdata - *); /* phy_if->an_isr For single interrupt support */ -#endif }; /* This structure contains flags that indicate what hardware features @@ -717,47 +717,47 @@ struct fxgmac_hw_ops { */ struct fxgmac_hw_features { /* HW Version */ - unsigned int version; + u32 version; /* HW Feature Register0 */ - unsigned int phyifsel; /* PHY interface support */ - unsigned int vlhash; /* VLAN Hash Filter */ - unsigned int sma; /* SMA(MDIO) Interface */ - unsigned int rwk; /* PMT remote wake-up packet */ - unsigned int mgk; /* PMT magic packet */ - unsigned int mmc; /* RMON module */ - unsigned int aoe; /* ARP Offload */ - unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ - unsigned int eee; /* Energy Efficient Ethernet */ - unsigned int tx_coe; /* Tx Checksum Offload */ - unsigned int rx_coe; /* Rx Checksum Offload */ - unsigned int addn_mac; /* Additional MAC Addresses */ - unsigned int ts_src; /* Timestamp Source */ - unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + u32 phyifsel; /* PHY interface support */ + u32 vlhash; /* VLAN Hash Filter */ + u32 sma; /* SMA(MDIO) Interface */ + u32 rwk; /* PMT remote wake-up packet */ + u32 mgk; /* PMT magic packet */ + u32 mmc; /* RMON module */ + u32 aoe; /* ARP Offload */ + u32 ts; /* IEEE 1588-2008 Advanced Timestamp */ + u32 eee; /* Energy Efficient Ethernet */ + u32 tx_coe; /* Tx Checksum Offload */ + u32 rx_coe; /* Rx Checksum Offload */ + u32 addn_mac; /* Additional MAC Addresses */ + u32 ts_src; /* Timestamp Source */ + u32 sa_vlan_ins; /* Source Address or VLAN Insertion */ /* HW Feature Register1 */ - unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ - unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ - unsigned int adv_ts_hi; /* Advance Timestamping High Word */ - unsigned int dma_width; /* DMA width */ - unsigned int dcb; /* DCB Feature */ - unsigned int sph; /* Split Header Feature */ - unsigned int tso; /* TCP Segmentation Offload */ - unsigned int dma_debug; /* DMA Debug Registers */ - unsigned int rss; /* Receive Side Scaling */ - unsigned int tc_cnt; /* Number of Traffic Classes */ - unsigned int avsel; /* AV Feature Enable */ - unsigned int ravsel; /* Rx Side Only AV Feature Enable */ - unsigned int hash_table_size; /* Hash Table Size */ - unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + u32 rx_fifo_size; /* MTL Receive FIFO Size */ + u32 tx_fifo_size; /* MTL Transmit FIFO Size */ + u32 adv_ts_hi; /* Advance Timestamping High Word */ + u32 dma_width; /* DMA width */ + u32 dcb; /* DCB Feature */ + u32 sph; /* Split Header Feature */ + u32 tso; /* TCP Segmentation Offload */ + u32 dma_debug; /* DMA Debug Registers */ + u32 rss; /* Receive Side Scaling */ + u32 tc_cnt; /* Number of Traffic Classes */ + u32 avsel; /* AV Feature Enable */ + u32 ravsel; /* Rx Side Only AV Feature Enable */ + u32 hash_table_size; /* Hash Table Size */ + u32 l3l4_filter_num; /* Number of L3-L4 Filters */ /* HW Feature Register2 */ - unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ - unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ - unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ - unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ - unsigned int pps_out_num; /* Number of PPS outputs */ - unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ + u32 
rx_q_cnt; /* Number of MTL Receive Queues */ + u32 tx_q_cnt; /* Number of MTL Transmit Queues */ + u32 rx_ch_cnt; /* Number of DMA Receive Channels */ + u32 tx_ch_cnt; /* Number of DMA Transmit Channels */ + u32 pps_out_num; /* Number of PPS outputs */ + u32 aux_snap_num; /* Number of Aux snapshot inputs */ /* HW Feature Register3 */ u32 hwfr3; @@ -808,7 +808,7 @@ struct fxgmac_pdata { unsigned int tx_threshold; unsigned int tx_pbl; unsigned int tx_osp_mode; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED /* for tx hang checking. 20211227 */ unsigned int tx_hang_restart_queuing; #endif @@ -823,7 +823,7 @@ struct fxgmac_pdata { unsigned int tx_frames; /* Rx coalescing settings */ - unsigned int rx_riwt; + unsigned long rx_riwt; unsigned int rx_usecs; unsigned int rx_frames; @@ -883,8 +883,10 @@ struct fxgmac_pdata { int phy_duplex; int phy_autoeng; +#ifndef COMMENT_UNUSED_CODE_TO_REDUCE_SIZE char drv_name[32]; char drv_ver[32]; +#endif struct wol_bitmap_pattern pattern[MAX_PATTERN_COUNT]; @@ -915,6 +917,24 @@ struct fxgmac_pdata { #define FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN 1 #define FXGMAC_FLAG_LEGACY_NAPI_FREE_POS 30 /* bit30 */ #define FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN 1 +#define FXGMAC_FLAG_MISC_IRQ_FREE_POS 29 +#define FXGMAC_FLAG_MISC_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_MISC_NAPI_FREE_POS 28 +#define FXGMAC_FLAG_MISC_NAPI_FREE_LEN 1 +#define FXGMAC_FLAG_TX_IRQ_FREE_POS 27 +#define FXGMAC_FLAG_TX_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_TX_NAPI_FREE_POS 26 +#define FXGMAC_FLAG_TX_NAPI_FREE_LEN 1 +#define FXGMAC_FLAG_RX_IRQ_FREE_POS 22 +#define FXGMAC_FLAG_RX_IRQ_FREE_LEN 4 +#define FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_RX_NAPI_FREE_POS 18 +#define FXGMAC_FLAG_RX_NAPI_FREE_LEN 4 +#define FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN 1 + +#ifndef FXGMAC_FAKE_4_TX_QUEUE_ENABLED +#define FXGMAC_FAKE_4_TX_QUEUE_ENABLED 0 +#endif void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops); void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops); diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h index 1a40267e1fa2e..0e2ca78ae063f 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h @@ -2,36 +2,59 @@ /* Copyright (c) 2021 Motorcomm Corporation. */ -#ifndef __FUXI_OS_H__ -#define __FUXI_OS_H__ +#ifndef __FXGMAC_OS_H__ +#define __FXGMAC_OS_H__ #include +//#include +#include +#include +#include #include #include -#include +#include +#include #include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #ifdef CONFIG_PCI_MSI #include #endif -#include -#include -#include -#include -#include -#include -#include #include #include #include "fuxi-dbg.h" struct fxgmac_ring; struct fxgmac_pdata; +struct fxgmac_channel; + +#define FXGMAC_DRV_VERSION "1.0.29" -#define FXGMAC_DRV_VERSION "1.0.27" +#define FXGMAC_INT_MODERATION_ENABLED 1 #define PCIE_LP_ASPM_L0S 1 #define PCIE_LP_ASPM_L1 2 @@ -50,50 +73,38 @@ struct fxgmac_pdata; #define FXGMAC_TEST_MAC_HEAD_LEN 14 -#define FUXI_PM_WPI_READ_FEATURE_EN 1 +#define FXGMAC_PM_WPI_READ_FEATURE_ENABLED 1 #define RSS_Q_COUNT 4 -#define FXGMAC_TX_HANG_TIMER_EN 0 -/* only for debug. 
for normal run, pls keep them both 0 - * 0: use default tx q; other: specify txq-1: 1 txq; - */ -#define FXGMAC_NUM_OF_TX_Q_USED 0 -/* 1 to enable a dummy tx, ie, no tail for gmac; */ -#define FXGMAC_DUMMY_TX_DEBUG 0 +#define FXGMAC_TX_HANG_TIMER_ENABLED 0 /* 1 to trigger(write reg 0x1000) for sniffer stop */ #define FXGMAC_TRIGGER_TX_HANG 0 /* driver feature configuration */ -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED /* 0: check hw current desc; 1: check software dirty */ #define FXGMAC_TX_HANG_CHECH_DIRTY 0 #endif -/* 1:poll tx of 4 channels; 0: since only 1 tx channel supported in this - * version, poll ch 0 always. - */ - -#define FXGMAC_FULL_TX_CHANNEL 0 - #ifdef CONFIG_ARM64 /* when you want to run this driver on 64bit arm, you should open this, * otherwise dma's mask cannot be set successfully. */ -#define FUXI_DMA_BIT_MASK 64 +#define FXGMAC_DMA_BIT_MASK 64 #endif #ifdef CONFIG_PCI_MSI /* should be same as FXGMAC_MAX_DMA_CHANNELS + 1 tx_irq */ #define FXGMAC_MAX_MSIX_Q_VECTORS (FXGMAC_MSIX_Q_VECTORS + 1) -#define FXGMAC_MSIX_CH0RXDIS_EN 0 /* set to 1 for ch0 unbalance fix; */ +#define FXGMAC_MSIX_CH0RXDIS_ENABLED 0 /* set to 1 for ch0 unbalance fix; */ #define FXGMAC_MSIX_INTCTRL_EN 1 #define FXGMAC_PHY_INT_NUM 1 #define FXGMAC_MSIX_INT_NUMS (FXGMAC_MAX_MSIX_Q_VECTORS + FXGMAC_PHY_INT_NUM) #else /* for case of no CONFIG_PCI_MSI */ /* NO modification needed! for non-MSI, set to 0 always */ -#define FXGMAC_MSIX_CH0RXDIS_EN 0 +#define FXGMAC_MSIX_CH0RXDIS_ENABLED 0 #define FXGMAC_MSIX_INTCTRL_EN 0 #endif @@ -131,11 +142,48 @@ struct fxgmac_pdata; /*vlan id filter*/ #define FXGMAC_FILTER_SINGLE_VLAN_ENABLED 1 /* 1:enable health checking; */ #define FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED 1 -#define FUXI_MAC_HASH_TABLE 1 +#define FXGMAC_MAC_HASH_TABLE 1 #define FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED 1 -#define FUXI_MISC_INT_HANDLE_FEATURE_EN 1 +#define FXGMAC_MISC_INT_HANDLE_FEATURE_ENABLED 1 + +#define FXGMAC_ESD_RESTORE_PCIE_CFG + +#define FXGMAC_WOL_INTEGRATED_WOL_PARAMETER + +#define FXGMAC_LINK_SPEED_CHECK_PHY_LINK + +#define FXGMAC_FLUSH_TX_CHECK_ENABLED + +#define FXGMAC_POWER_MANAGEMENT + +#define FXGMAC_INTERRUPT_TX_INTERVAL + +#define FXGMAC_INTERRUPT_RX_INTERVAL + +#define FXGMAC_WAIT_TX_STOP + +#define FXGMAC_WAIT_RX_STOP_BY_PRXQ_RXQSTS + +#define FXGMAC_USE_DEFAULT_RSS_KEY_TBALE + +#define FXGMAC_RX_VLAN_FILTERING_ENABLED (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + +#define FXGMAC_NETDEV_PR_MODE_ENABLED ((pdata->netdev->flags & IFF_PROMISC) != 0) +#define FXGMAC_NETDEV_AM_MODE_ENABLED ((pdata->netdev->flags & IFF_ALLMULTI) != 0) +#define FXGMAC_NETDEV_MU_MODE_ENABLED ((pdata->netdev->flags & IFF_MULTICAST) != 0) +#define FXGMAC_NETDEV_BD_MODE_ENABLED ((pdata->netdev->flags & IFF_BROADCAST) != 0) -#define HAVE_FXGMAC_DEBUG_FS +#define FXGMAC_RX_CHECKSUM_ENABLED (pdata->netdev->features & NETIF_F_RXCSUM) + +#define TEST_MAC_HEAD 14 +#define TEST_TCP_HEAD_LEN_OFFSET 12 +#define TEST_TCP_OFFLOAD_LEN_OFFSET 48 +#define TEST_TCP_FIX_HEAD_LEN 24 +#define TEST_TCP_MSS_OFFSET 56 + +#define DF_MAX_NIC_NUM 16 + +/* #define HAVE_FXGMAC_DEBUG_FS */ #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &(((TYPE *)0)->MEMBER)) @@ -222,8 +270,12 @@ struct fxgmac_pdata; #define fxgmac_dump_buffer(_skb, _len, _tx_rx) #define DumpLine(_p, _cbLine, _fAddress, _ulGroup) +#ifndef __far +#define __far +#endif + #ifndef FXGMAC_DEBUG -#define FXGMAC_DEBUG +/* #define FXGMAC_DEBUG */ #endif /* For debug prints */ @@ -231,7 +283,18 @@ struct fxgmac_pdata; #define 
FXGMAC_PR(fmt, args...) \ pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) +/* + * If you want to continue a line, you NEED to use KERN_CONT. + * That has always been true. It hasn't always been enforced, though. + * If you do two printk's and the second one doesn't say "I'm a continuation", + * the printk logic assumes you're just confused and wanted two lines. + */ +#define DPRINTK(fmt, args...) \ + printk(KERN_CONT fmt, ## args) +#else #define DPRINTK printk +#endif #else #define FXGMAC_PR(x...) do { } while (0) #define DPRINTK(x...) @@ -240,12 +303,12 @@ struct fxgmac_pdata; #define IOC_MAGIC 'M' #define IOC_MAXNR (0x80 + 5) -#define FUXI_DFS_IOCTL_DEVICE_INACTIVE 0x10001 -#define FUXI_DFS_IOCTL_DEVICE_RESET 0x10002 -#define FUXI_DFS_IOCTL_DIAG_BEGIN 0x10003 -#define FUXI_DFS_IOCTL_DIAG_END 0x10004 -#define FUXI_DFS_IOCTL_DIAG_TX_PKT 0x10005 -#define FUXI_DFS_IOCTL_DIAG_RX_PKT 0x10006 +#define FXGMAC_DFS_IOCTL_DEVICE_INACTIVE 0x10001 +#define FXGMAC_DFS_IOCTL_DEVICE_RESET 0x10002 +#define FXGMAC_DFS_IOCTL_DIAG_BEGIN 0x10003 +#define FXGMAC_DFS_IOCTL_DIAG_END 0x10004 +#define FXGMAC_DFS_IOCTL_DIAG_TX_PKT 0x10005 +#define FXGMAC_DFS_IOCTL_DIAG_RX_PKT 0x10006 #define FXGMAC_EFUSE_UPDATE_LED_CFG 0x10007 #define FXGMAC_EFUSE_WRITE_LED 0x10008 @@ -262,11 +325,11 @@ struct fxgmac_pdata; #define FXGMAC_SET_MAC_DATA 0x10012 #define FXGMAC_GET_SUBSYS_ID 0x10013 #define FXGMAC_SET_SUBSYS_ID 0x10014 -#define FXGMAC_GET_GMAC_REG 0x10015 -#define FXGMAC_SET_GMAC_REG 0x10016 +#define FXGMAC_GET_REG 0x10015 +#define FXGMAC_SET_REG 0x10016 #define FXGMAC_GET_PHY_REG 0x10017 #define FXGMAC_SET_PHY_REG 0x10018 -#define FXGMAC_EPHYSTATISTICS 0x10019 +#define FXGMAC_EPHY_STATISTICS 0x10019 #define FXGMAC_GET_STATISTICS 0x1001A #define FXGMAC_GET_PCIE_LOCATION 0x1001B @@ -292,14 +355,49 @@ struct fxgmac_pdata; #define PCI_CAP_ID_MSIX_ENABLE_POS 0x1F #define PCI_CAP_ID_MSIX_ENABLE_LEN 0x1 +#define FXGMAC_IRQ_ENABLE 0x1 +#define FXGMAC_IRQ_DISABLE 0x0 +#define FXGMAC_NAPI_ENABLE 0x1 +#define FXGMAC_NAPI_DISABLE 0x0 + #ifndef fallthrough + +#ifdef __has_attribute #if __has_attribute(__fallthrough__) # define fallthrough __attribute__((__fallthrough__)) #else # define fallthrough do {} while (0) /* fallthrough */ #endif + +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif + #endif +#define PHY_POWER_DOWN 1 +#define PHY_POWER_UP 0 + +#define FXGMAC_MMC_IER_ALL_DEFAULT 0 + +/* #define FXGMAC_ESD_CHECK_ENABLED */ +#ifdef FXGMAC_ESD_CHECK_ENABLED +#define FXGMAC_ESD_INTERVAL (5 * HZ) +#define FXGMAC_ESD_ERROR_THRESHOLD ((u64)4000000000) +#define FXGMAC_PCIE_LINK_DOWN 0xFFFFFFFF +#define FXGMAC_PCIE_RECOVER_TIMES 5000 +#define FXGMAC_PCIE_IO_MEM_MASTER_ENABLE 0x7 +#endif + +#ifndef BIT +#define BIT(n) (0x1<<(n)) +#endif + +#define UDP_RSS_FLAGS (BIT(MAC_RSSCR_UDP4TE_POS) | \ + BIT(MAC_RSSCR_UDP6TE_POS)) + +#define MF90_SUB_VENTOR_ID 0x17aa +#define MF90_SUB_DEVICE_ID 0x3509 #pragma pack(1) /* it's better to make this struct's size to 128byte. 
*/ @@ -322,17 +420,6 @@ struct pattern_packet{ }; #pragma pack() -typedef enum { - CURRENT_STATE_SHUTDOWN = 0, - CURRENT_STATE_RESUME = 1, - CURRENT_STATE_INIT = 2, - CURRENT_STATE_SUSPEND = 3, - CURRENT_STATE_CLOSE = 4, - CURRENT_STATE_OPEN = 5, - CURRENT_STATE_RESTART = 6, - CURRENT_STATE_REMOVE = 7, -} CURRENT_STATE; - typedef dma_addr_t DMA_ADDR_T; typedef enum pkt_hash_types RSS_HASH_TYPE; typedef void __iomem *IOMEM; @@ -402,7 +489,7 @@ typedef struct fxgmac_channel_of_platform { struct napi_struct napi_rx; struct timer_list tx_timer; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED unsigned int tx_hang_timer_active; struct timer_list tx_hang_timer; unsigned int tx_hang_hw_cur; @@ -425,6 +512,36 @@ typedef struct ext_command_data { u32 val2; } CMD_DATA; +enum fxgmac_task_flag { + FXGMAC_FLAG_TASK_DOWN = 0, + FXGMAC_FLAG_TASK_RESET_PENDING, + FXGMAC_FLAG_TASK_ESD_CHECK_PENDING, + FXGMAC_FLAG_TASK_LINKCHG_CHECK_PENDING, + FXGMAC_FLAG_TASK_MAX +}; + +typedef struct fxgmac_esd_stats { + u32 tx_abort_excess_collisions; + u32 tx_dma_underrun; + u32 tx_lost_crs; + u32 tx_late_collisions; + u32 rx_crc_errors; + u32 rx_align_errors; + u32 rx_runt_errors; + u32 single_collisions; + u32 multi_collisions; + u32 tx_deferred_frames; +} FXGMAC_ESD_STATS; + +typedef enum fxgmac_dev_state { + FXGMAC_DEV_OPEN = 0x0, + FXGMAC_DEV_CLOSE = 0x1, + FXGMAC_DEV_STOP = 0x2, + FXGMAC_DEV_START = 0x3, + FXGMAC_DEV_SUSPEND = 0x4, + FXGMAC_DEV_RESUME = 0x5, + FXGMAC_DEV_PROBE = 0xFF, +} DEV_STATE; typedef struct fxgmac_pdata_of_platform { u32 cfg_pci_cmd; u32 cfg_cache_line_size; @@ -437,21 +554,31 @@ typedef struct fxgmac_pdata_of_platform { u32 cfg_device_ctrl2; u32 cfg_msix_capability; + int pre_phy_speed; + int pre_phy_duplex; + int pre_phy_autoneg; + struct work_struct restart_work; +#ifdef FXGMAC_ESD_CHECK_ENABLED + struct delayed_work esd_work; + FXGMAC_ESD_STATS esd_stats; + DECLARE_BITMAP(task_flags, FXGMAC_FLAG_TASK_MAX); +#endif u32 int_flags; /* legacy, msi or msix */ - int phy_irq; + int misc_irq; #ifdef CONFIG_PCI_MSI struct msix_entry *msix_entries; #endif /* power management and wol*/ - u32 wol; /* wol options */ - unsigned long powerstate; /* power state */ - unsigned int ns_offload_tab_idx; /* for ns-offload table. 2 entries supported. */ - CURRENT_STATE current_state; + u32 wol; + unsigned long powerstate; + /*for ns-offload table. 2 entries supported. 
*/ + unsigned int ns_offload_tab_idx; netdev_features_t netdev_features; struct napi_struct napi; - struct napi_struct napi_phy; + struct napi_struct napi_misc; + char misc_irq_name[IFNAMSIZ + 32]; u32 mgm_intctrl_val; bool phy_link; bool fxgmac_test_tso_flag; @@ -461,26 +588,17 @@ typedef struct fxgmac_pdata_of_platform { volatile u32 fxgmac_test_skb_arr_in_index; volatile u32 fxgmac_test_skb_arr_out_index; struct sk_buff *fxgmac_test_skb_array[FXGMAC_MAX_DBG_TEST_PKT]; -#ifdef HAVE_FXGMAC_DEBUG_FS - struct dentry *dbg_adapter; - struct dentry *fxgmac_dbg_root; - char fxgmac_dbg_netdev_ops_buf[FXGMAC_NETDEV_OPS_BUF_LEN]; -#endif + DEV_STATE dev_state; + struct mutex mutex; + struct timer_list phy_poll_tm; } FXGMAC_PDATA_OF_PLATFORM; void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx); int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata); -#ifdef HAVE_FXGMAC_DEBUG_FS -void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata); -void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata); -void fxgmac_dbg_init(struct fxgmac_pdata *pdata); -void fxgmac_dbg_exit(struct fxgmac_pdata *pdata); -#endif /* HAVE_FXGMAC_DEBUG_FS */ - void fxgmac_restart_dev(struct fxgmac_pdata *pdata); -long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, +long fxgmac_netdev_ops_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg); @@ -490,9 +608,13 @@ int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata, int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, int *ret_link, int *media); int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata); -void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed); -void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex); -void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg); +int fxgmac_phy_force_mode(struct fxgmac_pdata *pdata); +int fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed); +int fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex); +int fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg); +//void fxgmac_act_phy_link(struct fxgmac_pdata *pdata); +int fxgmac_phy_timer_init(struct fxgmac_pdata *pdata); +void fxgmac_phy_timer_destroy(struct fxgmac_pdata *pdata); unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata); unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, @@ -512,4 +634,15 @@ void fxgmac_stop(struct fxgmac_pdata *pdata); void fxgmac_free_rx_data(struct fxgmac_pdata *pdata); void fxgmac_free_tx_data(struct fxgmac_pdata *pdata); +void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, struct fxgmac_ring *ring); +void fxgmac_dev_xmit(struct fxgmac_channel *channel); + +void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en); +void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx); + +void fxgmac_lock(struct fxgmac_pdata *pdata); +void fxgmac_unlock(struct fxgmac_pdata *pdata); + +void fxgmac_set_phy_link_ksettings(struct fxgmac_pdata *pdata); + #endif /* __FUXI_OS_H__ */
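For reference, the LTR_CTRL bit-field macros touched in the register header above follow the driver's usual POS/LEN scheme. A minimal sketch of how the HW auto-trigger mode described in that comment could be armed with them (not part of the patch: the function name is hypothetical, the readreg()/writereg()/FXGMAC_SET_REG_BITS() helpers are assumed from their other call sites in the driver, and the default threshold value is used purely as an illustration):

static void fxgmac_ltr_hw_trigger_sketch(struct fxgmac_pdata *pdata)
{
	u32 regval;

	regval = readreg(pdata->pAdapter, pdata->base_mem + LTR_CTRL);
	/* A non-zero idle threshold (in 8 ns units) selects HW auto trigger. */
	regval = FXGMAC_SET_REG_BITS(regval, LTR_CTRL_IDLE_THRE_TIMER_POS,
				     LTR_CTRL_IDLE_THRE_TIMER_LEN,
				     LTR_CTRL_IDLE_THRE_TIMER_VAL);
	/* Then enable LTR itself. */
	regval = FXGMAC_SET_REG_BITS(regval, LTR_CTRL_EN_POS,
				     LTR_CTRL_EN_LEN, 1);
	writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_CTRL);
}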
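The KERN_CONT rationale quoted in the DPRINTK() definition is easiest to see at a call site. A tiny illustration (hypothetical variables; only active when FXGMAC_DEBUG is defined so that DPRINTK() expands to printk()):

	/* On >= 4.9 kernels both fragments are emitted as continuations of
	 * the same console line; without KERN_CONT the second call would
	 * start a new record and the message would be split in two.
	 */
	DPRINTK("ephy link is %s", link_up ? "up" : "down");
	DPRINTK(", speed %d\n", speed);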