diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index e0fb0f1122db42..20760e10211a48 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
 			 * on the current MAC's MII bus
 			 */
 			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
-				if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
-					phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+				if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+					phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
 					if (!aup->phy_search_highest_addr)
 						/* break out with first one found */
 						break;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4763252bbf8557..d1cdc2d7615158 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -481,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-	dma_addr_t phy_ring_head, phy_ring_tail;
+	dma_addr_t phy_ring_tail;
 	int cnt = MTK_DMA_SIZE;
 	dma_addr_t dma_addr;
 	int i;
 
 	eth->scratch_ring = dma_alloc_coherent(eth->dev,
 					       cnt * sizeof(struct mtk_tx_dma),
-					       &phy_ring_head,
+					       &eth->phy_scratch_ring,
 					       GFP_ATOMIC | __GFP_ZERO);
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
 				    GFP_KERNEL);
+	if (unlikely(!eth->scratch_head))
+		return -ENOMEM;
+
 	dma_addr = dma_map_single(eth->dev,
 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 				  DMA_FROM_DEVICE);
@@ -502,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 		return -ENOMEM;
 
 	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-	phy_ring_tail = phy_ring_head +
+	phy_ring_tail = eth->phy_scratch_ring +
 			(sizeof(struct mtk_tx_dma) * (cnt - 1));
 
 	for (i = 0; i < cnt; i++) {
 		eth->scratch_ring[i].txd1 = (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
 		if (i < cnt - 1)
-			eth->scratch_ring[i].txd2 = (phy_ring_head +
+			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
 						     ((i + 1) * sizeof(struct mtk_tx_dma)));
 		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
 	}
 
-	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
 	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
 	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -671,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 err_dma:
 	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, txd);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
 		mtk_tx_unmap(&dev->dev, tx_buf);
@@ -701,6 +704,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
 	return nfrags;
 }
 
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		if (netif_queue_stopped(eth->netdev[i]))
+			return 1;
+	}
+
+	return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
 	int i;
@@ -766,12 +783,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
 		goto drop;
 
-	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
 		mtk_stop_queue(eth);
-		if (unlikely(atomic_read(&ring->free_count) >
-			     ring->thresh))
-			mtk_wake_queue(eth);
-	}
+
 	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
@@ -826,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 					  DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
 			skb_free_frag(new_data);
+			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
 
@@ -833,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
 			put_page(virt_to_head_page(new_data));
+			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -921,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
 		}
 		mtk_tx_unmap(eth->dev, tx_buf);
 
-		ring->last_free->txd2 = next_cpu;
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
 
@@ -946,7 +961,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
 	if (!total)
 		return 0;
 
-	if (atomic_read(&ring->free_count) > ring->thresh)
+	if (mtk_queue_stopped(eth) &&
+	    (atomic_read(&ring->free_count) > ring->thresh))
 		mtk_wake_queue(eth);
 
 	return total;
@@ -1027,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 
 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
 	ring->next_free = &ring->dma[0];
-	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-			   MAX_SKB_FRAGS);
+	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+	ring->thresh = MAX_SKB_FRAGS;
 
 	/* make sure that all changes to the dma ring are flushed before we
 	 * continue
@@ -1207,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
 	for (i = 0; i < MTK_MAC_COUNT; i++)
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
+	if (eth->scratch_ring) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+				  eth->scratch_ring,
+				  eth->phy_scratch_ring);
+		eth->scratch_ring = NULL;
+		eth->phy_scratch_ring = 0;
+	}
 	mtk_tx_clean(eth);
 	mtk_rx_clean(eth);
 	kfree(eth->scratch_head);
@@ -1269,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
 	mtk_w32(eth,
 		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
 		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-		MTK_RX_BT_32DWORDS,
+		MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
 		MTK_QDMA_GLO_CFG);
 
 	return 0;
@@ -1383,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
 
 	/* disable delay and normal interrupt */
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+	mtk_irq_disable(eth, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
@@ -1697,7 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->watchdog_timeo = HZ;
+	eth->netdev[id]->watchdog_timeo = 5 * HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eed626d56ea4f1..a5eb7c62306b66 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -91,6 +91,7 @@
 #define MTK_QDMA_GLO_CFG	0x1A04
 #define MTK_RX_2B_OFFSET	BIT(31)
 #define MTK_RX_BT_32DWORDS	(3 << 11)
+#define MTK_NDP_CO_PRO		BIT(10)
 #define MTK_TX_WB_DDONE		BIT(6)
 #define MTK_DMA_SIZE_16DWORDS	(2 << 4)
 #define MTK_RX_DMA_BUSY		BIT(3)
@@ -357,6 +358,7 @@ struct mtk_rx_ring {
  * @rx_ring:		Pointer to the memory holding info about the RX ring
  * @rx_napi:		The NAPI struct
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring:	physical address of scratch_ring
  * @scratch_head:	The scratch memory that scratch_ring points to.
  * @clk_ethif:		The ethif clock
  * @clk_esw:		The switch clock
@@ -384,6 +386,7 @@ struct mtk_eth {
 	struct mtk_rx_ring		rx_ring;
 	struct napi_struct		rx_napi;
 	struct mtk_tx_dma		*scratch_ring;
+	dma_addr_t			phy_scratch_ring;
 	void				*scratch_head;
 	struct clk			*clk_ethif;
 	struct clk			*clk_esw;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e6bb0ecb12c74c..53190894f17a04 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2505,8 +2505,6 @@ static int cpsw_probe(struct platform_device *pdev)
 clean_ale_ret:
 	cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
-	cpdma_chan_destroy(priv->txch);
-	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
 clean_runtime_disable_ret:
 	pm_runtime_disable(&pdev->dev);
@@ -2534,8 +2532,6 @@ static int cpsw_remove(struct platform_device *pdev)
 
 	unregister_netdev(ndev);
 	cpsw_ale_destroy(priv->ale);
-	cpdma_chan_destroy(priv->txch);
-	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
 	pm_runtime_disable(&pdev->dev);
 	device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2e21e9366f76a1..b62c4aaee40bd8 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
 	 * in all capable mode before using it.
 	 */
 	if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
-		int timeout = 50000;
-
-		/* set "all capable" mode and reset the phy */
+		/* set "all capable" mode */
 		rc |= MII_LAN83C185_MODE_ALL;
 		phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
-		phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-		/* wait end of reset (max 500 ms) */
-		do {
-			udelay(10);
-			if (timeout-- == 0)
-				return -1;
-			rc = phy_read(phydev, MII_BMCR);
-		} while (rc & BMCR_RESET);
 	}
-	return 0;
+
+	/* reset the phy */
+	return genphy_soft_reset(phydev);
 }
 
 static int lan911x_config_init(struct phy_device *phydev)
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 7c27fa1030e873..feb04ea20f11cb 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
 
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+			     struct inet_diag_msg *r, int ext,
+			     struct user_namespace *user_ns);
+
 extern int inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* _INET_DIAG_H_ */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b124235486..d1790c565d0ad1 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -231,7 +231,6 @@ struct tcp_sock {
 /* RTT measurement */
 	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
 	u32	mdev_us;	/* medium deviation			*/
-	u32	mdev_max_us;	/* maximal mdev for the last rtt period	*/
 	u32	rttvar_us;	/* smoothed mdev_max			*/
 	u32	rtt_seq;	/* sequence number to update rttvar	*/
 	struct rtt_meas {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 16079772222860..43d2cd862bc260 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -213,8 +213,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
 }
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);
 
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 	u16 vid = 0;
@@ -222,6 +221,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_bu
 	/* check if vlan is allowed, to avoid spoofing */
 	if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+	__br_handle_local_finish(skb);
 
 	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
 	br_pass_frame_up(skb);
@@ -274,7 +281,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 		if (p->br->stp_enabled == BR_NO_STP ||
 		    fwd_mask & (1u << dest[5]))
 			goto forward;
-		break;
+		*pskb = skb;
+		__br_handle_local_finish(skb);
+		return RX_HANDLER_PASS;
 
 	case 0x01:	/* IEEE MAC (Pause) */
 		goto drop;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2ed9dd2b5f2f5a..1d71c40eaaf35a 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -127,7 +127,9 @@
 __be32 ic_myaddr = NONE;		/* My IP address */
 static __be32 ic_netmask = NONE;	/* Netmask for local subnet */
 __be32 ic_gateway = NONE;	/* Gateway IP address */
 
-__be32 ic_addrservaddr = NONE;	/* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE;	/* IP Address of the IP addresses'server */
+#endif
 
 __be32 ic_servaddr = NONE;	/* Boot server IP address */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6c8f4cd080001..252a1843cdfeb5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -680,8 +680,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
- * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
- * piece by Van Jacobson.
+ * Proceedings SIGCOMM 87].
  * NOTE: the next three routines used to be one big routine.
  * To save cycles in the RFC 1323 implementation it was better to break
  * it up into three procedures. -- erics
@@ -692,59 +691,21 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
 	long m = mrtt_us;	/* RTT */
 	u32 srtt = tp->srtt_us;
 
-	/* The following amusing code comes from Jacobson's
-	 * article in SIGCOMM '88. Note that rtt and mdev
-	 * are scaled versions of rtt and mean deviation.
-	 * This is designed to be as fast as possible
-	 * m stands for "measurement".
-	 *
-	 * On a 1990 paper the rto value is changed to:
-	 *	RTO = rtt + 4 * mdev
-	 *
-	 * Funny. This algorithm seems to be very broken.
-	 * These formulae increase RTO, when it should be decreased, increase
-	 * too slowly, when it should be increased quickly, decrease too quickly
-	 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
-	 * does not matter how to _calculate_ it. Seems, it was trap
-	 * that VJ failed to avoid. 8)
-	 */
 	if (srtt != 0) {
-		m -= (srtt >> 3);	/* m is now error in rtt est */
-		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
-		if (m < 0) {
-			m = -m;		/* m is now abs(error) */
-			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
-			/* This is similar to one of Eifel findings.
-			 * Eifel blocks mdev updates when rtt decreases.
-			 * This solution is a bit different: we use finer gain
-			 * for mdev in this case (alpha*beta).
-			 * Like Eifel it also prevents growth of rto,
-			 * but also it limits too fast rto decreases,
-			 * happening in pure Eifel.
-			 */
-			if (m > 0)
-				m >>= 3;
-		} else {
-			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
-		}
-		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
-		if (tp->mdev_us > tp->mdev_max_us) {
-			tp->mdev_max_us = tp->mdev_us;
-			if (tp->mdev_max_us > tp->rttvar_us)
-				tp->rttvar_us = tp->mdev_max_us;
-		}
-		if (after(tp->snd_una, tp->rtt_seq)) {
-			if (tp->mdev_max_us < tp->rttvar_us)
-				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
+		m -= (srtt >> 3);	/* m' = m - srtt / 8 = (R' - SRTT) */
+		srtt += m;		/* srtt = srtt + m' = srtt + m - srtt / 8 */
+		if (m < 0)
+			m = -m;
+		m -= (tp->mdev_us >> 2);	/* m'' = |m'| - mdev / 4 */
+		tp->mdev_us += m;
+		tp->rttvar_us = tp->mdev_us;
+		if (after(tp->snd_una, tp->rtt_seq))
 			tp->rtt_seq = tp->snd_nxt;
-			tp->mdev_max_us = tcp_rto_min_us(sk);
-		}
 	} else {
 		/* no previous measure. */
-		srtt = m << 3;		/* take the measured time to be rtt */
-		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
-		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
-		tp->mdev_max_us = tp->rttvar_us;
+		srtt = m << 3;
+		tp->mdev_us = m << 1;
+		tp->rttvar_us = tp->mdev_us;
 		tp->rtt_seq = tp->snd_nxt;
 	}
 	tp->srtt_us = max(1U, srtt);
@@ -798,6 +759,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
  */
 static void tcp_set_rto(struct sock *sk)
 {
+	const u32 min_rto = tcp_rto_min_us(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Old crap is replaced with new one. 8)
 	 *
@@ -809,17 +771,15 @@ static void tcp_set_rto(struct sock *sk)
 	 * is invisible. Actually, Linux-2.4 also generates erratic
 	 * ACKs in some circumstances.
 	 */
-	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
-
+	if (((tp->srtt_us >> 3) + tp->rttvar_us) < min_rto)
+		inet_csk(sk)->icsk_rto = usecs_to_jiffies(min_rto);
+	else
+		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 	/* 2. Fixups made earlier cannot be right.
 	 *    If we do not estimate RTO correctly without them,
 	 *    all the algo is pure shit and should be replaced
 	 *    with correct one. It is exactly, which we pretend to do.
 	 */
-
-	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
-	 * guarantees that rto is higher.
-	 */
 	tcp_bound_rto(sk);
 }
 
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index b617826e24770a..7f59f5b979c704 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -561,7 +561,7 @@ void tcp_init_metrics(struct sock *sk)
 	 * retransmission.
 	 */
 	tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
-	tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
+	tp->mdev_us = tp->rttvar_us;
 
 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4b0a82191bc401..071718bccdab65 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -202,26 +202,18 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	sch_tree_unlock(sch);
 
 	for (i = 0; i < q->bands; i++) {
-		if (q->queues[i] == &noop_qdisc) {
-			struct Qdisc *child, *old;
-
-			child = qdisc_create_dflt(sch->dev_queue,
-						  &pfifo_qdisc_ops,
-						  TC_H_MAKE(sch->handle, i + 1));
-			if (child) {
-				sch_tree_lock(sch);
-				old = q->queues[i];
-				q->queues[i] = child;
-
-				if (old != &noop_qdisc) {
-					qdisc_tree_reduce_backlog(old,
-								  old->q.qlen,
-								  old->qstats.backlog);
-					qdisc_destroy(old);
-				}
-				sch_tree_unlock(sch);
-			}
-		}
+		struct Qdisc *child;
+
+		if (q->queues[i] != &noop_qdisc)
+			continue;
+
+		child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					  TC_H_MAKE(sch->handle, i + 1));
+		if (!child)
+			return -ENOMEM;
+		sch_tree_lock(sch);
+		q->queues[i] = child;
+		sch_tree_unlock(sch);
 	}
 	return 0;
 }
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 1ce724b87618a0..f69edcf219e514 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -3,12 +3,6 @@
 #include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
 
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
-				      struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
-				    struct inet_diag_msg *r, int ext,
-				    struct user_namespace *user_ns);
-
 static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 			       void *info);