Commit c94eee8a authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix two regressions in ipv6 route lookups, particularly wrt output
    interface specifications in the lookup key.  From David Ahern.

 2) Fix checks in ipv6 IPSEC tunnel pre-encap fragmentation, from
    Herbert Xu.

 3) Fix mis-advertisement of 1000BASE-T on bcm63xx_enet, from Simon
    Arlott.

 4) Some smsc phys misbehave with energy detect mode enabled, so add a
    DT property to disable it on such PHYs.  From Heiko Schocher.

 5) Fix TSO corruption on TX in mv643xx_eth, from Philipp Kirchhofer.

 6) Fix regression added by removal of openvswitch vport stats, from
    James Morse.

 7) Vendor Kconfig options should be bool, not tristate, from Andreas
    Schwab.

 8) Use non-_BH() net stats bump in tcp_xmit_probe_skb(), otherwise we
    barf during TCP REPAIR operations.

 9) Fix various bugs in openvswitch conntrack support, from Joe
    Stringer.

10) Fix NETLINK_LIST_MEMBERSHIPS locking, from David Herrmann.

11) Don't have VSOCK do sock_put() in interrupt context, from Jorgen
    Hansen.

12) Fix skb_realloc_headroom() failures properly in ISDN, from Karsten
    Keil.

13) Add some device IDs to qmi_wwan, from Bjorn Mork.

14) Fix ovs egress tunnel information when using lwtunnel devices, from
    Pravin B Shelar.

15) Add missing NETIF_F_FRAGLIST to macvtap feature list, from Jason
    Wang.

16) Fix incorrect handling of throw routes when the lookup that follows
    the throw cannot find a match, from Xin Long.

17) Protect ipv6 MTU calculations from wrap-around, from Hannes Frederic
    Sowa (see the sketch after this list).

18) Fix failed autonegotiation on KSZ9031 micrel PHYs, from Nathan
    Sullivan.

19) Add missing memory barries in descriptor accesses or xgbe driver,
    from Thomas Lendacky.

20) Fix release condition test in pppoe_release(), from Guillaume Nault.

21) Fix gianfar bugs wrt filter configuration, from Claudiu Manoil.

22) Fix violations of RX buffer alignment in sh_eth driver, from Sergei
    Shtylyov.

23) Fix missing of_node_put() calls in various places around the
    networking tree, from Julia Lawall.

24) Fix incorrect leaf node walking in ipv4 routing tree, from Alexander
    Duyck.

25) RDS doesn't check pskb_pull()/pskb_trim() return values, from
    Sowmini Varadhan.

26) Fix VLAN configuration in mlx4 driver, from Jack Morgenstein.
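
The sketch for item 17: subtracting header overhead from an unsigned MTU
can wrap around to a huge value and send the fragment-size calculation
into an infinite loop. A minimal illustration of the guard, with assumed
names (frag_data_len() is not the kernel's function; the actual fix is
spread across the ipv6 code):

#include <stdbool.h>

#define FRAG_HDR_LEN  8   /* sizeof(struct frag_hdr) in the kernel  */
#define MIN_FRAG_DATA 8   /* fragment payloads come in 8-byte units */

/* Check the lower bound first so "mtu - hlen - FRAG_HDR_LEN" cannot
 * wrap, then round the payload down to a multiple of 8. */
static bool frag_data_len(unsigned int mtu, unsigned int hlen,
                          unsigned int *data_len)
{
        if (mtu < hlen + FRAG_HDR_LEN + MIN_FRAG_DATA)
                return false;
        *data_len = (mtu - hlen - FRAG_HDR_LEN) & ~7u;
        return true;
}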

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (79 commits)
  ipv6: protect mtu calculation of wrap-around and infinite loop by rounding issues
  Revert "Merge branch 'ipv6-overflow-arith'"
  net/mlx4: Copy/set only sizeof struct mlx4_eqe bytes
  net/mlx4_en: Explicitly set no vlan tags in WQE ctrl segment when no vlan is present
  vhost: fix performance on LE hosts
  bpf: sample: define aarch64 specific registers
  amd-xgbe: Fix race between access of desc and desc index
  RDS-TCP: Recover correctly from pskb_pull()/pksb_trim() failure in rds_tcp_data_recv
  forcedeth: fix unilateral interrupt disabling in netpoll path
  openvswitch: Fix skb leak using IPv6 defrag
  ipv6: Export nf_ct_frag6_consume_orig()
  openvswitch: Fix double-free on ip_defrag() errors
  fib_trie: leaf_walk_rcu should not compute key if key is less than pn->key
  net: mv643xx_eth: add missing of_node_put
  ath6kl: add missing of_node_put
  net: phy: mdio: add missing of_node_put
  netdev/phy: add missing of_node_put
  net: netcp: add missing of_node_put
  net: thunderx: add missing of_node_put
  ipv6: gre: support SIT encapsulation
  ...
parents 38dab9ac 89bc7848
@@ -39,6 +39,7 @@ Required properties:
Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
- mac-address : See ethernet.txt file in the same directory
- phy-handle : See ethernet.txt file in the same directory
Note: "ti,hwmods" field is used to fetch the base address and irq
resources from TI, omap hwmod data base during device registration.
......
SMSC LAN87xx Ethernet PHY
Some boards require special tuning values. Configure them
through an Ethernet OF device node.
Optional properties:
- smsc,disable-energy-detect:
If set, do not enable energy detect mode for the SMSC phy.
default: enable energy detect mode
Examples:
smsc phy with disabled energy detect mode on an am335x based board.
&davinci_mdio {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&davinci_mdio_default>;
pinctrl-1 = <&davinci_mdio_sleep>;
status = "okay";
ethernetphy0: ethernet-phy@0 {
reg = <0>;
smsc,disable-energy-detect;
};
};
@@ -4428,6 +4428,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
M: Claudiu Manoil <claudiu.manoil@freescale.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/gianfar*
X: drivers/net/ethernet/freescale/gianfar_ptp.c
F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
FREESCALE QUICC ENGINE UCC UART DRIVER
M: Timur Tabi <timur@tabi.org>
L: linuxppc-dev@lists.ozlabs.org
......
@@ -1247,7 +1247,7 @@ static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
struct sk_buff *skb;
struct sk_buff *skb, *nskb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
int i, hdr_space_needed;
@@ -1262,14 +1262,10 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
return;
hdr_space_needed = l2headersize(l2, 0);
if (hdr_space_needed > skb_headroom(skb)) {
struct sk_buff *orig_skb = skb;
skb = skb_realloc_headroom(skb, hdr_space_needed);
if (!skb) {
dev_kfree_skb(orig_skb);
return;
}
nskb = skb_realloc_headroom(skb, hdr_space_needed);
if (!nskb) {
skb_queue_head(&l2->i_queue, skb);
return;
}
spin_lock_irqsave(&l2->lock, flags);
if (test_bit(FLG_MOD128, &l2->flag))
@@ -1282,7 +1278,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
p1);
dev_kfree_skb(l2->windowar[p1]);
}
l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
l2->windowar[p1] = skb;
i = sethdraddr(&st->l2, header, CMD);
@@ -1295,8 +1291,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
memcpy(skb_push(skb, i), header, i);
st->l2.l2l1(st, PH_PULL | INDICATION, skb);
memcpy(skb_push(nskb, i), header, i);
st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
FsmDelTimer(&st->l2.t203, 13);
......
@@ -1476,7 +1476,7 @@ static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb, *nskb, *oskb;
struct sk_buff *skb, *nskb;
u_char header[MAX_L2HEADER_LEN];
u_int i, p1;
@@ -1486,48 +1486,34 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
skb = skb_dequeue(&l2->i_queue);
if (!skb)
return;
if (test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
else
p1 = (l2->vs - l2->va) % 8;
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1]) {
printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
mISDNDevName4ch(&l2->ch), p1);
dev_kfree_skb(l2->windowar[p1]);
}
l2->windowar[p1] = skb;
i = sethdraddr(l2, header, CMD);
if (test_bit(FLG_MOD128, &l2->flag)) {
header[i++] = l2->vs << 1;
header[i++] = l2->vr << 1;
} else
header[i++] = (l2->vr << 5) | (l2->vs << 1);
nskb = skb_realloc_headroom(skb, i);
if (!nskb) {
printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
mISDNDevName4ch(&l2->ch), i);
skb_queue_head(&l2->i_queue, skb);
return;
}
if (test_bit(FLG_MOD128, &l2->flag)) {
p1 = (l2->vs - l2->va) % 128;
l2->vs = (l2->vs + 1) % 128;
} else {
header[i++] = (l2->vr << 5) | (l2->vs << 1);
p1 = (l2->vs - l2->va) % 8;
l2->vs = (l2->vs + 1) % 8;
}
nskb = skb_clone(skb, GFP_ATOMIC);
p1 = skb_headroom(nskb);
if (p1 >= i)
memcpy(skb_push(nskb, i), header, i);
else {
printk(KERN_WARNING
"%s: L2 pull_iqueue skb header(%d/%d) too short\n",
mISDNDevName4ch(&l2->ch), i, p1);
oskb = nskb;
nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
if (!nskb) {
dev_kfree_skb(oskb);
printk(KERN_WARNING "%s: no skb mem in %s\n",
mISDNDevName4ch(&l2->ch), __func__);
return;
}
memcpy(skb_put(nskb, i), header, i);
memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
dev_kfree_skb(oskb);
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1]) {
printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
mISDNDevName4ch(&l2->ch), p1);
dev_kfree_skb(l2->windowar[p1]);
}
l2->windowar[p1] = skb;
memcpy(skb_push(nskb, i), header, i);
l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
......
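
Both ISDN hunks above (hisax and mISDN) apply the same fix pattern: when
skb_realloc_headroom() fails, the original skb is pushed back onto the
head of the I-queue so the frame can be retried later, instead of being
freed and lost. Condensed sketch (field names as in the hunks; locking
and window bookkeeping elided):

struct sk_buff *nskb = skb_realloc_headroom(skb, hdr_space_needed);
if (!nskb) {
        /* Don't drop the frame: requeue it and retry on the next pull. */
        skb_queue_head(&l2->i_queue, skb);
        return;
}
/* The original skb stays in the ack window (windowar[]); the copy with
 * fresh headroom gets the L2 header pushed and is sent down the stack. */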
@@ -847,21 +847,25 @@ static int emac_probe(struct platform_device *pdev)
if (ndev->irq == -ENXIO) {
netdev_err(ndev, "No irq resource\n");
ret = ndev->irq;
goto out;
goto out_iounmap;
}
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
goto out;
goto out_iounmap;
}
clk_prepare_enable(db->clk);
ret = clk_prepare_enable(db->clk);
if (ret) {
dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
goto out_iounmap;
}
ret = sunxi_sram_claim(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
goto out;
goto out_clk_disable_unprepare;
}
db->phy_node = of_parse_phandle(np, "phy", 0);
@@ -910,6 +914,10 @@ static int emac_probe(struct platform_device *pdev)
out_release_sram:
sunxi_sram_release(&pdev->dev);
out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_iounmap:
iounmap(db->membase);
out:
dev_err(db->dev, "not found (%d).\n", ret);
@@ -921,8 +929,12 @@ static int emac_probe(struct platform_device *pdev)
static int emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
iounmap(db->membase);
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
......
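
The emac_probe() fix above is the standard unwind idiom: each acquired
resource gets its own error label, failure paths jump to the label that
releases everything acquired so far, and emac_remove() mirrors the
releases in reverse order. A condensed sketch (probe_sketch() is
illustrative, not the driver function; declarations abbreviated):

static int probe_sketch(struct platform_device *pdev)
{
        int ret;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        ret = clk_prepare_enable(clk);   /* now checked...            */
        if (ret)
                goto out_iounmap;        /* ...and unwound on failure */

        ret = sunxi_sram_claim(&pdev->dev);
        if (ret)
                goto out_clk_disable_unprepare;

        return 0;

out_clk_disable_unprepare:
        clk_disable_unprepare(clk);
out_iounmap:
        iounmap(base);
        return ret;
}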
@@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
dma_wmb();
smp_wmb();
ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
......
@@ -1807,6 +1807,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
struct netdev_queue *txq;
int processed = 0;
unsigned int tx_packets = 0, tx_bytes = 0;
unsigned int cur;
DBGPR("-->xgbe_tx_poll\n");
@@ -1814,10 +1815,15 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
if (!ring)
return 0;
cur = ring->cur;
/* Be sure we get ring->cur before accessing descriptor data */
smp_rmb();
txq = netdev_get_tx_queue(netdev, channel->queue_index);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
(ring->dirty != ring->cur)) {
(ring->dirty != cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
......
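
The two xgbe hunks pair up around ring->cur: the xmit path must make its
descriptor writes visible before publishing the new index, and the poll
path must snapshot the index before reading descriptor data. The pairing
in miniature (write_tx_desc()/clean_tx_desc() are stand-ins, not driver
functions):

/* producer, xgbe_dev_xmit() */
write_tx_desc(rdesc);          /* fill descriptor, set OWN bit        */
smp_wmb();                     /* descriptor writes become visible... */
ring->cur = cur_index + 1;     /* ...before the index is published    */

/* consumer, xgbe_tx_poll() */
cur = ring->cur;               /* snapshot the published index        */
smp_rmb();                     /* pairs with the producer's smp_wmb() */
while ((processed < XGBE_TX_DESC_MAX_PROC) && (ring->dirty != cur))
        clean_tx_desc(ring, ring->dirty++);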
@@ -2049,7 +2049,7 @@ static void swphy_poll_timer(unsigned long data)
for (i = 0; i < priv->num_ports; i++) {
struct bcm63xx_enetsw_port *port;
int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
int val, j, up, advertise, lpa, speed, duplex, media;
int external_phy = bcm_enet_port_is_rgmii(i);
u8 override;
@@ -2092,22 +2092,27 @@ static void swphy_poll_timer(unsigned long data)
lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
MII_LPA);
lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
MII_STAT1000);
/* figure out media and duplex from advertise and LPA values */
media = mii_nway_result(lpa & advertise);
duplex = (media & ADVERTISE_FULL) ? 1 : 0;
if (lpa2 & LPA_1000FULL)
duplex = 1;
if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
speed = 1000;
else {
if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
speed = 100;
else
speed = 10;
if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
speed = 100;
else
speed = 10;
if (val & BMSR_ESTATEN) {
advertise = bcmenet_sw_mdio_read(priv, external_phy,
port->phy_id, MII_CTRL1000);
lpa = bcmenet_sw_mdio_read(priv, external_phy,
port->phy_id, MII_STAT1000);
if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
&& lpa & (LPA_1000FULL | LPA_1000HALF)) {
speed = 1000;
duplex = (lpa & LPA_1000FULL);
}
}
dev_info(&priv->pdev->dev,
......
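
The bcm63xx_enet hunk above stops inferring gigabit from MII_STAT1000
alone: extended status (BMSR_ESTATEN) must be present, and 1000BASE-T
must be advertised by both link partners before the speed is set to
1000. The corrected check, condensed (mdio_read() stands in for
bcmenet_sw_mdio_read()):

if (bmsr & BMSR_ESTATEN) {
        int adv = mdio_read(phy, MII_CTRL1000);  /* our advertisement */
        int lpa = mdio_read(phy, MII_STAT1000);  /* partner ability   */

        if ((adv & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
            (lpa & (LPA_1000FULL | LPA_1000HALF))) {
                speed  = 1000;
                duplex = !!(lpa & LPA_1000FULL);
        }
}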
@@ -3,7 +3,7 @@
#
config NET_VENDOR_CAVIUM
tristate "Cavium ethernet drivers"
bool "Cavium ethernet drivers"
depends on PCI
default y
---help---
......
@@ -22,7 +22,6 @@
struct nicpf {
struct pci_dev *pdev;
u8 rev_id;
u8 node;
unsigned int flags;
u8 num_vf_en; /* No of VF enabled */
@@ -44,6 +43,7 @@ struct nicpf {
u8 duplex[MAX_LMAC];
u32 speed[MAX_LMAC];
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
u16 rss_ind_tbl_size;
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
@@ -54,6 +54,11 @@ struct nicpf {
bool irq_allocated[NIC_PF_MSIX_VECTORS];
};
static inline bool pass1_silicon(struct nicpf *nic)
{
return nic->pdev->revision < 8;
}
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
@@ -117,7 +122,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
* when PF writes to MBOX(1), in next revisions when
* PF writes to MBOX(0)
*/
if (nic->rev_id == 0) {
if (pass1_silicon(nic)) {
/* see the comment for nic_reg_write()/nic_reg_read()
* functions above
*/
@@ -305,9 +310,6 @@ static void nic_init_hw(struct nicpf *nic)
{
int i;
/* Reset NIC, in case the driver is repeatedly inserted and removed */
nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -395,8 +397,18 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
/* Leave RSS_SIZE as '0' to disable RSS */
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
(vnic << 24) | (padd << 16) | (rssi_base + rssi));
if (pass1_silicon(nic)) {
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
(vnic << 24) | (padd << 16) |
(rssi_base + rssi));
} else {
/* Set MPI_ALG to '0' to disable MCAM parsing */
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
(padd << 16));
/* MPI index is same as CPI if MPI_ALG is not enabled */
nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
(vnic << 24) | (rssi_base + rssi));
}
if ((rssi + 1) >= cfg->rq_cnt)
continue;
@@ -409,6 +421,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
rssi = ((cpi - cpi_base) & 0x38) >> 3;
}
nic->cpi_base[cfg->vf_id] = cpi_base;
nic->rssi_base[cfg->vf_id] = rssi_base;
}
/* Responsds to VF with its RSS indirection table size */
@@ -434,10 +447,9 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
u8 qset, idx = 0;
u64 cpi_cfg, cpi_base, rssi_base, rssi;
u64 idx_addr;
cpi_base = nic->cpi_base[cfg->vf_id];
cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
rssi = rssi_base;
qset = cfg->vf_id;
@@ -454,9 +466,15 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
idx++;
}
cpi_base = nic->cpi_base[cfg->vf_id];
if (pass1_silicon(nic))
idx_addr = NIC_PF_CPI_0_2047_CFG;
else
idx_addr = NIC_PF_MPI_0_2047_CFG;
cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
cpi_cfg &= ~(0xFULL << 20);
cpi_cfg |= (cfg->hash_bits << 20);
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
/* 4 level transmit side scheduler configutation
@@ -1001,8 +1019,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
nic->node = nic_get_node_id(pdev);
nic_set_lmac_vf_mapping(nic);
......
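
The thunder changes hinge on one predicate: pass-1 silicon reports a PCI
revision below 8, and later parts route RSS/CPI indexing through the new
MPI register block (offsets added in the nic_reg.h hunk below). The shape
of the change, condensed from the hunks above:

static inline bool pass1_silicon(struct nicpf *nic)
{
        return nic->pdev->revision < 8;
}

/* ... later, when updating the hash bits for a VF ... */
u64 idx_addr = pass1_silicon(nic) ? NIC_PF_CPI_0_2047_CFG
                                  : NIC_PF_MPI_0_2047_CFG;
cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
cpi_cfg = (cpi_cfg & ~(0xFULL << 20)) | ((u64)cfg->hash_bits << 20);
nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);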
@@ -85,7 +85,11 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
#define NIC_PF_MCAM_0_191_ENA (0x100000)
#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
#define NIC_PF_MCAM_CTRL (0x120000)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
......
@@ -29,7 +29,7 @@
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_NIC_VF,
PCI_VENDOR_ID_CAVIUM, 0xA11E) },
PCI_VENDOR_ID_CAVIUM, 0xA134) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
PCI_VENDOR_ID_CAVIUM, 0xA11E) },
......
@@ -977,8 +977,10 @@ static int bgx_init_of_phy(struct bgx *bgx)
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
lmac++;
if (lmac == MAX_LMAC_PER_BGX)
if (lmac == MAX_LMAC_PER_BGX) {
of_node_put(np_child);
break;
}
}
return 0;
}
......
@@ -341,7 +341,7 @@ static void gfar_rx_offload_en(struct gfar_private *priv)
if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
priv->uses_rxfcb = 1;
if (priv->hwts_rx_en)
if (priv->hwts_rx_en || priv->rx_filer_enable)
priv->uses_rxfcb = 1;
}
@@ -351,7 +351,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
u32 rctrl = 0;
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
/* Program the RIR0 reg with the required distribution */
if (priv->poll_mode == GFAR_SQ_POLLING)
gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
@@ -3462,11 +3462,9 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
if (events & IEVENT_BSY) {
dev->stats.rx_errors++;
dev->stats.rx_over_errors++;
atomic64_inc(&priv->extra_stats.rx_bsy);
gfar_receive(irq, grp_id);
netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
gfar_read(&regs->rstat));
}
......
@@ -676,14 +676,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
u32 fcr = 0x0, fpr = FPR_FILER_MASK;
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
......
@@ -1344,6 +1344,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
data[i++] = veb->tc_stats.tc_tx_packets[j];
data[i++] = veb->tc_stats.tc_tx_bytes[j];
data[i++] = veb->tc_stats.tc_rx_packets[j];
data[i++] = veb->tc_stats.tc_rx_bytes[j];
}
}
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
......
@@ -7911,6 +7911,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.vmdq) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
#ifdef I40E_FCOE
......
@@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
desc->l4i_chk = 0;
desc->byte_cnt = length;
desc->buf_ptr = dma_map_single(dev->dev.parent, data,
length, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
WARN(1, "dma_map_single failed!\n");
return -ENOMEM;
if (length <= 8 && (uintptr_t)data & 0x7) {
/* Copy unaligned small data fragment to TSO header data area */
memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
data, length);
desc->buf_ptr = txq->tso_hdrs_dma
+ txq->tx_curr_desc * TSO_HEADER_SIZE;
} else {
/* Alignment is okay, map buffer and hand off to hardware */
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->buf_ptr = dma_map_single(dev->dev.parent, data,
length, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev.parent,
desc->buf_ptr))) {
WARN(1, "dma_map_single failed!\n");
return -ENOMEM;
}
}
cmd_sts = BUFFER_OWNED_BY_DMA;
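
This is the TSO corruption fix from item 5 in miniature: the controller
apparently mishandles buffers of 8 bytes or less that are not 8-byte
aligned, so such fragments are bounced through the per-descriptor TSO
header slot (already DMA-mapped, and large enough for any such fragment)
rather than mapped directly:

if (length <= 8 && ((uintptr_t)data & 0x7)) {
        /* Small and misaligned: copy into the mapped TSO header area. */
        memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
               data, length);
        desc->buf_ptr = txq->tso_hdrs_dma
                        + txq->tx_curr_desc * TSO_HEADER_SIZE;
} else {
        /* Alignment is fine: map the buffer and hand it to hardware. */
        desc->buf_ptr = dma_map_single(dev->dev.parent, data, length,
                                       DMA_TO_DEVICE);
}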
@@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
}
static inline void
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
int ret;
u32 cmd_csum = 0;
u16 l4i_chk = 0;
u32 cmd_sts;
tx_index = txq->tx_curr_desc;
desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
desc->byte_cnt = hdr_len;