Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature (argument
addition) change overlapping with the extern removals from Joe Perches.
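
To illustrate the shape of that conflict (the prototype below is invented, not the actual br_private.h line): one side drops the now-redundant "extern" from a declaration while the other adds an argument to the same declaration, and the resolution keeps both changes:

	/* net-next side (Joe Perches's extern removal): */
	int br_example_hook(struct sk_buff *skb);

	/* net side (argument addition to the same prototype): */
	extern int br_example_hook(struct net_bridge_port *p, struct sk_buff *skb);

	/* merged result carries both changes: */
	int br_example_hook(struct net_bridge_port *p, struct sk_buff *skb);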

In drivers/net/netconsole.c we had one change adjusting a printk message
whilst another changed "printk(KERN_INFO" into "pr_info(".
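
Sketched with made-up message text rather than the exact netconsole lines, the resolution keeps the pr_info() conversion and the adjusted wording:

	/* net side: message wording adjusted, printk form untouched */
	printk(KERN_INFO "netconsole: example message, new wording\n");

	/* net-next side: printk(KERN_INFO ...) converted, old wording */
	pr_info("example message, old wording\n");

	/* resolution: pr_info() form carrying the new wording */
	pr_info("example message, new wording\n");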

Lastly, the emulex change was a new inline function addition overlapping
with Joe Perches's extern removals.
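
Purely illustrative again (the helper and flag below are invented), the be.h conflict pairs a newly added static inline with the extern removals touching the neighbouring prototypes:

	/* net side: new inline helper added to be.h */
	static inline bool be_example_feature_enabled(struct be_adapter *adapter)
	{
		return adapter->flags & BE_FLAGS_EXAMPLE_FEATURE;
	}

	/* net-next side: "extern" dropped from the surrounding declarations */
	int be_cmd_example_op(struct be_adapter *adapter);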

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 2013-11-04 13:48:30 -05:00
189 changed files with 1496 additions and 1169 deletions

--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c

@@ -252,25 +252,33 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
 				     struct bgmac_slot_info *slot)
 {
 	struct device *dma_dev = bgmac->core->dma_dev;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
 	struct bgmac_rx_header *rx;
 
 	/* Alloc skb */
-	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-	if (!slot->skb)
+	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+	if (!skb)
 		return -ENOMEM;
 
 	/* Poison - if everything goes fine, hardware will overwrite it */
-	rx = (struct bgmac_rx_header *)slot->skb->data;
+	rx = (struct bgmac_rx_header *)skb->data;
 	rx->len = cpu_to_le16(0xdead);
 	rx->flags = cpu_to_le16(0xbeef);
 
 	/* Map skb for the DMA */
-	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
-					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+	dma_addr = dma_map_single(dma_dev, skb->data,
+				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dma_dev, dma_addr)) {
 		bgmac_err(bgmac, "DMA mapping error\n");
+		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
+	/* Update the slot */
+	slot->skb = skb;
+	slot->dma_addr = dma_addr;
+
 	if (slot->dma_addr & 0xC0000000)
 		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
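
The hunk above is the standard "stage locally, commit on success" error-handling shape: allocate the skb and map it for DMA through local variables, and only write them into the slot once dma_mapping_error() says the mapping is good, freeing the skb otherwise. A minimal stand-alone sketch of the same idea, with invented alloc_buf()/map_buf() helpers in place of the kernel APIs:

	#include <stdlib.h>

	struct slot {
		void *buf;
		unsigned long addr;
	};

	/* Hypothetical stand-ins for netdev_alloc_skb() and dma_map_single() */
	static void *alloc_buf(void) { return malloc(64); }
	static unsigned long map_buf(void *buf) { return (unsigned long)buf; }

	static int fill_slot(struct slot *slot)
	{
		void *buf;
		unsigned long addr;

		buf = alloc_buf();
		if (!buf)
			return -1;

		addr = map_buf(buf);
		if (!addr) {
			free(buf);	/* undo the allocation; slot left untouched */
			return -1;
		}

		/* Commit only now, so a half-filled slot is never visible */
		slot->buf = buf;
		slot->addr = addr;
		return 0;
	}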

--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c

@@ -2545,10 +2545,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		}
 	}
 
-	/* Allocated memory for FW statistics */
-	if (bnx2x_alloc_fw_stats_mem(bp))
-		LOAD_ERROR_EXIT(bp, load_error0);
-
 	/* need to be done after alloc mem, since it's self adjusting to amount
 	 * of memory available for RSS queues
 	 */
@@ -2558,6 +2554,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 			LOAD_ERROR_EXIT(bp, load_error0);
 	}
 
+	/* Allocated memory for FW statistics */
+	if (bnx2x_alloc_fw_stats_mem(bp))
+		LOAD_ERROR_EXIT(bp, load_error0);
+
 	/* request pf to initialize status blocks */
 	if (IS_VF(bp)) {
 		rc = bnx2x_vfpf_init(bp);
@@ -2812,8 +2812,8 @@ load_error1:
 	if (IS_PF(bp))
 		bnx2x_clear_pf_load(bp);
 load_error0:
-	bnx2x_free_fp_mem(bp);
 	bnx2x_free_fw_stats_mem(bp);
+	bnx2x_free_fp_mem(bp);
 	bnx2x_free_mem(bp);
 	return rc;
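
The bnx2x_cmn.c hunks above move the FW statistics allocation so it happens after the step that self-adjusts to the memory available for RSS queues, and the load_error0 unwind is reordered to free in the reverse order of allocation. A stand-alone sketch of that acquire-in-order, release-in-reverse goto pattern (all names invented):

	#include <stdlib.h>

	/* Hypothetical allocations standing in for the fp/queue memory and FW stats memory */
	static int example_load(void **fp_mem, void **fw_stats)
	{
		*fp_mem = malloc(128);		/* done first; its sizing influences later steps */
		if (!*fp_mem)
			goto err0;

		*fw_stats = malloc(64);		/* allocated second, so freed first on unwind */
		if (!*fw_stats)
			goto err1;

		return 0;

	err1:
		free(*fp_mem);
		*fp_mem = NULL;
	err0:
		return -1;
	}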

--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c

@@ -2018,6 +2018,8 @@ failed:
 void bnx2x_iov_remove_one(struct bnx2x *bp)
 {
+	int vf_idx;
+
 	/* if SRIOV is not enabled there's nothing to do */
 	if (!IS_SRIOV(bp))
 		return;
@@ -2026,6 +2028,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
 	pci_disable_sriov(bp->pdev);
 	DP(BNX2X_MSG_IOV, "sriov disabled\n");
 
+	/* disable access to all VFs */
+	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
+		bnx2x_pretend_func(bp,
+				   HW_VF_HANDLE(bp,
+						bp->vfdb->sriov.first_vf_in_pf +
+						vf_idx));
+		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
+		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
+		bnx2x_vf_enable_internal(bp, 0);
+		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+	}
+
 	/* free vf database */
 	__bnx2x_iov_free_vfdb(bp);
 }
@@ -3197,7 +3211,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
* the "acquire" messages to appear on the VF PF channel.
*/
DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
pci_disable_sriov(bp->pdev);
bnx2x_disable_sriov(bp);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);