ethernet: atl1: Add missing DMA mapping error checks and count errors

[ Upstream commit d72411d20905180cdc452c553be17481b24463d2 ]

The `dma_map_XXX()` functions can fail and must be checked using
`dma_mapping_error()`.  This patch adds proper error handling for all
DMA mapping calls.
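
The pattern is always the same; a minimal sketch (with `dev` standing in
for the driver's device pointer, not the actual atl1 code):

	dma_addr_t dma = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma)) {
		/* Mapping failed: there is nothing to unmap; undo any
		 * bookkeeping done so far and drop the request.
		 */
		return -ENOMEM;
	}
	/* Only now is `dma` a bus address the hardware may use. */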

In `atl1_alloc_rx_buffers()`, if DMA mapping fails, the freshly
allocated skb is freed, the `rx_dropped` counter is incremented, and the
refill loop stops before the descriptor is handed to the hardware.
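
Concretely, the check is inserted between the `dma_map_page()` call and
the write of the address into the RFD descriptor:

	if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
		kfree_skb(skb);
		adapter->soft_stats.rx_dropped++;
		break;
	}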

In `atl1_tx_map()`, if a mapping fails, all previously mapped buffers
are unmapped, and the function now returns `false` so that the caller
can drop the packet.
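
Since buffers are mapped in ring order, the unwind simply walks the ring
from the first slot mapped for this packet up to the slot that failed:

	dma_err:
		while (first_mapped != next_to_use) {
			buffer_info = &tpd_ring->buffer_info[first_mapped];
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
			buffer_info->dma = 0;
			if (++first_mapped == tpd_ring->count)
				first_mapped = 0;
		}
		return false;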

If `atl1_xmit_frame()` drops the packet, the `tx_errors` counter is
incremented.
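
All failure paths in `atl1_xmit_frame()` now funnel through a single
`drop_packet` label, so the accounting lives in one place:

	drop_packet:
		adapter->soft_stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;

Returning `NETDEV_TX_OK` here is deliberate: the skb has been consumed
(freed), so the stack must not requeue or retry it.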

Fixes: f3cc28c797 ("Add Attansic L1 ethernet driver.")
Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
Link: https://patch.msgid.link/20250625141629.114984-2-fourier.thomas@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 			break;
 		}
 
-		buffer_info->alloced = 1;
-		buffer_info->skb = skb;
-		buffer_info->length = (u16) adapter->rx_buffer_len;
 		page = virt_to_page(skb->data);
 		offset = offset_in_page(skb->data);
 		buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
 						adapter->rx_buffer_len,
 						DMA_FROM_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+			kfree_skb(skb);
+			adapter->soft_stats.rx_dropped++;
+			break;
+		}
+
+		buffer_info->alloced = 1;
+		buffer_info->skb = skb;
+		buffer_info->length = (u16)adapter->rx_buffer_len;
+
 		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
 		rfd_desc->coalese = 0;
@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
 	return 0;
 }
 
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
-			struct tx_packet_desc *ptpd)
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+			struct tx_packet_desc *ptpd)
 {
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	struct atl1_buffer *buffer_info;
@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	unsigned int nr_frags;
 	unsigned int f;
 	int retval;
+	u16 first_mapped;
 	u16 next_to_use;
 	u16 data_len;
 	u8 hdr_len;
@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	buf_len -= skb->data_len;
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	next_to_use = atomic_read(&tpd_ring->next_to_use);
+	first_mapped = next_to_use;
 	buffer_info = &tpd_ring->buffer_info[next_to_use];
 	BUG_ON(buffer_info->skb);
 	/* put skb in last TPD */
@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 		buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
 						offset, hdr_len,
 						DMA_TO_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+			goto dma_err;
 
 		if (++next_to_use == tpd_ring->count)
 			next_to_use = 0;
@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 							page, offset,
 							buffer_info->length,
 							DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      buffer_info->dma))
+				goto dma_err;
 			if (++next_to_use == tpd_ring->count)
 				next_to_use = 0;
 		}
@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 		buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
 						offset, buf_len,
 						DMA_TO_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+			goto dma_err;
 		if (++next_to_use == tpd_ring->count)
 			next_to_use = 0;
 	}
@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 			buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
 				frag, i * ATL1_MAX_TX_BUF_LEN,
 				buffer_info->length, DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      buffer_info->dma))
+				goto dma_err;
 
 			if (++next_to_use == tpd_ring->count)
 				next_to_use = 0;
@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 
 	/* last tpd's buffer-info */
 	buffer_info->skb = skb;
+
+	return true;
+
+dma_err:
+	while (first_mapped != next_to_use) {
+		buffer_info = &tpd_ring->buffer_info[first_mapped];
+		dma_unmap_page(&adapter->pdev->dev,
+			       buffer_info->dma,
+			       buffer_info->length,
+			       DMA_TO_DEVICE);
+		buffer_info->dma = 0;
+
+		if (++first_mapped == tpd_ring->count)
+			first_mapped = 0;
+	}
+	return false;
 }
 
 static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 	len = skb_headlen(skb);
 
-	if (unlikely(skb->len <= 0)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	if (unlikely(skb->len <= 0))
+		goto drop_packet;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++) {
@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 	if (mss) {
 		if (skb->protocol == htons(ETH_P_IP)) {
 			proto_hdr_len = skb_tcp_all_headers(skb);
-			if (unlikely(proto_hdr_len > len)) {
-				dev_kfree_skb_any(skb);
-				return NETDEV_TX_OK;
-			}
+			if (unlikely(proto_hdr_len > len))
+				goto drop_packet;
+
 			/* need additional TPD ? */
 			if (proto_hdr_len != len)
 				count += (len - proto_hdr_len +
@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 	}
 
 	tso = atl1_tso(adapter, skb, ptpd);
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	if (tso < 0)
+		goto drop_packet;
 
 	if (!tso) {
 		ret_val = atl1_tx_csum(adapter, skb, ptpd);
-		if (ret_val < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
+		if (ret_val < 0)
+			goto drop_packet;
 	}
 
-	atl1_tx_map(adapter, skb, ptpd);
+	if (!atl1_tx_map(adapter, skb, ptpd))
+		goto drop_packet;
+
 	atl1_tx_queue(adapter, count, ptpd);
 	atl1_update_mailbox(adapter);
 	return NETDEV_TX_OK;
+
+drop_packet:
+	adapter->soft_stats.tx_errors++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static int atl1_rings_clean(struct napi_struct *napi, int budget)