summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorRussell King (Oracle) <rmk+kernel@armlinux.org.uk>2026-03-20 16:47:12 +0000
committerPaolo Abeni <pabeni@redhat.com>2026-03-24 12:08:25 +0100
commitfcbf106189aaf69a2a2f6116672d442d473ecbcd (patch)
tree5fbcc9a5b67d3ce8db0d50fc9312992bfa02ca39 /drivers
parenta41a5733e71ac103ca8a045539f7b56050dae75a (diff)
downloadlinux-fcbf106189aaf69a2a2f6116672d442d473ecbcd.tar.gz
linux-fcbf106189aaf69a2a2f6116672d442d473ecbcd.zip
net: stmmac: move stmmac_xmit() skb head handling
The skb head buffer handling is delayed in stmmac_xmit() until after the skb fragments have been populated into the descriptors. The reason is this code used to set the OWN bit on the first descriptor, which then allows the TX DMA to process the first and subsequent descriptors. However, as of commit 579a25a854d4 ("net: stmmac: Initial support for TBS") this is now separated, but the comments weren't updated. Move the code populating the first descriptor alongside the jumbo code which also populates the first descriptor. This gives a consistent location where we populate the descriptor(s) for the SKB head. Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk> Link: https://patch.msgid.link/E1w3d0C-0000000DfLj-0BLb@rmk-PC.armlinux.org.uk Tested-by: Maxime Chevallier <maxime.chevallier@bootlin.com> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c63
1 file changed, 30 insertions, 33 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ecb6d9a27567..fb9719820b93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4763,6 +4763,33 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
if (unlikely(entry < 0) && (entry != -EINVAL))
goto dma_map_err;
+ } else {
+ bool last_segment = (nfrags == 0);
+
+ dma_addr = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr))
+ goto dma_map_err;
+
+ stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr,
+ nopaged_len, false);
+
+ stmmac_set_desc_addr(priv, first_desc, dma_addr);
+
+ if (last_segment)
+ stmmac_set_tx_dma_last_segment(tx_q, first_entry);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first_desc);
+ }
+
+ /* Prepare the first descriptor without setting the OWN bit */
+ stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len,
+ csum_insertion, priv->descriptor_mode,
+ 0, last_segment, skb->len);
}
for (i = 0; i < nfrags; i++) {
@@ -4854,39 +4881,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first_desc, priv->sarc_type);
- /* Ready to fill the first descriptor and set the OWN bit w/o any
- * problems because all the descriptors are actually ready to be
- * passed to the DMA engine.
- */
- if (likely(!is_jumbo)) {
- bool last_segment = (nfrags == 0);
-
- dma_addr = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, dma_addr))
- goto dma_map_err;
-
- stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr,
- nopaged_len, false);
-
- stmmac_set_desc_addr(priv, first_desc, dma_addr);
-
- if (last_segment)
- stmmac_set_tx_dma_last_segment(tx_q, first_entry);
-
- if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- /* declare that device is doing timestamping */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- stmmac_enable_tx_timestamp(priv, first_desc);
- }
-
- /* Prepare the first descriptor setting the OWN bit too */
- stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len,
- csum_insertion, priv->descriptor_mode,
- 0, last_segment, skb->len);
- }
-
if (tx_q->tbs & STMMAC_TBS_EN) {
struct timespec64 ts = ns_to_timespec64(skb->tstamp);
@@ -4894,6 +4888,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
}
+ /* Set the OWN bit on the first descriptor now that all descriptors
+ * for this skb are populated.
+ */
stmmac_set_tx_owner(priv, first_desc);
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);