From 39e4cf29431dec8aaad599d9734f7a0468a9c20b Mon Sep 17 00:00:00 2001
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Date: Wed, 4 Apr 2018 12:31:05 +0300
Subject: [PATCH] dpaa_eth: fix iova handling for contiguous frames

The driver relies on the no longer valid assumption that dma addresses
(iovas) are identical to physical addresses and uses phys_to_virt() to
make iova -> vaddr conversions. Fix this by adding a function that does
proper iova -> phys conversions using the iommu api, and update the code
to use it.

Also, a dma_unmap_single() call had to be moved further down the code
because iova -> vaddr conversions were required before the unmap.

For now only the contiguous frame case is handled; the SG case is
split out into a following patch.

While at it, clean up a redundant dpaa_bpid2pool() call and pass the bp
as a parameter.

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Acked-by: Madalin Bucur <madalin.bucur@nxp.com>
---
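For reference, the iova handling boils down to the pattern sketched below
(illustrative only; the example_* names are placeholders, while the actual
helper added in the diff is dpaa_iova_to_phys(), whose result the call sites
feed to phys_to_virt()):

#include <linux/iommu.h>
#include <linux/io.h>

/* With an IOMMU in front of the device, the address programmed into the
 * hardware is an iova that must be translated through the device's iommu
 * domain; without one, the iova already is the physical address.
 */
static phys_addr_t example_iova_to_phys(struct device *dev, dma_addr_t addr)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	return domain ? iommu_iova_to_phys(domain, addr) : addr;
}

/* Hypothetical wrapper spelling out the iova -> phys -> vaddr chain that
 * the Tx cleanup and Rx default paths perform after this patch.
 */
static void *example_iova_to_virt(struct device *dev, dma_addr_t addr)
{
	return phys_to_virt(example_iova_to_phys(dev, addr));
}

When no iommu domain is attached, the fallback keeps the old identity
mapping behaviour, so the change is a no-op on systems without an SMMU.
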
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 44 ++++++++++++++------------
 1 file changed, 24 insertions(+), 20 deletions(-)

--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -50,6 +50,7 @@
 #include <linux/highmem.h>
 #include <linux/percpu.h>
 #include <linux/dma-mapping.h>
+#include <linux/iommu.h>
 #include <linux/sort.h>
 #include <linux/phy_fixed.h>
 #include <soc/fsl/bman.h>
@@ -1615,6 +1616,17 @@ static int dpaa_eth_refill_bpools(struct
 	return 0;
 }
 
+static phys_addr_t dpaa_iova_to_phys(struct device *dev, dma_addr_t addr)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (domain)
+		return iommu_iova_to_phys(domain, addr);
+	else
+		return addr;
+}
+
 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
  * either contiguous frames or scatter/gather ones.
  * Skb freeing is not handled here.
@@ -1639,7 +1651,7 @@ static struct sk_buff *dpaa_cleanup_tx_f
 	int nr_frags, i;
 	u64 ns;
 
-	skbh = (struct sk_buff **)phys_to_virt(addr);
+	skbh = (struct sk_buff **)phys_to_virt(dpaa_iova_to_phys(dev, addr));
 	skb = *skbh;
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
@@ -1718,25 +1730,21 @@ static u8 rx_csum_offload(const struct d
  * accommodate the shared info area of the skb.
  */
 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
-					const struct qm_fd *fd)
+					const struct qm_fd *fd,
+					struct dpaa_bp *dpaa_bp,
+					void *vaddr)
 {
 	ssize_t fd_off = qm_fd_get_offset(fd);
-	dma_addr_t addr = qm_fd_addr(fd);
-	struct dpaa_bp *dpaa_bp;
 	struct sk_buff *skb;
-	void *vaddr;
 
-	vaddr = phys_to_virt(addr);
 	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
 
-	dpaa_bp = dpaa_bpid2pool(fd->bpid);
-	if (!dpaa_bp)
-		goto free_buffer;
-
 	skb = build_skb(vaddr, dpaa_bp->size +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-	if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
-		goto free_buffer;
+	if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) {
+		skb_free_frag(vaddr);
+		return NULL;
+	}
 	WARN_ON(fd_off != priv->rx_headroom);
 	skb_reserve(skb, fd_off);
 	skb_put(skb, qm_fd_get_length(fd));
@@ -1744,10 +1752,6 @@ static struct sk_buff *contig_fd_to_skb(
 	skb->ip_summed = rx_csum_offload(priv, fd);
 
 	return skb;
-
-free_buffer:
-	skb_free_frag(vaddr);
-	return NULL;
 }
 
 /* Build an skb with the data of the first S/G entry in the linear portion and
@@ -2476,12 +2480,12 @@ static enum qman_cb_dqrr_result rx_defau
 	if (!dpaa_bp)
 		return qman_cb_dqrr_consume;
 
-	dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
-
 	/* prefetch the first 64 bytes of the frame or the SGT start */
-	vaddr = phys_to_virt(addr);
+	vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev, addr));
 	prefetch(vaddr + qm_fd_get_offset(fd));
 
+	dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+
 	/* The only FD types that we may receive are contig and S/G */
 	WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
 
@@ -2492,7 +2496,7 @@ static enum qman_cb_dqrr_result rx_defau
 	(*count_ptr)--;
 
 	if (likely(fd_format == qm_fd_contig))
-		skb = contig_fd_to_skb(priv, fd);
+		skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
 	else
 		skb = sg_fd_to_skb(priv, fd);
 	if (!skb)