cns3xxx: ethernet: cleanup code

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
Author: Koen Vandeputte, 2018-08-07 11:18:08 +02:00
parent 23cdbf2644
commit 06beefd6d5

@@ -325,6 +325,7 @@ static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
         } else {
                 temp = MDIO_READ_COMMAND;
         }
+
         temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
         temp |= (phy_id & 0x1f);
@@ -337,8 +338,7 @@ static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
         }
         if (cycles == 5000) {
-                printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
-                        phy_id);
+                printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name, phy_id);
                 return -1;
         }
@@ -363,8 +363,7 @@ static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
         return ret;
 }
 
-static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
-                              u16 val)
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location, u16 val)
 {
         unsigned long flags;
         int ret;
@@ -392,6 +391,7 @@ static int cns3xxx_mdio_register(void __iomem *base)
         if ((err = mdiobus_register(mdio_bus)))
                 mdiobus_free(mdio_bus);
+
         return err;
 }
@@ -537,14 +537,13 @@ static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
                 /* put the new buffer on RX-free queue */
                 rx_ring->buff_tab[i] = buf;
                 rx_ring->phys_tab[i] = phys;
                 if (i == RX_DESCS - 1) {
+                        desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU | END_OF_RING;
                         i = 0;
-                        desc->config0 = END_OF_RING | FIRST_SEGMENT |
-                                        LAST_SEGMENT | RX_SEGMENT_MRU;
                         desc = &(rx_ring)->desc[i];
                 } else {
-                        desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
-                                        RX_SEGMENT_MRU;
+                        desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU;
                         i++;
                         desc++;
                 }
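Note on the refill logic this hunk reorders: whichever descriptor sits in slot RX_DESCS - 1 has to carry END_OF_RING on top of the usual FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU flags, and the refill index then wraps back to slot 0. Below is a minimal, self-contained C model of that wrap; the struct, ring size and flag values are invented for illustration, and only the control flow mirrors the driver.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the real driver defines its own bit layout. */
#define FIRST_SEGMENT  (1u << 0)
#define LAST_SEGMENT   (1u << 1)
#define END_OF_RING    (1u << 2)
#define RX_SEGMENT_MRU (1536u << 3)   /* pretend the MRU value lives in upper bits */
#define RX_DESCS       32

struct model_rx_desc {
        uint32_t config0;
};

int main(void)
{
        struct model_rx_desc ring[RX_DESCS] = { 0 };
        unsigned int i = 0;

        /* Refill 40 buffers so the index wraps past the end of the ring. */
        for (int received = 0; received < 40; received++) {
                struct model_rx_desc *desc = &ring[i];

                if (i == RX_DESCS - 1) {
                        /* Last slot: mark end of ring, then wrap to slot 0. */
                        desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
                                        RX_SEGMENT_MRU | END_OF_RING;
                        i = 0;
                } else {
                        desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
                                        RX_SEGMENT_MRU;
                        i++;
                }
        }

        printf("slot %d has END_OF_RING: %s\n", RX_DESCS - 1,
               (ring[RX_DESCS - 1].config0 & END_OF_RING) ? "yes" : "no");
        return 0;
}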
@@ -566,6 +565,7 @@ static void eth_check_num_used(struct _tx_ring *tx_ring)
                 return;
         tx_ring->stopped = stop;
+
         for (i = 0; i < 4; i++) {
                 struct port *port = switch_port_tab[i];
                 struct net_device *dev;
@@ -574,6 +574,7 @@ static void eth_check_num_used(struct _tx_ring *tx_ring)
                         continue;
                 dev = port->netdev;
+
                 if (stop)
                         netif_stop_queue(dev);
                 else
@@ -592,6 +593,7 @@ static void eth_complete_tx(struct sw *sw)
         index = tx_ring->free_index;
         desc = &(tx_ring)->desc[index];
+
         for (i = 0; i < num_used; i++) {
                 if (desc->cown) {
                         skb = tx_ring->buff_tab[index];
@@ -610,6 +612,7 @@ static void eth_complete_tx(struct sw *sw)
                         break;
                 }
         }
+
         tx_ring->free_index = index;
         tx_ring->num_used -= i;
         eth_check_num_used(tx_ring);
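For context, eth_complete_tx() walks the TX ring from free_index and reclaims every slot whose cown bit the hardware has handed back, stopping at the first slot the hardware still owns. A rough, self-contained C model of that reclaim walk follows; the ring size, the cown field, the reclaim_tx helper name and the omitted unmap/free step are placeholders, not the driver's real types.

#include <stdio.h>
#include <stdbool.h>

#define TX_DESCS 32   /* illustrative ring size */

struct model_tx_desc {
        bool cown;    /* set once the hardware is done with the slot */
};

/* Reclaim completed slots starting at *free_index; returns how many were freed. */
static int reclaim_tx(struct model_tx_desc *ring, int *free_index, int num_used)
{
        int index = *free_index;
        int i;

        for (i = 0; i < num_used; i++) {
                struct model_tx_desc *desc = &ring[index];

                if (!desc->cown)
                        break;            /* hardware still owns this one */

                /* here the driver unmaps and frees the skb for this slot */
                index = (index + 1) % TX_DESCS;
        }

        *free_index = index;
        return i;
}

int main(void)
{
        struct model_tx_desc ring[TX_DESCS] = { 0 };
        int free_index = 30, num_used = 5;

        /* Pretend the hardware completed three frames, wrapping past the end. */
        ring[30].cown = ring[31].cown = ring[0].cown = true;

        num_used -= reclaim_tx(ring, &free_index, num_used);
        printf("free_index=%d num_used=%d\n", free_index, num_used);  /* 1 and 2 */
        return 0;
}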
@@ -633,8 +636,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
                         break;
                 /* process received frame */
-                dma_unmap_single(sw->dev, rx_ring->phys_tab[i],
-                                 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+                dma_unmap_single(sw->dev, rx_ring->phys_tab[i], RX_SEGMENT_MRU, DMA_FROM_DEVICE);
                 skb = build_skb(rx_ring->buff_tab[i], RX_SEGMENT_ALLOC_SIZE);
                 if (!skb)
@@ -741,8 +743,10 @@ static void eth_set_desc(struct sw *sw, struct _tx_ring *tx_ring, int index,
         tx_ring->phys_tab[index] = phys;
         config0 |= len;
+
         if (index == TX_DESCS - 1)
                 config0 |= END_OF_RING;
+
         if (index == index_last)
                 config0 |= LAST_SEGMENT;
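For context on the two checks this hunk spaces out: eth_set_desc() builds config0 from the segment length, adds END_OF_RING only when the slot is the physically last one in the ring, and LAST_SEGMENT only when the slot holds the final fragment of the frame being queued. A tiny C illustration of that flag composition follows; the bit positions and the build_config0 helper name are invented, only the combination logic matches the hunk.

#include <stdio.h>
#include <stdint.h>

/* Invented bit positions, purely for illustration. */
#define END_OF_RING  (1u << 30)
#define LAST_SEGMENT (1u << 29)
#define TX_DESCS     32

/* Combine a segment length with the ring-end and frame-end flags. */
static uint32_t build_config0(uint32_t len, int index, int index_last)
{
        uint32_t config0 = len;

        if (index == TX_DESCS - 1)
                config0 |= END_OF_RING;   /* this slot closes the ring */
        if (index == index_last)
                config0 |= LAST_SEGMENT;  /* this slot ends the frame */

        return config0;
}

int main(void)
{
        /* A two-fragment frame whose last fragment lands in the final slot. */
        printf("first frag:  0x%08x\n", build_config0(128, 30, 31));
        printf("second frag: 0x%08x\n", build_config0(64, 31, 31));
        return 0;
}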
@@ -772,6 +776,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 eth_schedule_poll(sw);
 
         spin_lock_bh(&tx_lock);
         if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
                 spin_unlock_bh(&tx_lock);
                 return NETDEV_TX_BUSY;
@@ -875,7 +880,6 @@ static int init_rings(struct sw *sw)
         __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
         __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
         __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
         __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
 
         rx_ring->desc = dmam_alloc_coherent(sw->dev, RX_POOL_ALLOC_SIZE,
@@ -885,6 +889,7 @@ static int init_rings(struct sw *sw)
         /* Setup RX buffers */
         memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
         for (i = 0; i < RX_DESCS; i++) {
                 struct rx_desc *desc = &(rx_ring)->desc[i];
                 void *buf;
@@ -894,13 +899,16 @@ static int init_rings(struct sw *sw)
                         return -ENOMEM;
                 desc->sdl = RX_SEGMENT_MRU;
                 if (i == (RX_DESCS - 1))
                         desc->eor = 1;
                 desc->fsd = 1;
                 desc->lsd = 1;
                 desc->sdp = dma_map_single(sw->dev, buf + SKB_HEAD_ALIGN,
                                            RX_SEGMENT_MRU, DMA_FROM_DEVICE);
                 if (dma_mapping_error(sw->dev, desc->sdp))
                         return -EIO;
@@ -918,12 +926,14 @@ static int init_rings(struct sw *sw)
         /* Setup TX buffers */
         memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
         for (i = 0; i < TX_DESCS; i++) {
                 struct tx_desc *desc = &(tx_ring)->desc[i];
                 tx_ring->buff_tab[i] = 0;
                 if (i == (TX_DESCS - 1))
                         desc->eor = 1;
                 desc->cown = 1;
         }
+
         __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
@@ -944,8 +954,7 @@ static void destroy_rings(struct sw *sw)
                 if (!buf)
                         continue;
-                dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU,
-                                 DMA_FROM_DEVICE);
+                dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU, DMA_FROM_DEVICE);
                 skb_free_frag(buf);
         }