/*
* Driver for (BCM4706)? GBit MAC core on BCMA bus.
*
* Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bgmac.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bcm47xx_nvram.h>

static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
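
/* BCM4707, BCM47094 and BCM53018 (Northstar family SoCs) need special
 * handling in a few places of this driver; this helper detects them.
 */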
static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
{
	switch (bgmac->core->bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4707:
	case BCMA_CHIP_ID_BCM47094:
	case BCMA_CHIP_ID_BCM53018:
		return true;
	default:
		return false;
	}
}
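
/* Poll @reg of @core until the bits selected by @mask read back as @value
 * or @timeout microseconds elapse (polled in 10 us steps). Returns true
 * on success, false after logging a timeout error.
 */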
static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	pr_err("Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
* DMA
**************************************************/
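
/* Stop a TX ring: suspend it first so the core can finish the current
 * transfer, then clear the control register and wait for the hardware to
 * report the disabled state.
 */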
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend the DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
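	/* Use i as a timeout flag: it is zeroed as soon as one of the
	 * stopped states is seen, so a non-zero value after the loop means
	 * the 10000 us budget ran out.
	 */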
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}
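
/* (Re)enable a TX ring. On core rev >= 4 the burst length, number of
 * outstanding reads, prefetch control and prefetch threshold are also
 * programmed before setting the enable bit.
 */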
static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->core->id.rev >= 4) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}
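
/* Fill the i-th hardware descriptor using the DMA address already stored
 * in the matching slot. The last descriptor of the ring must carry the
 * end-of-table (EOT) flag so the core wraps back to the first one.
 */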
static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
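
/* Map an skb (linear head plus any paged fragments) and queue it on a TX
 * ring. Called on the ndo_start_xmit path, hence the NETDEV_TX_* return
 * codes.
 */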
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
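	/* For example, assuming 32-bit unsigned counters: end = 2 and
	 * start = 0xfffffffe give 2 - 0xfffffffe = 4 slots in use thanks
	 * to modulo-2^32 arithmetic.
	 */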
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);