BGMAC_CMDCFG_RPI |
BGMAC_CMDCFG_TAI |
BGMAC_CMDCFG_HD |
BGMAC_CMDCFG_ML |
BGMAC_CMDCFG_CFE |
BGMAC_CMDCFG_RL |
BGMAC_CMDCFG_RED |
BGMAC_CMDCFG_PE |
BGMAC_CMDCFG_TPI |
BGMAC_CMDCFG_PAD_EN |
BGMAC_CMDCFG_PF),
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
BCMA_GMAC_CMN_PC_MTE);
else
bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
bgmac_miiconfig(bgmac);
if (bgmac->mii_bus)
bgmac->mii_bus->reset(bgmac->mii_bus);
netdev_reset_queue(bgmac->net_dev);
}
static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}
static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
bgmac_write(bgmac, BGMAC_INT_MASK, 0);
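	/* Read back so the masking write has reached the hardware before we return */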
bgmac_read(bgmac, BGMAC_INT_MASK);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
else
cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
bgmac_cco_ctl_maskset(bgmac, 1, ~0,
BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
BGMAC_FEAT_FLW_CTRL2)) {
u32 fl_ctl;
		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
fl_ctl = 0x03cb04cb;
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
}
if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
u32 rxq_ctl;
u16 bp_clk;
u8 mdp;
rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
mdp = (bp_clk * 128 / 1000) - 3;
rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
/* Clear any erroneously pending interrupts */
bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
/* 1 interrupt per received frame */
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
bgmac_set_rx_mode(bgmac->net_dev);
bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
bgmac_enable(bgmac);
}
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
struct bgmac *bgmac = netdev_priv(dev_id);
u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
int_status &= bgmac->int_mask;
if (!int_status)
return IRQ_NONE;
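	/* TX0/RX bits are handled via NAPI below; anything else is unexpected */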
int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
if (int_status)
dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);
/* Disable new interrupts until handling existing ones */
bgmac_chip_intrs_off(bgmac);
napi_schedule(&bgmac->napi);
return IRQ_HANDLED;
}
static int bgmac_poll(struct napi_struct *napi, int weight)
{
struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
int handled = 0;
/* Ack */
bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
napi_complete_done(napi, handled);
bgmac_chip_intrs_on(bgmac);
}
return handled;
}
/**************************************************
* net_device_ops
**************************************************/
static int bgmac_open(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
int err = 0;
bgmac_chip_reset(bgmac);
err = bgmac_dma_init(bgmac);
if (err)
return err;
	/* Specs say about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  net_dev->name, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
bgmac_dma_cleanup(bgmac);
return err;
}
napi_enable(&bgmac->napi);
phy_start(net_dev->phydev);
netif_start_queue(net_dev);
	bgmac_chip_intrs_on(bgmac);

	return 0;
}
static int bgmac_stop(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
netif_carrier_off(net_dev);
phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
free_irq(bgmac->irq, net_dev);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
struct bgmac_dma_ring *ring;
/* No QOS support yet */
ring = &bgmac->tx_ring[0];
return bgmac_dma_tx_add(bgmac, ring, skb);
}
static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;
ret = eth_prepare_mac_addr_change(net_dev, addr);
if (ret < 0)
return ret;
ether_addr_copy(net_dev->dev_addr, sa->sa_data);
bgmac_write_mac_address(bgmac, net_dev->dev_addr);
eth_commit_mac_addr_change(net_dev, addr);
return 0;
}
static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
struct bgmac *bgmac = netdev_priv(net_dev);
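	/* Mirror bgmac_chip_init(): RX length limit is the MTU plus 32 bytes of slack */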
bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
return 0;
}
static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
.ndo_start_xmit = bgmac_start_xmit,
.ndo_set_rx_mode = bgmac_set_rx_mode,
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = bgmac_change_mtu,
};
/**************************************************
* ethtool_ops
**************************************************/
struct bgmac_stat {
u8 size;
u32 offset;
const char *name;
};
static struct bgmac_stat bgmac_get_strings_stats[] = {
{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
{ 8, BGMAC_TX_OCTETS, "tx_octets" },
{ 4, BGMAC_TX_PKTS, "tx_pkts" },
{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
{ 4, BGMAC_TX_LEN_64, "tx_64" },
{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
{ 4, BGMAC_TX_DEFERED, "tx_defered" },
{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
{ 8, BGMAC_RX_OCTETS, "rx_octets" },
{ 4, BGMAC_RX_PKTS, "rx_pkts" },
{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
{ 4, BGMAC_RX_LEN_64, "rx_64" },
{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};
#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats)
static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return BGMAC_STATS_LEN;
}
return -EOPNOTSUPP;
}
static void bgmac_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
strlcpy(data + i * ETH_GSTRING_LEN,
bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
static void bgmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *ss, uint64_t *data)
{
struct bgmac *bgmac = netdev_priv(dev);
const struct bgmac_stat *s;
unsigned int i;
u64 val;
if (!netif_running(dev))
return;
for (i = 0; i < BGMAC_STATS_LEN; i++) {
s = &bgmac_get_strings_stats[i];
val = 0;
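		/* 64-bit counters keep their high word at offset + 4 */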
if (s->size == 8)
val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
val |= bgmac_read(bgmac, s->offset);
data[i] = val;
}
}
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
.get_strings = bgmac_get_strings,
.get_sset_count = bgmac_get_sset_count,
.get_ethtool_stats = bgmac_get_ethtool_stats,
.get_drvinfo = bgmac_get_drvinfo,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/**************************************************
* MII
**************************************************/
void bgmac_adjust_link(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
struct phy_device *phy_dev = net_dev->phydev;
bool update = false;
if (phy_dev->link) {
if (phy_dev->speed != bgmac->mac_speed) {
bgmac->mac_speed = phy_dev->speed;
update = true;
}
if (phy_dev->duplex != bgmac->mac_duplex) {
bgmac->mac_duplex = phy_dev->duplex;
update = true;
}
}
if (update) {
bgmac_mac_speed(bgmac);
phy_print_status(phy_dev);
}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);
int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
struct fixed_phy_status fphy_status = {
.link = 1,
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
};
struct phy_device *phy_dev;
int err;
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
}
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (err) {
dev_err(bgmac->dev, "Connecting PHY failed\n");
return err;
}
return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
struct bgmac *bgmac_alloc(struct device *dev)
{
struct net_device *net_dev;
struct bgmac *bgmac;
/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->ethtool_ops = &bgmac_ethtool_ops;
bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;
return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);
int bgmac_enet_probe(struct bgmac *bgmac)
{
struct net_device *net_dev = bgmac->net_dev;
int err;
bgmac_chip_intrs_off(bgmac);
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
dev_set_drvdata(bgmac->dev, bgmac);
if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
net_dev->dev_addr);
eth_hw_addr_random(net_dev);
dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

	/* This (reset &) enable is not present in specs or reference driver but
* Broadcom does it in arch PCI code when enabling fake PCI device.
*/
bgmac_clk_enable(bgmac, 0);
/* This seems to be fixing IRQ by assigning OOB #6 to the core */
if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
}
bgmac_chip_reset(bgmac);
err = bgmac_dma_alloc(bgmac);
if (err) {
dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
goto err_out;
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
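	/* NVRAM can ask us to run without TX-complete interrupts; TX reclaim
	 * then relies on the NAPI poll triggered by RX/error interrupts.
	 */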
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}
net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
net_dev->hw_features = net_dev->features;
net_dev->vlan_features = net_dev->features;
/* Omit FCS from max MTU size */
net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
goto err_phy_disconnect;
}
netif_carrier_off(net_dev);
return 0;
err_phy_disconnect:
phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);
void bgmac_enet_remove(struct bgmac *bgmac)
{
unregister_netdev(bgmac->net_dev);
phy_disconnect(bgmac->net_dev->phydev);
bgmac_dma_free(bgmac);
free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
int bgmac_enet_suspend(struct bgmac *bgmac)
{
if (!netif_running(bgmac->net_dev))
return 0;
phy_stop(bgmac->net_dev->phydev);
netif_stop_queue(bgmac->net_dev);
napi_disable(&bgmac->napi);
netif_tx_lock(bgmac->net_dev);
netif_device_detach(bgmac->net_dev);
netif_tx_unlock(bgmac->net_dev);
bgmac_chip_intrs_off(bgmac);
bgmac_chip_reset(bgmac);
bgmac_dma_cleanup(bgmac);
return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
int bgmac_enet_resume(struct bgmac *bgmac)
{
int rc;
if (!netif_running(bgmac->net_dev))
return 0;
rc = bgmac_dma_init(bgmac);
if (rc)
return rc;
bgmac_chip_init(bgmac);
napi_enable(&bgmac->napi);
netif_tx_lock(bgmac->net_dev);
netif_device_attach(bgmac->net_dev);
netif_tx_unlock(bgmac->net_dev);
netif_start_queue(bgmac->net_dev);
phy_start(bgmac->net_dev->phydev);
return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");