	void __iomem *ioaddr = priv->base;

	/* Check that the MAC address is valid.  If it's not, fall back
	 * to a randomly generated one. The user can still assign an
	 * address using the following linux command:
	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
	xgmac_hw_init(dev);
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;
}

/**
 *  xgmac_stop - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

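	/* Only disable NAPI if the DMA interrupts are still enabled;
	 * a zero mask means another path has already quiesced them */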
	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);

	/* Release and free the Rx/Tx resources */
	xgmac_free_dma_desc_rings(priv);

	return 0;
}

/**
 *  xgmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int entry;
	int i;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct xgmac_dma_desc *desc, *first;
	unsigned int desc_flags;
	u32 irq_flag;
	unsigned int len;
	dma_addr_t paddr;

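	/* Tx interrupt mitigation: request a completion interrupt for
	 * only one frame in every DMA_TX_RING_SZ/4; tx_irq_cnt wraps
	 * at that boundary */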
	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
		TXDESC_CSUM_ALL : 0;
	entry = priv->tx_head;
	desc = priv->dma_tx + entry;
	first = desc;

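	/* Map the linear (header) portion of the skb for DMA */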
	len = skb_headlen(skb);
	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb(skb);
		/* the skb has been consumed; never return an errno here */
		return NETDEV_TX_OK;
	}
	priv->tx_skbuff[entry] = skb;
	desc_set_buf_addr_and_size(desc, paddr, len);

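	/* Map each paged fragment into its own descriptor. Every
	 * fragment descriptor except the last is handed to the DMA
	 * engine inside the loop; the last segment and the first
	 * descriptor are completed below */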
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);

		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, paddr)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = NULL;

		desc_set_buf_addr_and_size(desc, paddr, len);
		if (i < (nfrags - 1))
			desc_set_tx_owner(desc, desc_flags);
	}

	/* Interrupt on completion only for the latest segment */
	if (desc != first)
		desc_set_tx_owner(desc, desc_flags |
			TXDESC_LAST_SEG | irq_flag);
	else
		desc_flags |= TXDESC_LAST_SEG | irq_flag;

	/* Set owner on first desc last to avoid race condition: the
	 * barrier makes sure all descriptor writes above are visible
	 * to the device before the owner bit is flipped */
	wmb();
	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

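	/* Kick the transmit poll demand register so the DMA engine
	 * re-scans the descriptor ring */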
	writel(1, priv->base + XGMAC_DMA_TX_POLL);
	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
	    MAX_SKB_FRAGS)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
	unsigned int entry;
	unsigned int count = 0;
	struct xgmac_dma_desc *p;

	while (count < limit) {
		int ip_checksum;
		struct sk_buff *skb;
		int frame_len;

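		/* stop once no filled descriptors remain outstanding */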
		if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
			break;

		entry = priv->rx_tail;
		p = priv->dma_rx + entry;
		if (desc_get_owner(p))
			break;

		count++;
		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

		/* read the status of the incoming frame */
		ip_checksum = desc_get_rx_status(priv, p);
		if (ip_checksum < 0)
			continue;

		skb = priv->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
			break;
		}
		priv->rx_skbuff[entry] = NULL;

		frame_len = desc_get_rx_frame_len(p);
		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
			frame_len, ip_checksum);

		skb_put(skb, frame_len);
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 frame_len, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb->ip_summed = ip_checksum;
		if (ip_checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);
	}

	xgmac_rx_refill(priv);

	return count;
}

/**
 *  xgmac_poll - xgmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *   This function implements the reception process.
 *   It also runs the Tx completion path.
 */
static int xgmac_poll(struct napi_struct *napi, int budget)
{
	struct xgmac_priv *priv = container_of(napi,
				       struct xgmac_priv, napi);
	int work_done = 0;

	xgmac_tx_complete(priv);
	work_done = xgmac_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
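		/* All budgeted work done; re-enable the Rx/Tx interrupts
		 * that xgmac_interrupt() masked off */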
		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
	}
	return work_done;
}

/**
 *  xgmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void xgmac_tx_timeout(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	xgmac_tx_err(priv);
}

/**
 *  xgmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void xgmac_set_rx_mode(struct net_device *dev)
{
	int i;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned int value = 0;
	u32 hash_filter[XGMAC_NUM_HASH];
	int reg = 1;
	struct netdev_hw_addr *ha;
	bool use_hash = false;

	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
		 netdev_mc_count(dev), netdev_uc_count(dev));

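	/* Promiscuous mode bypasses all address filtering */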
	if (dev->flags & IFF_PROMISC) {
		writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
		return;
	}

	memset(hash_filter, 0, sizeof(hash_filter));

	if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_uc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

	if (dev->flags & IFF_ALLMULTI) {
		value |= XGMAC_FRAME_FILTER_PM;
		goto out;
	}

	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_mc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The most significant 4 bits determine the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register. */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

out:
	for (i = 0; i < XGMAC_NUM_HASH; i++)
		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

	writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

/**
 *  xgmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transmission Unit (MTU) is used by the network
 *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig or ip link.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	int old_mtu;

	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
		return -EINVAL;
	}

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* Stop everything, get ready to change the MTU */
	if (!netif_running(dev))
		return 0;

	/* Bring the interface down and then back up */
	xgmac_stop(dev);
	return xgmac_open(dev);
}

static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
	if (intr_status & XGMAC_INT_STAT_PMT) {
		netdev_dbg(priv->dev, "received Magic frame\n");
		/* clear the PMT bits 5 and 6 by reading the PMT */
		readl(ioaddr + XGMAC_PMT);
	}
	return IRQ_HANDLED;
}

static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	bool tx_err = false;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	struct xgmac_extra_stats *x = &priv->xstats;

	/* read the status register (CSR5) */
	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
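	/* Writing the handled bits back to the status register
	 * acknowledges (clears) them */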
	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);

	/* It displays the DMA process states (CSR5 register) */
	/* ABNORMAL interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		if (intr_status & DMA_STATUS_TJT) {
			netdev_err(priv->dev, "transmit jabber\n");
			x->tx_jabber++;
		}
		if (intr_status & DMA_STATUS_RU)
			x->rx_buf_unav++;
		if (intr_status & DMA_STATUS_RPS) {
			netdev_err(priv->dev, "receive process stopped\n");
			x->rx_process_stopped++;
		}
		if (intr_status & DMA_STATUS_ETI) {
			netdev_err(priv->dev, "transmit early interrupt\n");
			x->tx_early++;
		}
		if (intr_status & DMA_STATUS_TPS) {
			netdev_err(priv->dev, "transmit process stopped\n");
			x->tx_process_stopped++;
			tx_err = true;
		}
		if (intr_status & DMA_STATUS_FBI) {
			netdev_err(priv->dev, "fatal bus error\n");
			x->fatal_bus_error++;
			tx_err = true;
		}

		if (tx_err)
			xgmac_tx_err(priv);
	}

	/* TX/RX NORMAL interrupts */
	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
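		/* Mask everything but the abnormal interrupts; xgmac_poll()
		 * restores the full mask once the rings are serviced */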
		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled. */
static void xgmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	xgmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static struct rtnl_link_stats64 *
xgmac_get_stats64(struct net_device *dev,
		  struct rtnl_link_stats64 *storage)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *base = priv->base;
	u32 count;

	spin_lock_bh(&priv->stats_lock);
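	/* Freeze the MMC counters so the reads below form a consistent
	 * snapshot; they are unfrozen again at the end */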
	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);

	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;

	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);

	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;

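	/* frames transmitted good-or-bad minus the good-only count
	 * gives the number of Tx errors */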
	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
	storage->tx_packets = count;
	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);

	writel(0, base + XGMAC_MMC_CTRL);
	spin_unlock_bh(&priv->stats_lock);
	return storage;
}

static int xgmac_set_mac_address(struct net_device *dev, void *p)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);

	return 0;
}

static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
{
	u32 ctrl;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	ctrl = readl(ioaddr + XGMAC_CONTROL);
	if (features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	else
		ctrl &= ~XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	return 0;
}

static const struct net_device_ops xgmac_netdev_ops = {
	.ndo_open = xgmac_open,
	.ndo_start_xmit = xgmac_xmit,
	.ndo_stop = xgmac_stop,
	.ndo_change_mtu = xgmac_change_mtu,
	.ndo_set_rx_mode = xgmac_set_rx_mode,
	.ndo_tx_timeout = xgmac_tx_timeout,
	.ndo_get_stats64 = xgmac_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgmac_poll_controller,
#endif
	.ndo_set_mac_address = xgmac_set_mac_address,
	.ndo_set_features = xgmac_set_features,
};

static int xgmac_ethtool_getsettings(struct net_device *dev,
					  struct ethtool_cmd *cmd)
{
	cmd->autoneg = 0;
	cmd->duplex = DUPLEX_FULL;
	ethtool_cmd_speed_set(cmd, 10000);
	cmd->supported = 0;
	cmd->advertising = 0;
	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static void xgmac_get_pauseparam(struct net_device *netdev,
				      struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	pause->rx_pause = priv->rx_pause;
	pause->tx_pause = priv->tx_pause;
}

static int xgmac_set_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct xgmac_priv *priv = netdev_priv(netdev);

	if (pause->autoneg)
		return -EINVAL;

	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
}

struct xgmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_offset;
	bool is_reg;
};

#define XGMAC_STAT(m)	\
	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
#define XGMAC_HW_STAT(m, reg_offset)	\
	{ #m, reg_offset, true }

static const struct xgmac_stats xgmac_gstrings_stats[] = {
	XGMAC_STAT(tx_frame_flushed),
	XGMAC_STAT(tx_payload_error),
	XGMAC_STAT(tx_ip_header_error),
	XGMAC_STAT(tx_local_fault),
	XGMAC_STAT(tx_remote_fault),
	XGMAC_STAT(tx_early),
	XGMAC_STAT(tx_process_stopped),
	XGMAC_STAT(tx_jabber),
	XGMAC_STAT(rx_buf_unav),
	XGMAC_STAT(rx_process_stopped),
	XGMAC_STAT(rx_payload_error),
	XGMAC_STAT(rx_ip_header_error),
	XGMAC_STAT(rx_da_filter_fail),
	XGMAC_STAT(rx_sa_filter_fail),
	XGMAC_STAT(fatal_bus_error),
	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
};
#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)

static void xgmac_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *dummy,
					 u64 *data)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	void *p = priv;
	int i;

	for (i = 0; i < XGMAC_STATS_LEN; i++) {
		if (xgmac_gstrings_stats[i].is_reg)
			*data++ = readl(priv->base +
				xgmac_gstrings_stats[i].stat_offset);
		else
			*data++ = *(u32 *)(p +
				xgmac_gstrings_stats[i].stat_offset);
	}
}

static int xgmac_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return XGMAC_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void xgmac_get_strings(struct net_device *dev, u32 stringset,
				   u8 *data)
{
	int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < XGMAC_STATS_LEN; i++) {
			memcpy(p, xgmac_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static void xgmac_get_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	if (device_can_wakeup(priv->device)) {
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
		wol->wolopts = priv->wolopts;
	}
}

static int xgmac_set_wol(struct net_device *dev,
			      struct ethtool_wolinfo *wol)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	u32 support = WAKE_MAGIC | WAKE_UCAST;

	if (!device_can_wakeup(priv->device))
		return -EOPNOTSUPP;

	if (wol->wolopts & ~support)
		return -EINVAL;

	priv->wolopts = wol->wolopts;

	if (wol->wolopts) {
		device_set_wakeup_enable(priv->device, 1);
		enable_irq_wake(dev->irq);
	} else {
		device_set_wakeup_enable(priv->device, 0);
		disable_irq_wake(dev->irq);
	}

	return 0;
}

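/* Wake-on-LAN is controlled from userspace through ethtool, for
 * example (the interface name is illustrative):
 *	ethtool -s eth0 wol g	# magic packet
 *	ethtool -s eth0 wol u	# unicast
 *	ethtool -s eth0 wol d	# disable
 */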
static const struct ethtool_ops xgmac_ethtool_ops = {
	.get_settings = xgmac_ethtool_getsettings,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = xgmac_get_pauseparam,
	.set_pauseparam = xgmac_set_pauseparam,
	.get_ethtool_stats = xgmac_get_ethtool_stats,
	.get_strings = xgmac_get_strings,
	.get_wol = xgmac_get_wol,
	.set_wol = xgmac_set_wol,
	.get_sset_count = xgmac_get_sset_count,
};

/**
 * xgmac_probe
 * @pdev: platform device pointer
 * Description: the driver is initialized through platform_device.
 */
static int xgmac_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct xgmac_priv *priv = NULL;
	u32 uid;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
	if (!ndev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);
	ether_setup(ndev);
	ndev->netdev_ops = &xgmac_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
	spin_lock_init(&priv->stats_lock);

	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		netdev_err(ndev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	uid = readl(priv->base + XGMAC_VERSION);
	netdev_info(ndev, "h/w version is 0x%x\n", uid);

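	/* Mask all DMA interrupts until the interface is brought up */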
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq == -ENXIO) {
		netdev_err(ndev, "No irq resource\n");
		ret = ndev->irq;
		goto err_irq;
	}

	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d)\n",
			ndev->irq, ret);
		goto err_irq;
	}

	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq == -ENXIO) {
		netdev_err(ndev, "No pmt irq resource\n");
		ret = priv->pmt_irq;
		goto err_pmt_irq;
	}

	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d)\n",
			priv->pmt_irq, ret);
		goto err_pmt_irq;
	}

	device_set_wakeup_capable(&pdev->dev, 1);
	if (device_can_wakeup(priv->device))
		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */

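	/* Advertise checksum offload only when the hardware feature
	 * register reports a Tx checksum engine */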
	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features;
	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* Get the MAC address */
	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
	if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid",
			 ndev->dev_addr);

	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
	ret = register_netdev(ndev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	netif_napi_del(&priv->napi);
	free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
	free_irq(ndev->irq, ndev);
err_irq:
	iounmap(priv->base);
err_io:
	free_netdev(ndev);
err_alloc:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

/**
 * xgmac_remove
 * @pdev: platform device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, frees the IRQ lines, releases the DMA descriptor rings,
 * unregisters the net device and unmaps the allocated memory.
 */
static int xgmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xgmac_priv *priv = netdev_priv(ndev);
	struct resource *res;

	xgmac_mac_disable(priv->base);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);
	free_irq(priv->pmt_irq, ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
	if (mode & WAKE_UCAST)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

	writel(pmt, ioaddr + XGMAC_PMT);
}

static int xgmac_suspend(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	u32 value;

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&priv->napi);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	if (device_may_wakeup(priv->device)) {
		/* Stop TX/RX DMA Only */
		value = readl(priv->base + XGMAC_DMA_CONTROL);
		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
		writel(value, priv->base + XGMAC_DMA_CONTROL);

		xgmac_pmt(priv->base, priv->wolopts);
	} else
		xgmac_mac_disable(priv->base);

	return 0;
}

static int xgmac_resume(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	void __iomem *ioaddr = priv->base;

	if (!netif_running(ndev))
		return 0;

	xgmac_pmt(ioaddr, 0);

	/* Enable the MAC and DMA */
	xgmac_mac_enable(ioaddr);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	netif_device_attach(ndev);
	napi_enable(&priv->napi);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);

static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
		.pm = &xgmac_pm_ops,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");