/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"
#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"
MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
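
/*
 * Editorial note: bar0_off() indexes 4-byte register slots by BAR number
 * (base + bar * 4), and bar2_off() shifts the BAR index down by two so
 * that BAR 2 lands on slot 0.  For example, bar2_off(base, 2) == base and
 * bar2_off(base, 4) == base + 8, which matches the 64-bit XLAT23/XLAT45
 * register pairs dumped by debugfs below.
 */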

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx.  Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw.  Both "
		 "sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
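
/*
 * Editorial note: without native readq/writeq (e.g. on 32-bit builds),
 * the fallbacks above split each 64-bit MMIO access into two 32-bit
 * accesses, low half first; the two halves are not atomic as seen by
 * the device.
 */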

static inline int pdev_is_atom(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
		return 1;
	}
	return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		return 1;
	}
	return 0;
}

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}
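
/*
 * Editorial note: ndev_is_unsafe() reports a hazard only while it is not
 * being deliberately ignored.  A client that queries via
 * ndev_ignore_unsafe() below both learns whether the resource is unsafe
 * and opts in to using it anyway.
 */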

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}
static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
	}
	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));
	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;
	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;
	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;
	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}
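
/*
 * Editorial example (illustrative values): with db_vec_shift == 5, each
 * vector owns five doorbell bits, so ndev_vec_mask(ndev, 1) returns
 * 0x1f << 5 == 0x3e0.  Callers AND the result with db_valid_mask to drop
 * bits beyond the hardware's actual doorbell count.
 */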

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;
	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev_pdev(ndev);
	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);
	/* Try to set up msix irq */

	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;
err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;
	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}
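
/*
 * Editorial note: ndev_init_isr() above degrades gracefully, trying MSI-X
 * (one vector per doorbell group) first, then MSI, then shared intx.  In
 * the single-vector fallbacks, db_vec_shift is set to total_shift so the
 * lone handler services every doorbell bit.
 */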

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;
	ndev = filp->private_data;
	mmio = ndev->self_mmio;
	buf_size = min(count, 0x800ul);
	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");
	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));
	off += scnprintf(buf + off, buf_size - off,
			 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	off += scnprintf(buf + off, buf_size - off,
			 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	if (pdev_is_xeon(ndev->ntb.pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT45 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT45 -\t\t%#018llx\n", u.v64);

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR45 -\t\t%#018llx\n", u.v64);
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");
		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		if (!pci_read_config_word(ndev->ntb.pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(ndev->ntb.pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
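
/*
 * Editorial note: the dump above is served from a file named "info" under
 * this device's debugfs directory (created in ndev_init_debugfs() below).
 * Assuming debugfs is mounted at /sys/kernel/debug and the module-level
 * directory uses the usual KBUILD_MODNAME naming:
 *
 *	cat /sys/kernel/debug/ntb_hw_intel/<pci-device>/info
 */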

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				  phys_addr_t *base,
				  resource_size_t *size,
				  resource_size_t *align,
				  resource_size_t *align_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;
	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);
	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);
	if (align)
		*align = pci_resource_len(ndev->ntb.pdev, bar);

	if (align_size)
		*align_size = 1;

	return 0;
}

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;
	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;
	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;
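
	/*
	 * Editorial example: for a 64 KiB (0x10000) BAR, a DMA address of
	 * 0x10000 passes this check while 0x18000 does not; the hardware
	 * translates only at BAR-size granularity.
	 */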

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
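
/*
 * Editorial sketch of a client's view of mw_set_trans (hypothetical
 * buffer and size names; uses the generic linux/ntb.h wrappers):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(&pdev->dev, mw_size, &dma, GFP_KERNEL);
 *
 *	// dma must be aligned to the BAR size, per the check above
 *	rc = ntb_mw_set_trans(ntb, 0, dma, mw_size);
 *
 * After this succeeds, peer writes into the matching outbound window
 * land in buf.
 */
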
static int intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev), "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}
static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	ndev = container_of(ntb, struct intel_ntb_dev, ntb);
	return ndev->db_vec_count;
}
static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector >= ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}
static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}
static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	return ndev_db_set_mask(ndev, db_bits,