/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iw_cxgb4.h"

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

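/*
 * Allocate the resources for a new QP: SQ/RQ qids, kernel shadow
 * queues (kernel QPs only), an RQT region, and DMA-coherent queue
 * memory.  Then post a FW_RI_RES_WR to write both EQ contexts in the
 * adapter and wait for the firmware reply.
 */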
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = -ENOMEM;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->sq.memsize, &(wq->sq.dma_addr),
					  GFP_KERNEL);
	if (!wq->sq.queue)
		goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

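/*
 * Build a SEND or SEND_WITH_INV WQE.  The payload is either copied
 * into the WQE as immediate data (IB_SEND_INLINE) or described by a
 * scatter/gather list.  The WQE size in 16-byte units is returned
 * via len16.
 */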
static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}
	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->send.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_SEND_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				     (void *)(unsigned long)wr->sg_list[i].addr,
				     wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->send.u.immd_src[0].r1 = 0;
			wqe->send.u.immd_src[0].r2 = 0;
			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->send.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->send.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->send.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->send.u.isgl_src[0].r1 = 0;
			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
			wqe->send.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

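/*
 * Build an RDMA WRITE WQE targeting the sink rkey and address from
 * the work request.  As with sends, the payload is either copied
 * inline or described by a scatter/gather list.
 */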
static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_WRITE_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->write.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_WRITE_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				     (void *)(unsigned long)wr->sg_list[i].addr,
				     wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->write.u.immd_src[0].r1 = 0;
			wqe->write.u.immd_src[0].r2 = 0;
			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->write.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->write.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->write.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->write.u.isgl_src[0].r1 = 0;
			wqe->write.u.isgl_src[0].nsge =
						       cpu_to_be16(wr->num_sge);
			wqe->write.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

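/*
 * Build an RDMA READ WQE.  Only a single SGE is supported; a request
 * with no SGEs is encoded as a zero-length read with fixed STAGs.
 */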
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

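/*
 * Build a receive WQE: copy the SGL from the work request and zero
 * any unused SGE slots up to T4_MAX_RECV_SGE.
 */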
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int i;
	int plen = 0;

	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += wr->sg_list[i].length;
		wqe->recv.isgl.sge[i].stag =
			cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.isgl.sge[i].len =
			cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.isgl.sge[i].to =
			cpu_to_be64(wr->sg_list[i].addr);
	}
	for (; i < T4_MAX_RECV_SGE; i++) {
		wqe->recv.isgl.sge[i].stag = 0;
		wqe->recv.isgl.sge[i].len = 0;
		wqe->recv.isgl.sge[i].to = 0;
	}
	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
	wqe->recv.isgl.r1 = 0;
	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
	wqe->recv.isgl.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

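/*
 * Build a fast-register WQE.  The page list is written into the WQE
 * as immediate data if it fits in T4_MAX_FR_IMMD bytes, otherwise it
 * is referenced by a DSGL pointing at the DMA-mapped page list.
 */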
static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{

	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	if (pbllen > T4_MAX_FR_IMMD) {
		struct c4iw_fr_page_list *c4pl =
				to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
				      16);
	}
	return 0;
}

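/*
 * Build a local-invalidate WQE for the STAG named in the work request.
 */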
static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

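/*
 * Post a chain of send work requests to the SQ.  The QP lock is held
 * while each ib_send_wr is translated into a T4 WQE, and the SQ
 * doorbell is rung once for all WQEs posted.
 */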
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq);
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

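/*
 * Post a chain of receive work requests to the RQ.  The QP lock is
 * held while each RQ WQE is built, and the RQ doorbell is rung once
 * for all WQEs posted.
 */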
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		if (len16 < 5)
			wqe->flits[8] = 0;

		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq);
		wr = wr->next;
		num_wrs--;
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

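/*
 * Translate the status of an error CQE into the layer type and error
 * code carried in a TERMINATE message.  Without a CQE, report a
 * local catastrophic error.
 */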
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

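/*
 * Build a zero-byte RDMA READ WQE and send it directly on the
 * connection's offload tx queue.
 */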
int c4iw_post_zb_read(struct c4iw_qp *qhp)
{
	union t4_wr *wqe;
	struct sk_buff *skb;
	u8 len16;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
	memset(wqe, 0, sizeof wqe->read);
	wqe->read.r2 = cpu_to_be64(0);
	wqe->read.stag_sink = cpu_to_be32(1);
	wqe->read.to_sink_hi = cpu_to_be32(0);
	wqe->read.to_sink_lo = cpu_to_be32(1);
	wqe->read.stag_src = cpu_to_be32(1);
	wqe->read.plen = cpu_to_be32(0);
	wqe->read.to_src_hi = cpu_to_be32(0);
	wqe->read.to_src_lo = cpu_to_be32(1);
	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);

	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

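/*
 * Send a FW_RI_INIT_WR carrying a TERMINATE message whose layer type
 * and error code are derived from the error CQE, if any.
 */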
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp, unsigned long *flag)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

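/*
 * Flush pending work on the QP.  User QPs are simply marked in error
 * along with their CQs; kernel QPs are flushed via __flush_qp().
 */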
static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
{
	struct c4iw_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		if (schp != rchp)
			t4_set_cq_in_error(&schp->cq);
		return;
	}
	__flush_qp(qhp, rchp, schp, flag);
}

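/*
 * Post a FW_RI_INIT_WR of type FINI to take the connection out of
 * RDMA mode, and wait for the firmware reply.
 */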
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (u64)&wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else {
		ret = wr_wait.ret;
		if (ret)
			printk(KERN_WARNING MOD
			       "%s: Abnormal close qpid %d ret %u\n",
			       pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
			       ret);
	}
out:
	PDBG("%s ret %d\n", __func__, ret);