- /*
- * Copyright(c) 2015 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
- #include <linux/spinlock.h>
- #include "hfi.h"
- #include "mad.h"
- #include "qp.h"
- #include "verbs_txreq.h"
- #include "trace.h"
- static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
- {
- return (gid->global.interface_id == id &&
- (gid->global.subnet_prefix == gid_prefix ||
- gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
- }
- /*
- * hfi1_ruc_check_hdr - validate the packet header against the QP's
- * address attributes and, if the QP is armed for migration, migrate it
- *
- * This should be called with the QP r_lock held.
- *
- * The s_lock will be acquired around the hfi1_migrate_qp() call.
- */
- int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
- {
- __be64 guid;
- unsigned long flags;
- struct rvt_qp *qp = packet->qp;
- u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
- u32 dlid = packet->dlid;
- u32 slid = packet->slid;
- u32 sl = packet->sl;
- bool migrated = packet->migrated;
- u16 pkey = packet->pkey;
- if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
- if (!packet->grh) {
- if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH) &&
- (packet->etype != RHF_RCV_TYPE_BYPASS))
- return 1;
- } else {
- const struct ib_global_route *grh;
- if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH))
- return 1;
- grh = rdma_ah_read_grh(&qp->alt_ah_attr);
- guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
- guid))
- return 1;
- if (!gid_ok(
- &packet->grh->sgid,
- grh->dgid.global.subnet_prefix,
- grh->dgid.global.interface_id))
- return 1;
- }
- if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
- sc5, slid))) {
- hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
- slid, dlid);
- return 1;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
- ppd_from_ibp(ibp)->port !=
- rdma_ah_get_port_num(&qp->alt_ah_attr))
- return 1;
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_migrate_qp(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else {
- if (!packet->grh) {
- if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH) &&
- (packet->etype != RHF_RCV_TYPE_BYPASS))
- return 1;
- } else {
- const struct ib_global_route *grh;
- if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH))
- return 1;
- grh = rdma_ah_read_grh(&qp->remote_ah_attr);
- guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
- guid))
- return 1;
- if (!gid_ok(
- &packet->grh->sgid,
- grh->dgid.global.subnet_prefix,
- grh->dgid.global.interface_id))
- return 1;
- }
- if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
- sc5, slid))) {
- hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
- slid, dlid);
- return 1;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
- ppd_from_ibp(ibp)->port != qp->port_num)
- return 1;
- if (qp->s_mig_state == IB_MIG_REARM && !migrated)
- qp->s_mig_state = IB_MIG_ARMED;
- }
- return 0;
- }
- /**
- * ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from hfi1_do_send() to
- * forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the send engine, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
- static void ruc_loopback(struct rvt_qp *sqp)
- {
- struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- bool release;
- int ret;
- bool copy_last = false;
- int local_ops = 0;
- rcu_read_lock();
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
- sqp->remote_qpn);
- spin_lock_irqsave(&sqp->s_lock, flags);
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
- sqp->s_flags |= RVT_S_BUSY;
- again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
- release = true;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_REG_MR:
- goto send_comp;
- case IB_WR_LOCAL_INV:
- if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
- if (rvt_invalidate_rkey(sqp,
- wqe->wr.ex.invalidate_rkey))
- send_status = IB_WC_LOC_PROT_ERR;
- local_ops = 1;
- }
- goto send_comp;
- case IB_WR_SEND_WITH_INV:
- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
- wc.wc_flags = IB_WC_WITH_INVALIDATE;
- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
- }
- goto send;
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- send:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- if (wqe->length > qp->r_len)
- goto inv_err;
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* skip copy_last set and qp_access_flags recheck */
- goto do_write;
- case IB_WR_RDMA_WRITE:
- copy_last = rvt_is_user_qp(qp);
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- do_write:
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
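- /*
- * The data flows from the responder's rkey-mapped buffer back into the
- * requester's sg_list, so skip the rvt_put_ss() of r_sge below and let
- * the copy loop drop the source MR references instead.
- */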
- release = false;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
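- /*
- * atomic64_add_return() yields the new value, so subtract sdata to
- * report the prior contents for FETCH_AND_ADD; cmpxchg() already
- * returns the prior contents for CMP_AND_SWP.
- */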
- maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *)sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64)atomic64_add_return(sdata, maddr) - sdata :
- (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
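- /* copy the payload from the sender's SGE list into the destination SGE list */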
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
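- /* no receive WQE consumed (plain RDMA WRITE/READ/atomic) means no receive completion */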
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
- send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
- flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- hfi1_send_complete(sqp, wqe, send_status);
- if (local_ops) {
- atomic_dec(&sqp->local_ops_pending);
- local_ops = 0;
- }
- goto again;
- rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
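- /* a retry count of 7 means retry forever, so only decrement finite counts */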
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
- op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
- inv_err:
- send_status =
- sqp->ibqp.qp_type == IB_QPT_RC ?
- IB_WC_REM_INV_REQ_ERR :
- IB_WC_SUCCESS;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
- acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
- err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
- serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- hfi1_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
- clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
- unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- done:
- rcu_read_unlock();
- }
- /**
- * hfi1_make_grh - construct a GRH header
- * @ibp: a pointer to the IB port
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: size of header after grh being sent in dwords
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
- u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
- const struct ib_global_route *grh, u32 hwords, u32 nwords)
- {
- hdr->version_tclass_flow =
- cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
- (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
- (grh->flow_label << IB_GRH_FLOW_SHIFT));
- hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
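- /* paylen is in bytes: (header dwords after the GRH + payload dwords) << 2 */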
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- hdr->next_hdr = IB_GRH_NEXT_HDR;
- hdr->hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
- hdr->sgid.global.interface_id =
- grh->sgid_index < HFI1_GUIDS_PER_PORT ?
- get_sguid(ibp, grh->sgid_index) :
- get_sguid(ibp, HFI1_PORT_GUID_INDEX);
- hdr->dgid = grh->dgid;
- /* GRH header size in 32-bit words. */
- return sizeof(struct ib_grh) / sizeof(u32);
- }
- #define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
- hdr.ibh.u.oth.bth[2]) / 4)
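- /* dword index of bth[2] (the PSN) in the SDMA header, used as the AHG edit offset */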
- /**
- * build_ahg - create ahg in s_ahg
- * @qp: a pointer to QP
- * @npsn: the next PSN for the request/response
- *
- * This routine handles the AHG by allocating an ahg entry and arranging
- * for the header of the first middle packet to be copied for reuse.
- *
- * Subsequent middle packets use the copied entry, editing the
- * PSN with 1 or 2 edits.
- */
- static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
- {
- struct hfi1_qp_priv *priv = qp->priv;
- if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
- clear_ahg(qp);
- if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
- /* first middle that needs copy */
- if (qp->s_ahgidx < 0)
- qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
- if (qp->s_ahgidx >= 0) {
- qp->s_ahgpsn = npsn;
- priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
- /* save to protect a change in another thread */
- priv->s_ahg->ahgidx = qp->s_ahgidx;
- qp->s_flags |= HFI1_S_AHG_VALID;
- }
- } else {
- /* subsequent middle after valid */
- if (qp->s_ahgidx >= 0) {
- priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
- priv->s_ahg->ahgidx = qp->s_ahgidx;
- priv->s_ahg->ahgcount++;
- priv->s_ahg->ahgdesc[0] =
- sdma_build_ahg_descriptor(
- (__force u16)cpu_to_be16((u16)npsn),
- BTH2_OFFSET,
- 16,
- 16);
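- /* a second edit is needed only when the upper 16 bits of the PSN changed */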
- if ((npsn & 0xffff0000) !=
- (qp->s_ahgpsn & 0xffff0000)) {
- priv->s_ahg->ahgcount++;
- priv->s_ahg->ahgdesc[1] =
- sdma_build_ahg_descriptor(
- (__force u16)cpu_to_be16(
- (u16)(npsn >> 16)),
- BTH2_OFFSET,
- 0,
- 16);
- }
- }
- }
- }
- static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
- struct ib_other_headers *ohdr,
- u32 bth0, u32 bth1, u32 bth2)
- {
- bth1 |= qp->remote_qpn;
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(bth1);
- ohdr->bth[2] = cpu_to_be32(bth2);
- }
- /**
- * hfi1_make_ruc_header_16B - build a 16B header
- * @qp: the queue pair
- * @ohdr: a pointer to the destination header memory
- * @bth0: bth0 passed in from the RC/UC builder
- * @bth2: bth2 passed in from the RC/UC builder
- * @middle: non-zero indicates AHG "could" be used
- * @ps: the current packet state
- *
- * This routine may disarm ahg under these situations:
- * - packet needs a GRH
- * - BECN needed
- * - migration state not IB_MIG_MIGRATED
- */
- static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
- struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps)
- {
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibport *ibp = ps->ibp;
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 bth1 = 0;
- u32 slid;
- u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
- u8 l4 = OPA_16B_L4_IB_LOCAL;
- u8 extra_bytes = hfi1_get_16b_padding(
- (ps->s_txreq->hdr_dwords << 2),
- ps->s_txreq->s_cur_size);
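- /* nwords: payload + pad + tail (LT) byte in dwords, plus the ICRC */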
- u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
- extra_bytes + SIZE_OF_LT) >> 2);
- bool becn = false;
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
- hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
- struct ib_grh *grh;
- struct ib_global_route *grd =
- rdma_ah_retrieve_grh(&qp->remote_ah_attr);
- /*
- * Ensure OPA GIDs are transformed to IB gids
- * before creating the GRH.
- */
- if (grd->sgid_index == OPA_GID_INDEX)
- grd->sgid_index = 0;
- grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
- l4 = OPA_16B_L4_IB_GLOBAL;
- ps->s_txreq->hdr_dwords +=
- hfi1_make_grh(ibp, grh, grd,
- ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
- nwords);
- middle = 0;
- }
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth1 |= OPA_BTH_MIG_REQ;
- else
- middle = 0;
- if (qp->s_flags & RVT_S_ECN) {
- qp->s_flags &= ~RVT_S_ECN;
- /* we recently received a FECN, so return a BECN */
- becn = true;
- middle = 0;
- }
- if (middle)
- build_ahg(qp, bth2);
- else
- qp->s_flags &= ~HFI1_S_AHG_VALID;
- bth0 |= pkey;
- bth0 |= extra_bytes << 20;
- hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
- if (!ppd->lid)
- slid = be32_to_cpu(OPA_LID_PERMISSIVE);
- else
- slid = ppd->lid |
- (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
- ((1 << ppd->lmc) - 1));
- hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
- slid,
- opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
- 16B),
- (ps->s_txreq->hdr_dwords + nwords) >> 1,
- pkey, becn, 0, l4, priv->s_sc);
- }
- /**
- * hfi1_make_ruc_header_9B - build a 9B header
- * @qp: the queue pair
- * @ohdr: a pointer to the destination header memory
- * @bth0: bth0 passed in from the RC/UC builder
- * @bth2: bth2 passed in from the RC/UC builder
- * @middle: non-zero indicates AHG "could" be used
- * @ps: the current packet state
- *
- * This routine may disarm ahg under these situations:
- * - packet needs a GRH
- * - BECN needed
- * - migration state not IB_MIG_MIGRATED
- */
- static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
- struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps)
- {
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibport *ibp = ps->ibp;
- u32 bth1 = 0;
- u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
- u16 lrh0 = HFI1_LRH_BTH;
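- /* pad the payload out to the next dword boundary */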
- u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
- u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
- extra_bytes) >> 2);
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
- struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
- lrh0 = HFI1_LRH_GRH;
- ps->s_txreq->hdr_dwords +=
- hfi1_make_grh(ibp, grh,
- rdma_ah_read_grh(&qp->remote_ah_attr),
- ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
- nwords);
- middle = 0;
- }
- lrh0 |= (priv->s_sc & 0xf) << 12 |
- (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- else
- middle = 0;
- if (qp->s_flags & RVT_S_ECN) {
- qp->s_flags &= ~RVT_S_ECN;
- /* we recently received a FECN, so return a BECN */
- bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
- middle = 0;
- }
- if (middle)
- build_ahg(qp, bth2);
- else
- qp->s_flags &= ~HFI1_S_AHG_VALID;
- bth0 |= pkey;
- bth0 |= extra_bytes << 20;
- hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
- hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
- lrh0,
- ps->s_txreq->hdr_dwords + nwords,
- opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
- ppd_from_ibp(ibp)->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- }
- typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
- struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps);
- /* We support only two types - 9B and 16B for now */
- static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
- [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
- [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
- };
- void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps)
- {
- struct hfi1_qp_priv *priv = qp->priv;
- /*
- * reset s_ahg/AHG fields
- *
- * This ensures that the ahgentry/ahgcount
- * are at a non-AHG default to protect
- * build_verbs_tx_desc() from using
- * a stale ahgidx.
- *
- * build_ahg() will modify as appropriate
- * to use the AHG feature.
- */
- priv->s_ahg->tx_flags = 0;
- priv->s_ahg->ahgcount = 0;
- priv->s_ahg->ahgidx = 0;
- /* Make the appropriate header */
- hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
- }
- /* when sending, force a reschedule every one of these periods */
- #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
- /**
- * schedule_send_yield - test for a yield required for QP send engine
- * @qp: a pointer to QP
- * @ps: a pointer to a structure with commonly looked-up values for
- * the send engine progress
- *
- * This routine checks if the time slice for the QP has expired and,
- * if so, queues an additional work entry so that other QPs get an
- * opportunity to be scheduled. It returns true if a yield is
- * required, otherwise false is returned.
- */
- static bool schedule_send_yield(struct rvt_qp *qp,
- struct hfi1_pkt_state *ps)
- {
- ps->pkts_sent = true;
- if (unlikely(time_after(jiffies, ps->timeout))) {
- if (!ps->in_thread ||
- workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
- spin_lock_irqsave(&qp->s_lock, ps->flags);
- qp->s_flags &= ~RVT_S_BUSY;
- hfi1_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, ps->flags);
- this_cpu_inc(*ps->ppd->dd->send_schedule);
- trace_hfi1_rc_expired_time_slice(qp, true);
- return true;
- }
- cond_resched();
- this_cpu_inc(*ps->ppd->dd->send_schedule);
- ps->timeout = jiffies + ps->timeout_int;
- }
- trace_hfi1_rc_expired_time_slice(qp, false);
- return false;
- }
- void hfi1_do_send_from_rvt(struct rvt_qp *qp)
- {
- hfi1_do_send(qp, false);
- }
- void _hfi1_do_send(struct work_struct *work)
- {
- struct iowait *wait = container_of(work, struct iowait, iowork);
- struct rvt_qp *qp = iowait_to_qp(wait);
- hfi1_do_send(qp, true);
- }
- /**
- * hfi1_do_send - perform a send on a QP
- * @qp: a pointer to the QP
- * @in_thread: true if in a workqueue thread
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP.
- * Otherwise, two threads could send packets out of order.
- */
- void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
- {
- struct hfi1_pkt_state ps;
- struct hfi1_qp_priv *priv = qp->priv;
- int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
- ps.dev = to_idev(qp->ibqp.device);
- ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
- ps.ppd = ppd_from_ibp(ps.ibp);
- ps.in_thread = in_thread;
- trace_hfi1_rc_do_send(qp, in_thread);
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
- ~((1 << ps.ppd->lmc) - 1)) ==
- ps.ppd->lid)) {
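- /* the DLID, ignoring the LMC path bits, is our own LID: loop the request back locally */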
- ruc_loopback(qp);
- return;
- }
- make_req = hfi1_make_rc_req;
- ps.timeout_int = qp->timeout_jiffies;
- break;
- case IB_QPT_UC:
- if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
- ~((1 << ps.ppd->lmc) - 1)) ==
- ps.ppd->lid)) {
- ruc_loopback(qp);
- return;
- }
- make_req = hfi1_make_uc_req;
- ps.timeout_int = SEND_RESCHED_TIMEOUT;
- break;
- default:
- make_req = hfi1_make_ud_req;
- ps.timeout_int = SEND_RESCHED_TIMEOUT;
- }
- spin_lock_irqsave(&qp->s_lock, ps.flags);
- /* Return if we are already busy processing a work request. */
- if (!hfi1_send_ok(qp)) {
- spin_unlock_irqrestore(&qp->s_lock, ps.flags);
- return;
- }
- qp->s_flags |= RVT_S_BUSY;
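- /* use 1/8 of the QP timeout (RC) or reschedule interval as the time slice */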
- ps.timeout_int = ps.timeout_int / 8;
- ps.timeout = jiffies + ps.timeout_int;
- ps.cpu = priv->s_sde ? priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(ps.ppd->dd->node));
- ps.pkts_sent = false;
- /* ensure a pre-built packet is handled */
- ps.s_txreq = get_waiting_verbs_txreq(qp);
- do {
- /* Check for a constructed packet to be sent. */
- if (ps.s_txreq) {
- spin_unlock_irqrestore(&qp->s_lock, ps.flags);
- /*
- * If the packet cannot be sent now, return and
- * the send engine will be woken up later.
- */
- if (hfi1_verbs_send(qp, &ps))
- return;
- /* allow other tasks to run */
- if (schedule_send_yield(qp, &ps))
- return;
- spin_lock_irqsave(&qp->s_lock, ps.flags);
- }
- } while (make_req(qp, &ps));
- iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
- spin_unlock_irqrestore(&qp->s_lock, ps.flags);
- }
- /*
- * This should be called with s_lock held.
- */
- void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
- {
- u32 old_last, last;
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
- last = qp->s_last;
- old_last = last;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- if (++last >= qp->s_size)
- last = 0;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
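- /* UD-type WQEs hold a reference on the AH; drop it now that the WQE is complete */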
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_hfi1_wc_opcode[wqe->wr.opcode],
- status);
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
- }