in prov/gni/src/gnix_ep.c [1801:2064]
DIRECT_FN int gnix_ep_bind(fid_t fid, struct fid *bfid, uint64_t flags)
{
int ret = FI_SUCCESS;
struct gnix_fid_ep *ep;
struct gnix_fid_eq *eq;
struct gnix_fid_av *av;
struct gnix_fid_cq *cq;
struct gnix_fid_stx *stx;
struct gnix_fid_cntr *cntr;
struct gnix_fid_trx *trx_priv;
struct gnix_nic_attr nic_attr = {0};
GNIX_TRACE(FI_LOG_EP_CTRL, "\n");
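	/*
	 * Resolve the owning endpoint: the tx/rx contexts of a
	 * scalable endpoint keep a back-pointer to the underlying
	 * gnix_fid_ep they share.
	 */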
switch (fid->fclass) {
case FI_CLASS_TX_CTX:
case FI_CLASS_RX_CTX:
trx_priv = container_of(fid, struct gnix_fid_trx, ep_fid);
ep = trx_priv->ep;
break;
default:
ep = container_of(fid, struct gnix_fid_ep, ep_fid.fid);
}
ret = ofi_ep_bind_valid(&gnix_prov, bfid, flags);
if (ret)
return ret;
	/*
	 * Per the fi_endpoint man page, an object can't be bound
	 * to an ep after the ep has been enabled.
	 * For scalable endpoints, the rx/tx contexts are bound to the same
	 * gnix_ep, so we allow enabling of the tx before binding the rx and
	 * vice versa.
	 */
switch (fid->fclass) {
case FI_CLASS_TX_CTX:
if (ep->send_cq && ep->tx_enabled) {
return -FI_EOPBADSTATE;
}
break;
case FI_CLASS_RX_CTX:
if (ep->recv_cq && ep->rx_enabled) {
return -FI_EOPBADSTATE;
}
break;
default:
if ((ep->send_cq && ep->tx_enabled) ||
(ep->recv_cq && ep->rx_enabled)) {
return -FI_EOPBADSTATE;
}
}
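	/*
	 * Bind the object according to its class.  Each successful
	 * bind below takes a reference on the bound object so it
	 * outlives the endpoint's use of it.
	 */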
switch (bfid->fclass) {
case FI_CLASS_EQ:
eq = container_of(bfid, struct gnix_fid_eq, eq_fid.fid);
if (ep->domain->fabric != eq->fabric) {
ret = -FI_EINVAL;
break;
}
if (ep->eq) {
ret = -FI_EINVAL;
break;
}
ep->eq = eq;
_gnix_eq_poll_obj_add(eq, &ep->ep_fid.fid);
_gnix_ref_get(eq);
GNIX_DEBUG(FI_LOG_EP_CTRL, "Bound EQ to EP: %p, %p\n", eq, ep);
break;
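	/*
	 * CQs bind per direction: FI_TRANSMIT and FI_RECV may name the
	 * same CQ or different ones.  FI_SELECTIVE_COMPLETION suppresses
	 * completion entries unless an operation explicitly requests one
	 * with FI_COMPLETION.
	 */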
case FI_CLASS_CQ:
cq = container_of(bfid, struct gnix_fid_cq, cq_fid.fid);
if (ep->domain != cq->domain) {
ret = -FI_EINVAL;
break;
}
if (flags & FI_TRANSMIT) {
/* don't allow rebinding */
if (ep->send_cq) {
ret = -FI_EINVAL;
break;
}
ep->send_cq = cq;
if (flags & FI_SELECTIVE_COMPLETION) {
ep->send_selective_completion = 1;
}
_gnix_ref_get(cq);
}
if (flags & FI_RECV) {
/* don't allow rebinding */
if (ep->recv_cq) {
ret = -FI_EINVAL;
break;
}
ep->recv_cq = cq;
if (flags & FI_SELECTIVE_COMPLETION) {
ep->recv_selective_completion = 1;
}
_gnix_ref_get(cq);
}
break;
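	/*
	 * Binding an AV also initializes the endpoint's virtual
	 * channel (VC) state via _gnix_ep_init_vc().
	 */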
case FI_CLASS_AV:
av = container_of(bfid, struct gnix_fid_av, av_fid.fid);
if (ep->domain != av->domain) {
ret = -FI_EINVAL;
break;
}
ep->av = av;
_gnix_ep_init_vc(ep);
_gnix_ref_get(ep->av);
break;
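	/*
	 * Counters bind per event class (send, recv, read, write,
	 * remote read, remote write); each flag below rejects
	 * rebinding with -FI_EINVAL.
	 */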
case FI_CLASS_CNTR:
cntr = container_of(bfid, struct gnix_fid_cntr, cntr_fid.fid);
if (ep->domain != cntr->domain) {
ret = -FI_EINVAL;
break;
}
if (flags & FI_SEND) {
/* don't allow rebinding */
if (ep->send_cntr) {
GNIX_WARN(FI_LOG_EP_CTRL,
"cannot rebind send counter (%p)\n",
cntr);
ret = -FI_EINVAL;
break;
}
ep->send_cntr = cntr;
_gnix_ref_get(cntr);
}
if (flags & FI_RECV) {
/* don't allow rebinding */
if (ep->recv_cntr) {
GNIX_WARN(FI_LOG_EP_CTRL,
"cannot rebind recv counter (%p)\n",
cntr);
ret = -FI_EINVAL;
break;
}
ep->recv_cntr = cntr;
_gnix_ref_get(cntr);
}
if (flags & FI_WRITE) {
/* don't allow rebinding */
if (ep->write_cntr) {
GNIX_WARN(FI_LOG_EP_CTRL,
"cannot rebind write counter (%p)\n",
cntr);
ret = -FI_EINVAL;
break;
}
ep->write_cntr = cntr;
_gnix_ref_get(cntr);
}
if (flags & FI_READ) {
/* don't allow rebinding */
if (ep->read_cntr) {
GNIX_WARN(FI_LOG_EP_CTRL,
"cannot rebind read counter (%p)\n",
cntr);
ret = -FI_EINVAL;
break;
}
ep->read_cntr = cntr;
_gnix_ref_get(cntr);
}
if (flags & FI_REMOTE_WRITE) {
/* don't allow rebinding */
if (ep->rwrite_cntr) {
GNIX_WARN(FI_LOG_EP_CTRL,
"cannot rebind rwrite counter (%p)\n",
cntr);
ret = -FI_EINVAL;
break;
}
ep->rwrite_cntr = cntr;
_gnix_ref_get(cntr);
}
if (flags & FI_REMOTE_READ) {
/* don't allow rebinding */
if (ep->rread_cntr) {
GNIX_WARN(FI_LOG_EP_CTRL,
"cannot rebind rread counter (%p)\n",
cntr);
ret = -FI_EINVAL;
break;
}
ep->rread_cntr = cntr;
_gnix_ref_get(cntr);
}
break;
case FI_CLASS_STX_CTX:
stx = container_of(bfid, struct gnix_fid_stx, stx_fid.fid);
if (ep->domain != stx->domain) {
ret = -FI_EINVAL;
break;
}
	/*
	 * An STX can only be bound to an ep opened with
	 * ep_attr->tx_ctx_cnt set to FI_SHARED_CONTEXT, and
	 * only if a nic has not been previously bound.
	 */
if (ep->shared_tx == false || ep->nic) {
ret = -FI_EOPBADSTATE;
break;
}
	/*
	 * We force allocation of a nic to match the intent of the
	 * fi_endpoint man page: provide a TX context (aka gnix nic)
	 * that can be shared explicitly amongst endpoints.
	 */
if (stx->auth_key && ep->auth_key != stx->auth_key) {
ret = -FI_EINVAL;
break;
}
if (!stx->nic) {
nic_attr.must_alloc = true;
nic_attr.auth_key = ep->auth_key;
ret = gnix_nic_alloc(ep->domain, &nic_attr,
&stx->nic);
			if (ret != FI_SUCCESS) {
				GNIX_WARN(FI_LOG_EP_CTRL,
					  "gnix_nic_alloc call returned %d\n",
					  ret);
				break;
			}
stx->auth_key = nic_attr.auth_key;
}
ep->stx_ctx = stx;
_gnix_ref_get(ep->stx_ctx);
ep->nic = stx->nic;
if (ep->nic->smsg_callbacks == NULL)
ep->nic->smsg_callbacks = gnix_ep_smsg_callbacks;
_gnix_ref_get(ep->nic);
break;
	case FI_CLASS_MR: /* TODO: MR binding not yet supported */
default:
ret = -FI_ENOSYS;
break;
}
return ret;
}
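
For context, a minimal caller-side sketch of how an application reaches gnix_ep_bind() through the public libfabric API. The helper name bind_and_enable is illustrative (not from gnix_ep.c), the ep, cq, and av objects are assumed to have been opened from the same fi_domain, and error handling beyond propagation is elided:

#include <rdma/fabric.h>
#include <rdma/fi_domain.h>
#include <rdma/fi_endpoint.h>
#include <rdma/fi_eq.h>

/* Illustrative sketch: bind a CQ and an AV to an endpoint, then
 * enable it.  gnix_ep_bind() runs under each fi_ep_bind() call. */
static int bind_and_enable(struct fid_ep *ep, struct fid_cq *cq,
			   struct fid_av *av)
{
	int rc;

	/* Bind one CQ for both directions; FI_SELECTIVE_COMPLETION
	 * could be OR'd in to suppress unrequested completions. */
	rc = fi_ep_bind(ep, &cq->fid, FI_TRANSMIT | FI_RECV);
	if (rc)		/* e.g. -FI_EINVAL if cq came from another domain */
		return rc;

	rc = fi_ep_bind(ep, &av->fid, 0);
	if (rc)
		return rc;

	/* Binds must precede fi_enable(): once a direction is enabled
	 * with a CQ bound, gnix_ep_bind() returns -FI_EOPBADSTATE for
	 * that side. */
	return fi_enable(ep);
}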