static int sock_ep_bind()

in prov/sockets/src/sock_ep.c [739:903]

sock_ep_bind() implements the bind operation (fi_ep_bind()/fi_scalable_ep_bind()) for the sockets provider. Depending on the class of the bound fid, it attaches an event queue, completion queue, counter, address vector, or shared TX/RX context to the endpoint, and it propagates CQ, counter, and AV bindings to the endpoint's transmit and receive contexts according to the flags.

static int sock_ep_bind(struct fid *fid, struct fid *bfid, uint64_t flags)
{
	int ret;
	size_t i;
	struct sock_ep *ep;
	struct sock_eq *eq;
	struct sock_cq *cq;
	struct sock_av *av;
	struct sock_cntr *cntr;
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;

	ret = ofi_ep_bind_valid(&sock_prov, bfid, flags);
	if (ret)
		return ret;

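	/* The bind may target a regular endpoint (FI_CLASS_EP) or a scalable
	 * endpoint (FI_CLASS_SEP); both resolve to the same struct sock_ep. */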
	switch (fid->fclass) {
	case FI_CLASS_EP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;

	case FI_CLASS_SEP:
		ep = container_of(fid, struct sock_ep, ep.fid);
		break;

	default:
		return -FI_EINVAL;
	}

	switch (bfid->fclass) {
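	/* EQ bind: record the event queue on the endpoint. */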
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct sock_eq, eq.fid);
		ep->attr->eq = eq;
		break;

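	/* MR bind: a no-op for this provider; no per-endpoint state is kept. */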
	case FI_CLASS_MR:
		return 0;

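	/* CQ bind: the CQ must belong to the endpoint's domain and is
	 * propagated to every TX context when FI_SEND is set and to every
	 * RX context when FI_RECV is set. */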
	case FI_CLASS_CQ:
		cq = container_of(bfid, struct sock_cq, cq_fid.fid);
		if (ep->attr->domain != cq->domain)
			return -FI_EINVAL;

		if (flags & FI_SEND) {
			for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->attr->tx_array[i];

				if (!tx_ctx)
					continue;

				ret = sock_ctx_bind_cq(&tx_ctx->fid.ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}

		if (flags & FI_RECV) {
			for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->attr->rx_array[i];

				if (!rx_ctx)
					continue;

				ret = sock_ctx_bind_cq(&rx_ctx->ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}
		break;

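	/* Counter bind: same domain check; TX contexts receive the counter
	 * for FI_SEND/FI_WRITE/FI_READ, RX contexts for FI_RECV and the
	 * remote access flags. */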
	case FI_CLASS_CNTR:
		cntr = container_of(bfid, struct sock_cntr, cntr_fid.fid);
		if (ep->attr->domain != cntr->domain)
			return -FI_EINVAL;

		if (flags & FI_SEND || flags & FI_WRITE || flags & FI_READ) {
			for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
				tx_ctx = ep->attr->tx_array[i];

				if (!tx_ctx)
					continue;

				ret = sock_ctx_bind_cntr(&tx_ctx->fid.ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}

		if (flags & FI_RECV || flags & FI_REMOTE_READ ||
		    flags & FI_REMOTE_WRITE) {
			for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
				rx_ctx = ep->attr->rx_array[i];

				if (!rx_ctx)
					continue;

				ret = sock_ctx_bind_cntr(&rx_ctx->ctx.fid, bfid, flags);
				if (ret)
					return ret;
			}
		}
		break;

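	/* AV bind: store the AV on the endpoint and on all of its TX/RX
	 * contexts, then add the endpoint to the AV's endpoint list. */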
	case FI_CLASS_AV:
		av = container_of(bfid, struct sock_av, av_fid.fid);
		if (ep->attr->domain != av->domain)
			return -FI_EINVAL;

		ep->attr->av = av;
		ofi_atomic_inc32(&av->ref);

		if (ep->attr->tx_ctx &&
		    ep->attr->tx_ctx->fid.ctx.fid.fclass == FI_CLASS_TX_CTX) {
			ep->attr->tx_ctx->av = av;
		}

		if (ep->attr->rx_ctx &&
		    ep->attr->rx_ctx->ctx.fid.fclass == FI_CLASS_RX_CTX)
			ep->attr->rx_ctx->av = av;

		for (i = 0; i < ep->attr->ep_attr.tx_ctx_cnt; i++) {
			if (ep->attr->tx_array[i])
				ep->attr->tx_array[i]->av = av;
		}

		for (i = 0; i < ep->attr->ep_attr.rx_ctx_cnt; i++) {
			if (ep->attr->rx_array[i])
				ep->attr->rx_array[i]->av = av;
		}
		fastlock_acquire(&av->list_lock);
		ret = fid_list_insert(&av->ep_list, &ep->attr->lock, &ep->ep.fid);
		if (ret) {
			SOCK_LOG_ERROR("Error in adding fid in the EP list\n");
			fastlock_release(&av->list_lock);
			return ret;
		}
		fastlock_release(&av->list_lock);
		break;

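	/* Shared TX context: link the endpoint into the STX's ep_list and
	 * mark the endpoint's TX context as shared. */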
	case FI_CLASS_STX_CTX:
		tx_ctx = container_of(bfid, struct sock_tx_ctx, fid.stx.fid);
		fastlock_acquire(&tx_ctx->lock);
		dlist_insert_tail(&ep->attr->tx_ctx_entry, &tx_ctx->ep_list);
		fastlock_release(&tx_ctx->lock);

		ep->attr->tx_ctx->use_shared = 1;
		ep->attr->tx_ctx->stx_ctx = tx_ctx;
		break;

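	/* Shared RX context: same pattern as the shared TX context above. */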
	case FI_CLASS_SRX_CTX:
		rx_ctx = container_of(bfid, struct sock_rx_ctx, ctx);
		fastlock_acquire(&rx_ctx->lock);
		dlist_insert_tail(&ep->attr->rx_ctx_entry, &rx_ctx->ep_list);
		fastlock_release(&rx_ctx->lock);

		ep->attr->rx_ctx->use_shared = 1;
		ep->attr->rx_ctx->srx_ctx = rx_ctx;
		break;

	default:
		return -ENOSYS;
	}

	return 0;
}
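
For context, a minimal caller sketch follows. It is a hedged illustration, not provider code: bind_ep_resources() and its arguments are hypothetical, and it assumes the endpoint, EQ, CQ, and AV were already created on the same fabric/domain with the sockets provider selected. The fi_ep_bind() calls dispatch through the endpoint's fid ops to sock_ep_bind() and exercise the FI_CLASS_EQ, FI_CLASS_CQ, and FI_CLASS_AV branches above.

#include <rdma/fabric.h>
#include <rdma/fi_domain.h>
#include <rdma/fi_endpoint.h>
#include <rdma/fi_eq.h>

/* Hypothetical helper: binds already-created resources to an endpoint
 * and enables it. */
static int bind_ep_resources(struct fid_ep *ep, struct fid_eq *eq,
			     struct fid_cq *cq, struct fid_av *av)
{
	int ret;

	/* FI_CLASS_EQ branch: the EQ is recorded on the endpoint. */
	ret = fi_ep_bind(ep, &eq->fid, 0);
	if (ret)
		return ret;

	/* FI_CLASS_CQ branch: FI_TRANSMIT (an alias of FI_SEND) and FI_RECV
	 * select which TX/RX contexts the CQ is propagated to. */
	ret = fi_ep_bind(ep, &cq->fid, FI_TRANSMIT | FI_RECV);
	if (ret)
		return ret;

	/* FI_CLASS_AV branch: the AV is stored on the endpoint and all of
	 * its contexts, and the EP joins the AV's endpoint list. */
	ret = fi_ep_bind(ep, &av->fid, 0);
	if (ret)
		return ret;

	return fi_enable(ep);
}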