static int fi_bgq_rx_ctx()

in prov/bgq/src/fi_bgq_sep.c [195:330]
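
This function implements fi_rx_context(3) for the provider's scalable endpoints: it
validates the requested receive-side capabilities, builds an fi_info describing the
new context, and creates the underlying receive endpoint. Below is a minimal
caller-side sketch of how this entry point is reached (assumed names: a scalable
endpoint "sep" opened earlier with fi_scalable_ep(); the index and capability bits
are illustrative, not taken from this file):

#include <rdma/fi_endpoint.h>

static int open_rx_ctx(struct fid_ep *sep, struct fid_ep **rx_ep)
{
	/* Request only receive-side capabilities; transmit-side bits would be
	 * stripped or rejected by the provider's checks in fi_bgq_rx_ctx(). */
	struct fi_rx_attr rx_attr = { .caps = FI_TAGGED | FI_RECV };

	/* Index 0 must be below the domain's receive-context limit. */
	return fi_rx_context(sep, 0, &rx_attr, rx_ep, NULL);
}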


static int fi_bgq_rx_ctx(struct fid_ep *sep, int index,
			struct fi_rx_attr *attr, struct fid_ep **rx_ep,
			void *context)
{
	int ret;
	struct fi_info info = {0};
	struct fi_bgq_sep *bgq_sep;
	struct fi_bgq_ep  *bgq_rx_ep;

	if (!sep || !attr || !rx_ep) {
		errno = FI_EINVAL;
		return -errno;
	}

	bgq_sep = container_of(sep, struct fi_bgq_sep, ep_fid);

	uint64_t caps = attr->caps;	/* TODO - "By default, a receive context inherits the properties of its associated endpoint. However, applications may request context specific attributes through the attr parameter." */

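	/* A receive context cannot initiate transfers; strip any transmit-side
	 * capability bits from the request instead of failing outright. */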
	if ((caps & FI_MSG || caps & FI_TAGGED) && (caps & FI_SEND)) {
		FI_LOG(fi_bgq_global.prov, FI_LOG_DEBUG, FI_LOG_DOMAIN,
				"FI_MSG|FI_TAGGED with FI_SEND capability specified for a RX context\n");
		caps &= ~FI_SEND;
	}

	if ((caps & FI_RMA || caps & FI_ATOMIC) && (caps & FI_READ || caps & FI_WRITE)) {
		FI_LOG(fi_bgq_global.prov, FI_LOG_DEBUG, FI_LOG_DOMAIN,
				"FI_RMA|FI_ATOMIC with FI_READ|FI_WRITE capability specified for a RX context\n");
		caps &= ~FI_READ;
		caps &= ~FI_WRITE;
	}

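	/* Messaging and RMA/atomic capabilities imply their receive-side
	 * counterparts on a receive context. */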
	if (caps & FI_MSG || caps & FI_TAGGED) {
		caps |= FI_RECV;
	}

	if (caps & FI_RMA || caps & FI_ATOMIC) {
		caps |= FI_REMOTE_READ;
		caps |= FI_REMOTE_WRITE;
	}

	if (ofi_send_allowed(caps) || ofi_rma_initiate_allowed(caps)) {
		FI_LOG(fi_bgq_global.prov, FI_LOG_DEBUG, FI_LOG_DOMAIN,
				"TX capabilities specified for RX context\n");
		errno = FI_EINVAL;
		return -errno;
	}

	if (!ofi_recv_allowed(caps) && !ofi_rma_target_allowed(caps)) {
		FI_LOG(fi_bgq_global.prov, FI_LOG_DEBUG, FI_LOG_DOMAIN,
				"RX capabilities not specified for RX context\n");
		errno = FI_EINVAL;
		return -errno;
	}

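	/* Each domain supports only a limited number of receive contexts. */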
	if (bgq_sep->domain->rx.count >= fi_bgq_domain_get_rx_max(bgq_sep->domain)) {
		FI_LOG(fi_bgq_global.prov, FI_LOG_DEBUG, FI_LOG_DOMAIN,
				"RX ctx count exceeded (max %lu, created %lu)\n",
				fi_bgq_domain_get_rx_max(bgq_sep->domain), bgq_sep->domain->rx.count);
		errno = FI_EINVAL;
		return -errno;
	}

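	/* Describe the new receive context with a temporary fi_info; the endpoint,
	 * domain, and fabric attributes are copied from the scalable endpoint. */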
	info.caps = caps;
	info.mode = attr->mode;

	info.rx_attr = calloc(1, sizeof(*info.rx_attr));
	if (!info.rx_attr) {
		errno = FI_ENOMEM;
		goto err;
	}

	info.rx_attr->caps     = caps;
	info.rx_attr->mode     = attr->mode;
	info.rx_attr->op_flags = attr->op_flags;
	info.rx_attr->msg_order = attr->msg_order;
	info.rx_attr->total_buffered_recv = attr->total_buffered_recv;
	info.rx_attr->iov_limit = attr->iov_limit;

	info.ep_attr = calloc(1, sizeof(*info.ep_attr));
	if (!info.ep_attr) {
		errno = FI_ENOMEM;
		goto err;
	}
	memcpy(info.ep_attr, bgq_sep->info->ep_attr,
			sizeof(*info.ep_attr));

	info.domain_attr = calloc(1, sizeof(*info.domain_attr));
	if (!info.domain_attr) {
		errno = FI_ENOMEM;
		goto err;
	}
	memcpy(info.domain_attr, bgq_sep->info->domain_attr,
			sizeof(*info.domain_attr));

	info.fabric_attr = calloc(1, sizeof(*info.fabric_attr));
	if (!info.fabric_attr) {
		errno = FI_ENOMEM;
		goto err;
	}
	memcpy(info.fabric_attr, bgq_sep->info->fabric_attr,
			sizeof(*info.fabric_attr));

#ifdef FI_BGQ_TRACE
	fprintf(stderr, "fi_bgq_rx_ctx calling fi_bgq_endpoint_rx_tx with rx index %d\n", index);
#endif
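	/* Create the underlying endpoint that backs this receive context. */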
	ret = fi_bgq_endpoint_rx_tx(&bgq_sep->domain->domain_fid, &info,
			rx_ep, context, index, -1);
	if (ret) {
		goto err;
	}

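	/* Mark the new endpoint as a receive context and link it to its parent
	 * scalable endpoint and address vector. */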
	bgq_rx_ep = container_of(*rx_ep, struct fi_bgq_ep, ep_fid);
	bgq_rx_ep->ep_fid.fid.fclass = FI_CLASS_RX_CTX;

	bgq_rx_ep->sep = bgq_sep;

	bgq_rx_ep->av = bgq_sep->av;
	fi_bgq_ref_inc(&bgq_rx_ep->av->ref_cnt, "address vector");

	++ bgq_sep->domain->rx.count;

	fi_bgq_ref_inc(&bgq_sep->ref_cnt, "scalable endpoint");

	return 0;

err:
	if (info.fabric_attr)
		free(info.fabric_attr);
	if (info.domain_attr)
		free(info.domain_attr);
	if (info.ep_attr)
		free(info.ep_attr);
	if (info.rx_attr)
		free(info.rx_attr);
	return -errno;
}