in drivers/virt/nitro_enclaves/ne_pci_dev.c [99:177]
/* Sanity-check the parameters of a command exchange before it is started. */
static int ne_check_cmd_params(struct pci_dev *pdev,
			       enum ne_pci_dev_cmd_type cmd_type,
			       const void *cmd_request, size_t cmd_request_size,
			       const struct ne_pci_dev_cmd_reply *cmd_reply,
			       size_t cmd_reply_size)
{
	if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) {
		dev_err_ratelimited(&pdev->dev, "Invalid cmd type=%u\n", cmd_type);

		return -EINVAL;
	}

	if (!cmd_request) {
		dev_err_ratelimited(&pdev->dev, "Null cmd request for cmd type=%u\n",
				    cmd_type);

		return -EINVAL;
	}

	if (cmd_request_size > NE_SEND_DATA_SIZE) {
		dev_err_ratelimited(&pdev->dev, "Invalid req size=%zu for cmd type=%u\n",
				    cmd_request_size, cmd_type);

		return -EINVAL;
	}

	if (!cmd_reply) {
		dev_err_ratelimited(&pdev->dev, "Null cmd reply for cmd type=%u\n",
				    cmd_type);

		return -EINVAL;
	}

	if (cmd_reply_size > NE_RECV_DATA_SIZE) {
		dev_err_ratelimited(&pdev->dev, "Invalid reply size=%zu for cmd type=%u\n",
				    cmd_reply_size, cmd_type);

		return -EINVAL;
	}

	return 0;
}

/**
 * ne_do_request() - Submit a command request to the NE PCI device and wait
 *		     for its reply.
 * @pdev:		PCI device used for the command request / reply flow.
 * @cmd_type:		Type of the command to be submitted.
 * @cmd_request:	Command request payload.
 * @cmd_request_size:	Size of the command request payload, in bytes.
 * @cmd_reply:		Buffer that receives the command reply.
 * @cmd_reply_size:	Size of the command reply buffer, in bytes.
 *
 * Exchanges are serialized per device via pci_dev_mutex, so only one
 * request / reply pair is in flight at a time.
 *
 * Return: 0 on success, negative error value on failure (either a local
 *	   error or the negative rc reported in the device's reply).
 */
int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
		  void *cmd_request, size_t cmd_request_size,
		  struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size)
{
	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
	int rc;

	rc = ne_check_cmd_params(pdev, cmd_type, cmd_request, cmd_request_size,
				 cmd_reply, cmd_reply_size);
	if (rc < 0)
		return rc;

	/*
	 * The PCI device handles a single command exchange at a time, hence
	 * the per-device mutex around the whole request / reply sequence.
	 */
	mutex_lock(&ne_pci_dev->pci_dev_mutex);

	/* Clear any stale reply-available flag before submitting. */
	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);

	ne_submit_request(pdev, cmd_type, cmd_request, cmd_request_size);

	rc = ne_wait_for_reply(pdev);
	if (rc < 0) {
		dev_err_ratelimited(&pdev->dev, "Error in wait for reply for cmd type=%u [rc=%d]\n",
				    cmd_type, rc);

		goto out_unlock;
	}

	ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size);

	/* Reply consumed; reset the flag for the next exchange. */
	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);

	/* The device reports command-processing errors inside the reply. */
	if (cmd_reply->rc < 0) {
		rc = cmd_reply->rc;
		dev_err_ratelimited(&pdev->dev, "Error in cmd process logic, cmd type=%u [rc=%d]\n",
				    cmd_type, rc);

		goto out_unlock;
	}

	rc = 0;

out_unlock:
	mutex_unlock(&ne_pci_dev->pci_dev_mutex);

	return rc;
}