in mlx5/net/mlx5_vnet.c [2538:2673]
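/*
 * mlx5_vdpa_dev_add() - .dev_add() callback of the vDPA management device.
 * Instantiates a single mlx5 vDPA net device, sizes its virtqueues, fills
 * the virtio_net config space and registers the device with the vDPA core.
 */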
static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
			     const struct vdpa_dev_set_config *add_config)
{
	struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
	struct virtio_net_config *config;
	struct mlx5_core_dev *pfmdev;
	struct mlx5_vdpa_dev *mvdev;
	struct mlx5_vdpa_net *ndev;
	struct mlx5_core_dev *mdev;
	u32 max_vqs;
	u16 mtu;
	int err;

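	/* Only one vDPA device can be instantiated per management device. */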
	if (mgtdev->ndev)
		return -ENOSPC;

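	/* Split virtqueue support is mandatory for this driver. */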
	mdev = mgtdev->madev->mdev;
	if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
	      MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
		dev_warn(mdev->device, "missing support for split virtqueues\n");
		return -EOPNOTSUPP;
	}

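	/* At least one RX/TX virtqueue pair must be available. */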
	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
	if (max_vqs < 2) {
		dev_warn(mdev->device,
			 "%d virtqueues are supported. At least 2 are required\n",
			 max_vqs);
		return -EAGAIN;
	}

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
		if (add_config->net.max_vq_pairs > max_vqs / 2)
			return -EINVAL;
		max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs);
	} else {
		max_vqs = 2;
	}

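	/* The vdpa device is embedded in struct mlx5_vdpa_net; on failure it is released with put_device(). */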
	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
				 name, false);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features;
	ndev->mvdev.max_vqs = max_vqs;
	mvdev = &ndev->mvdev;
	mvdev->mdev = mdev;

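	/* One callback slot per data virtqueue plus one for the control virtqueue. */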
	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
	if (!ndev->vqs || !ndev->event_cbs) {
		err = -ENOMEM;
		goto err_alloc;
	}

	init_mvqs(ndev);
	mutex_init(&ndev->reslock);

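	/* Mirror the NIC's MTU and link state into the virtio_net config space. */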
	config = &ndev->config;
	err = query_mtu(mdev, &mtu);
	if (err)
		goto err_mtu;

	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);

	if (get_link_state(mvdev))
		ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
	else
		ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);

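	/* Use the MAC address supplied by the user, or fall back to the NIC's vport MAC. */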
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN);
	} else {
		err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
		if (err)
			goto err_mtu;
	}

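	/* Program the MAC into the physical function's MPFS table so unicast traffic reaches this function. */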
	if (!is_zero_ether_addr(config->mac)) {
		pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
		err = mlx5_mpfs_add_mac(pfmdev, config->mac);
		if (err)
			goto err_mtu;

		ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
	}

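	/* Expose the number of queue pairs and route DMA through the PCI device. */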
	config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
	mvdev->vdev.dma_dev = &mdev->pdev->dev;

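	/* Allocate core vDPA resources; create the initial memory mapping only when the device capability requires it. */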
	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
	if (err)
		goto err_mpfs;

	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
		err = mlx5_vdpa_create_mr(mvdev, NULL);
		if (err)
			goto err_res;
	}

	err = alloc_resources(ndev);
	if (err)
		goto err_mr;

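	/* Workqueue for deferred work, e.g. control virtqueue commands and link state updates. */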
	mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
	if (!mvdev->wq) {
		err = -ENOMEM;
		goto err_res2;
	}

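	/* Subscribe to core device events (e.g. port state changes) handled by event_handler(). */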
	ndev->nb.notifier_call = event_handler;
	mlx5_notifier_register(mdev, &ndev->nb);

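	/* Register with the vDPA core: all data virtqueues plus the control virtqueue. */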
	mvdev->vdev.mdev = &mgtdev->mgtdev;
	err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
	if (err)
		goto err_reg;

	mgtdev->ndev = ndev;
	return 0;

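/* Error unwinding, in reverse order of the setup above. */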
err_reg:
	destroy_workqueue(mvdev->wq);
err_res2:
	free_resources(ndev);
err_mr:
	mlx5_vdpa_destroy_mr(mvdev);
err_res:
	mlx5_vdpa_free_resources(&ndev->mvdev);
err_mpfs:
	if (!is_zero_ether_addr(config->mac))
		mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
	mutex_destroy(&ndev->reslock);
err_alloc:
	put_device(&mvdev->vdev.dev);
	return err;
}