/* gve_enable_supported_features()
 * From google/gve/gve_adminq.c, lines 888-972.
 */

static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes,
					  const struct gve_device_option_flow_steering
					  *dev_op_flow_steering,
					  const struct gve_device_option_rss_config
					  *dev_op_rss_config,
					  const struct gve_device_option_modify_ring
					  *dev_op_modify_ring)
{
	/* At this point priv->dev->max_mtu still holds the page-size-capped
	 * max MTU taken from the gve_device_descriptor; replace it with the
	 * true max MTU when the device advertises jumbo-frame support.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* DQO-QPL: adopt the device-provided TX pages-per-QPL count,
	 * falling back to the driver default when the device reports 0.
	 */
	if (dev_op_dqo_qpl) {
		u16 tx_pages = be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);

		priv->tx_pages_per_qpl =
			tx_pages ? tx_pages : DQO_QPL_DEFAULT_TX_PAGES;
	}

	/* Buffer sizes option: take packet- and header-buffer sizes
	 * straight from the device option (big-endian on the wire).
	 */
	if (dev_op_buffer_sizes &&
	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
		priv->max_rx_buffer_size =
			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
		priv->header_buf_size =
			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
		dev_info(&priv->pdev->dev,
			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
			 priv->max_rx_buffer_size, priv->header_buf_size);
	}

	/* Record the ring-size ranges the device allows */
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		priv->modify_ring_size_enabled = true;

		/* DQO-QPL keeps its existing maximums: its device limit
		 * must not be overwritten here.
		 */
		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
		}
		if (!priv->default_min_ring_size) {
			priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
		} else {
			/* Device provided no minimums; use driver defaults */
			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
			priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
		}
	}

	/* Flow steering is only usable when the device also reports a
	 * non-zero rule limit, so require all three conditions together.
	 */
	if (dev_op_flow_steering &&
	    (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK) &&
	    dev_op_flow_steering->max_flow_rules) {
		priv->max_flow_rules =
			be32_to_cpu(dev_op_flow_steering->max_flow_rules);
		priv->dev->hw_features |= NETIF_F_NTUPLE;
		dev_info(&priv->pdev->dev,
			 "FLOW STEERING device option enabled with max rule limit of %u.\n",
			 priv->max_flow_rules);
	}

	/* RSS config option: record hash key and indirection table sizes */
	if (dev_op_rss_config &&
	    (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
		priv->rss_key_size =
			be16_to_cpu(dev_op_rss_config->hash_key_size);
		priv->rss_lut_size =
			be16_to_cpu(dev_op_rss_config->hash_lut_size);
	}
}