void iavf_virtchnl_completion()

in ethernet/intel/iavf/iavf_virtchnl.c [1849:2344]


/**
 * iavf_virtchnl_completion - handle a message or reply from the PF
 * @adapter: private adapter structure
 * @v_opcode: opcode of the completed request, or VIRTCHNL_OP_EVENT for an
 *	unsolicited PF notification
 * @v_retval: status the PF returned for the request (0 on success)
 * @msg: payload of the reply/event; layout depends on @v_opcode
 * @msglen: length of @msg in bytes
 *
 * Asynchronous completion handler for admin-queue traffic from the PF.
 * Unsolicited events (link change, reset impending) are handled first and
 * return early.  For request completions, a failed @v_retval rolls back the
 * pending state that was set when the request was issued (filters, channels,
 * etc.), then the success path of the opcode is processed.  On exit,
 * adapter->current_op is cleared so the watchdog can issue the next request.
 *
 * NOTE(review): caller is expected to hold adapter->crit_lock; the
 * GET_OFFLOAD_VLAN_V2_CAPS path below temporarily drops and re-acquires it.
 */
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
			      enum virtchnl_ops v_opcode,
			      enum iavf_status v_retval, u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	/* Unsolicited events from the PF are not tied to current_op. */
	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = iavf_get_vpe_link_status(adapter, vpe);

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);

			/* we've already got the right link status, bail */
			if (adapter->link_up == link_up)
				break;

			if (link_up) {
				/* If we get link up message and start queues
				 * before our queues are configured it will
				 * trigger a TX hang. In that case, just ignore
				 * the link status message,we'll get another one
				 * after we enable queues and actually prepared
				 * to send traffic.
				 */
				if (adapter->state != __IAVF_RUNNING)
					break;

				/* For ADq enabled VF, we reconfigure VSIs and
				 * re-allocate queues. Hence wait till all
				 * queues are enabled.
				 */
				if (adapter->flags &
				    IAVF_FLAG_QUEUES_DISABLED)
					break;
			}

			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			iavf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
				adapter->flags |= IAVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				queue_work(iavf_wq, &adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		return;
	}
	/* The PF rejected our request: log it and undo any pending state the
	 * request had installed optimistically.
	 */
	if (v_retval) {
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			iavf_mac_add_reject(adapter);
			/* restore administratively set MAC address */
			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf, *cftmp;

			/* drop the filters that the PF refused to add */
			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __IAVF_CF_ADD_PENDING) {
					cf->state = __IAVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
			}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf;

			/* delete failed, so the filter remains active */
			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __IAVF_CF_DEL_PENDING) {
					cf->state = __IAVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
				}
			}
			}
			break;
		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
			struct iavf_fdir_fltr *fdir, *fdir_tmp;

			spin_lock_bh(&adapter->fdir_fltr_lock);
			list_for_each_entry_safe(fdir, fdir_tmp,
						 &adapter->fdir_list_head,
						 list) {
				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_fdir_fltr(adapter, fdir);
					if (msglen)
						dev_err(&adapter->pdev->dev,
							"%s\n", msg);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				}
			}
			spin_unlock_bh(&adapter->fdir_fltr_lock);
			}
			break;
		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
			struct iavf_fdir_fltr *fdir;

			/* delete failed, so the filter remains active */
			spin_lock_bh(&adapter->fdir_fltr_lock);
			list_for_each_entry(fdir, &adapter->fdir_list_head,
					    list) {
				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
			spin_unlock_bh(&adapter->fdir_fltr_lock);
			}
			break;
		case VIRTCHNL_OP_ADD_RSS_CFG: {
			struct iavf_adv_rss *rss, *rss_tmp;

			spin_lock_bh(&adapter->adv_rss_lock);
			list_for_each_entry_safe(rss, rss_tmp,
						 &adapter->adv_rss_list_head,
						 list) {
				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
					iavf_print_adv_rss_cfg(adapter, rss,
							       "Failed to change the input set for",
							       NULL);
					list_del(&rss->list);
					kfree(rss);
				}
			}
			spin_unlock_bh(&adapter->adv_rss_lock);
			}
			break;
		case VIRTCHNL_OP_DEL_RSS_CFG: {
			struct iavf_adv_rss *rss;

			/* delete failed, so the config remains active */
			spin_lock_bh(&adapter->adv_rss_lock);
			list_for_each_entry(rss, &adapter->adv_rss_list_head,
					    list) {
				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
					rss->state = IAVF_ADV_RSS_ACTIVE;
					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
						iavf_stat_str(&adapter->hw,
							      v_retval));
				}
			}
			spin_unlock_bh(&adapter->adv_rss_lock);
			}
			break;
		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval, iavf_stat_str(&adapter->hw, v_retval),
				v_opcode);
			break;
		}
	}
	/* Success (and some shared) handling per opcode. */
	switch (v_opcode) {
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		if (!v_retval)
			iavf_mac_add_ok(adapter);
		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		break;
	case VIRTCHNL_OP_GET_STATS: {
		struct iavf_eth_stats *stats =
			(struct iavf_eth_stats *)msg;
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  IAVF_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		iavf_validate_num_queues(adapter);
		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
			/* restore current mac address */
			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		} else {
			/* refresh current mac address if changed */
			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
			ether_addr_copy(netdev->perm_addr,
					adapter->hw.mac.addr);
		}
		spin_lock_bh(&adapter->mac_vlan_list_lock);
		iavf_add_filter(adapter, adapter->hw.mac.addr);

		if (VLAN_ALLOWED(adapter)) {
			if (!list_empty(&adapter->vlan_filter_list)) {
				struct iavf_vlan_filter *vlf;

				/* re-add all VLAN filters over virtchnl */
				list_for_each_entry(vlf,
						    &adapter->vlan_filter_list,
						    list)
					vlf->add = true;

				adapter->aq_required |=
					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
			}
		}

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_parse_vf_resource_msg(adapter);

		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
		 * configuration
		 */
		if (VLAN_V2_ALLOWED(adapter))
			break;
		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
		 * wasn't successfully negotiated with the PF
		 */
		}
		fallthrough;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
			memcpy(&adapter->vlan_v2_caps, msg,
			       min_t(u16, msglen,
				     sizeof(adapter->vlan_v2_caps)));

		iavf_process_config(adapter);

		/* unlock crit_lock before acquiring rtnl_lock as other
		 * processes holding rtnl_lock could be waiting for the same
		 * crit_lock
		 */
		mutex_unlock(&adapter->crit_lock);
		/* VLAN capabilities can change during VFR, so make sure to
		 * update the netdev features with the new capabilities
		 */
		rtnl_lock();
		netdev_update_features(netdev);
		rtnl_unlock();
		if (iavf_lock_timeout(&adapter->crit_lock, 10000))
			dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
				 __func__);

		/* Request VLAN offload settings */
		if (VLAN_V2_ALLOWED(adapter))
			iavf_set_vlan_offload_features(adapter, 0,
						       netdev->features);

		iavf_set_queue_vlan_tag_loc(adapter);

		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		iavf_irq_enable(adapter, true);
		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
		if (adapter->state == __IAVF_DOWN_PENDING) {
			iavf_change_state(adapter, __IAVF_DOWN);
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			iavf_notify_client_message(&adapter->vsi, msg, msglen);
		break;

	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;

		/* PF granted a different count than requested; drop the
		 * request and skip the ITR reinit tied to it.
		 */
		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}
		}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf;

		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __IAVF_CF_ADD_PENDING)
				cf->state = __IAVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf, *cftmp;

		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __IAVF_CF_DEL_PENDING) {
				cf->state = __IAVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		/* v_retval was 0, but the reply carries its own per-filter
		 * status that must still be checked.
		 */
		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp,
					 &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
						 fdir->loc);
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					fdir->flow_id = add_fltr->flow_id;
				} else {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
						 add_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
						 fdir->loc);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				} else {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
						 del_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG: {
		struct iavf_adv_rss *rss;

		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
				iavf_print_adv_rss_cfg(adapter, rss,
						       "Input set change for",
						       "successful");
				rss->state = IAVF_ADV_RSS_ACTIVE;
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG: {
		struct iavf_adv_rss *rss, *rss_tmp;

		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry_safe(rss, rss_tmp,
					 &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
				list_del(&rss->list);
				kfree(rss);
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}