def iteration_completed()

in ignite/metrics/metric.py


    def iteration_completed(self, engine: Engine) -> None:
        """Helper method to update metric's computation. It is automatically attached to the
        `engine` with :meth:`~ignite.metrics.metric.Metric.attach`.

        Args:
            engine: the engine to which the metric must be attached

        Note:
            ``engine.state.output`` is used to compute metric values.
            Most implemented metrics accept the following formats for ``engine.state.output``:
            ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. ``y_pred`` and ``y`` can be torch tensors or
            lists of tensors/numbers if applicable.

        .. versionchanged:: 0.4.5
            ``y_pred`` and ``y`` can be torch tensors or lists of tensors/numbers
        """

        output = self._output_transform(engine.state.output)
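        # A mapping output (e.g. {'y_pred': ..., 'y': ...}) is validated against required_output_keys
        # and converted to a tuple in that key order before being passed to update().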
        if isinstance(output, Mapping):
            if self.required_output_keys is None:
                raise TypeError(
                    f"Transformed engine output for {self.__class__.__name__} metric should be a tuple/list, "
                    f"but given {type(output)}"
                )
            if not all([k in output for k in self.required_output_keys]):
                raise ValueError(
                    "When transformed engine's output is a mapping, "
                    f"it should contain {self.required_output_keys} keys, but given {list(output.keys())}"
                )
            output = tuple(output[k] for k in self.required_output_keys)

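        # A pair of equal-length lists of tensors/numbers is consumed pairwise: each (prediction, target)
        # item is converted to a batched tensor and update() is called once per pair.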
        if isinstance(output, Sequence) and all([_is_list_of_tensors_or_numbers(o) for o in output]):
            if not (len(output) == 2 and len(output[0]) == len(output[1])):
                raise ValueError(
                    f"Output should have 2 items of the same length, "
                    f"got {len(output)} and {len(output[0])}, {len(output[1])}"
                )
            for o1, o2 in zip(output[0], output[1]):
                # o1 and o2 are single tensors or numbers taken from the prediction and target lists
                tensor_o1 = _to_batched_tensor(o1)
                tensor_o2 = _to_batched_tensor(o2, device=tensor_o1.device)
                self.update((tensor_o1, tensor_o2))
        else:
            self.update(output)
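
A minimal usage sketch (not part of metric.py) showing the two ``engine.state.output`` formats described in
the docstring's Note. The toy evaluation steps, tensors, and the choice of ``Accuracy`` are assumptions for
illustration only; any built-in metric whose ``required_output_keys`` is ``("y_pred", "y")`` would be handled
the same way.

    import torch

    from ignite.engine import Engine
    from ignite.metrics import Accuracy


    def eval_step_tuple(engine, batch):
        # Tuple format: forwarded to Metric.update() as-is.
        y_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
        y = torch.tensor([1, 0])
        return y_pred, y


    def eval_step_mapping(engine, batch):
        # Mapping format: unpacked into a tuple via required_output_keys.
        y_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
        y = torch.tensor([1, 1])
        return {"y_pred": y_pred, "y": y}


    for step in (eval_step_tuple, eval_step_mapping):
        evaluator = Engine(step)
        Accuracy().attach(evaluator, "accuracy")
        state = evaluator.run([0], max_epochs=1)  # one dummy batch is enough here
        print(state.metrics["accuracy"])

Per the ``.. versionchanged:: 0.4.5`` note, ``y_pred`` and ``y`` may also be equal-length lists of
tensors/numbers; ``iteration_completed`` then batches each (prediction, target) pair and calls ``update``
once per item.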