`forward()` — from evals/elsuite/hr_ml_agent_bench/benchmarks/vectorization/env/train.py


    def forward(self, features_batch: np.ndarray) -> np.ndarray:
        """
        Forward pass of the full convolution.

        Convolves the batch of images with this layer's filters. Each filter
        produces one new output feature/channel: if the input has 32
        features/channels and this layer uses 64 filters, the output has
        64 features/channels.

        Args:
            features_batch: Batch of images (batch of features) of shape
                [batch_size, height, width, channels]. When this input comes
                from a previous convolution layer, `channels` equals the
                number of filters of that previous layer.

        Returns:
            Convolved batch of shape
            [batch_size, h_new, w_new, num_of_filters_new]; `relu` is applied
            when ``self.activation == "relu"``.
        """
        # NOTE(review): self.padding == 'same' is not implemented — any
        # non-int padding silently falls back to 0 (no padding). Only an
        # explicit integer padding is honoured.
        padding_size = 0
        if isinstance(self.padding, int):
            padding_size = self.padding

        # Input: [batch_size, height, width, channels from the previous layer]
        batch_size, h_old, w_old, num_features_old = features_batch.shape
        # Kernels: [filter_size, filter_size, num_features_old, num_of_filters_new]
        # (square filters; the duplicate/redundant dims are ignored with `_`).
        filter_size, _, _, num_of_filters_new = self.kernel_matrices.shape

        # Output spatial size from the standard convolution formula:
        # new = (old + 2*pad - filter) / stride + 1
        h_new = int((h_old + (2 * padding_size) - filter_size) / self.stride) + 1
        w_new = int((w_old + (2 * padding_size) - filter_size) / self.stride) + 1

        # Zero-pad the input (add_padding's third param defaults to 0,
        # i.e. zero padding).
        padded_batch = add_padding(features_batch, padding_size)

        # This will act as the input to the next layer: same batch size,
        # new height/width and new channel count.
        output = np.zeros([batch_size, h_new, w_new, num_of_filters_new])

        for index in range(batch_size):  # index-th image of the batch
            padded_feature = padded_batch[index, :, :, :]
            for h in range(h_new):  # window's vertical position
                # Window bounds and the sliced window do not depend on the
                # filter, so they are hoisted out of the innermost loop
                # (the original recomputed them once per filter).
                vertical_start = h * self.stride
                vertical_end = vertical_start + filter_size
                for w in range(w_new):  # window's horizontal position
                    horizontal_start = w * self.stride
                    # Filters are square, so the horizontal extent equals
                    # the vertical extent.
                    horizontal_end = horizontal_start + filter_size
                    image_portion = padded_feature[
                        vertical_start:vertical_end, horizontal_start:horizontal_end, :
                    ]
                    for filter_index in range(num_of_filters_new):
                        # Select the kernel and bias for this output channel.
                        kernel_matrix = self.kernel_matrices[:, :, :, filter_index]
                        bias = self.biases[:, :, :, filter_index]
                        # One scalar per (window, filter) pair.
                        output[index, h, w, filter_index] = self.convolution_step(
                            image_portion, kernel_matrix, bias
                        )

        if self.activation == "relu":  # apply activation function
            return relu(output)

        return output