static void same_padding_new_num_rows_and_cols()

in runtime/kernels/pooling_utils.h [42:123]


static void same_padding_new_num_rows_and_cols(
    Tensor tensor_in, Tensor& paddedTensor, OpKernelContext* context, int rows,
    int cols, int batch_size, int channels, int rowKernelSize,
    int colKernelSize, int rowStrideLen, int colStrideLen, int& newNumRows,
    int& newNumCols, float paddingValue, bool isNHWC) {
  int totalRowPadding = rowKernelSize - 1;
  int totalColPadding = colKernelSize - 1;
  int paddedRowLen = rows + totalRowPadding;
  int paddedColLen = cols + totalColPadding;
  // SAME output size: ceil(rows / rowStrideLen) and ceil(cols / colStrideLen).
  newNumRows = ((paddedRowLen - rowKernelSize) / rowStrideLen) + 1;
  newNumCols = ((paddedColLen - colKernelSize) / colStrideLen) + 1;
  // newNumRows and newNumCols are used later when generating the output shape.
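  // Worked example with hypothetical values: rows = 5, rowKernelSize = 3,
  // rowStrideLen = 2 gives totalRowPadding = 2, paddedRowLen = 7, and
  // newNumRows = (7 - 3) / 2 + 1 = 3, which matches ceil(5 / 2).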

  // floor(totalRowPadding / 2)
  int row_offset_up = (totalRowPadding) / 2;
  // ceil(totalRowPadding / 2)
  int row_offset_down = (totalRowPadding + 1) / 2;
  // floor(totalColPadding / 2)
  int col_offset_left = (totalColPadding) / 2;
  // ceil(totalColPadding / 2)
  int col_offset_right = (totalColPadding + 1) / 2;
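  // Hypothetical example: rowKernelSize = 4 gives totalRowPadding = 3, split
  // as row_offset_up = 1 and row_offset_down = 2, so the extra row of padding
  // lands on the bottom (and, likewise, the extra column on the right).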

  TensorShape newinputshape;
  if (isNHWC) {
    newinputshape =
        TensorShape{batch_size, paddedRowLen, paddedColLen, channels};
  } else {
    newinputshape =
        TensorShape{batch_size, channels, paddedRowLen, paddedColLen};
  }

  // On allocation failure, OP_REQUIRES_OK records the error on the context and
  // returns from this helper; callers should check context->status().
  OP_REQUIRES_OK(
      context, context->allocate_temp(DT_FLOAT, newinputshape, &paddedTensor));
  VLOG(1) << "Successfully allocated the paddedTensor for SAME padding.";

  // Eigen tensor views: newmatrix over the padded output, t over the input.
  auto newmatrix = paddedTensor.tensor<float, 4>();
  auto t = tensor_in.tensor<float, 4>();

  if (isNHWC) {
    VLOG(1) << "Channels last padded tensor being created";
    // create the padded tensor surrounded with proper padding
    for (int b = 0; b < batch_size; b++) {
      for (int r = 0; r < paddedRowLen; r++) {
        for (int c = 0; c < paddedColLen; c++) {
          for (int ch = 0; ch < channels; ch++) {
            if ((r < row_offset_up || c < col_offset_left) ||
                (r >= paddedRowLen - row_offset_down ||
                 c >= paddedColLen - col_offset_right)) {
              newmatrix(b, r, c, ch) = paddingValue;
            } else {
              newmatrix(b, r, c, ch) =
                  t(b, r - row_offset_up, c - col_offset_left, ch);
            }
          }
        }
      }
    }
  } else {
    VLOG(1) << "Channels First padded tensor being created";
    for (int b = 0; b < batch_size; b++) {
      for (int ch = 0; ch < channels; ch++) {
        for (int r = 0; r < paddedRowLen; r++) {
          for (int c = 0; c < paddedColLen; c++) {
            if ((r < row_offset_up || c < col_offset_left) ||
                (r >= paddedRowLen - row_offset_down ||
                 c >= paddedColLen - col_offset_right)) {
              newmatrix(b, ch, r, c) = paddingValue;
            } else {
              newmatrix(b, ch, r, c) =
                  t(b, ch, r - row_offset_up, c - col_offset_left);
            }
          }
        }
      }
    }
  }
}
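
For context, here is a minimal sketch of a call site. This is not the repository's actual pooling kernel: the Compute method, the 3x3 kernel, the 2x2 stride, the NHWC layout, and the max-pooling padding value are all assumed purely for illustration (std::numeric_limits requires <limits>).

// Hypothetical call site inside a max-pooling kernel's Compute(); the kernel
// and stride values below are illustrative only.
void Compute(OpKernelContext* context) override {
  const Tensor& input = context->input(0);
  const int batch_size = static_cast<int>(input.dim_size(0));
  const int rows = static_cast<int>(input.dim_size(1));  // NHWC layout assumed
  const int cols = static_cast<int>(input.dim_size(2));
  const int channels = static_cast<int>(input.dim_size(3));

  Tensor padded;
  int newNumRows = 0, newNumCols = 0;
  // For max pooling, pad with the lowest float so padded cells never win.
  same_padding_new_num_rows_and_cols(
      input, padded, context, rows, cols, batch_size, channels,
      /*rowKernelSize=*/3, /*colKernelSize=*/3,
      /*rowStrideLen=*/2, /*colStrideLen=*/2, newNumRows, newNumCols,
      /*paddingValue=*/std::numeric_limits<float>::lowest(),
      /*isNHWC=*/true);
  // The helper reports allocation failures through OP_REQUIRES_OK, so bail out
  // here if it did not complete.
  if (!context->status().ok()) return;

  Tensor* output = nullptr;
  OP_REQUIRES_OK(context,
                 context->allocate_output(
                     0,
                     TensorShape({batch_size, newNumRows, newNumCols, channels}),
                     &output));
  // ... run a VALID-style pooling loop over `padded`, writing newNumRows x
  // newNumCols windows per (batch, channel) into `output`.
}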