in src/io/image_transformer.cc [87:198]
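/// Resize a 2D (HW) or 3D (CHW/HWC) float image Tensor to
/// resize_height x resize_width using OpenCV. The data is copied into a
/// cv::Mat, resized, and copied back into a new Tensor with the same
/// dimension order. Batched (4D) input is not implemented yet, and a zero
/// resize_height or resize_width returns the input unchanged.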
Tensor resize(Tensor& input, const size_t resize_height,
              const size_t resize_width, const string& image_dim_order) {
  CHECK_LE(input.nDim(), 4u);
  CHECK_GE(input.nDim(), 2u);
  if (!resize_height || !resize_width) return input;
  Tensor output;
  cv::Mat mat;
  const auto* in = input.data<float>();
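  /// Copy the input tensor into a cv::Mat so cv::resize can operate on it.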
  if (input.nDim() == 4u) {
    /// TODO
    /// batch based resize
    LOG(FATAL) << "Not implemented";
  } else if (input.nDim() == 3u) {
    if (image_dim_order == "CHW") {
      size_t height = input.shape(1), width = input.shape(2),
             channel = input.shape(0);
      if (channel == 3u) {
        mat = cv::Mat(height, width, CV_32FC3, cv::Scalar(0, 0, 0));
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            for (size_t k = 0; k < channel; k++)
              mat.at<cv::Vec3f>(i, j)[k] =
                  in[k * height * width + i * width + j];
      } else if (channel == 1u) {
        mat = cv::Mat(height, width, CV_32FC1);
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            mat.at<cv::Vec<float, 1>>(i, j)[0] = in[i * width + j];
      } else LOG(FATAL) << "Invalid channel size: " << channel;
    } else if (image_dim_order == "HWC") {
      size_t height = input.shape(0), width = input.shape(1),
             channel = input.shape(2);
      if (channel == 3u) {
        mat = cv::Mat(height, width, CV_32FC3, cv::Scalar(0, 0, 0));
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            for (size_t k = 0; k < channel; k++)
              mat.at<cv::Vec3f>(i, j)[k] =
                  in[i * width * channel + j * channel + k];
      } else if (channel == 1u) {  /// 2D gray image
        mat = cv::Mat(height, width, CV_32FC1);
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            mat.at<cv::Vec<float, 1>>(i, j)[0] = in[i * width + j];
      } else LOG(FATAL) << "Invalid channel size: " << channel;
    } else {
      LOG(FATAL) << "Unknown image dimension order " << image_dim_order
                 << ". Only 'HWC' and 'CHW' are supported.";
    }
  } else {  /// 2D gray image
    size_t height = input.shape(0), width = input.shape(1);
    mat = cv::Mat(height, width, CV_32FC1);
    for (size_t i = 0; i < height; i++)
      for (size_t j = 0; j < width; j++)
        mat.at<cv::Vec<float, 1>>(i, j)[0] = in[i * width + j];
  }
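  /// Resize with OpenCV's default (bilinear) interpolation.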
  cv::Size size(resize_width, resize_height);
  cv::Mat resized;
  cv::resize(mat, resized, size);
  CHECK_EQ(resized.size().height, resize_height);
  CHECK_EQ(resized.size().width, resize_width);
  size_t new_size = resize_height * resize_width * resized.channels();
  float* out = new float[new_size];
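  /// Copy the resized pixels into the host buffer, then into the output
  /// Tensor in the same dimension order as the input.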
  if (input.nDim() == 4u) {
    /// TODO
    /// batch based resize
    LOG(FATAL) << "Not implemented";
  } else if (input.nDim() == 3u) {
    if (image_dim_order == "CHW") {
      size_t height = resize_height, width = resize_width,
             channel = input.shape(0);
      if (channel == 3u) {
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            for (size_t k = 0; k < channel; k++)
              out[k * height * width + i * width + j] =
                  resized.at<cv::Vec3f>(i, j)[k];
      } else {  /// single-channel image
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            out[i * width + j] = resized.at<cv::Vec<float, 1>>(i, j)[0];
      }
      Tensor temp(Shape{channel, height, width});
      temp.CopyDataFromHostPtr<float>(out, new_size);
      output = temp;
    } else {
      size_t height = resize_height, width = resize_width,
             channel = input.shape(2);
      if (channel == 3u) {
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            for (size_t k = 0; k < channel; k++)
              out[i * width * channel + j * channel + k] =
                  resized.at<cv::Vec3f>(i, j)[k];
      } else {  /// 1 channel
        for (size_t i = 0; i < height; i++)
          for (size_t j = 0; j < width; j++)
            out[i * width + j] = resized.at<cv::Vec<float, 1>>(i, j)[0];
      }
      Tensor temp(Shape{height, width, channel});
      temp.CopyDataFromHostPtr<float>(out, new_size);
      output = temp;
    }
  } else {  /// 2D gray image
    size_t height = resize_height, width = resize_width;
    for (size_t i = 0; i < height; i++)
      for (size_t j = 0; j < width; j++)
        out[i * width + j] = resized.at<cv::Vec<float, 1>>(i, j)[0];
    Tensor temp(Shape{height, width});
    temp.CopyDataFromHostPtr<float>(out, new_size);
    output = temp;
  }
  delete[] out;
  return output;
}
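
/// A minimal usage sketch (illustrative only): `pixels`, `h`, and `w` are
/// hypothetical, but Shape, CopyDataFromHostPtr, and resize are the calls
/// used above. It resizes an HWC float image to 224x224:
///
///   Tensor img(Shape{h, w, 3});
///   img.CopyDataFromHostPtr<float>(pixels, h * w * 3);
///   Tensor small = resize(img, 224, 224, "HWC");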