in src/operator/mkl/mkl_convolution-inl.h [112:267]
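// Builds the MKL DNN convolution primitives (forward, backward data / filter / bias)
// and the layout descriptors for every resource they consume or produce.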
void LayerSetUp(const mshadow::Tensor<xpu, 4, DType> &data,
                const mshadow::Tensor<xpu, 4, DType> &out) {
  this->width_ = data.shape_[3];
  this->height_ = data.shape_[2];
  this->channels_ = data.shape_[1];
  this->num_ = data.shape_[0];
  this->group_ = param_.num_group;
  this->width_out_ = out.shape_[3];
  this->height_out_ = out.shape_[2];
  int channel_out_ = out.shape_[1];
  this->num_output_ = channel_out_;
  kernel_w_ = param_.kernel[1];
  kernel_h_ = param_.kernel[0];
  stride_w_ = param_.stride[1];
  stride_h_ = param_.stride[0];
  pad_w_ = param_.pad[1];
  pad_h_ = param_.pad[0];
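  // Local size_t copies of the shapes, used to fill the MKL descriptor arrays below.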
  int status;
  size_t n, g;
  size_t iw, ih, ic;
  size_t ow, oh, oc;
  size_t kw, kh;
  size_t dimension = 4;
  g = std::max(this->group_, 1);
  n = this->num_;
  iw = this->width_;
  ih = this->height_;
  ic = this->channels_;
  ow = this->width_out_;
  oh = this->height_out_;
  oc = this->num_output_;
  kw = this->kernel_w_;
  kh = this->kernel_h_;
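  // Sizes and strides are listed innermost-first (W, H, C, N), i.e. plain NCHW data.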
  size_t bdata_sizes[4] = { iw, ih, ic, n };
  size_t bdata_strides[4] = { 1, iw, iw*ih, iw*ih*ic };
  /* Starting with MKL 2017 Gold, in the case of groups the filter layout
   * becomes 5-D, i.e. the groups become a separate dimension; older builds
   * (before 2016-07-01) keep the 4-D filter layout. */
  size_t g_mkl2017 = g;
  size_t f_dimension = dimension + (g != 1);
  if (getMKLBuildDate() < 20160701) {
    g_mkl2017 = 1;
    f_dimension = dimension;
  }
  size_t fdata_sizes[5] = { kw, kh, ic / g, oc / g_mkl2017, g_mkl2017 };
  size_t fdata_strides[5] = { 1, kw, kw*kh, kw*kh*ic / g, kw*kh*ic / g*oc / g };
  size_t bias_sizes[1] = { oc };
  size_t bias_strides[1] = { 1 };
  size_t tdata_sizes[4] = { ow, oh, oc, n };
  size_t tdata_strides[4] = { 1, ow, ow*oh, ow*oh*oc };
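  // MKL takes the zero padding as a negative offset into the input.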
  size_t convolutionStrides[2] = { this->stride_w_, this->stride_h_ };
  int inputOffset[2] = { -this->pad_w_, -this->pad_h_ };
  // Names are for debugging purposes only.
  /*** convolution section ***/
  if (!param_.no_bias) {
    status = dnnGroupsConvolutionCreateForwardBias<DType>(&convolutionFwd,
        NULL,
        dnnAlgorithmConvolutionDirect,
        g,
        dimension,
        bdata_sizes,
        tdata_sizes,
        fdata_sizes,
        convolutionStrides,
        inputOffset,
        dnnBorderZeros);
  } else {
    status = dnnGroupsConvolutionCreateForward<DType>(&convolutionFwd,
        NULL,
        dnnAlgorithmConvolutionDirect,
        g,
        dimension,
        bdata_sizes,
        tdata_sizes,
        fdata_sizes,
        convolutionStrides,
        inputOffset,
        dnnBorderZeros);
  }
  CHECK_EQ(status, 0)
      << "Failed dnnCreateConvolution<DType>(dnnForward) with status "
      << status << "\n";
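  // Layout descriptors for every resource used by the forward primitive.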
  fwd_bottom_data->create_layouts(convolutionFwd, dnnResourceSrc, dimension,
                                  bdata_sizes, bdata_strides);
  fwd_top_data->create_layouts(convolutionFwd, dnnResourceDst, dimension,
                               tdata_sizes, tdata_strides);
  fwd_filter_data->create_layouts(convolutionFwd, dnnResourceFilter,
                                  f_dimension, fdata_sizes, fdata_strides);
  if (!param_.no_bias)
    fwd_bias_data->create_layouts(convolutionFwd, dnnResourceBias, 1,
                                  bias_sizes, bias_strides);
  /*
   * Backward by data layer setup
   */
  status = dnnGroupsConvolutionCreateBackwardData<DType>(&convolutionBwdData,
      NULL,
      dnnAlgorithmConvolutionDirect,
      g,
      dimension,
      bdata_sizes,
      tdata_sizes,
      fdata_sizes,
      convolutionStrides,
      inputOffset,
      dnnBorderZeros);
  CHECK_EQ(status, 0)
      << "Failed dnnConvolutionCreateBackwardData with status "
      << status << "\n";
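  // Backward-data consumes the output gradient and the filter, and produces the input gradient.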
  bwdd_bottom_diff->create_layouts(convolutionBwdData, dnnResourceDiffSrc,
                                   dimension, bdata_sizes, bdata_strides);
  bwdd_top_diff->create_layouts(convolutionBwdData, dnnResourceDiffDst,
                                dimension, tdata_sizes, tdata_strides);
  bwdd_filter_data->create_layouts(convolutionBwdData, dnnResourceFilter,
                                   f_dimension, fdata_sizes, fdata_strides);
  /*
   * Backward by filter layer setup
   */
  status = dnnGroupsConvolutionCreateBackwardFilter<DType>(&convolutionBwdFilter,
      NULL,
      dnnAlgorithmConvolutionDirect,
      g,
      dimension,
      bdata_sizes,
      tdata_sizes,
      fdata_sizes,
      convolutionStrides,
      inputOffset,
      dnnBorderZeros);
  CHECK_EQ(status, 0)
      << "Failed dnnConvolutionCreateBackwardFilter with status "
      << status << "\n";
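  // Backward-filter consumes the forward input and the output gradient, and produces the filter gradient.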
  bwdf_bottom_data->create_layouts(convolutionBwdFilter, dnnResourceSrc,
                                   dimension, bdata_sizes, bdata_strides);
  bwdf_top_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffDst,
                                dimension, tdata_sizes, tdata_strides);
  bwdf_filter_diff->create_layouts(convolutionBwdFilter, dnnResourceDiffFilter,
                                   f_dimension, fdata_sizes, fdata_strides);
  /*
   * Backward by bias layer setup
   */
  if (!param_.no_bias) {
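    // Only the output (top) sizes are needed: the bias gradient reduces the
    // output gradient over the batch and spatial dimensions.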
    status = dnnGroupsConvolutionCreateBackwardBias<DType>(&convolutionBwdBias,
        NULL,
        dnnAlgorithmConvolutionDirect,
        g,
        dimension,
        tdata_sizes);
    CHECK_EQ(status, 0)
        << "Failed dnnConvolutionCreateBackwardBias with status "
        << status << "\n";
    bwdb_top_diff->create_layouts(convolutionBwdBias, dnnResourceDiffDst,
                                  dimension, tdata_sizes, tdata_strides);
    bwdb_bias_diff->create_layouts(convolutionBwdBias, dnnResourceDiffBias, 1,
                                   bias_sizes, bias_strides);
  }
}