in tfjs-core/src/ops/fused/conv2d.ts [96:267]
function fusedConv2d_<T extends Tensor3D|Tensor4D>({
  x,
  filter,
  strides,
  pad,
  dataFormat = 'NHWC',
  dilations = [1, 1],
  dimRoundingMode,
  bias,
  activation = 'linear',
  preluActivationWeights,
  leakyreluAlpha
}: {
  x: T|TensorLike,
  filter: Tensor4D|TensorLike,
  strides: [number, number]|number,
  pad: 'valid'|'same'|number|conv_util.ExplicitPadding,
  dataFormat?: 'NHWC'|'NCHW',
  dilations?: [number, number]|number,
  dimRoundingMode?: 'floor'|'round'|'ceil',
  bias?: Tensor|TensorLike,
  activation?: Activation,
  preluActivationWeights?: Tensor,
  leakyreluAlpha?: number
}): T {
  activation = activation || 'linear';
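
  // Fall back to the unfused path when fusion is not possible for the current
  // gradient depth or activation; bias add and activation are then applied as
  // separate ops.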
  if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {
    let result = unfusedConv2d(
        x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
    if (bias != null) {
      result = add(result, bias);
    }
    return applyActivation(
        result, activation, preluActivationWeights, leakyreluAlpha) as T;
  }

  const $x = convertToTensor(x, 'x', 'conv2d', 'float32');
  const $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');
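
  // The kernel operates on rank-4 NHWC tensors; lift a rank-3 input to a
  // single-image batch and squeeze the batch dimension back out before
  // returning.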
  let x4D = $x as Tensor4D;
  let reshapedTo4D = false;
  if ($x.rank === 3) {
    reshapedTo4D = true;
    x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
  }

  util.assert(
      x4D.rank === 4,
      () => `Error in fused conv2d: input must be rank 4, but got rank ` +
          `${x4D.rank}.`);
  util.assert(
      $filter.rank === 4,
      () => `Error in fused conv2d: filter must be rank 4, but got rank ` +
          `${$filter.rank}.`);
  conv_util.checkPadOnDimRoundingMode('fused conv2d', pad, dimRoundingMode);
  util.assert(
      x4D.shape[3] === $filter.shape[2],
      () => `Error in fused conv2d: depth of input (${x4D.shape[3]}) must ` +
          `match input depth of filter (${$filter.shape[2]}).`);
  util.assert(
      conv_util.eitherStridesOrDilationsAreOne(strides, dilations),
      () => 'Error in fused conv2d: Either strides or dilations must be 1. ' +
          `Got strides ${strides} and dilations ${dilations}.`);
  util.assert(
      dataFormat === 'NHWC',
      () => `Error in fused conv2d: got dataFormat of ${dataFormat} but ` +
          `only NHWC is currently supported.`);
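
  // Compute the output shape and padding information for the convolution.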
  const convInfo = conv_util.computeConv2DInfo(
      x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode);
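
  // The optional bias must match x's dtype and broadcast against the conv
  // output shape; PReLU activation weights are simply converted to a tensor.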
  let $bias: Tensor;
  if (bias != null) {
    $bias = convertToTensor(bias, 'bias', 'fused conv2d');
    [$bias] = makeTypesMatch($bias, $x);
    broadcast_util.assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);
  }

  let $preluActivationWeights: Tensor;
  if (preluActivationWeights != null) {
    $preluActivationWeights = convertToTensor(
        preluActivationWeights, 'prelu weights', 'fused conv2d');
  }
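
  // Custom gradient: scale dy by the activation's derivative at the saved
  // output y, then reuse the standard conv2d input/filter backprop ops. A
  // bias derivative is appended only when a bias was provided.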
  const grad = (dy: Tensor4D, saved: Tensor[]) => {
    const [$filter, x4D, y, $bias] =
        saved as [Tensor4D, Tensor4D, Tensor4D, Tensor];

    const dyActivation = getFusedDyActivation(dy, y, activation) as Tensor4D;

    util.assert(
        conv_util.tupleValuesAreOne(dilations),
        () => 'Error in gradient of fused conv2d: dilation rates greater ' +
            `than 1 are not yet supported in gradients. Got dilations ` +
            `'${dilations}'.`);

    const xDer =
        conv2DBackpropInput(x4D.shape, dyActivation, $filter, strides, pad);
    const filterDer =
        conv2DBackpropFilter(x4D, dyActivation, $filter.shape, strides, pad);
    const der: Tensor[] = [xDer, filterDer];

    if ($bias != null) {
      const biasDer = getFusedBiasGradient($bias, dyActivation);
      der.push(biasDer);
    }
    return der;
  };
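
  // Gather the kernel inputs and attrs; bias and preluActivationWeights stay
  // undefined when they were not provided.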
  const inputs: FusedConv2DInputs = {
    x: x4D,
    filter: $filter,
    bias: $bias,
    preluActivationWeights: $preluActivationWeights
  };

  const attrs: FusedConv2DAttrs = {
    strides,
    pad,
    dataFormat,
    dilations,
    dimRoundingMode,
    activation,
    leakyreluAlpha
  };

  // Depending on the params passed in we will have a different number of
  // inputs and thus a different number of elements in the gradient.
  if (bias == null) {
    const customOp =
        customGrad((x4D: Tensor4D, filter: Tensor4D, save: GradSaveFunc) => {
          let res: Tensor4D|Tensor3D =
              // tslint:disable-next-line: no-unnecessary-type-assertion
              ENGINE.runKernel(
                  FusedConv2D, inputs as {} as NamedTensorMap,
                  attrs as {} as NamedAttrMap);
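          // Save in the order the gradient closure destructures:
          // [filter, x, y].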
          save([filter, x4D, res]);

          if (reshapedTo4D) {
            // tslint:disable-next-line: no-unnecessary-type-assertion
            res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as
                Tensor3D;
          }
          return {value: res, gradFunc: grad};
        });
    return customOp(x4D, $filter) as T;
  } else {
    const customOpWithBias = customGrad(
        (x4D: Tensor4D, filter: Tensor4D, bias: Tensor,
         save: GradSaveFunc) => {
          let res: Tensor4D|Tensor3D = ENGINE.runKernel(
              FusedConv2D, inputs as {} as NamedTensorMap,
              attrs as {} as NamedAttrMap);
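          // With a bias input, also save it so the gradient closure can emit
          // a bias derivative: [filter, x, y, bias].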
          save([filter, x4D, res, bias]);

          if (reshapedTo4D) {
            // tslint:disable-next-line: no-unnecessary-type-assertion
            res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as
                Tensor3D;
          }
          return {value: res, gradFunc: grad};
        });

    return customOpWithBias(x4D, $filter, $bias) as T;
  }
}
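
// Usage sketch (illustrative, not part of the original file): this op is
// exposed publicly as `tf.fused.conv2d`, which takes the same config object.
// The shapes below are assumptions chosen for the example.
//
//   const x = tf.randomNormal([1, 5, 5, 1]) as tf.Tensor4D;
//   const filter = tf.randomNormal([3, 3, 1, 1]) as tf.Tensor4D;
//   const y = tf.fused.conv2d(
//       {x, filter, strides: 1, pad: 'same', activation: 'relu'});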