in lib/Dialect/mhlo/IR/hlo_ops.cc [3504:3620]
// Verifies a ReduceOp: operand arity, shape compatibility of the inputs,
// the `dimensions` attribute (in-bounds, duplicate-free), the reducer
// region (via verifyReducerShape), and that each result type matches the
// type derived from the reducer block and the dimensions attribute.
static LogicalResult Verify(ReduceOp op) {
  // Check that there are even number of operands and >= 2. Operands are laid
  // out as N inputs followed by N matching init-values.
  if (op.getNumOperands() % 2 != 0 || op.getOperands().empty())
    return op.emitOpError()
           << "expects the size of operands to be even and >= 2";

  // Collect the input and init-value operands. Note that the operand-type is
  // enforced as "TensorType" by ODS.
  int64_t numInputs = op.getNumOperands() / 2;
  auto operandTensorTypes = llvm::to_vector<4>(llvm::map_range(
      op.getOperandTypes(),
      [](Type t) -> TensorType { return t.cast<TensorType>(); }));
  ArrayRef<TensorType> inputArgTypes(operandTensorTypes.begin(),
                                     operandTensorTypes.begin() + numInputs);
  ArrayRef<TensorType> initValueTypes(operandTensorTypes.begin() + numInputs,
                                      operandTensorTypes.end());

  // Find the first ranked input (if any); it anchors all shape checks below.
  int64_t rankedInputIdx = -1;
  for (int64_t inputIdx = 0; inputIdx < numInputs; ++inputIdx) {
    if (inputArgTypes[inputIdx].hasRank()) {
      rankedInputIdx = inputIdx;
      break;
    }
  }
  bool allInputsUnranked = (rankedInputIdx == -1);

  // Check that all input operands have compatible shapes. The element types
  // may be different.
  if (!allInputsUnranked) {
    for (int64_t inputIdx = 0; inputIdx < numInputs; ++inputIdx) {
      if (failed(mlir::verifyCompatibleShape(inputArgTypes[rankedInputIdx],
                                             inputArgTypes[inputIdx]))) {
        return op.emitOpError()
               << "expects all inputs to have compatible shapes. Shape at"
               << " input-index " << inputIdx
               << " is not compatible with shape at input-index "
               << rankedInputIdx;
      }
    }
  }

  // Check that
  // 1. the dimensions of reduce-op are in-bounds for the given shape.
  // 2. the dimension-attribute have no duplicate entries.
  DenseSet<int64_t> dimensionsToReduceSet;
  for (int64_t dimension : op.dimensions().getValues<int64_t>()) {
    if ((!allInputsUnranked &&
         dimension >= inputArgTypes[rankedInputIdx].getRank()) ||
        dimension < 0) {
      // When every input is unranked there is no rank to report; guard the
      // rank portion of the message so we never index inputArgTypes with -1
      // (the previous code did, which is undefined behavior).
      InFlightDiagnostic diag = op.emitError()
                                << "Out-of-bounds dimension " << dimension;
      if (!allInputsUnranked)
        diag << " for input-tensor rank: "
             << inputArgTypes[rankedInputIdx].getRank();
      return diag;
    }
    if (!dimensionsToReduceSet.insert(dimension).second) {
      return op.emitError() << "Duplicate reduction dimension: " << dimension;
    }
  }

  // Derive the expected result shape: the ranked input's shape with the
  // reduced dimensions removed (left empty when all inputs are unranked).
  SmallVector<int64_t> newDimensions;
  if (!allInputsUnranked) {
    for (int inputIdx = 0; inputIdx < inputArgTypes[rankedInputIdx].getRank();
         ++inputIdx) {
      if (!dimensionsToReduceSet.count(inputIdx)) {
        newDimensions.push_back(
            inputArgTypes[rankedInputIdx].getDimSize(inputIdx));
      }
    }
  }

  // Verify the inner block defining the reducer function.
  Block& block = op.body().front();
  SmallVector<TensorType> accumulatorSubShapes;
  if (failed(verifyReducerShape(op, block, inputArgTypes, initValueTypes,
                                numInputs, newDimensions, allInputsUnranked,
                                accumulatorSubShapes)))
    return failure();

  // Check if the reduce-op's result-type matches with the one derived from
  // the reducer-block and dimensions attribute.
  if (op.getResults().size() != accumulatorSubShapes.size())
    return op.emitError()
           << "Unexpected number of reduce-op's returned values: "
           << op.getResults().size() << " vs " << accumulatorSubShapes.size()
           << " (expected)";

  for (int64_t shapeIdx = 0;
       shapeIdx < static_cast<int64_t>(accumulatorSubShapes.size());
       shapeIdx++) {
    // The result-type is enforced as "TensorType" by ODS.
    auto opResultType = op.getResult(shapeIdx).getType().cast<TensorType>();

    // Check element-type.
    if (accumulatorSubShapes[shapeIdx].getElementType() !=
        opResultType.getElementType()) {
      return op.emitError()
             << "Unexpected element-type for reduce-op's return value at index "
             << shapeIdx << ": " << opResultType.getElementType() << " vs "
             << accumulatorSubShapes[shapeIdx].getElementType()
             << " (expected)";
    }

    // Check shape (only enforceable when we have a ranked reference shape and
    // the result itself is ranked).
    if (!allInputsUnranked && opResultType.hasRank() &&
        (newDimensions != opResultType.getShape())) {
      Type expectedResultType = RankedTensorType::get(
          newDimensions, accumulatorSubShapes[shapeIdx].getElementType());
      return op.emitError()
             << "Unexpected type for reduce-op's return value at index "
             << shapeIdx << ": " << opResultType << " vs " << expectedResultType
             << " (expected)";
    }
  }
  return success();
}