[mlir][xla] LHLO-to-Affine: simplify DotOp conversion with nested builders

MLIR recently introduced a new idiom for constructing loop nests. Use it to
make the legalization of LHLO to affine loops more concise and readable.
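
The idiom in question is buildAffineLoopNest, which creates the whole loop
nest, positions a builder inside the innermost body, and hands the induction
variables to a callback. A minimal sketch of that usage pattern, assuming the
MLIR API as of this revision (the header path and the EmitCopyLoopNest / src /
dst names are illustrative, not taken from this change):

    #include "mlir/Dialect/Affine/IR/AffineOps.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;

    // Sketch only: emits a 2-D affine loop nest that copies "src" into "dst",
    // both rank-2 memrefs with static sizes {n, m}.
    static void EmitCopyLoopNest(OpBuilder& builder, Location loc, Value src,
                                 Value dst, int64_t n, int64_t m) {
      buildAffineLoopNest(
          builder, loc, /*lbs=*/{0, 0}, /*ubs=*/{n, m}, /*steps=*/{1, 1},
          [&](OpBuilder& nested, Location nested_loc, ValueRange ivs) {
            // "nested" is already positioned in the innermost loop body;
            // "ivs" holds one induction variable per loop, outermost first.
            auto v = nested.create<AffineLoadOp>(nested_loc, src, ivs);
            nested.create<AffineStoreOp>(nested_loc, v, dst, ivs);
          });
    }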

PiperOrigin-RevId: 317649515
Change-Id: Idfab27b4655d6df90d940fb7b064ea9941d8a700
Author:    Alex Zinenko
Date:      2020-06-22 07:04:04 -07:00
Committer: TensorFlower Gardener
Commit:    bb73be3e36
Parent:    12f5cd7dde


@@ -31,6 +31,17 @@ namespace mlir {
 namespace xla_lhlo {
 namespace {
+// Builds an affine loop nest iterating from zeros to "upper_bounds" with unit
+// steps, and populates the body of the innermost loop using "body_builder".
+static void BuildBoundedAffineLoopNest(
+    OpBuilder& builder, Location location, ArrayRef<int64_t> upper_bounds,
+    function_ref<void(OpBuilder&, Location, ValueRange)> body_builder) {
+  SmallVector<int64_t, 3> lower_bounds(upper_bounds.size(), /*Value=*/0);
+  SmallVector<int64_t, 3> steps(upper_bounds.size(), /*Value=*/1);
+  buildAffineLoopNest(builder, location, lower_bounds, upper_bounds, steps,
+                      body_builder);
+}
+
 struct DotOpConverter : public OpRewritePattern<DotOp> {
   using OpRewritePattern<DotOp>::OpRewritePattern;
@@ -48,37 +59,29 @@ struct DotOpConverter : public OpRewritePattern<DotOp> {
     if ((lhs_type.getRank() != 2) || (rhs_type.getRank() != 2)) {
       return failure();
     }
-    SmallVector<Value, 4> lhs_indices, rhs_indices, result_indices;
-    const auto& loc = op.getLoc();
-    // Create the canonical ijk form of matmul.
-    auto forOp = rewriter.create<AffineForOp>(loc, 0, shape_lhs[0]);
-    lhs_indices.push_back(forOp.getInductionVar());
-    result_indices.push_back(forOp.getInductionVar());
-    rewriter.setInsertionPointToStart(forOp.getBody());
-    forOp = rewriter.create<AffineForOp>(loc, 0, shape_rhs.back());
-    result_indices.push_back(forOp.getInductionVar());
-    rhs_indices.resize(2);
-    rhs_indices[1] = forOp.getInductionVar();
-    rewriter.setInsertionPointToStart(forOp.getBody());
-    forOp = rewriter.create<AffineForOp>(loc, 0, shape_rhs.front());
-    lhs_indices.push_back(forOp.getInductionVar());
-    rhs_indices[0] = forOp.getInductionVar();
-    // Construct the innermost loop body.
-    rewriter.setInsertionPointToStart(forOp.getBody());
-    auto l = rewriter.create<AffineLoadOp>(loc, lhs, lhs_indices);
-    auto r = rewriter.create<AffineLoadOp>(loc, rhs, rhs_indices);
-    auto result =
-        rewriter.create<AffineLoadOp>(loc, op.output(), result_indices);
-    Value op_result = xla_lhlo::XlaOpToStdScalarOp::map<DotOp>(
-        op, element_type, {l, r, result}, &rewriter);
-    if (op_result == nullptr) {
-      return failure();
-    }
-    rewriter.create<AffineStoreOp>(loc, op_result, op.output(), result_indices);
+    LogicalResult map_status = success();
+    auto body_builder = [&](OpBuilder& builder, Location loc, ValueRange ivs) {
+      SmallVector<Value, 2> lhs_indices{ivs[0], ivs[2]},
+          rhs_indices{ivs[2], ivs[1]}, result_indices{ivs[0], ivs[1]};
+      auto l = builder.create<AffineLoadOp>(loc, lhs, lhs_indices);
+      auto r = builder.create<AffineLoadOp>(loc, rhs, rhs_indices);
+      auto result =
+          rewriter.create<AffineLoadOp>(loc, op.output(), result_indices);
+      Value op_result = xla_lhlo::XlaOpToStdScalarOp::map<DotOp>(
+          op, element_type, {l, r, result}, &builder);
+      map_status = success(op_result != nullptr);
+      if (failed(map_status)) return;
+      builder.create<AffineStoreOp>(loc, op_result, op.output(),
+                                    result_indices);
+    };
+    BuildBoundedAffineLoopNest(rewriter, op.getLoc(),
+                               {shape_lhs[0], shape_rhs[1], shape_rhs[0]},
+                               body_builder);
+    if (failed(map_status)) return failure();
     rewriter.eraseOp(op);
     return success();
   }
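
The bounds passed to BuildBoundedAffineLoopNest above make ivs[0] iterate over
rows of lhs (shape_lhs[0]), ivs[1] over columns of rhs (shape_rhs[1]) and
ivs[2] over the contraction dimension (shape_rhs[0]), i.e. the canonical i-j-k
form of matmul. For intuition only, a plain scalar equivalent; ReferenceDot,
m, n and k_dim are illustrative names, not part of this change:

    #include <cstdint>
    #include <vector>

    // i = ivs[0], j = ivs[1], k = ivs[2] in the loop nest built above.
    static void ReferenceDot(const std::vector<std::vector<float>>& lhs,
                             const std::vector<std::vector<float>>& rhs,
                             std::vector<std::vector<float>>& out,
                             int64_t m, int64_t n, int64_t k_dim) {
      for (int64_t i = 0; i < m; ++i)          // shape_lhs[0]
        for (int64_t j = 0; j < n; ++j)        // shape_rhs[1]
          for (int64_t k = 0; k < k_dim; ++k)  // shape_rhs[0]
            out[i][j] += lhs[i][k] * rhs[k][j];
    }
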
@@ -99,22 +102,22 @@ struct BinaryOpConverter : public OpRewritePattern<LhloOpTy> {
     if (lhs_type.getShape() != rhs_type.getShape()) {
       return failure();
     }
-    const auto& shape = lhs_type.getShape();
-    SmallVector<Value, 4> induction_vars;
-    const auto loc = op.getLoc();
-    for (int i = 0; i < shape.size(); ++i) {
-      auto forOp = rewriter.create<AffineForOp>(loc, 0, shape[i]);
-      induction_vars.push_back(forOp.getInductionVar());
-      rewriter.setInsertionPointToStart(forOp.getBody());
-    }
-    auto l = rewriter.create<AffineLoadOp>(loc, lhs, induction_vars);
-    auto r = rewriter.create<AffineLoadOp>(loc, rhs, induction_vars);
-    Value opResult = xla_lhlo::XlaOpToStdScalarOp::map<LhloOpTy>(
-        op, element_type, {l, r}, &rewriter);
-    if (opResult == nullptr) {
-      return failure();
-    }
-    rewriter.create<AffineStoreOp>(loc, opResult, op.out(), induction_vars);
+    LogicalResult map_status = success();
+    auto body_builder = [&](OpBuilder& builder, Location loc,
+                            ValueRange induction_vars) {
+      auto l = builder.create<AffineLoadOp>(loc, lhs, induction_vars);
+      auto r = builder.create<AffineLoadOp>(loc, rhs, induction_vars);
+      Value op_result = xla_lhlo::XlaOpToStdScalarOp::map<LhloOpTy>(
+          op, element_type, {l, r}, &builder);
+      map_status = success(op_result != nullptr);
+      if (failed(map_status)) return;
+      rewriter.create<AffineStoreOp>(loc, op_result, op.out(), induction_vars);
+    };
+    BuildBoundedAffineLoopNest(rewriter, op.getLoc(), lhs_type.getShape(),
+                               body_builder);
+    if (failed(map_status)) return failure();
     rewriter.eraseOp(op);
     return success();
   }