From 80e5ad4c926850f55f9b5f7a6c127b529d87a1d8 Mon Sep 17 00:00:00 2001 From: Christian Sigg Date: Fri, 18 Dec 2020 12:23:00 -0800 Subject: [PATCH] Use mlir::OpState::operator->() to get to methods of mlir::Operation. This is a preparation step to remove those methods from OpState. PiperOrigin-RevId: 348209062 Change-Id: I72c68635d6b47d74a16932385cadf1fa3fe5b517 --- .../tensorflow/transforms/cross_host_transfer.cc | 8 ++++---- .../utils/tpu_rewrite_device_util_test.cc | 14 +++++++------- .../transforms/gpu_kernel_to_blob_pass.cc | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/cross_host_transfer.cc b/tensorflow/compiler/mlir/tensorflow/transforms/cross_host_transfer.cc index 155d1f60adb..06dea80dbe8 100644 --- a/tensorflow/compiler/mlir/tensorflow/transforms/cross_host_transfer.cc +++ b/tensorflow/compiler/mlir/tensorflow/transforms/cross_host_transfer.cc @@ -128,13 +128,13 @@ void CrossHostTransferPass::runOnFunction() { std::string key = GetNextKey(); auto send_op = builder.create<tf_device::SendOp>(op->getLoc(), arg, key, dst_host); - send_op.setAttr(kOpDeviceAttr, - builder.getStringAttr(src_host + kCPUDevice)); + send_op->setAttr(kOpDeviceAttr, + builder.getStringAttr(src_host + kCPUDevice)); auto receive_op = builder.create<tf_device::ReceiveOp>( op->getLoc(), arg.getType(), key, src_host); - receive_op.setAttr(kOpDeviceAttr, - builder.getStringAttr(dst_host + kCPUDevice)); + receive_op->setAttr(kOpDeviceAttr, + builder.getStringAttr(dst_host + kCPUDevice)); transferred_value_by_host[dst_host] = receive_op.getResult(); op->replaceUsesOfWith(arg, receive_op.getResult()); diff --git a/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc b/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc index 78547c8243d..63afd0a93e6 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc @@ 
-650,10 +650,10 @@ TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailModelParallelism) { llvm::SmallVector<mlir::Type, 8> result_types; auto cluster = builder.create<mlir::tf_device::ClusterOp>( mlir::UnknownLoc::get(&context), result_types); - cluster.setAttr(kNumCoresPerReplicaAttr, - builder.getIntegerAttr(builder.getIntegerType(64), 5)); - cluster.setAttr(kTopologyAttr, builder.getStringAttr("")); - cluster.setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({})); + cluster->setAttr(kNumCoresPerReplicaAttr, + builder.getIntegerAttr(builder.getIntegerType(64), 5)); + cluster->setAttr(kTopologyAttr, builder.getStringAttr("")); + cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({})); mlir::TF::RuntimeDevices runtime_devices; std::string host_device; @@ -671,9 +671,9 @@ TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailMissingTopology) { llvm::SmallVector<mlir::Type, 8> result_types; auto cluster = builder.create<mlir::tf_device::ClusterOp>( mlir::UnknownLoc::get(&context), result_types); - cluster.setAttr(kNumCoresPerReplicaAttr, - builder.getIntegerAttr(builder.getIntegerType(64), 1)); - cluster.setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({})); + cluster->setAttr(kNumCoresPerReplicaAttr, + builder.getIntegerAttr(builder.getIntegerType(64), 1)); + cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({})); mlir::TF::RuntimeDevices runtime_devices; std::string host_device; diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc index df6f55d9ef5..dc1365a675a 100644 --- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc +++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/gpu_kernel_to_blob_pass.cc @@ -69,8 +69,8 @@ class GpuKernelToBlobPass if (blob_or.ok()) { const auto& blob = blob_or.ValueOrDie(); std::string blob_string(blob.begin(), blob.end()); - gpu_module.setAttr(blob_annotation_, - mlir::StringAttr::get(blob_string, &getContext())); + 
gpu_module->setAttr(blob_annotation_, + mlir::StringAttr::get(blob_string, &getContext())); return; } // Forward the error by attaching the message to the gpu module.