From b8bd7b3483f63c8bda1cd2489e2e01db53d2f8b6 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Mon, 8 Jun 2020 09:30:37 -0700
Subject: [PATCH] Integrate LLVM at
 https://github.com/llvm/llvm-project/commit/92cb0ce8f814

PiperOrigin-RevId: 315290349
Change-Id: I2405c3505b6a860dd32f32d754d1a6da3f3acd29
---
 .../mlir/tensorflow/utils/convert_tensor.cc   | 22 +++++++------------
 tensorflow/compiler/mlir/xla/hlo_utils.cc     |  3 +--
 .../compiler/mlir/xla/mlir_hlo_to_hlo.cc      | 21 +++++++++++-------
 .../compiler/mlir/xla/tests/convert.mlir      |  2 +-
 .../compiler/xla/service/llvm_ir/llvm_util.cc |  1 +
 third_party/mlir/BUILD                        | 20 -----------------
 6 files changed, 24 insertions(+), 45 deletions(-)

diff --git a/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc b/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc
index b28f26b6c3c..359314a64b0 100644
--- a/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc
+++ b/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc
@@ -89,12 +89,11 @@ StatusOr<ElementsAttr> ConvertFlatTensor(const Tensor& input_tensor,
 
 ElementsAttr ConvertBf16Tensor(const Tensor& input_tensor,
                                RankedTensorType type) {
-  auto flat = input_tensor.flat<bfloat16>();
-  llvm::SmallVector<llvm::APFloat, 4> floats;
-  floats.reserve(flat.size());
-  for (bfloat16 v : llvm::makeArrayRef(flat.data(), flat.size()))
-    floats.push_back(llvm::APFloat(static_cast<double>(v)));
-  return mlir::DenseElementsAttr::get(type, llvm::makeArrayRef(floats));
+  auto buffer = llvm::makeArrayRef(static_cast<char*>(input_tensor.data()),
+                                   input_tensor.TotalBytes());
+  return mlir::DenseElementsAttr::getFromRawBuffer(
+      type, buffer,
+      /*isSplatBuffer=*/type.getNumElements() == 1);
 }
 
 ElementsAttr ConvertHalfTensor(const Tensor& tensor, RankedTensorType type) {
@@ -280,16 +279,11 @@ void ConvertIntElementsAttr(const mlir::DenseIntElementsAttr attr,
 
 void ConvertBfloat16ElementsAttr(const mlir::DenseFPElementsAttr attr,
                                  protobuf::RepeatedField<int>* output) {
-  // Bfloat16 is internally represented as `double` in MLIR.
   if (attr.isSplat()) {
-    double v = attr.getSplatValue<double>();
-    bfloat16 bf16_val = static_cast<bfloat16>(v);
-    output->Add(absl::bit_cast<uint16>(bf16_val));
+    output->Add((*attr.begin()).bitcastToAPInt().getSExtValue());
   } else {
-    for (auto v : attr.getValues<double>()) {
-      bfloat16 bf16_val = static_cast<bfloat16>(v);
-      output->Add(absl::bit_cast<uint16>(bf16_val));
-    }
+    for (const llvm::APFloat value : attr.getFloatValues())
+      output->Add(value.bitcastToAPInt().getSExtValue());
   }
 }
 
diff --git a/tensorflow/compiler/mlir/xla/hlo_utils.cc b/tensorflow/compiler/mlir/xla/hlo_utils.cc
index dc801f64ede..e1b5feeb117 100644
--- a/tensorflow/compiler/mlir/xla/hlo_utils.cc
+++ b/tensorflow/compiler/mlir/xla/hlo_utils.cc
@@ -44,8 +44,7 @@ template
 }
 
 mlir::APFloat ConvertToAPFloat(bfloat16 val) {
-  // bfloat16 values are stored as double in MLIR.
-  return llvm::APFloat(static_cast<double>(val));
+  return llvm::APFloat(llvm::APFloat::BFloat(), llvm::APInt(16, val.value));
 }
 
 mlir::APFloat ConvertToAPFloat(half val) {
diff --git a/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc b/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc
index 1c25625802f..bd61b10f827 100644
--- a/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc
+++ b/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc
@@ -979,10 +979,10 @@ StatusOr<xla::Literal> CreateLiteralFromAttr(ElementsAttr attr) {
       values.reserve(attr.getNumElements());
      for (APFloat val : attr.getValues<APFloat>()) {
         bool loses_info = false;
-        CHECK_EQ(val.convert(llvm::APFloat::IEEEsingle(),
-                             llvm::APFloat::rmTowardZero, &loses_info),
-                 llvm::APFloat::opOK);
-        CHECK(!loses_info);
+        TF_RET_CHECK(val.convert(llvm::APFloat::IEEEsingle(),
+                                 llvm::APFloat::rmTowardZero,
+                                 &loses_info) == llvm::APFloat::opOK);
+        TF_RET_CHECK(!loses_info);
         values.push_back(xla::half(val.convertToFloat()));
       }
       xla::Array<xla::half> source_data(shape.dimensions());
@@ -992,10 +992,15 @@ StatusOr<xla::Literal> CreateLiteralFromAttr(ElementsAttr attr) {
     case xla::PrimitiveType::BF16: {
       xla::Array<double> source_data(shape.dimensions());
       auto attr_values = attr.getValues<APFloat>();
-      std::vector<double> values_double(source_data.num_elements());
-      for (auto index_and_value : llvm::enumerate(attr_values)) {
-        values_double[index_and_value.index()] =
-            index_and_value.value().convertToDouble();
+      std::vector<double> values_double;
+      values_double.reserve(source_data.num_elements());
+      for (APFloat val : attr_values) {
+        bool loses_info = false;
+        TF_RET_CHECK(val.convert(llvm::APFloat::IEEEdouble(),
+                                 llvm::APFloat::rmTowardZero,
+                                 &loses_info) == llvm::APFloat::opOK);
+        TF_RET_CHECK(!loses_info);
+        values_double.push_back(val.convertToDouble());
       }
       source_data.SetValues(values_double);
       return xla::LiteralUtil::ConvertF64ToBF16(
diff --git a/tensorflow/compiler/mlir/xla/tests/convert.mlir b/tensorflow/compiler/mlir/xla/tests/convert.mlir
index 63ce724adb7..26d91132d32 100644
--- a/tensorflow/compiler/mlir/xla/tests/convert.mlir
+++ b/tensorflow/compiler/mlir/xla/tests/convert.mlir
@@ -191,7 +191,7 @@ func @const_f32_bf16() -> tensor<bf16> {
 
 // CHECK-LABEL: func @const_bf16_f64
 func @const_bf16_f64() -> tensor<f64> {
-  // CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.2{{0*}}e+00> : tensor<f64>
+  // CHECK-NEXT: [[CST:%.+]] = xla_hlo.constant dense<4.187500e+00> : tensor<f64>
   %cst = xla_hlo.constant dense<4.2> : tensor<bf16>
   %0 = "xla_hlo.convert"(%cst) : (tensor<bf16>) -> tensor<f64>
   // CHECK-NEXT: return [[CST]]
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
index 6375bf7341f..e4ca08f972b 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "absl/base/casts.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" +#include "llvm/ADT/Triple.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/GlobalVariable.h" diff --git a/third_party/mlir/BUILD b/third_party/mlir/BUILD index 27159203cf9..dda04d560c0 100644 --- a/third_party/mlir/BUILD +++ b/third_party/mlir/BUILD @@ -686,25 +686,6 @@ gentbl( ], ) -gentbl( - name = "MLIRShapeCanonicalizationIncGen", - strip_include_prefix = "include/mlir/Dialect/Shape", - tbl_outs = [ - ( - "-gen-rewriters", - "include/mlir/Dialect/Shape/IR/ShapeCanonicalization.inc", - ), - ], - tblgen = ":mlir-tblgen", - td_file = "lib/Dialect/Shape/IR/ShapeCanonicalization.td", - td_srcs = [ - ":StdOpsTdFiles", - "include/mlir/Dialect/Shape/IR/ShapeBase.td", - "include/mlir/Dialect/Shape/IR/ShapeOps.td", - "include/mlir/Interfaces/InferTypeOpInterface.td", - ], -) - cc_library( name = "Shape", srcs = glob( @@ -723,7 +704,6 @@ cc_library( ":Dialect", ":IR", ":InferTypeOpInterface", - ":MLIRShapeCanonicalizationIncGen", ":ShapeOpsIncGen", ":SideEffects", ":Support",