Bump OSS LLVM to 37ac1c19bed7b7d22e9312dfa61e7a4506ed4e49
PiperOrigin-RevId: 306409008
Change-Id: I206b6f3f2df5a3c42b2f4b6ad89b48dfc7240a7f
parent 02b3bc3c0d
commit 6b6736c316
@@ -59,7 +59,6 @@ limitations under the License.
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/IR/Types.h" // from @llvm-project
 #include "mlir/IR/Value.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Translation.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/flatbuffer_operator.h"
@@ -676,8 +675,8 @@ template <typename ContainerType>
 mlir::NamedAttribute BuildTFEntryFunctionAttribute(
     const tflite::SubGraphT& subgraph, Builder* builder, const std::string name,
     const ContainerType indices) {
-  llvm::SmallVector<std::string, 8> tensor_names = mlir::functional::map(
-      [&](int i) { return subgraph.tensors.at(i)->name; }, indices);
+  auto tensor_names = llvm::map_range(
+      indices, [&](int i) { return subgraph.tensors.at(i)->name; });
   return builder->getNamedAttr(
       name, builder->getStringAttr(llvm::join(tensor_names, ",")));
 }
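The hunk above is representative of the mechanical part of this change: with mlir/Support/Functional.h removed upstream, uses of `mlir::functional::map` are rewritten with `llvm::map_range`, the lazy range adaptor from llvm/ADT/STLExtras.h. Below is a minimal, self-contained sketch of the same idiom; the function and variable names are illustrative only and not taken from the TensorFlow sources.

```cpp
// Sketch: functional::map(fn, container) -> llvm::map_range(container, fn).
// Note that the argument order also flips (range first, callable second).
#include <string>
#include <vector>

#include "llvm/ADT/STLExtras.h"     // llvm::map_range
#include "llvm/ADT/StringExtras.h"  // llvm::join

std::string JoinSelectedNames(const std::vector<std::string>& names,
                              const std::vector<int>& indices) {
  // map_range does not allocate; it yields the mapped values on iteration.
  auto selected =
      llvm::map_range(indices, [&](int i) { return names.at(i); });
  // llvm::join accepts any range of strings, so the lazy range can be
  // consumed directly, mirroring getStringAttr(llvm::join(...)) above.
  return llvm::join(selected, ",");
}
```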
@@ -28,7 +28,6 @@ limitations under the License.
 #include "mlir/Interfaces/DerivedAttributeOpInterface.h" // from @llvm-project
 #include "mlir/Interfaces/LoopLikeInterface.h" // from @llvm-project
 #include "mlir/Interfaces/SideEffects.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/quantization/quantization_traits.h"
 #include "tensorflow/lite/schema/schema_generated.h"
@@ -33,7 +33,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/quantization/quantization_info.pb.h"
 #include "tensorflow/compiler/mlir/lite/quantization/quantization_passes.h"
@@ -20,7 +20,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Pass/Pass.h"
-#include "mlir/Support/Functional.h"
 #include "mlir/Support/LLVM.h"
 #include "absl/memory/memory.h"
 #include "llvm/ADT/STLExtras.h"
@@ -38,7 +38,6 @@ limitations under the License.
 #include "mlir/IR/Value.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
 #include "mlir/Pass/PassRegistry.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
@@ -36,7 +36,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Transforms/DialectConversion.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
@@ -288,12 +287,10 @@ LogicalResult ConvertTFSplitOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_split_op = cast<TF::SplitOp>(op);

-  auto output_types = functional::map([](Value v) { return v.getType(); },
-                                      tf_split_op.output());
   // Number of splits cannot be negative.
   auto num_split = rewriter.getI32IntegerAttr(tf_split_op.num_split());

-  rewriter.replaceOpWithNewOp<TFL::SplitOp>(op, output_types,
+  rewriter.replaceOpWithNewOp<TFL::SplitOp>(op, tf_split_op.output().getTypes(),
                                             tf_split_op.split_dim(),
                                             tf_split_op.value(), num_split);
   return success();
@@ -303,14 +300,12 @@ LogicalResult ConvertTFSplitVOp::matchAndRewrite(
     Operation* op, PatternRewriter& rewriter) const {
   auto tf_splitv_op = cast<TF::SplitVOp>(op);

-  auto output_types = functional::map([](Value v) { return v.getType(); },
-                                      tf_splitv_op.output());
   // Number of splits cannot be negative.
   auto num_split = rewriter.getI32IntegerAttr(tf_splitv_op.num_split());

   rewriter.replaceOpWithNewOp<TFL::SplitVOp>(
-      op, output_types, tf_splitv_op.value(), tf_splitv_op.size_splits(),
-      tf_splitv_op.split_dim(), num_split);
+      op, tf_splitv_op.output().getTypes(), tf_splitv_op.value(),
+      tf_splitv_op.size_splits(), tf_splitv_op.split_dim(), num_split);
   return success();
 }

@@ -402,13 +397,12 @@ LogicalResult ConvertTFUnpackOp::matchAndRewrite(
   auto tf_unpack_op = cast<TF::UnpackOp>(op);

   auto input = tf_unpack_op.value();
-  auto output_types = functional::map([](Value v) { return v.getType(); },
-                                      tf_unpack_op.output());
   auto num = rewriter.getI32IntegerAttr(tf_unpack_op.num());
   // Axis can be negative.
   auto axis = rewriter.getI32IntegerAttr(tf_unpack_op.axis().getSExtValue());

-  rewriter.replaceOpWithNewOp<UnpackOp>(op, output_types, input, num, axis);
+  rewriter.replaceOpWithNewOp<UnpackOp>(op, tf_unpack_op.output().getTypes(),
+                                        input, num, axis);
   return success();
 }

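In the three rewrites above the intermediate `output_types` vector is no longer needed at all: `tf_*_op.output()` is a result/value range, and its `getTypes()` accessor can be passed straight to `replaceOpWithNewOp`. A standalone sketch of that MLIR idiom follows, phrased against a generic `mlir::Operation*` rather than the concrete TF ops used above; the helper name is made up for illustration.

```cpp
// Sketch: collecting an op's result types without functional::map.
// Generic MLIR API usage for illustration; not code from the files above.
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"

llvm::SmallVector<mlir::Type, 4> ResultTypesOf(mlir::Operation* op) {
  // getResults() returns a result range; getTypes() views its types, which
  // is what tf_split_op.output().getTypes() does for the ops rewritten above.
  auto types = op->getResults().getTypes();
  return llvm::SmallVector<mlir::Type, 4>(types.begin(), types.end());
}
```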
@@ -49,7 +49,6 @@ limitations under the License.
 #include "mlir/IR/Value.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
 #include "mlir/Pass/PassRegistry.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "mlir/Transforms/DialectConversion.h" // from @llvm-project
@@ -37,7 +37,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
 #include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
@@ -46,7 +46,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
@@ -322,9 +321,10 @@ class ConvertTFConv2D : public ConvertTFConvOp<ConvertTFConv2D, TF::Conv2DOp> {

     // Create tensor type for the transpose result.
     auto filter_type = filter.getType().cast<RankedTensorType>();
-    auto result_shape = functional::map(
-        [filter_type](int64_t dim) { return filter_type.getDimSize(dim); },
-        perm);
+    auto result_shape =
+        llvm::to_vector<4>(llvm::map_range(perm, [filter_type](int64_t dim) {
+          return filter_type.getDimSize(dim);
+        }));
     auto elem_type = filter_type.getElementType();
     auto result_type = RankedTensorType::get(result_shape, elem_type);

@@ -29,7 +29,6 @@ limitations under the License.
 #include "mlir/IR/OperationSupport.h" // from @llvm-project
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
 #include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
 #include "tensorflow/compiler/mlir/lite/transforms/passes.h"
@@ -94,9 +94,10 @@ Value Transpose(OpBuilder* builder, Value value_to_transpose,

   // Create tensor type for the transpose result.
   auto transpose_type = original_type;
-  auto transpose_shape = functional::map(
-      [transpose_type](int32_t dim) { return transpose_type.getDimSize(dim); },
-      perm);
+  auto transpose_shape =
+      llvm::to_vector<8>(llvm::map_range(perm, [transpose_type](int32_t dim) {
+        return transpose_type.getDimSize(dim);
+      }));
   auto elem_type = transpose_type.getElementType();
   auto result_type = RankedTensorType::get(transpose_shape, elem_type);

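Unlike the attribute-joining case earlier, the two shape-permutation hunks above need an owning container, because the result later feeds RankedTensorType::get (which takes an ArrayRef<int64_t>); the lazy `llvm::map_range` is therefore materialized with `llvm::to_vector<N>`. A self-contained sketch of the same pattern, with an illustrative helper name and inputs that are not from the sources above:

```cpp
// Sketch: permute a shape with map_range and materialize it with to_vector.
#include <cstdint>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"    // llvm::map_range, llvm::to_vector
#include "llvm/ADT/SmallVector.h"

llvm::SmallVector<int64_t, 4> PermuteShape(llvm::ArrayRef<int64_t> shape,
                                           llvm::ArrayRef<int64_t> perm) {
  // map_range is lazy, so to_vector<4> copies the mapped values into an
  // owning SmallVector that can then be passed on as an ArrayRef (for
  // example to RankedTensorType::get, as in the hunks above).
  return llvm::to_vector<4>(
      llvm::map_range(perm, [&](int64_t dim) { return shape[dim]; }));
}
```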
@@ -32,7 +32,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
@@ -33,7 +33,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
@@ -45,7 +45,6 @@ limitations under the License.
 #include "mlir/IR/Visitors.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
 #include "mlir/Pass/PassRegistry.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/tensorflow/ir/control_flow_ops.h"
@@ -31,7 +31,6 @@ limitations under the License.
 #include "mlir/IR/PatternMatch.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Pass/Pass.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 #include "mlir/Support/LogicalResult.h" // from @llvm-project
 #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
@@ -27,7 +27,6 @@ limitations under the License.
 #include "mlir/IR/OpImplementation.h" // from @llvm-project
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/Interfaces/SideEffects.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project
 #include "mlir/Support/LLVM.h" // from @llvm-project
 namespace mlir {
 namespace tfjs {
@@ -30,7 +30,6 @@ limitations under the License.
 #include "mlir/IR/Types.h" // from @llvm-project
 #include "mlir/Interfaces/InferTypeOpInterface.h" // from @llvm-project
 #include "mlir/Interfaces/SideEffects.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project

 namespace mlir {
 class OpBuilder;
@@ -28,7 +28,6 @@ limitations under the License.
 #include "mlir/IR/StandardTypes.h" // from @llvm-project
 #include "mlir/IR/Types.h" // from @llvm-project
 #include "mlir/Interfaces/SideEffects.h" // from @llvm-project
-#include "mlir/Support/Functional.h" // from @llvm-project

 namespace mlir {
 class OpBuilder;
@@ -142,8 +142,8 @@ void RewriteCalls(
   }
   for (auto* call_to_inline : calls_to_inline) {
     llvm::InlineFunctionInfo inline_function_info;
-    CHECK(
-        llvm::InlineFunction(call_to_inline, inline_function_info).isSuccess());
+    CHECK(llvm::InlineFunction(*call_to_inline, inline_function_info)
+              .isSuccess());
   }
   // LLVM's InjectTLIMappings adds functions that might be used for
   // vectorization to 'llvm.compiler.used'. Remove it before deleting the
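The only non-mechanical C++ change in this bump tracks an upstream LLVM API update in the new revision range: llvm::InlineFunction now appears to take the call by reference (a CallBase&) rather than by call-site value, hence the added dereference and the reflowed CHECK(). A minimal sketch of the new call shape, assuming a hypothetical CallInst* collected the same way calls_to_inline is above:

```cpp
// Sketch of the post-bump llvm::InlineFunction usage; InlineOneCall and
// `call` are hypothetical names, not from the file changed above.
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Cloning.h"

bool InlineOneCall(llvm::CallInst* call) {
  llvm::InlineFunctionInfo inline_function_info;
  // The call instruction is now passed by reference; the returned
  // InlineResult reports whether inlining actually happened.
  auto result = llvm::InlineFunction(*call, inline_function_info);
  return result.isSuccess();
}
```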
@@ -58,7 +58,7 @@ TEST_F(GpuNoAliasTest, Concat) {
 ; CHECK: load float, float* %[[y_gep]], {{.*}}, !noalias ![[param_noalias]]
 ; CHECK: %[[result_ptr:.*]] = bitcast [2 x [6 x float]]* %fusion{{.*}} to float*
 ; CHECK: %[[result_gep:.*]] = getelementptr inbounds float, float* %[[result_ptr]]
-; CHECK: store float {{.*}}, float* %[[result_gep]], !alias.scope ![[param_noalias]]
+; CHECK: store float {{.*}}, float* %[[result_gep]], align 4, !alias.scope ![[param_noalias]]
 ; CHECK: ![[param_noalias]] = !{![[retval_buffer:.*]]}
 )",
   /*match_optimized_ir=*/false);
@@ -59,13 +59,13 @@ ENTRY while3 {
   CompileAndVerifyIr(hlo_string, R"(
 ; CHECK-LABEL: @body(i8* %retval
 ; CHECK: %[[add_result:.*]] = fadd fast float %[[fadd_lhs:.*]], %[[fadd_rhs:.*]]
-; CHECK: store float %[[add_result]], float* %[[store_dest:.*]], !alias.scope ![[alias_scope_md_for_store:[0-9]+]]
+; CHECK: store float %[[add_result]], float* %[[store_dest:.*]], align 4, !alias.scope ![[alias_scope_md_for_store:[0-9]+]]
 ;
 ; CHECK-LABEL: @condition(i8* %retval, i8* noalias %run_options, i8** noalias %params
 ; CHECK: %[[cond_state_buf_ptr:.*]] = getelementptr inbounds i8*, i8** %buffer_table, i64 0
 ; CHECK: %[[cond_state_buf_untyped:.*]] = load i8*, i8** %[[cond_state_buf_ptr]]
 ; CHECK: %[[cond_state_buf_typed:.*]] = bitcast i8* %[[cond_state_buf_untyped]] to float*
-; CHECK: load float, float* %[[cond_state_buf_typed]], !alias.scope ![[alias_scope_md_for_store]], !noalias ![[noalias_md_for_load:.*]]
+; CHECK: load float, float* %[[cond_state_buf_typed]], align 4, !alias.scope ![[alias_scope_md_for_store]], !noalias ![[noalias_md_for_load:.*]]
 ;
 ; CHECK-LABEL: @while3(

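The two XLA test hunks above only touch FileCheck expectations: the newer LLVM IR printer seems to always spell out the alignment on these loads and stores, so the patterns now match `align 4` explicitly. If a pattern should keep passing whether or not the printer emits the alignment, a regex alternative is one option; the constant below is purely illustrative and not part of this change.

```cpp
// Illustrative FileCheck line that tolerates an optional `align 4` operand
// instead of requiring it, as the updated tests above now do.
constexpr const char* kStoreCheckLine =
    "; CHECK: store float {{.*}}, float* %[[result_gep]]"
    "{{(, align 4)?}}, !alias.scope ![[param_noalias]]";
```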
@@ -658,8 +658,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     )

     # Check out LLVM and MLIR from llvm-project.
-    LLVM_COMMIT = "813f438baaa9638529023b2218875e01ea037735"
-    LLVM_SHA256 = "d264734ecd31d6d3d16b6a0c10eeb2b43a9ba8ddc9a79db6256847b91c956aa9"
+    LLVM_COMMIT = "37ac1c19bed7b7d22e9312dfa61e7a4506ed4e49"
+    LLVM_SHA256 = "b694cd7ed85cbc938f11534d56a326126015de21a5fec3c073dc6125bec1fc24"
     LLVM_URLS = [
         "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
         "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),