Bump OSS LLVM to f6bb58542aca5959acd1ab2e6ec757570df534e2

PiperOrigin-RevId: 292146195
Change-Id: If859953eaba0ee672abf79ae44612839f74471af
Authored by Benjamin Kramer on 2020-01-29 08:23:51 -08:00; committed by TensorFlower Gardener
parent 642708d212
commit b1171f57ad
26 changed files with 76 additions and 78 deletions
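
Most of the source changes below follow a single mechanical pattern: this LLVM revision appears to make the conversion from llvm::StringRef to std::string explicit, so call sites that used to rely on the implicit conversion now spell it out with std::string(...) or StringRef::str(). A minimal sketch of the pattern (the function and variable names here are illustrative, not from this change):

#include <string>
#include "llvm/ADT/StringRef.h"

void CopyName(llvm::StringRef name) {
  // Before the bump this compiled through an implicit conversion:
  //   std::string copy = name;
  // After the bump the conversion must be explicit, which is what the
  // std::string(...) wrappers throughout this diff do:
  std::string copy(name);
  // StringRef::str() is an equivalent spelling:
  std::string also_copy = name.str();
  (void)copy;
  (void)also_copy;
}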

View File

@@ -523,7 +523,7 @@ class Translator {
 };
 std::string Translator::UniqueName(mlir::Value val) {
-  return name_mapper_.GetUniqueName(val);
+  return std::string(name_mapper_.GetUniqueName(val));
 }
 Optional<BufferOffset<tflite::Buffer>> Translator::BuildBuffer(
@@ -1089,7 +1089,7 @@ Optional<BufferOffset<tflite::SubGraph>> Translator::BuildSubGraph(FuncOp fn) {
   for (unsigned i = 0, e = bb.getNumArguments(); i < e; ++i) {
     mlir::BlockArgument arg = bb.getArgument(i);
     std::string name;
-    if (has_input_attr) name = name_mapper_.GetUniqueName(arg);
+    if (has_input_attr) name = std::string(name_mapper_.GetUniqueName(arg));
     if (name.empty()) name = absl::StrCat("arg", i);
     if (!build_tensor_and_buffer(arg, name)) return llvm::None;
   }

View File

@@ -797,8 +797,7 @@ struct RemoveAdjacentReshape : public RewritePattern {
     // With
     //   %2 = "tfl.reshape"(%0, %shape1)
     rewriter.replaceOpWithNewOp<ReshapeOp>(
-        {prevOp.getResult()}, op, thisOp.getType(), prevOp.getOperand(0),
-        thisOp.getOperand(1));
+        op, thisOp.getType(), prevOp.getOperand(0), thisOp.getOperand(1));
   }
 };
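
The rewriter edits in this diff track a second API change that ships with the same LLVM/MLIR revision: PatternRewriter::replaceOp and replaceOpWithNewOp appear to have dropped their extra "values to remove if dead" argument, so patterns now pass only the replacement values. A rough sketch with placeholder names (not an op from this change):

#include "mlir/IR/PatternMatch.h"

void ReplaceExample(mlir::PatternRewriter& rewriter, mlir::Operation* op,
                    mlir::Value new_result) {
  // Old API, as seen on the removed lines of this diff:
  //   rewriter.replaceOp(op, {new_result}, /*valuesToRemoveIfDead=*/llvm::None);
  // New API: the rewriter tracks and erases dead producers itself.
  rewriter.replaceOp(op, {new_result});
}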

View File

@@ -122,7 +122,7 @@ static void EmitOptionBuilders(const RecordKeeper &record_keeper,
       os << formatv(
          " auto {0} = Convert{1}ForOptionWriter(op.{0}(), fbb);\n",
          val.getName(), record->getClasses()[0]->getName());
-      options.push_back(val.getName());
+      options.push_back(std::string(val.getName()));
     }
   }
 }

View File

@@ -701,7 +701,7 @@ struct ConvertTensorListStack
     if ((ranked_type && ranked_type.getRank() == 0) ||
         !matchPattern(element_shape, m_Constant(&dense_elem_attr))) {
       // If no constant is spotted, just forward the operand.
-      rewriter.replaceOp(op, {input}, llvm::None);
+      rewriter.replaceOp(op, {input});
       return matchSuccess();
     }

View File

@@ -153,7 +153,7 @@ std::string OpOrArgLocNameMapper::GetName(OpOrVal op_or_val) {
     if (!name_from_loc.empty()) return name_from_loc;
     // If the location is none of the expected types, then simply use name
     // generated using the op type.
-    return op->getName().getStringRef();
+    return std::string(op->getName().getStringRef());
   }
   auto val = op_or_val.dyn_cast<mlir::Value>();
   auto name_from_loc = GetNameFromLoc(val.getLoc());
@@ -166,7 +166,7 @@ std::string OpOrArgLocNameMapper::GetName(OpOrVal op_or_val) {
       return llvm::formatv("{0}:{1}",
                            result.getOwner()->getName().getStringRef(),
                            result.getResultNumber());
-    return result.getOwner()->getName().getStringRef();
+    return std::string(result.getOwner()->getName().getStringRef());
   }
   return "";
 }

View File

@@ -2680,7 +2680,6 @@ static LogicalResult Verify(WhileOp op) {
     return op.emitOpError("requires cond function to have exactly one result");
   SmallVector<Type, 4> operands(op.getOperandTypes());
-  SmallVector<Type, 4> results(op.getResultTypes());
   // Collect all the type lists for the op so that different pairs of type lists
   // can be compared for the compatibility.
@@ -2688,7 +2687,7 @@ static LogicalResult Verify(WhileOp op) {
   std::pair<std::string, ArrayRef<Type>> typeLists[] = {
       {"operand", operands},
       {"body function result", bodyFuncType.getResults()},
-      {"result", results},
+      {"result", op.getResultTypes()},
       {"cond function input", condFuncType.getInputs()},
       {"body function input", bodyFuncType.getInputs()},
   };

View File

@@ -58,10 +58,8 @@ FuncOp BuildFunction(StringRef device, llvm::ArrayRef<Value> live_ins,
   operand_types.reserve(live_ins.size());
   for (Value v : live_ins) operand_types.emplace_back(v.getType());
-  llvm::SmallVector<Type, 4> result_types(launch_op.getResultTypes());
-  auto func_type =
-      FunctionType::get(operand_types, result_types, builder->getContext());
+  auto func_type = FunctionType::get(operand_types, launch_op.getResultTypes(),
+                                     builder->getContext());
   std::string func_name_prefix = Twine(device, "_func").str();
   FuncOp outlined_func =
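
Several hunks (here and in the control-flow lowerings further down) also stop materializing a SmallVector<Type> whose only purpose was to forward an op's result types; the range returned by getResultTypes() is now handed to the callee directly. The general idea, sketched with plain LLVM ADT types rather than the MLIR types used above:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

int Sum(llvm::ArrayRef<int> values) {
  int total = 0;
  for (int v : values) total += v;
  return total;
}

int Caller(llvm::ArrayRef<int> result_types) {
  // Before: copy the range into a temporary container first.
  llvm::SmallVector<int, 4> copy(result_types.begin(), result_types.end());
  int with_copy = Sum(copy);
  // After: pass the range straight through and skip the copy.
  int without_copy = Sum(result_types);
  return with_copy + without_copy;
}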

View File

@@ -44,7 +44,7 @@ void MaterializePassthroughOpPass::runOnFunction() {
   getFunction().walk([](Operation *op) {
     auto passthrough_op = dyn_cast<TF::MlirPassthroughOp>(op);
     if (!passthrough_op) return;
-    std::string module_string = passthrough_op.mlir_module();
+    std::string module_string(passthrough_op.mlir_module());
     // Parse the module.
     auto nested_module = parseSourceString(module_string, op->getContext());
     if (!nested_module) {

View File

@@ -170,7 +170,7 @@ LogicalResult SetMetadataProtoFromLaunchFuncOp(
       xla::DebugOptions::STEP_MARK_AT_ENTRY;
   if (!step_marker_location.getValue().empty() &&
       !xla::DebugOptions::StepMarkerLocation_Parse(
-          step_marker_location.getValue(), &location))
+          std::string(step_marker_location.getValue()), &location))
     return op.emitOpError(llvm::formatv("bad '{0}' attribute with value '{1}'",
                                         kStepMarkerLocationAttr,
                                         step_marker_location.getValue()));
@@ -191,7 +191,7 @@ LogicalResult SetMetadataProtoFromLaunchFuncOp(
     tensorflow::tpu::PaddingMap* padding =
         metadata->mutable_padding_maps()->Add();
-    if (!padding->ParseFromString(padding_attr_str.getValue()))
+    if (!padding->ParseFromString(std::string(padding_attr_str.getValue())))
       return op.emitOpError(llvm::formatv(
           "bad '{0}' attribute at index {1} with value '{2}'", kPaddingMapAttr,
           padding_and_idx.index(), padding_attr_str.getValue()));
@@ -339,10 +339,9 @@ Operation* BuildExecuteOp(Operation* compile_op,
   // follow-up CLs.
   // TPUExecute has same output types as launch_func.
-  llvm::SmallVector<Type, 4> output_types(launch_func.getResultTypes());
-  return builder->create<TF::TPUExecuteOp>(launch_func.getLoc(), output_types,
-                                           tensor_inputs,
-                                           llvm::ArrayRef<NamedAttribute>{});
+  return builder->create<TF::TPUExecuteOp>(
+      launch_func.getLoc(), launch_func.getResultTypes(), tensor_inputs,
+      llvm::ArrayRef<NamedAttribute>{});
 }
 // Creates a `tf.TPUCompileSucceededAssert` operation that parses compilation

View File

@@ -167,7 +167,7 @@ AnnotateCompileOpAndGetExecuteArgToWhileArgsMapping(
   auto metadata_str = compile->getAttrOfType<StringAttr>("metadata");
   assert(metadata_str && "Missing compilation metadata");
   tensorflow::tpu::TPUCompileMetadataProto metadata;
-  metadata.ParseFromString(metadata_str.getValue());
+  metadata.ParseFromString(std::string(metadata_str.getValue()));
   int64_t num_replicas = replicate.n().getLimitedValue();
   // Find the formattable operands of `execute`, which must be mirrored
   // variables (arguments of `replicate`), and must be pass-throughs from while

View File

@@ -244,7 +244,8 @@ StatusOr<std::unique_ptr<NodeDef>> Exporter::GetArgumentNode(
   if (!name.empty())
     node_def->set_name(name.str());
   else
-    node_def->set_name(op_to_name_.GetUniqueName(func.getName().str()));
+    node_def->set_name(
+        std::string(op_to_name_.GetUniqueName(func.getName().str())));
   node_def->set_op(FunctionLibraryDefinition::kArgOp);
@@ -282,7 +283,8 @@ StatusOr<std::unique_ptr<NodeDef>> Exporter::GetReturnNode(
   if (!name.empty())
     node_def->set_name(name.str());
   else
-    node_def->set_name(op_to_name_.GetUniqueName(function.getName().str()));
+    node_def->set_name(
+        std::string(op_to_name_.GetUniqueName(function.getName().str())));
   node_def->set_op(FunctionLibraryDefinition::kRetOp);
   DataType dtype;
@@ -579,7 +581,7 @@ StatusOr<std::unique_ptr<Graph>> Exporter::Convert(
     // If there is a result index specified, ensure only one and that it
     // matches the result index of the op.
     auto result = it.value().cast<mlir::OpResult>();
-    std::string orig_name = output_names[it.index()];
+    std::string orig_name(output_names[it.index()]);
     auto tensor_id = ParseTensorName(orig_name);
     auto name = LegalizeNodeName(
         llvm::StringRef(tensor_id.node().data(), tensor_id.node().size()));
@@ -607,7 +609,7 @@ StatusOr<std::unique_ptr<Graph>> Exporter::Convert(
   TF_RET_CHECK(input_names.size() == block.getNumArguments());
   for (auto it : llvm::enumerate(function.getArguments())) {
     // TODO(lyandy): Update when changing feed/fetch import.
-    std::string orig_name = input_names[it.index()];
+    std::string orig_name(input_names[it.index()]);
     std::string name = LegalizeNodeName(orig_name);
     auto tensor_id = ParseTensorName(name);
     TF_RET_CHECK(tensor_id.index() == 0)

View File

@@ -101,7 +101,7 @@ Status GetUnregisteredAttrs(
                       GetTensorFlowOpName(inst->getName().getStringRef()));
   const tensorflow::OpRegistrationData* op_reg_data =
-      tensorflow::OpRegistry::Global()->LookUp(op_name);
+      tensorflow::OpRegistry::Global()->LookUp(std::string(op_name));
   if (!op_reg_data) {
     // This is likely a function call node, so we should continue.
     return Status::OK();

View File

@@ -124,7 +124,9 @@ class NameUniquifier : public OpOrArgNameMapper {
       : flib_(flib) {}
  private:
-  bool IsUnique(llvm::StringRef name) override { return !flib_.Contains(name); }
+  bool IsUnique(llvm::StringRef name) override {
+    return !flib_.Contains(std::string(name));
+  }
   std::string GetName(OpOrVal op_or_val) override {
     DCHECK(false) << "Unimplemented";
@@ -1047,15 +1049,16 @@ void ImporterBase::GetArgsAndRetsFromFunctionBody(
 Status ImporterBase::ConvertLibFunction(llvm::StringRef func_name) {
   // If the library function has been converted already, nothing needs to be
   // done.
-  if (tf_name_to_mlir_name_->find(func_name) != tf_name_to_mlir_name_->end())
+  if (tf_name_to_mlir_name_->find(std::string(func_name)) !=
+      tf_name_to_mlir_name_->end())
     return Status::OK();
-  std::string mlir_func_name =
-      function_name_uniquifier_->GetUniqueName(func_name);
-  (*tf_name_to_mlir_name_)[func_name] = mlir_func_name;
+  std::string mlir_func_name(
+      function_name_uniquifier_->GetUniqueName(func_name));
+  (*tf_name_to_mlir_name_)[std::string(func_name)] = mlir_func_name;
   const auto& func_lib = graph_flib_;
-  const auto* func_def = func_lib.Find(func_name);
+  const auto* func_def = func_lib.Find(std::string(func_name));
   if (func_def == nullptr) {
     return errors::FailedPrecondition(
         absl::StrCat("Failed to find function '", StringRefToView(func_name),
@@ -1089,7 +1092,7 @@ Status ImporterBase::ConvertLibFunction(llvm::StringRef func_name) {
   // Checks for an associated custom gradient function. Adds it to the attribute
   // list of this function.
-  auto grad_func_name = func_lib.FindGradient(func_name);
+  auto grad_func_name = func_lib.FindGradient(std::string(func_name));
   if (!grad_func_name.empty()) {
     TF_RETURN_IF_ERROR(ConvertLibFunction(grad_func_name));
     auto mlir_grad_func_name = (*tf_name_to_mlir_name_)[grad_func_name];
@@ -3019,7 +3022,7 @@ Status SavedModelV1Importer::ReadVariablesFromSession(
   std::vector<std::string> variable_names;
   variable_names.reserve(variable_names_and_ops.size());
   for (const auto& name_and_location : variable_names_and_ops)
-    variable_names.push_back(name_and_location.first);
+    variable_names.push_back(std::string(name_and_location.first));
   std::vector<Tensor> resource_tensors;
   TF_RETURN_IF_ERROR(bundle_.GetSession()->Run(

View File

@@ -126,8 +126,10 @@ Status CreateFileForDumping(llvm::StringRef name,
                << "' directory for dumping: " << status;
     return Status(error::Code::UNAVAILABLE, "(unavailable)");
   }
-  *filepath =
-      llvm::Twine(dir).concat("/").concat(MakeUniqueFilename(name)).str();
+  *filepath = llvm::Twine(dir)
+                  .concat("/")
+                  .concat(MakeUniqueFilename(std::string(name)))
+                  .str();
   // Try to open the file and generate a raw_ostream.
   std::unique_ptr<WritableFile> file;
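
For context on the reflowed call above: llvm::Twine only assembles the string when .str() is called, so the chained concat() calls are cheap. A stripped-down version of the same concatenation with placeholder arguments:

#include <string>
#include "llvm/ADT/Twine.h"

// Join a directory and a file name the way the dump helper above does; the
// Twine defers concatenation until str() materializes the result.
std::string JoinForDump(const std::string& dir, const std::string& filename) {
  return llvm::Twine(dir).concat("/").concat(filename).str();
}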

View File

@@ -97,7 +97,7 @@ mlir::LogicalResult EvaluateOperation(
   // Builds TF operation and sets all the attributes.
   std::string node_name = "unnamed";
   if (auto attr = inst->getAttrOfType<mlir::StringAttr>("name")) {
-    node_name = attr.getValue();
+    node_name = std::string(attr.getValue());
   }
   auto node_def_or = ConvertTFDialectOpToNodeDef(
       inst, node_name.c_str(), /*ignore_unregistered_attrs=*/true);

View File

@@ -136,7 +136,7 @@ Status ConvertAttribute(const mlir::UnitAttr& attr, AttrValue* value) {
 }
 Status ConvertAttribute(const mlir::FlatSymbolRefAttr& attr, AttrValue* value) {
-  value->mutable_func()->set_name(attr.getValue());
+  value->mutable_func()->set_name(std::string(attr.getValue()));
   return Status::OK();
 }
@@ -291,7 +291,7 @@ StatusOr<std::unique_ptr<NodeDef>> GetOperationNodeDef(
   }
   node_def->set_name(name.str());
-  node_def->set_op(op_name.str());
+  node_def->set_op(std::string(op_name.str()));
   // Add inputs to the NodeDef based on the number of operands. This is required
   // as later when edges are added to the Node using Graph::AddEdge the
@@ -300,7 +300,7 @@ StatusOr<std::unique_ptr<NodeDef>> GetOperationNodeDef(
     node_def->add_input();
   }
   if (auto attr = inst->getAttrOfType<mlir::StringAttr>("device")) {
-    node_def->set_device(attr.getValue());
+    node_def->set_device(std::string(attr.getValue()));
   }
   // Add the node attributes.
@@ -343,7 +343,7 @@ Status ConvertAttributes(
     switch (attr.getKind()) {
       case mlir::StandardAttributes::SymbolRef: {
         auto func_attr = attr.cast<mlir::FlatSymbolRefAttr>();
-        value.mutable_func()->set_name(func_attr.getValue());
+        value.mutable_func()->set_name(std::string(func_attr.getValue()));
         func_call_attrs[string(name)] = value;
         continue;
       }

View File

@@ -58,7 +58,7 @@ namespace {
 // direction. Longterm solution is to add a function attribute to maintain the
 // original HLO naming.
 string SanitizeFunctionName(llvm::StringRef name) {
-  string output = name;
+  string output(name);
   llvm::for_each(output, [](char& x) { x = x == '-' ? '_' : x; });
   return output;
 }

View File

@@ -124,7 +124,8 @@ static xla::FftType Convert_fft_type(llvm::StringRef fft_type_str) {
   xla::FftType fft_type_enum;
   // Illegal fft_type string would be caught by the verifier, so 'FftType_Parse'
   // call below should never return false.
-  if (!FftType_Parse(fft_type_str, &fft_type_enum)) return xla::FftType::FFT;
+  if (!FftType_Parse(std::string(fft_type_str), &fft_type_enum))
+    return xla::FftType::FFT;
   return fft_type_enum;
 }
@@ -179,7 +180,7 @@ static xla::TriangularSolveOptions::Transpose Convert_transpose_a(
   xla::TriangularSolveOptions::Transpose transpose_enum;
   // Illegal tanspose string would be caught by the verifier, so
   // 'Transpose_Parse' call below should never return false.
-  if (!xla::TriangularSolveOptions::Transpose_Parse(transpose_str,
+  if (!xla::TriangularSolveOptions::Transpose_Parse(std::string(transpose_str),
                                                     &transpose_enum))
     return xla::TriangularSolveOptions::NO_TRANSPOSE;
   return transpose_enum;
@@ -550,8 +551,8 @@ LogicalResult ExportXlaOp(CustomCallOp op, OpLoweringContext ctx) {
   if (op.has_side_effect()) return failure();
   auto& value_map = *ctx.values;
   value_map[op] = xla::CustomCall(
-      ctx.builder, op.call_target_name(), GetTuple(op.args(), ctx),
-      xla::TypeToShape(op.getType()), op.backend_config());
+      ctx.builder, std::string(op.call_target_name()), GetTuple(op.args(), ctx),
+      xla::TypeToShape(op.getType()), std::string(op.backend_config()));
   return success();
 }
@@ -560,8 +561,9 @@ LogicalResult ExportXlaOp(InfeedOp op, OpLoweringContext ctx) {
   // The shape argument expected by the xla client API is the type of the first
   // element in the result tuple.
   auto result_type = op.getType().cast<mlir::TupleType>().getType(0);
-  value_map[op] = xla::InfeedWithToken(
-      value_map[op.token()], xla::TypeToShape(result_type), op.infeed_config());
+  value_map[op] =
+      xla::InfeedWithToken(value_map[op.token()], xla::TypeToShape(result_type),
+                           std::string(op.infeed_config()));
   return success();
 }
@@ -586,9 +588,10 @@ LogicalResult ExportXlaOp(MapOp op, OpLoweringContext ctx) {
 LogicalResult ExportXlaOp(OutfeedOp op, OpLoweringContext ctx) {
   auto& value_map = *ctx.values;
-  value_map[op] = xla::OutfeedWithToken(
-      value_map[op.operand()], value_map[op.token()],
-      xla::TypeToShape(op.operand().getType()), op.outfeed_config());
+  value_map[op] =
+      xla::OutfeedWithToken(value_map[op.operand()], value_map[op.token()],
+                            xla::TypeToShape(op.operand().getType()),
+                            std::string(op.outfeed_config()));
   return success();
 }
@@ -754,7 +757,7 @@ LogicalResult ExportXlaOp(SortOp op, OpLoweringContext ctx) {
 LogicalResult ExportXlaOp(TraceOp op, OpLoweringContext ctx) {
   auto& value_map = *ctx.values;
-  xla::Trace(op.tag(), value_map[op.operand()]);
+  xla::Trace(std::string(op.tag()), value_map[op.operand()]);
   return success();
 }

View File

@@ -52,7 +52,7 @@ static std::string GetDefaultAttrExport(
   return "Convert_" + named_attr.name.str();
 }
-static std::string GetClientBuilder(const Operator& op) {
+static StringRef GetClientBuilder(const Operator& op) {
   static const auto* kOpToXLABuilderMap =
       new llvm::StringMap<StringRef>{{"ReverseOp", "Rev"},
                                      {"ConcatenateOp", "ConcatInDim"},

View File

@@ -123,8 +123,7 @@ class HloToLhloOpConverter : public ConversionPattern {
     }
     rewriter.create<LhloOpTy>(op->getLoc(), llvm::None, buffer_args,
                               op->getAttrs());
-    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()),
-                       original_results);
+    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()));
     return matchSuccess();
   }
 };
@@ -178,8 +177,7 @@ struct HloToLHloReduceConverter
     rewriter.setInsertionPointToEnd(&entry_block);
     rewriter.create<xla_lhlo::TerminatorOp>(loc);
-    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()),
-                       original_results);
+    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()));
     return matchSuccess();
   }
@@ -193,7 +191,7 @@ class HloToLhloTensorLoadConverter : public ConversionPattern {
   PatternMatchResult matchAndRewrite(
       Operation* op, ArrayRef<Value> operands,
       ConversionPatternRewriter& rewriter) const final {
-    rewriter.replaceOp(op, operands, op->getResults());
+    rewriter.replaceOp(op, operands);
     return matchSuccess();
   }
 };

View File

@@ -2040,7 +2040,7 @@ class GenericConvertReductionOp : public OpRewritePattern<OpTy> {
     if (op.keep_dims()) {
       result = rewriter.create<ReshapeOp>(loc, op.getType(), result);
     }
-    rewriter.replaceOp(op, {result}, {op.reduction_indices()});
+    rewriter.replaceOp(op, {result});
     return this->matchSuccess();
   }
@@ -2338,7 +2338,7 @@ class ConvertTileOp : public OpRewritePattern<TF::TileOp> {
       result = rewriter.create<ReshapeOp>(loc, output_type, result);
     }
-    rewriter.replaceOp(op, {result}, {op.multiples()});
+    rewriter.replaceOp(op, {result});
     return matchSuccess();
   }
@@ -2385,7 +2385,7 @@ class ConvertMaxPoolGradOp : public OpRewritePattern<TF::MaxPoolGradOp> {
       rewriter.create<ReturnOp>(loc, reducer.getResult());
     }
-    rewriter.replaceOp(op, {result}, {op.orig_output()});
+    rewriter.replaceOp(op, {result});
     return matchSuccess();
   }
@@ -2530,7 +2530,7 @@ class ConvertConv2DBackpropInputOp
         /*batch_group_count=*/rewriter.getI64IntegerAttr(1),
         /*precision_config=*/ArrayAttr());
-    rewriter.replaceOp(op, {result}, {op.input_sizes()});
+    rewriter.replaceOp(op, {result});
     return matchSuccess();
   }
@@ -2732,7 +2732,7 @@ class ConvertConv2DBackpropFilterOp
         /*batch_group_count=*/rewriter.getI64IntegerAttr(1),
         /*precision_config=*/ArrayAttr());
-    rewriter.replaceOp(op, {result}, {op.filter_sizes()});
+    rewriter.replaceOp(op, {result});
     return matchSuccess();
   }
@@ -2784,9 +2784,7 @@ class ConvertOneHotOp : public OpRewritePattern<TF::OneHotOp> {
     Value result = rewriter.create<SelectOp>(loc, op.getType(), compare,
                                              on_value, off_value);
-    rewriter.replaceOp(
-        op, {result},
-        {op.indices(), op.on_value(), op.depth(), op.off_value()});
+    rewriter.replaceOp(op, {result});
    return matchSuccess();
   }

View File

@@ -115,8 +115,7 @@ void LowerIf(TF::IfOp op, ModuleOp module) {
   // Create the new conditional op with tuple inputs.
   SmallVector<Value, 3> operands(op.getOperands());
-  SmallVector<Type, 4> types(op.getResultTypes());
-  auto result_type = builder.getTupleType(types);
+  auto result_type = builder.getTupleType(op.getResultTypes());
   auto conditional = builder.create<xla_hlo::ConditionalOp>(
       loc, result_type, op.cond(), tuple_input, tuple_input);
@@ -147,9 +146,8 @@ void LowerWhile(TF::WhileOp op, ModuleOp module) {
   // Create the new while op with tuple inputs.
   SmallVector<Value, 3> operands(op.getOperands());
-  SmallVector<Type, 4> types(op.getResultTypes());
   auto while_op = builder.create<xla_hlo::WhileOp>(
-      loc, builder.getTupleType(types), tuple_input);
+      loc, builder.getTupleType(op.getResultTypes()), tuple_input);
   // Import the regions for both the cond and body. These regions must be
   // updated to tuple the return results together and use the xla hlo return op.

View File

@@ -57,7 +57,7 @@ llvm::SmallVector<std::string, 0> DetectMachineAttributes() {
   if (llvm::sys::getHostCPUFeatures(host_features)) {
     for (auto& feature : host_features) {
       if (feature.second) {
-        result.push_back(feature.first());
+        result.push_back(std::string(feature.first()));
       }
     }
   }
@@ -93,8 +93,8 @@ SimpleOrcJIT::SimpleOrcJIT(
       data_layout_(target_machine_->createDataLayout()),
       symbol_resolver_(llvm::orc::createLegacyLookupResolver(
           execution_session_,
-          [this](const std::string& name) -> llvm::JITSymbol {
-            return this->ResolveRuntimeSymbol(name);
+          [this](llvm::StringRef name) -> llvm::JITSymbol {
+            return this->ResolveRuntimeSymbol(std::string(name));
           },
           [](llvm::Error Err) {
             cantFail(std::move(Err), "lookupFlags failed");

View File

@@ -1514,7 +1514,7 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
   }
   return absl::make_unique<KernelThunk>(
-      non_constant_buffers, kernel->getName(),
+      non_constant_buffers, std::string(kernel->getName()),
       implements_whole_instruction ? inst : nullptr, unroll_factor);
 }

View File

@@ -595,8 +595,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
     )
     # Check out LLVM and MLIR from llvm-project.
-    LLVM_COMMIT = "c8695ba9cdebfc25af3312a84d91ae6f0f98487b"
-    LLVM_SHA256 = "d11de654ff4d374a3807788a30c62224ff5ac6299dd10d5e0afaafc4776fbc6c"
+    LLVM_COMMIT = "f6bb58542aca5959acd1ab2e6ec757570df534e2"
+    LLVM_SHA256 = "158ee622595aa1775a3ba99da2e74cc35ffbb292e6ff215e6fa022e4218322f7"
     LLVM_URLS = [
         "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
         "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),

View File

@@ -2279,7 +2279,6 @@ cc_library(
         "include/mlir/Dialect/Linalg/EDSC/Intrinsics.h",
         "include/mlir/Dialect/Linalg/Passes.h",
         "include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h",
-        "include/mlir/Dialect/Linalg/Utils/Intrinsics.h",
         "include/mlir/Dialect/Linalg/Utils/Utils.h",
     ],
     includes = ["include"],