[TensorFlow PE] Address static analyzer findings in grappler.

PiperOrigin-RevId: 304685874
Change-Id: I0e7a5e26079b7100aa9bdfe49309f22a7c6820b0
A. Unique TensorFlower 2020-04-03 13:47:34 -07:00 committed by TensorFlower Gardener
parent 34c95ccc99
commit 850ae50ab4
8 changed files with 123 additions and 116 deletions


@@ -863,11 +863,11 @@ Status ValidateLists(const gtl::FlatSet<string>& white_list,
std::vector<gtl::FlatSet<string>> lists{white_list, black_list, gray_list,
clear_list};
std::multiset<string> counts;
for (auto list : lists) {
for (const auto& list : lists) {
counts.insert(list.begin(), list.end());
}
bool duplicates = false;
for (auto s : counts) {
for (const auto& s : counts) {
if (counts.count(s) > 1) {
duplicates = true;
LOG(ERROR) << "Op present in multiple lists: " << s;
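
The hunk above switches both loops to `const auto&` so that neither the per-iteration `gtl::FlatSet` copy nor the per-element `string` copy is made. A minimal standalone sketch of the same duplicate-detection pattern, in plain C++ with standard containers standing in for `gtl::FlatSet` (not the TensorFlow sources):

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Three op lists; "MatMul" appears in two of them.
  std::vector<std::set<std::string>> lists{
      {"MatMul", "Conv2D"}, {"Exp", "Log"}, {"MatMul", "Tanh"}};
  // Merge every list into one multiset, then flag entries seen more than once.
  std::multiset<std::string> counts;
  for (const auto& list : lists) {  // const ref: no per-list set copy
    counts.insert(list.begin(), list.end());
  }
  bool duplicates = false;
  for (const auto& s : counts) {  // const ref: no per-element string copy
    if (counts.count(s) > 1) {
      duplicates = true;
      std::cerr << "Op present in multiple lists: " << s << "\n";
    }
  }
  return duplicates ? 1 : 0;
}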
@@ -1054,20 +1054,20 @@ Status AutoMixedPrecisionImpl::PrintDebugLogs(bool preop, size_t timestamp) {
strings::StrCat("paintbuckets", suffix, ".txt"));
f.open(fname.c_str(), std::fstream::out);
f << "WhiteList:\n";
for (auto x :
for (const auto& x :
AutoMixedPrecisionLists::WhiteList(cuda_version_, cudnn_version_)) {
f << x << "\n";
}
f << "\nBlackList:\n";
for (auto x : AutoMixedPrecisionLists::BlackList()) {
for (const auto& x : AutoMixedPrecisionLists::BlackList()) {
f << x << "\n";
}
f << "\nGrayList:\n";
for (auto x : AutoMixedPrecisionLists::GrayList()) {
for (const auto& x : AutoMixedPrecisionLists::GrayList()) {
f << x << "\n";
}
f << "\nClearList:\n";
for (auto x : AutoMixedPrecisionLists::ClearList()) {
for (const auto& x : AutoMixedPrecisionLists::ClearList()) {
f << x << "\n";
}
f.close();


@@ -27,10 +27,10 @@ class AutoMixedPrecisionLists {
private:
static void UpdateList(gtl::FlatSet<string>* list, const string& to_add,
const string& to_remove) {
for (auto x : str_util::Split(to_add, ",")) {
for (const auto& x : str_util::Split(to_add, ",")) {
list->insert(x);
}
for (auto x : str_util::Split(to_remove, ",")) {
for (const auto& x : str_util::Split(to_remove, ",")) {
list->erase(x);
}
}
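
Same finding here: `str_util::Split` returns a container of freshly allocated strings, and iterating it by value copied each one again. A hedged sketch of the fixed loop shape, with a hand-rolled split helper standing in for the internal `str_util::Split` (hypothetical, for illustration only):

#include <set>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical stand-in for str_util::Split: split on a single character.
std::vector<std::string> Split(const std::string& s, char sep) {
  std::vector<std::string> parts;
  std::string part;
  std::istringstream stream(s);
  while (std::getline(stream, part, sep)) parts.push_back(part);
  return parts;
}

void UpdateList(std::set<std::string>* list, const std::string& to_add,
                const std::string& to_remove) {
  // const auto& binds to each element in place instead of copying it.
  for (const auto& x : Split(to_add, ',')) list->insert(x);
  for (const auto& x : Split(to_remove, ',')) list->erase(x);
}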


@@ -1053,7 +1053,7 @@ bool ConstantFolding::MaybeFoldable(const NodeDef& node,
op.find("Reader") != string::npos) {
return false;
}
if (op.find("Quantized") != string::npos || op.find("Sparse") == 0) {
if (op.find("Quantized") != string::npos || absl::StartsWith(op, "Sparse")) {
return false;
}
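
`op.find("Sparse") == 0` is a correct prefix test, but on a mismatch it keeps scanning the whole string before comparing the position; `absl::StartsWith` examines only the leading characters. A small sketch of the equivalence (assuming Abseil is available):

#include <iostream>
#include <string>
#include "absl/strings/match.h"

int main() {
  const std::string op = "SparseSegmentSum";
  // Both expressions test for the "Sparse" prefix; StartsWith bails out
  // without searching the rest of the string when the prefix is absent.
  std::cout << (op.find("Sparse") == 0) << "\n";        // 1
  std::cout << absl::StartsWith(op, "Sparse") << "\n";  // 1
  return 0;
}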


@@ -136,7 +136,7 @@ int DependencyOptimizer::NumEdgesIfBypassed(
// multi-input identity_n with input/output control dependencies will likely
// increase number of edges after optimization.
int num_edges_if_bypassed(0);
for (string input_node_name : node.input()) {
for (const string& input_node_name : node.input()) {
if (IsControlInput(input_node_name)) {
num_edges_if_bypassed += num_outputs;
} else {


@@ -466,7 +466,8 @@ void RecomputationRewritingPass(RewriterConfig::MemOptType optimization_level,
// meaning it either begins with or contains the name scope.
// Defaults to "gradients/" which will match any node names that begins
// with "gradients/" or contains "/gradients/".
return node.name().find(recomputation_targets_name_scope) == 0 ||
return absl::StartsWith(node.name(),
recomputation_targets_name_scope) ||
node.name().find("/" + recomputation_targets_name_scope) != -1;
};
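
The remaining `find(...) != -1` in the lambda is a "contains" test. A hedged sketch of the whole matcher as a free function, with `absl::StrContains` standing in for that call (the function name and signature here are illustrative, not TensorFlow's):

#include <string>
#include "absl/strings/match.h"

// A node is a recomputation target when its name begins with the scope
// or embeds the scope after a '/' separator.
bool IsRecomputationTarget(const std::string& node_name,
                           const std::string& scope) {  // e.g. "gradients/"
  return absl::StartsWith(node_name, scope) ||
         absl::StrContains(node_name, "/" + scope);
}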


@@ -971,7 +971,7 @@ class Tree {
public:
Tree(const string& edge, int depth) : edge_(edge), depth_(depth) {}
~Tree() {
for (auto it : subtrees_) delete it.second;
for (const auto& it : subtrees_) delete it.second;
}
Tree* GetSubTree(const string& edge) {
@@ -996,7 +996,7 @@ class Tree {
// on any non-OK Status.
Status ApplyToAll(Tree* tree, const std::function<Status(Tree*)>& func) {
Status s;
for (auto it : tree->subtrees_) {
for (const auto& it : tree->subtrees_) {
s = ApplyToAll(it.second, func);
if (!s.ok()) return s;
}
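
In these two loops each iteration copied a whole map entry (a string key plus a pointer) unless bound by reference. A standalone sketch of the same recursive traversal, with `bool` standing in for `Status` (simplified, not the TensorFlow class):

#include <functional>
#include <map>
#include <string>

struct Tree {
  std::map<std::string, Tree*> subtrees;
  ~Tree() {
    // const ref: delete the children without copying the map entries.
    for (const auto& it : subtrees) delete it.second;
  }
};

// Visit every subtree depth-first, stopping at the first failure.
bool ApplyToAll(Tree* tree, const std::function<bool(Tree*)>& func) {
  for (const auto& it : tree->subtrees) {
    if (!ApplyToAll(it.second, func)) return false;
  }
  return func(tree);
}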


@@ -66,121 +66,125 @@ Status ShapeOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
}
*optimized_graph = item.graph;
MutableGraphView graph(optimized_graph);
GraphProperties properties(item);
bool inferred_properties = false;
// The product of all the dimensions in a tensor shape can be expressed more
// simply as the size of the tensor.
for (auto& node : *optimized_graph->mutable_node()) {
if (!IsShape(node)) {
continue;
}
for (MutableGraphView::InputPort fanout :
graph.GetFanout(MutableGraphView::OutputPort(&node, 0))) {
if (fanout.node->op() != "Prod") {
{
MutableGraphView graph(optimized_graph);
// The product of all the dimensions in a tensor shape can be expressed more
// simply as the size of the tensor.
for (auto& node : *optimized_graph->mutable_node()) {
if (!IsShape(node)) {
continue;
}
if (fanout.node->attr().count("keep_dims") != 0 &&
fanout.node->attr().at("keep_dims").b()) {
// Keeping the reduced dimensions won't result in a scalar, so we can't
// rewrite the whole expression directly as a Size operation.
continue;
}
const MutableGraphView::OutputPort reduce_indices =
graph.GetRegularFanin(MutableGraphView::InputPort(fanout.node, 1));
if (!inferred_properties) {
// Infer properties lazily in case they are not needed.
TF_RETURN_IF_ERROR(
properties.InferStatically(/*assume_valid_feeds=*/false,
/*aggressive_shape_inference=*/false,
/*include_tensor_values=*/false));
inferred_properties = true;
}
const auto& prop =
properties.GetOutputProperties(reduce_indices.node->name());
if (prop.size() <= reduce_indices.port_id) {
continue;
}
const TensorShapeProto& reduction_indices_shape =
prop[reduce_indices.port_id].shape();
if (NumCoefficients(reduction_indices_shape) == 1) {
const auto& input_props = properties.GetInputProperties(node.name());
if (input_props.size() != 1) {
for (MutableGraphView::InputPort fanout :
graph.GetFanout(MutableGraphView::OutputPort(&node, 0))) {
if (fanout.node->op() != "Prod") {
continue;
}
// Rewrite the reduction of the shape dimensions as a Size operation.
NodeDef size_node(*fanout.node);
const DataType type = input_props[0].dtype();
size_node.set_op("Size");
size_node.set_input(0, node.input(0));
size_node.set_input(1, AsControlDependency(node));
size_node.mutable_attr()->erase("Tidx");
size_node.mutable_attr()->erase("keep_dims");
(*size_node.mutable_attr())["out_type"] = fanout.node->attr().at("T");
(*size_node.mutable_attr())["T"].set_type(type);
// The corresponding Size kernel might not exist on the device where
// Prod was placed, so assign the Size kernel to the same device as the
// input.
size_node.set_device(node.device());
// In the unlikely event that "Size" is not registered on the input
// device, skip the optimization.
Status s = IsKernelRegisteredForNode(size_node);
if (!s.ok()) {
if (fanout.node->attr().count("keep_dims") != 0 &&
fanout.node->attr().at("keep_dims").b()) {
// Keeping the reduced dimensions won't result in a scalar, so we
// can't rewrite the whole expression directly as a Size operation.
continue;
}
const MutableGraphView::OutputPort reduce_indices =
graph.GetRegularFanin(MutableGraphView::InputPort(fanout.node, 1));
if (!inferred_properties) {
// Infer properties lazily in case they are not needed.
TF_RETURN_IF_ERROR(
properties.InferStatically(/*assume_valid_feeds=*/false,
/*aggressive_shape_inference=*/false,
/*include_tensor_values=*/false));
inferred_properties = true;
}
const auto& prop =
properties.GetOutputProperties(reduce_indices.node->name());
if (prop.size() <= reduce_indices.port_id) {
continue;
}
const TensorShapeProto& reduction_indices_shape =
prop[reduce_indices.port_id].shape();
if (NumCoefficients(reduction_indices_shape) == 1) {
const auto& input_props = properties.GetInputProperties(node.name());
if (input_props.size() != 1) {
continue;
}
// Rewrite the reduction of the shape dimensions as a Size operation.
NodeDef size_node(*fanout.node);
const DataType type = input_props[0].dtype();
size_node.set_op("Size");
size_node.set_input(0, node.input(0));
size_node.set_input(1, AsControlDependency(node));
size_node.mutable_attr()->erase("Tidx");
size_node.mutable_attr()->erase("keep_dims");
(*size_node.mutable_attr())["out_type"] = fanout.node->attr().at("T");
(*size_node.mutable_attr())["T"].set_type(type);
fanout.node->Swap(&size_node);
// The corresponding Size kernel might not exist on the device where
// Prod was placed, so assign the Size kernel to the same device as
// the input.
size_node.set_device(node.device());
// In the unlikely event that "Size" is not registered on the input
// device, skip the optimization.
Status s = IsKernelRegisteredForNode(size_node);
if (!s.ok()) {
continue;
}
fanout.node->Swap(&size_node);
}
}
}
}
for (auto& node : *optimized_graph->mutable_node()) {
// Try to convert the ratio of 2 symbolic tensor sizes into a constant. This
// is possible whenever the symbolic dimensions in the numerator and
// denominator cancel each other.
if (node.op() == "Div") {
const MutableGraphView::OutputPort input1 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 0));
const MutableGraphView::OutputPort input2 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 1));
if (input1.node == nullptr || input2.node == nullptr) continue;
if (!IsSize(*input1.node) || !IsSize(*input2.node)) {
continue;
}
if (!inferred_properties) {
// Infer properties lazily in case they are not needed.
TF_RETURN_IF_ERROR(
properties.InferStatically(/*assume_valid_feeds=*/false,
/*aggressive_shape_inference=*/false,
/*include_tensor_values=*/false));
inferred_properties = true;
}
const auto& prop1 = properties.GetInputProperties(input1.node->name());
const auto& prop2 = properties.GetInputProperties(input2.node->name());
if (prop1.size() != 1 || prop2.size() != 1) {
continue;
}
const TensorShapeProto& shape1 = prop1[0].shape();
const TensorShapeProto& shape2 = prop2[0].shape();
int64 result = ComputeSizeRatio(shape1, shape2);
if (result >= 0) {
// Replace div with constant.
node.set_op("Const");
DataType dtype = node.attr().at("T").type();
node.mutable_attr()->erase("T");
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorProto* t = (*node.mutable_attr())["value"].mutable_tensor();
t->set_dtype(dtype);
*t->mutable_tensor_shape() = TensorShapeProto();
if (dtype == DT_INT32) {
t->add_int_val(result);
} else {
t->add_int64_val(result);
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
// Try to convert the ratio of 2 symbolic tensor sizes into a constant.
// This is possible whenever the symbolic dimensions in the numerator and
// denominator cancel each other.
if (node.op() == "Div") {
const MutableGraphView::OutputPort input1 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 0));
const MutableGraphView::OutputPort input2 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 1));
if (input1.node == nullptr || input2.node == nullptr) continue;
if (!IsSize(*input1.node) || !IsSize(*input2.node)) {
continue;
}
if (!inferred_properties) {
// Infer properties lazily in case they are not needed.
TF_RETURN_IF_ERROR(
properties.InferStatically(/*assume_valid_feeds=*/false,
/*aggressive_shape_inference=*/false,
/*include_tensor_values=*/false));
inferred_properties = true;
}
const auto& prop1 = properties.GetInputProperties(input1.node->name());
const auto& prop2 = properties.GetInputProperties(input2.node->name());
if (prop1.size() != 1 || prop2.size() != 1) {
continue;
}
const TensorShapeProto& shape1 = prop1[0].shape();
const TensorShapeProto& shape2 = prop2[0].shape();
int64 result = ComputeSizeRatio(shape1, shape2);
if (result >= 0) {
// Replace div with constant.
node.set_op("Const");
DataType dtype = node.attr().at("T").type();
node.mutable_attr()->erase("T");
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorProto* t = (*node.mutable_attr())["value"].mutable_tensor();
t->set_dtype(dtype);
*t->mutable_tensor_shape() = TensorShapeProto();
if (dtype == DT_INT32) {
t->add_int_val(result);
} else {
t->add_int64_val(result);
}
node.set_input(0, AsControlDependency(node.input(0)));
node.set_input(1, AsControlDependency(node.input(1)));
}
node.set_input(0, AsControlDependency(node.input(0)));
node.set_input(1, AsControlDependency(node.input(1)));
}
}
}
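
Both rewrites in this file lean on shape arithmetic: `Prod(Shape(t))` equals `Size(t)` because a tensor's element count is the product of its dimensions, and the ratio of two such sizes is a constant whenever the symbolic dimensions cancel. A scalar-level sketch of the first identity (plain C++, not grappler code):

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Shape of a hypothetical 2x3x4 tensor.
  const std::vector<int64_t> shape = {2, 3, 4};
  // Prod(Shape(t)): multiply all the dimensions together.
  const int64_t prod = std::accumulate(shape.begin(), shape.end(), int64_t{1},
                                       std::multiplies<int64_t>());
  std::cout << prod << "\n";  // 24 == Size(t), the element count
  return 0;
}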


@@ -58,7 +58,9 @@ void CompressConstants(GraphDef* graph) {
if ((IsConstant(*node) || IsHostConstant(*node)) &&
HasNodeAttr(*node, "value")) {
AttrValue& attr_val = (*node->mutable_attr())["value"];
tensor::CompressTensorProtoInPlace(attr_val.mutable_tensor());
if (attr_val.has_tensor()) {
tensor::CompressTensorProtoInPlace(attr_val.mutable_tensor());
}
}
}
}
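
The new `has_tensor()` guard matters because `value` is a oneof-style proto field: calling `mutable_tensor()` while another case is active would silently switch the oneof and hand back a default-constructed tensor. A standalone sketch of the guard with `std::variant` playing the role of the oneof (hypothetical types, not TensorFlow's):

#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

struct TensorProto { std::string data; };
using AttrValue = std::variant<std::monostate, TensorProto, int64_t>;

void CompressIfTensor(AttrValue* attr) {
  // Equivalent of has_tensor(): only mutate when a tensor is actually held.
  if (auto* t = std::get_if<TensorProto>(attr)) {
    t->data = "compressed(" + t->data + ")";
  }
}

int main() {
  AttrValue tensor_attr = TensorProto{"raw"};
  AttrValue int_attr = int64_t{7};
  CompressIfTensor(&tensor_attr);
  CompressIfTensor(&int_attr);  // no-op: the variant holds an int
  std::cout << std::get<TensorProto>(tensor_attr).data << "\n";
  return 0;
}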