Replace instances of "blacklist" with "denylist" where possible. See Google Developer guidelines at https://developers.google.com/style/word-list#blacklist for more information.

PiperOrigin-RevId: 322242499
Change-Id: I66ea33f87811bbc734f538029d806240da91130f
This commit is contained in:
Karmel Allison 2020-07-20 15:49:15 -07:00 committed by TensorFlower Gardener
parent 9f3c94aba6
commit 18ebe824d2
64 changed files with 572 additions and 559 deletions

View File

@ -66,9 +66,9 @@
* Tracing and Debugging:
* <ADD RELEASE NOTES HERE>
* Other:
* We have replaced uses of "whitelist" with "allowlist" where possible.
Please see https://developers.google.com/style/word-list#blacklist for more
context.
* We have replaced uses of "whitelist" and "blacklist" with "allowlist"
and "denylist" where possible. Please see
https://developers.google.com/style/word-list#blacklist for more context.
* <ADD RELEASE NOTES HERE>
## Thanks to our Contributors

View File

@ -1829,7 +1829,7 @@ TEST(XlaCompilationTest, XLALiteAllowlist) {
}
EXPECT_TRUE(unknow_op.empty())
<< "Someone added support for new TF operations inside XLA. They must "
"be included in the XLALite allowlist or blacklist:\n"
"be included in the XLALite allowlist or denylist:\n"
<< absl::StrJoin(unknow_op, "\n");
}
} // namespace

View File

@ -311,7 +311,7 @@ class EagerFunctionTest(xla_test.XLATestCase):
if 'GPU' in self.device:
# TODO(b/32333178)
self.skipTest('Current implementation of RandomStandardNormal kernel '
'is very slow on GPU, and has been blacklisted.')
'is very slow on GPU, and has been denylisted.')
with self.test_scope():
data_format = 'channels_last'
conv = convolutional.Conv2D(

View File

@ -711,15 +711,15 @@ Status SegmentGraph(const Graph* tf_graph,
std::unordered_set<string> unsupported_ops;
int num_unsupported_ops = 0;
// Getting the operations blacklisted for conversion
string tftrt_op_blacklist_str;
// Getting the operations denylisted for conversion
string tftrt_op_denylist_str;
TF_CHECK_OK(
ReadStringFromEnvVar("TF_TRT_OP_BLACKLIST", "", &tftrt_op_blacklist_str));
ReadStringFromEnvVar("TF_TRT_OP_DENYLIST", "", &tftrt_op_denylist_str));
auto tftrt_op_blacklist = gtl::FlatSet<string>{}; // non-absl ok
auto tftrt_op_denylist = gtl::FlatSet<string>{}; // non-absl ok
for (const auto& x : str_util::Split(tftrt_op_blacklist_str, ",")) {
tftrt_op_blacklist.insert(x);
for (const auto& x : str_util::Split(tftrt_op_denylist_str, ",")) {
tftrt_op_denylist.insert(x);
}
// Parsing each node of the graph
@ -761,13 +761,13 @@ Status SegmentGraph(const Graph* tf_graph,
const Status status = candidate_fn(node->tf_node());
if (!status.ok()) {
exclude_node(status.error_message());
} else if (tftrt_op_blacklist.count(node->tf_node()->type_string())) {
} else if (tftrt_op_denylist.count(node->tf_node()->type_string())) {
// WARNING verbosity since the user explicitly requests this behavior.
LOG_WARNING_WITH_PREFIX
<< "Blacklisted as TF-TRT candidate, "
<< "Denylisted as TF-TRT candidate, "
<< "(Op type: " << node->tf_node()->type_string() << "), "
<< "(Op name: " << node->name() << ")";
exclude_node("Blacklisted with the env var TF_TRT_OP_BLACKLIST");
exclude_node("Denylisted with the env var TF_TRT_OP_DENYLIST");
} else {
VLOG(2) << "Accepted as a TF-TRT candidate, "
<< "(Op type: " << node->tf_node()->type_string() << "), "

View File

@ -535,10 +535,10 @@ static void AllocateFlags() {
flag_values->xla_gpu_force_conv_nchw(),
"For cuDNN convolutions, always NCHW layouts."));
flag_objects->push_back(tensorflow::Flag(
"xla_gpu_algorithm_blacklist_path",
string_setter_for(&DebugOptions::set_xla_gpu_algorithm_blacklist_path),
flag_values->xla_gpu_algorithm_blacklist_path(),
"An AlgorithmBlacklist text proto file as a blacklist of convolutions to "
"xla_gpu_algorithm_denylist_path",
string_setter_for(&DebugOptions::set_xla_gpu_algorithm_denylist_path),
flag_values->xla_gpu_algorithm_denylist_path(),
"An AlgorithmDenylist text proto file as a denylist of convolutions to "
"avoid to use."));
flag_objects->push_back(tensorflow::Flag(
"xla_gpu_deterministic_reductions",

View File

@ -1676,7 +1676,7 @@ cc_library(
tf_cc_test(
name = "hlo_algorithm_blacklist_test",
srcs = ["hlo_algorithm_blacklist_test.cc"],
data = ["data/hlo_algorithm_blacklist.pbtxt"],
data = ["data/hlo_algorithm_denylist.pbtxt"],
tags = ["no_pip"],
deps = [
":hlo_algorithm_blacklist",

View File

@ -15,19 +15,19 @@ message ConvInstructionLog {
repeated uint64 operand_addresses = 4;
}
message BlacklistedAlgorithm {
message DenylistedAlgorithm {
int64 id = 1;
bool tensor_ops = 2;
}
message AlgorithmBlacklistEntry {
message AlgorithmDenylistEntry {
string hlo = 1;
tensorflow.ComputeCapability cc = 2;
tensorflow.CudnnVersion cudnn_version = 3;
string blas_version = 5;
repeated BlacklistedAlgorithm algos = 4;
repeated DenylistedAlgorithm algos = 4;
}
message AlgorithmBlacklist {
repeated AlgorithmBlacklistEntry entries = 1;
message AlgorithmDenylist {
repeated AlgorithmDenylistEntry entries = 1;
}

View File

@ -438,10 +438,9 @@ GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheCuda(
(void)blas->GetVersion(&blas_version);
}
absl::Span<const AlgorithmDesc> blacklisted_algos =
GetBlacklistedConvAlgorithms(GetComputeCapability(stream_exec_),
GetCudnnVersion(stream_exec_), blas_version,
canonical_hlo);
absl::Span<const AlgorithmDesc> disabled_algos = GetDisabledConvAlgorithms(
GetComputeCapability(stream_exec_), GetCudnnVersion(stream_exec_),
blas_version, canonical_hlo);
for (const AlgorithmDesc& alg : GetAlgorithms(kind, stream_exec_)) {
XLA_SCOPED_LOGGING_TIMER_LEVEL(
@ -449,7 +448,7 @@ GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheCuda(
AlgorithmToString(alg)),
2);
if (absl::c_linear_search(blacklisted_algos, alg)) {
if (absl::c_linear_search(disabled_algos, alg)) {
LOG(INFO) << "Omitted potentially buggy algorithm "
<< AlgorithmToString(alg) << " for conv " << instr->ToString();
continue;
@ -503,7 +502,7 @@ GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheCuda(
if (!input_output_allocator_redzone_clear ||
!scratch_allocator_redzone_clear) {
AlgorithmBlacklist proto;
AlgorithmDenylist proto;
auto entry = proto.add_entries();
entry->set_hlo(canonical_hlo);
*entry->mutable_cc() = GetComputeCapability(stream_exec_);
@ -513,13 +512,12 @@ GpuConvAlgorithmPicker::PickBestAlgorithmNoCacheCuda(
algo->set_id(alg.algo_id());
algo->set_tensor_ops(alg.tensor_ops_enabled());
LOG(ERROR)
<< "To blacklist this algorithm for this convolution, "
"copy-paste the following "
"proto to the blacklist file pointed by XLA_FLAGS "
"--xla_gpu_algorithm_blacklist_path="
<< GetDebugOptionsFromFlags().xla_gpu_algorithm_blacklist_path()
<< " : " << proto.ShortDebugString();
LOG(ERROR) << "To denylist this algorithm for this convolution, "
"copy-paste the following "
"proto to the denylist file pointed by XLA_FLAGS "
"--xla_gpu_algorithm_denylist_path="
<< GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path()
<< " : " << proto.ShortDebugString();
continue;
}

View File

@ -24,7 +24,7 @@ limitations under the License.
namespace xla {
namespace gpu {
constexpr char kDefaultBlacklist[] = R"pb(
constexpr char kDefaultDenylist[] = R"pb(
entries {
hlo: "(f32[4,32,32,32]{2,1,3,0}, u8[0]{0}) custom-call(f32[4,32,32,32]{2,1,3,0}, f32[5,5,32,32]{1,0,2,3}), window={size=5x5 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, custom_call_target=\"__cudnn$convForward\", backend_config=\"{conv_result_scale:1}\""
cc { major: 7 }
@ -41,28 +41,26 @@ constexpr char kDefaultBlacklist[] = R"pb(
}
)pb";
absl::Span<const stream_executor::dnn::AlgorithmDesc>
GetBlacklistedConvAlgorithms(tensorflow::ComputeCapability cc,
tensorflow::CudnnVersion cudnn_version,
const std::string& blas_version,
const std::string& hlo) {
absl::Span<const stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
tensorflow::ComputeCapability cc, tensorflow::CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo) {
// Key is the tuple of canonicalized hlo, compute capability major/minor,
// cudnn version major/minor/patch, blas version.
using MapType = absl::flat_hash_map<
std::tuple<std::string, int, int, int, int, int, std::string>,
std::vector<stream_executor::dnn::AlgorithmDesc>>;
static MapType* blacklist = [] {
static MapType* denylist = [] {
MapType* list = new MapType();
AlgorithmBlacklist proto;
AlgorithmDenylist proto;
std::string file_path =
GetDebugOptionsFromFlags().xla_gpu_algorithm_blacklist_path();
GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path();
if (!file_path.empty()) {
TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(),
file_path, &proto));
} else {
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
std::string(kDefaultBlacklist), &proto));
std::string(kDefaultDenylist), &proto));
}
for (const auto& entry : proto.entries()) {
for (const auto& algo : entry.algos()) {
@ -77,10 +75,10 @@ GetBlacklistedConvAlgorithms(tensorflow::ComputeCapability cc,
return list;
}();
auto iter = blacklist->find(std::make_tuple(
auto iter = denylist->find(std::make_tuple(
hlo, cc.major(), cc.minor(), cudnn_version.major(), cudnn_version.minor(),
cudnn_version.patch(), std::string(blas_version)));
if (iter != blacklist->end()) {
if (iter != denylist->end()) {
return iter->second;
}
return {};

View File

@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_ALGORITHM_BLACKLIST_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_ALGORITHM_BLACKLIST_H_
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_
#include <vector>
@ -24,13 +24,11 @@ limitations under the License.
namespace xla {
namespace gpu {
absl::Span<const stream_executor::dnn::AlgorithmDesc>
GetBlacklistedConvAlgorithms(tensorflow::ComputeCapability cc,
tensorflow::CudnnVersion cudnn_version,
const std::string& blas_version,
const std::string& hlo);
absl::Span<const stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
tensorflow::ComputeCapability cc, tensorflow::CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo);
} // namespace gpu
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_ALGORITHM_BLACKLIST_H_
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_

View File

@ -26,22 +26,22 @@ namespace xla {
namespace gpu {
namespace {
class BlacklistTest : public testing::Test {
class DenylistTest : public testing::Test {
protected:
BlacklistTest() {
DenylistTest() {
tensorflow::setenv(
"XLA_FLAGS",
absl::StrCat(
"--xla_gpu_algorithm_blacklist_path=",
"--xla_gpu_algorithm_denylist_path=",
tensorflow::GetDataDependencyFilepath(tensorflow::io::JoinPath(
"tensorflow", "compiler", "xla", "service", "gpu", "data",
"hlo_algorithm_blacklist.pbtxt")))
"hlo_algorithm_denylist.pbtxt")))
.data(),
0);
}
};
TEST_F(BlacklistTest, DefaultTest) {
TEST_F(DenylistTest, DefaultTest) {
tensorflow::ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
@ -49,7 +49,7 @@ TEST_F(BlacklistTest, DefaultTest) {
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetBlacklistedConvAlgorithms(
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, /*blas_version=*/"9000",
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", backend_config="{conv_result_scale:1}")");
ASSERT_EQ(4, list.size());
@ -59,7 +59,7 @@ TEST_F(BlacklistTest, DefaultTest) {
EXPECT_EQ(stream_executor::dnn::AlgorithmDesc(1, true), list[3]);
}
TEST_F(BlacklistTest, NegativeTest) {
TEST_F(DenylistTest, NegativeTest) {
tensorflow::ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
@ -68,7 +68,7 @@ TEST_F(BlacklistTest, NegativeTest) {
cudnn_version.set_minor(6);
cudnn_version.set_minor(2);
auto list =
GetBlacklistedConvAlgorithms(cc, cudnn_version, "9000", R"(invalid hlo)");
GetDisabledConvAlgorithms(cc, cudnn_version, "9000", R"(invalid hlo)");
ASSERT_EQ(0, list.size());
}

View File

@ -121,9 +121,9 @@ struct Item {
bool placed = false;
// To avoid an infinite loop rematerializing the same set of
// instructions ad infinitum, keep a blacklist of instructions
// instructions ad infinitum, keep a denylist of instructions
// which should not be rematerialized.
bool blacklisted = false;
bool denylisted = false;
// The buffers defined by this instruction.
BufferIdList buffers_defined;
@ -292,8 +292,8 @@ class InstructionList {
InsertBeforeInstructions(to_insert, {max_position_item->next});
}
void Blacklist(const HloInstruction* inst) {
GetItem(inst)->blacklisted = true;
void Denylist(const HloInstruction* inst) {
GetItem(inst)->denylisted = true;
}
private:
@ -1158,13 +1158,13 @@ std::vector<Item*> GetInitialBlock(const InstructionList& instruction_list,
return item_block;
}
// Returns whether any instruction in 'block' is blacklisted or
// Returns whether any instruction in 'block' is denylisted or
// non-rematerializable.
bool AnyBlacklistedOrNonRematerializable(
bool AnyDenylistedOrNonRematerializable(
const std::vector<Item*>& block,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) {
for (auto* item : block) {
if (item->blacklisted) {
if (item->denylisted) {
return true;
}
if (!CanBeRematerialized(item->instruction, rematerializable_map)) {
@ -1195,10 +1195,10 @@ MemoryUsageTracker::PickRematerializationCandidates(
// instructions.
break;
}
// If any item in the starting block are blacklisted or non-rematable, then
// If any item in the starting block are denylisted or non-rematable, then
// break and move on to next start_item (we can actually move to the last
// invalid item in this block, but let's ignore that optimization for now).
if (AnyBlacklistedOrNonRematerializable(block, rematerializable_map)) {
if (AnyDenylistedOrNonRematerializable(block, rematerializable_map)) {
continue;
}
while (block.size() <= max_block_size) {
@ -1289,8 +1289,8 @@ MemoryUsageTracker::PickRematerializationCandidates(
// Time to update the block to include the next instruction.
auto* last_item = block[block.size() - 1];
auto* next_item = instruction_list.next(last_item);
if (next_item == nullptr || next_item->blacklisted ||
!next_item->placed || next_item == in_progress_item_ ||
if (next_item == nullptr || next_item->denylisted || !next_item->placed ||
next_item == in_progress_item_ ||
!CanBeRematerialized(next_item->instruction, rematerializable_map)) {
break;
}
@ -1404,7 +1404,7 @@ StatusOr<int64> RematerializeInstructions(
// instruction it was a copying of. Now 'remat' is a rematerialization
// of 'best' and kills 'best'. Stop rematerializing this instruction
// to avoid an infinite loop.
instruction_list->Blacklist(remat);
instruction_list->Denylist(remat);
}
remat_move_instructions->insert(remat);
} else {
@ -1460,8 +1460,8 @@ StatusOr<int64> CompressInstruction(MemoryUsageTracker* memory_tracker,
place_before.push_back(instruction_list->GetItem(user));
}
instruction_list->Blacklist(compressed_item->instruction);
instruction_list->Blacklist(uncompressed_item->instruction);
instruction_list->Denylist(compressed_item->instruction);
instruction_list->Denylist(uncompressed_item->instruction);
instruction_list->InsertBeforeInstructions(uncompressed_item, place_before);
@ -1583,7 +1583,7 @@ StatusOr<bool> HloRematerialization::RematerializeComputation(
// rematerialization is added to 'remat_move_instructions' (the
// rematerialization is essentially a move). If the next rematerialization of
// the instruction is also a move then the rematerialization is added to the
// blacklist.
// denylist.
absl::flat_hash_set<const HloInstruction*> remat_move_instructions;
// The map from instructions to their rematerializable status.

View File

@ -270,8 +270,8 @@ message DebugOptions {
// Paths to files with ptx code.
repeated string xla_gpu_ptx_file = 127;
// Blacklist for cuDNN convolutions.
string xla_gpu_algorithm_blacklist_path = 128;
// Denylist for cuDNN convolutions.
string xla_gpu_algorithm_denylist_path = 128;
// Guarantee run-to-run determinism from reductions on XLA:GPU.
bool xla_gpu_deterministic_reductions = 130;

View File

@ -293,7 +293,7 @@ class NodeTypeAttrMap {
}
// Note that the mappings generated here include inputs/outputs with fixed
// types. This makes the mappings complete (all inputs and outputs are
// included), and allows the graph rewriter to propagate black paint
// included), and allows the graph rewriter to propagate deny paint
// from/through ops with fixed types.
io2type_entry.first.reserve(input_arg_inds.size());
for (int i = 0; i < static_cast<int>(input_arg_inds.size()); ++i) {
@ -843,10 +843,10 @@ DataTypeSet AllowedDataTypes(const OpDef& op_def, const TypeAttrId& t_attr_id) {
}
Status ValidateLists(const gtl::FlatSet<string>& allow_list,
const gtl::FlatSet<string>& black_list,
const gtl::FlatSet<string>& gray_list,
const gtl::FlatSet<string>& deny_list,
const gtl::FlatSet<string>& infer_list,
const gtl::FlatSet<string>& clear_list) {
std::vector<gtl::FlatSet<string>> lists{allow_list, black_list, gray_list,
std::vector<gtl::FlatSet<string>> lists{allow_list, deny_list, infer_list,
clear_list};
std::multiset<string> counts;
for (const auto& list : lists) {
@ -967,23 +967,23 @@ class AutoMixedPrecisionImpl {
bool SupportsF16(const NodeTypeId& node_type) const;
const NodeTypeId* GetTensorListFloat32NodeTypeId(const NodeDef& node) const;
bool IsSourceOrSinkOp(const string& op) const;
void FindFloat32TensorListOpClustersAndBlacklistUnsafe(
void FindFloat32TensorListOpClustersAndDenylistUnsafe(
std::vector<absl::flat_hash_set<const NodeDef*>>* clusters,
absl::flat_hash_set<int>* black_set) const;
absl::flat_hash_set<int>* deny_set) const;
void FindTensorListImplicitFloat32Edges(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
std::vector<NodeTypeIdEdge>* implicit_data_edges) const;
void AddAllowlistOps(absl::flat_hash_set<int>* allow_set) const;
void PropagateBlackFwdThroughClearAndGray(
absl::flat_hash_set<int>* black_set) const;
void PropagateDenyFwdThroughClearAndInfer(
absl::flat_hash_set<int>* deny_set) const;
void ForceColorMatchBetweenTensorListOps(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
absl::flat_hash_set<int>* allow_set,
absl::flat_hash_set<int>* black_set) const;
void AddClearAndGrayToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& black_set,
absl::flat_hash_set<int>* deny_set) const;
void AddClearAndInferToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
void PropagateAllowThroughClear(const absl::flat_hash_set<int>& black_set,
void PropagateAllowThroughClear(const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
Status ForceColorMatchOnRecurrentEdges(
absl::flat_hash_set<int>* allow_set) const;
@ -1006,8 +1006,8 @@ class AutoMixedPrecisionImpl {
bool force_all_fp16_;
AutoMixedPrecisionMode mode_;
gtl::FlatSet<string> f16_allowlist_;
gtl::FlatSet<string> f16_blacklist_;
gtl::FlatSet<string> f16_graylist_;
gtl::FlatSet<string> f16_denylist_;
gtl::FlatSet<string> f16_inferlist_;
gtl::FlatSet<string> f16_clearlist_;
absl::flat_hash_set<const NodeDef*> should_process_nodes_;
DataType target_dtype_; // Either DT_HALF or DT_BFLOAT16
@ -1083,12 +1083,12 @@ Status AutoMixedPrecisionImpl::PrintDebugLogs(bool preop, size_t timestamp) {
for (const auto& x : mp_lists->AllowList()) {
f << x << "\n";
}
f << "\nBlackList:\n";
for (const auto& x : mp_lists->BlackList()) {
f << "\nDenyList:\n";
for (const auto& x : mp_lists->DenyList()) {
f << x << "\n";
}
f << "\nGrayList:\n";
for (const auto& x : mp_lists->GrayList()) {
f << "\nInferList:\n";
for (const auto& x : mp_lists->InferList()) {
f << x << "\n";
}
f << "\nClearList:\n";
@ -1255,11 +1255,11 @@ Status AutoMixedPrecisionImpl::Optimize() {
std::unique_ptr<AutoMixedPrecisionLists> mp_lists =
get_mixed_precision_lists();
f16_allowlist_ = mp_lists->AllowList();
f16_blacklist_ = mp_lists->BlackList();
f16_graylist_ = mp_lists->GrayList();
f16_denylist_ = mp_lists->DenyList();
f16_inferlist_ = mp_lists->InferList();
f16_clearlist_ = mp_lists->ClearList();
TF_RETURN_IF_ERROR(ValidateLists(f16_allowlist_, f16_blacklist_,
f16_graylist_, f16_clearlist_));
TF_RETURN_IF_ERROR(ValidateLists(f16_allowlist_, f16_denylist_,
f16_inferlist_, f16_clearlist_));
size_t timestamp = Env::Default()->NowMicros() / 1000;
TF_RETURN_IF_ERROR(PrintDebugLogs(/* preop = */ true, timestamp));
@ -1294,11 +1294,11 @@ Status AutoMixedPrecisionImpl::Optimize() {
TF_RETURN_IF_ERROR(
graph_type_view_.InitializeFromGraph(*graph_, node_type_map_));
absl::flat_hash_set<int> black_set;
absl::flat_hash_set<int> deny_set;
std::vector<absl::flat_hash_set<const NodeDef*>> tensor_list_clusters;
FindFloat32TensorListOpClustersAndBlacklistUnsafe(&tensor_list_clusters,
&black_set);
FindFloat32TensorListOpClustersAndDenylistUnsafe(&tensor_list_clusters,
&deny_set);
std::vector<NodeTypeIdEdge> ephemeral_edges;
for (const auto& cluster : tensor_list_clusters) {
VLOG(1) << "Found safe Tensor List cluster of size " << cluster.size();
@ -1320,14 +1320,14 @@ Status AutoMixedPrecisionImpl::Optimize() {
// This is done under the assumption that allowlist ops are always
// numerically-safe in f16 and that they are the most important ops for
// improving performance.
// 2) Add nodes to the black_set iff they are numerically-dangerous (aka
// "blacklist" ops) or they are on a forward path from a blacklist node to
// a black/gray node (including the node at the end of the path) through
// non-numerically-dangerous ops (aka "greylist" and "clearlist" ops).
// 2) Add nodes to the deny_set iff they are numerically-dangerous (aka
// "denylist" ops) or they are on a forward path from a denylist node to
// a deny/infer node (including the node at the end of the path) through
// non-numerically-dangerous ops (aka "inferlist" and "clearlist" ops).
// This is done to prevent numerically-dangerous ops and their downstream
// effects from being changed to f16, which would risk breaking the
// numerical accuracy of the model.
// 3) For all remaining nodes that are not considered dangerous (greylist
// 3) For all remaining nodes that are not considered dangerous (inferlist
// and clearlist ops), find those that are between (i.e., both upstream
// and downstream of) allow nodes, and add them to the allow_set.
// This is done to avoid unnecessary casts between allowlist ops.
@ -1346,29 +1346,29 @@ Status AutoMixedPrecisionImpl::Optimize() {
return Status::OK();
}
VLOG(2) << "Beginning pass 2 to propagate black forwards from blacklist ops "
"through clear/graylist ops";
PropagateBlackFwdThroughClearAndGray(&black_set);
VLOG(2) << "Beginning pass 2 to propagate deny forwards from denylist ops "
"through clear/inferlist ops";
PropagateDenyFwdThroughClearAndInfer(&deny_set);
VLOG(2) << "Finished pass 2";
VLOG(2) << "Forcing color match between data structure ops";
for (const auto& cluster : tensor_list_clusters) {
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &black_set);
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &deny_set);
}
VLOG(2) << "Beginning pass 3 to set clear and gray nodes to allow if they "
VLOG(2) << "Beginning pass 3 to set clear and infer nodes to allow if they "
"are between allow ops";
AddClearAndGrayToAllowIfBetweenAllow(black_set, &allow_set);
AddClearAndInferToAllowIfBetweenAllow(deny_set, &allow_set);
VLOG(2) << "Finished pass 3";
VLOG(2) << "Beginning pass 4 to propagate allow from allow nodes through "
"clearlist ops";
PropagateAllowThroughClear(black_set, &allow_set);
PropagateAllowThroughClear(deny_set, &allow_set);
VLOG(2) << "Finished pass 4";
VLOG(2) << "Forcing color match between data structure ops";
for (const auto& cluster : tensor_list_clusters) {
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &black_set);
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &deny_set);
}
VLOG(2) << "Forcing color match on loop edges";
@ -1426,11 +1426,11 @@ bool AutoMixedPrecisionImpl::IsSourceOrSinkOp(const string& op) const {
// Finds all clusters of float32 Tensor List nodes that are connected via their
// handle edges. Unsafe clusters (those with unprocessable nodes, or with edges
// that cross untraversable boundaries via _Arg, _Ret, PartitionedCall etc.
// nodes) are added to black_set. The caller should paint all nodes in a cluster
// nodes) are added to deny_set. The caller should paint all nodes in a cluster
// the same color, as they may all refer to the same Tensor List.
void AutoMixedPrecisionImpl::FindFloat32TensorListOpClustersAndBlacklistUnsafe(
void AutoMixedPrecisionImpl::FindFloat32TensorListOpClustersAndDenylistUnsafe(
std::vector<absl::flat_hash_set<const NodeDef*>>* tensor_list_clusters,
absl::flat_hash_set<int>* black_set) const {
absl::flat_hash_set<int>* deny_set) const {
absl::flat_hash_set<const NodeDef*> tensor_list_prop_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
@ -1463,7 +1463,7 @@ void AutoMixedPrecisionImpl::FindFloat32TensorListOpClustersAndBlacklistUnsafe(
cluster.insert(node);
if (!ShouldProcess(*node)) {
// The cluster contains an un-processable node.
black_set->insert(root_fp32_idx);
deny_set->insert(root_fp32_idx);
}
// TODO(benbarsdell): In a theoretical pathological
// case of a Tensor List of Tensor List handles, the
@ -1471,7 +1471,7 @@ void AutoMixedPrecisionImpl::FindFloat32TensorListOpClustersAndBlacklistUnsafe(
// sink.
} else if (IsSourceOrSinkOp(node->op())) {
// The cluster crosses an untraversable boundary.
black_set->insert(root_fp32_idx);
deny_set->insert(root_fp32_idx);
}
}));
tensor_list_clusters->push_back(cluster);
@ -1534,21 +1534,21 @@ void AutoMixedPrecisionImpl::AddAllowlistOps(
}
}
// Adds nodes to black_set iff they are on the blacklist or they are on a
// forward path from a blacklist node to a black/gray node (including the node
// at the end of the path) through clear and gray nodes.
// E.g., black -> gray -> clear -> gray -> clear -> allow -> gray
// becomes: black -> black -> black -> black -> clear -> allow -> gray.
void AutoMixedPrecisionImpl::PropagateBlackFwdThroughClearAndGray(
absl::flat_hash_set<int>* black_set) const {
// Adds nodes to deny_set iff they are on the denylist or they are on a
// forward path from a denylist node to a deny/infer node (including the node
// at the end of the path) through clear and infer nodes.
// E.g., deny -> infer -> clear -> infer -> clear -> allow -> infer
// becomes: deny -> deny -> deny -> deny -> clear -> allow -> infer.
void AutoMixedPrecisionImpl::PropagateDenyFwdThroughClearAndInfer(
absl::flat_hash_set<int>* deny_set) const {
if (force_all_fp16_) return;
// Find clear nodes that are upstream of black or gray.
absl::flat_hash_set<int> upstream_of_black_or_gray_set;
// Find clear nodes that are upstream of deny or infer.
absl::flat_hash_set<int> upstream_of_deny_or_infer_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!(f16_blacklist_.count(root.node->op()) ||
f16_graylist_.count(root.node->op()))) {
if (!(f16_denylist_.count(root.node->op()) ||
f16_inferlist_.count(root.node->op()))) {
continue;
}
DfsTypeTraversal(graph_type_view_, {&root},
@ -1556,42 +1556,42 @@ void AutoMixedPrecisionImpl::PropagateBlackFwdThroughClearAndGray(
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!upstream_of_black_or_gray_set.count(idx) &&
(!upstream_of_deny_or_infer_set.count(idx) &&
f16_clearlist_.count(item.node->op()));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
upstream_of_black_or_gray_set.insert(idx);
upstream_of_deny_or_infer_set.insert(idx);
}));
}
// Propagate black forward through nodes in upstream_of_black_or_gray_set.
// Propagate deny forward through nodes in upstream_of_deny_or_infer_set.
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (black_set->count(root_idx) || !f16_blacklist_.count(root.node->op())) {
if (deny_set->count(root_idx) || !f16_denylist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
return idx == root_idx || (!black_set->count(idx) &&
upstream_of_black_or_gray_set.count(idx));
return idx == root_idx || (!deny_set->count(idx) &&
upstream_of_deny_or_infer_set.count(idx));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
bool inserted = black_set->insert(idx).second;
bool inserted = deny_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " BLACK";
<< item.node->name() << " DENY";
}
}));
}
}
void AutoMixedPrecisionImpl::AddClearAndGrayToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& black_set,
void AutoMixedPrecisionImpl::AddClearAndInferToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
// Find clear/graylist ops that are downstream of allow ops.
// Find clear/inferlist ops that are downstream of allow ops.
absl::flat_hash_set<int> downstream_of_allow_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
@ -1605,13 +1605,13 @@ void AutoMixedPrecisionImpl::AddClearAndGrayToAllowIfBetweenAllow(
return idx == root_idx ||
(!downstream_of_allow_set.count(idx) &&
!f16_allowlist_.count(item.node->op()) &&
!black_set.count(idx) && ShouldProcess(*item.node) &&
!deny_set.count(idx) && ShouldProcess(*item.node) &&
// TODO(benbarsdell): Consider allowing propagation through
// ops that are already float16 in order to reduce the number
// of casts.
IsFloat32(item) && SupportsF16(item) &&
(f16_clearlist_.count(item.node->op()) ||
f16_graylist_.count(item.node->op())));
f16_inferlist_.count(item.node->op())));
}),
DfsTypeCallbacks::PreOrder(
[&](int idx) { downstream_of_allow_set.insert(idx); }));
@ -1645,7 +1645,7 @@ void AutoMixedPrecisionImpl::AddClearAndGrayToAllowIfBetweenAllow(
}
void AutoMixedPrecisionImpl::PropagateAllowThroughClear(
const absl::flat_hash_set<int>& black_set,
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
// Propagate allow from allow nodes through clearlist ops.
absl::flat_hash_set<int> clear_prop_set;
@ -1661,7 +1661,7 @@ void AutoMixedPrecisionImpl::PropagateAllowThroughClear(
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!allow_set->count(idx) && !black_set.count(idx) &&
(!allow_set->count(idx) && !deny_set.count(idx) &&
ShouldProcess(*item.node) && IsFloat32(item) &&
SupportsF16(item) &&
(f16_clearlist_.count(item.node->op())) &&
@ -1727,14 +1727,14 @@ Status AutoMixedPrecisionImpl::ForceColorMatchOnRecurrentEdges(
if (allow_set->erase(merge_idx)) {
VLOG(2) << "Painting type T of Merge node "
<< graph_type_view_.GetNode(merge_idx)->node->name()
<< " BLACK to match the color of its sibling Merge nodes "
<< " DENY to match the color of its sibling Merge nodes "
"with common NextIteration node "
<< node.name();
}
}
if (allow_set->erase(nextiter_idx)) {
VLOG(2) << "Painting type T of NextIteration node " << node.name()
<< " BLACK to match the color of its output Merge node(s)";
<< " DENY to match the color of its output Merge node(s)";
}
} else {
if (allow_set->insert(nextiter_idx).second) {
@ -1751,8 +1751,8 @@ Status AutoMixedPrecisionImpl::ForceColorMatchOnRecurrentEdges(
void AutoMixedPrecisionImpl::ForceColorMatchBetweenTensorListOps(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
absl::flat_hash_set<int>* allow_set,
absl::flat_hash_set<int>* black_set) const {
bool any_black = false;
absl::flat_hash_set<int>* deny_set) const {
bool any_deny = false;
bool any_allow = false;
std::vector<int> node_type_idxs;
node_type_idxs.reserve(tensor_list_nodes.size());
@ -1766,24 +1766,24 @@ void AutoMixedPrecisionImpl::ForceColorMatchBetweenTensorListOps(
node_type_idxs.push_back(maybe_node_type_idx.value());
}
for (int node_type_idx : node_type_idxs) {
if (black_set->count(node_type_idx)) {
any_black = true;
if (deny_set->count(node_type_idx)) {
any_deny = true;
break;
} else if (allow_set->count(node_type_idx)) {
any_allow = true;
}
}
if (!any_black && !any_allow) return;
if (!any_deny && !any_allow) return;
for (int node_type_idx : node_type_idxs) {
const NodeTypeId& node_type = *graph_type_view_.GetNode(node_type_idx);
VLOG(2) << "Painting type " << node_type.type_attr.DebugString() << " of "
<< node_type.node->op() << " node " << node_type.node->name() << " "
<< (any_black ? "BLACK" : "ALLOW")
<< (any_deny ? "DENY" : "ALLOW")
<< " because at least one of its siblings is "
<< (any_black ? "BLACK" : "ALLOW");
if (any_black) {
<< (any_deny ? "DENY" : "ALLOW");
if (any_deny) {
allow_set->erase(node_type_idx);
black_set->insert(node_type_idx);
deny_set->insert(node_type_idx);
} else {
allow_set->insert(node_type_idx);
}

View File

@ -23,7 +23,7 @@ limitations under the License.
namespace tensorflow {
namespace grappler {
// Represents the four lists of ops: the allow list, gray list, black list, and
// Represents the four lists of ops: the allow list, infer list, deny list, and
// clear list. These lists determine which ops are converted to fp16/bf16
// (referred to as 'f16' for short) and which ops stay as fp32.
class AutoMixedPrecisionLists {
@ -36,13 +36,13 @@ class AutoMixedPrecisionLists {
virtual gtl::FlatSet<string> AllowList() = 0;
// Returns the set of ops that can run in f16 and are considered numerically-
// safe (for execution in f16), but which may be made unsafe by an upstream
// blacklist op.
virtual gtl::FlatSet<string> GrayList() = 0;
// denylist op.
virtual gtl::FlatSet<string> InferList() = 0;
// Returns the set of ops that are considered numerically-dangerous (i.e.,
// unsafe for execution in f16) and whose effects may also be observed in
// downstream nodes (e.g. for f16, in Exp -> Add, the Add is unsafe due to
// the Exp).
virtual gtl::FlatSet<string> BlackList() = 0;
virtual gtl::FlatSet<string> DenyList() = 0;
// Returns the set of ops that do not have numerically-significant effects
// (i.e., they are always considered safe for execution in f16 precision), and
// can run in f16.
@ -51,10 +51,11 @@ class AutoMixedPrecisionLists {
protected:
// Adds or removes ops from list if certain environmental variables are set.
static void UpdateList(const string& list_name, gtl::FlatSet<string>* list) {
CHECK(list_name == "ALLOWLIST" || list_name == "GRAYLIST" || // Crash OK.
list_name == "BLACKLIST" || list_name == "CLEARLIST" ||
CHECK(list_name == "ALLOWLIST" || list_name == "INFERLIST" || // Crash OK.
list_name == "DENYLIST" || list_name == "CLEARLIST" ||
// TODO(reedwm): for bkwds compat; remove when no longer necessary:
list_name == "WHITELIST");
list_name == "WHITELIST" || list_name == "GRAYLIST" ||
list_name == "BLACKLIST");
string add_env_var =
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_" + list_name + "_ADD";
string remove_env_var =
@ -154,7 +155,7 @@ class AutoMixedPrecisionListsCuda : public AutoMixedPrecisionLists {
return list;
}
gtl::FlatSet<string> GrayList() override {
gtl::FlatSet<string> InferList() override {
if (IsPseudoFastMath()) {
return gtl::FlatSet<string>{};
}
@ -204,11 +205,14 @@ class AutoMixedPrecisionListsCuda : public AutoMixedPrecisionLists {
"Tanh",
"TanhGrad",
};
UpdateList("INFERLIST", &list);
// For backwards compatibility, keeping the original env variable here.
// TODO(reedwm): This should be removed if we don't have active users.
UpdateList("GRAYLIST", &list);
return list;
}
gtl::FlatSet<string> BlackList() override {
gtl::FlatSet<string> DenyList() override {
if (IsPseudoFastMath()) {
return gtl::FlatSet<string>{};
}
@ -224,6 +228,9 @@ class AutoMixedPrecisionListsCuda : public AutoMixedPrecisionLists {
"SparseSoftmaxCrossEntropyWithLogits",
"Sum",
};
UpdateList("DENYLIST", &list);
// For backwards compatibility, keeping the original env variable here.
// TODO(reedwm): This should be removed if we don't have active users.
UpdateList("BLACKLIST", &list);
return list;
}
@ -344,7 +351,7 @@ class AutoMixedPrecisionListsMkl : public AutoMixedPrecisionLists {
AutoMixedPrecisionListsMkl() {}
// Only ops which are supported by MKL in bfloat16 should be added to the
// allow list, gray list, or clear list.
// allow list, infer list, or clear list.
gtl::FlatSet<string> AllowList() override {
auto list = gtl::FlatSet<string>{"Conv2D",
"Conv2DBackpropFilter",
@ -360,10 +367,13 @@ class AutoMixedPrecisionListsMkl : public AutoMixedPrecisionLists {
"BatchMatMulV2"};
UpdateList("ALLOWLIST", &list);
// For backwards compatibility, keeping the original env variable here.
// TODO(reedwm): This should be removed if we don't have active users.
UpdateList("WHITELIST", &list);
return list;
}
gtl::FlatSet<string> GrayList() override {
gtl::FlatSet<string> InferList() override {
auto list = gtl::FlatSet<string>{
"Add",
"AddN",
@ -384,11 +394,14 @@ class AutoMixedPrecisionListsMkl : public AutoMixedPrecisionLists {
"Mul",
"Sub",
};
UpdateList("INFERLIST", &list);
// For backwards compatibility, keeping the original env variable here.
// TODO(reedwm): This should be removed if we don't have active users.
UpdateList("GRAYLIST", &list);
return list;
}
gtl::FlatSet<string> BlackList() override {
gtl::FlatSet<string> DenyList() override {
auto list = gtl::FlatSet<string>{
"Exp",
"Expm1",
@ -401,6 +414,9 @@ class AutoMixedPrecisionListsMkl : public AutoMixedPrecisionLists {
"SparseSoftmaxCrossEntropyWithLogits",
"Sum",
};
UpdateList("DENYLIST", &list);
// For backwards compatibility, keeping the original env variable here.
// TODO(reedwm): This should be removed if we don't have active users.
UpdateList("BLACKLIST", &list);
return list;
}

View File

@ -160,7 +160,7 @@ class AutoMixedPrecisionTest : public GrapplerTest {
return AddNode(name, op, inputs, attributes, graph);
}
void TestSimpleUnaryGrayOp(
void TestSimpleUnaryInferOp(
double input_min, double input_max, double atol, double rtol,
const std::function<Output(const tensorflow::Scope&, Output)>&
test_op_factory) {
@ -170,8 +170,8 @@ class AutoMixedPrecisionTest : public GrapplerTest {
GenerateIdentityMatrix<DT_FLOAT>(size, size));
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, eye);
Output gry1 = test_op_factory(s.WithOpName("gry1"), allow1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, eye);
Output infer1 = test_op_factory(s.WithOpName("infer1"), allow1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, eye);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
@ -191,7 +191,7 @@ class AutoMixedPrecisionTest : public GrapplerTest {
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(),
DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
@ -209,10 +209,10 @@ class AutoMixedPrecisionTest : public GrapplerTest {
TEST_F(AutoMixedPrecisionTest, NoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.234f, {32});
Output blk1 = ops::Exp(s.WithOpName("blk1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), blk1);
Output gry1 = ops::Sqrt(s.WithOpName("gry1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), gry1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
@ -230,9 +230,9 @@ TEST_F(AutoMixedPrecisionTest, NoOp) {
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
@ -284,16 +284,16 @@ TEST_F(AutoMixedPrecisionTest, AlreadyFp16) {
TEST_F(AutoMixedPrecisionTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output blk1 = ops::Exp(s.WithOpName("blk1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), blk1);
Output gry1 = ops::Sqrt(s.WithOpName("gry1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), gry1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output gry2 = ops::Log(s.WithOpName("gry2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), gry2);
Output blk2 = ops::SparseMatMul(s.WithOpName("blk2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), blk2);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
@ -310,16 +310,16 @@ TEST_F(AutoMixedPrecisionTest, Simple) {
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
@ -374,13 +374,13 @@ TEST_F(AutoMixedPrecisionTest, PreserveFetches) {
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output gry1 = ops::Sqrt(s.WithOpName("gry1"), clr1);
Output blk1 = ops::Exp(s.WithOpName("blk1"), gry1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), blk1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), deny1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow2);
Output blk2 = ops::Exp(s.WithOpName("blk2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), blk2);
Output deny2 = ops::Exp(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
GrapplerItem item;
item.fetch = {"allow1", "clr2", "clr3"};
@ -398,12 +398,12 @@ TEST_F(AutoMixedPrecisionTest, PreserveFetches) {
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
@ -419,11 +419,11 @@ TEST_F(AutoMixedPrecisionTest, PreserveCPUNodes) {
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
Output gry1 = ops::Tanh(s.WithOpName("gry1"), allow1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
Output allow2 =
ops::MatMul(s.WithOpName("allow2").WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"),
gry1, gry1);
infer1, infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
@ -443,7 +443,7 @@ TEST_F(AutoMixedPrecisionTest, PreserveCPUNodes) {
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
@ -521,9 +521,9 @@ TEST_F(AutoMixedPrecisionTest, FusedBatchNorm) {
s.WithOpName("bng1"), fbn1, allow1, scale, fbn1_rs1,
fbn1_rs2, ops::FusedBatchNormGrad::DataFormat("NHWC"))
.x_backprop;
Output gry1 = ops::Add(s.WithOpName("gry1"), fbn1, bng1);
Output infer1 = ops::Add(s.WithOpName("infer1"), fbn1, bng1);
Output allow2 =
ops::Conv2D(s.WithOpName("allow2"), gry1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D(s.WithOpName("allow2"), infer1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
@ -547,7 +547,7 @@ TEST_F(AutoMixedPrecisionTest, FusedBatchNorm) {
EXPECT_EQ(output_view.GetNode("bng1")->op(), "FusedBatchNormGradV2");
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
@ -563,10 +563,10 @@ TEST_F(AutoMixedPrecisionTest, RepeatedAndListTypeAttrs) {
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto clr1_op = ops::IdentityN(s.WithOpName("clr1"), {allow1, allow1, allow1});
Output gry1 =
ops::AddN(s.WithOpName("gry1"),
Output infer1 =
ops::AddN(s.WithOpName("infer1"),
{clr1_op.output[0], clr1_op.output[1], clr1_op.output[2]});
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, gry1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
@ -587,7 +587,7 @@ TEST_F(AutoMixedPrecisionTest, RepeatedAndListTypeAttrs) {
for (auto type : output_view.GetNode("clr1")->attr().at("T").list().type()) {
EXPECT_EQ(type, DT_HALF);
}
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
@ -633,17 +633,17 @@ TEST_F(AutoMixedPrecisionTest, ExistingCast) {
TEST_F(AutoMixedPrecisionTest, RecurrentEdgeColorMismatch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output blk1 = ops::Exp(s.WithOpName("blk1"), input);
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output ent1 =
ops::internal::Enter(s.WithOpName("ent1"), blk1, "loop1").output;
ops::internal::Enter(s.WithOpName("ent1"), deny1, "loop1").output;
// Note that the second input is later replaced with "nxt1".
Output mrg1 = ops::Merge(s.WithOpName("mrg1"), {ent1, ent1}).output;
// For simplicity, the loop condition is constant false.
Output con1 = ops::Const(s.WithOpName("con1"), false, {});
Output lpc1 = ops::LoopCond(s.WithOpName("lpc1"), con1).output;
auto swt1 = ops::Switch(s.WithOpName("swt1"), mrg1, lpc1);
Output gry1 = ops::Sqrt(s.WithOpName("gry1"), swt1.output_true);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), gry1, gry1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), swt1.output_true);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), infer1, infer1);
Output nxt1 = ops::NextIteration(s.WithOpName("nxt1"), allow1);
Output ext1 = ops::internal::Exit(s.WithOpName("ext1"), swt1.output_false);
Output fetch = ops::Identity(s.WithOpName("fetch"), ext1);
@ -671,14 +671,14 @@ TEST_F(AutoMixedPrecisionTest, RecurrentEdgeColorMismatch) {
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
// Note that mrg1 gets painted black because it is between blk1 and gry1. This
// forces nxt1 and mrg2 to be painted black as well (they would otherwise be
// painted allow because they are clear and have a direct path to allow1).
EXPECT_EQ(output_view.GetNode("blk1")->attr().at("T").type(), DT_FLOAT);
// Note that mrg1 gets painted deny because it is between deny1 and infer1.
// This forces nxt1 and mrg2 to be painted deny as well (they would otherwise
// be painted allow because they are clear and have a direct path to allow1).
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("ent1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("mrg1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("swt1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("nxt1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("ext1")->attr().at("T").type(), DT_FLOAT);
@ -711,8 +711,8 @@ TEST_F(AutoMixedPrecisionTest, TensorListSetGet) {
Output tl1r1 = ops::TensorListGetItem(s.WithOpName("tl1r1"), tl1rs, idx2,
shape, DT_FLOAT)
.item;
Output gry1 = ops::Tanh(s.WithOpName("gry1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, gry1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListSetItem(s.WithOpName("tl1w3"), tl1.handle, idx3, allow2);
Output tl1r2 =
@ -748,7 +748,7 @@ TEST_F(AutoMixedPrecisionTest, TensorListSetGet) {
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
@ -776,8 +776,8 @@ TEST_F(AutoMixedPrecisionTest, TensorListPushPop) {
Output tl1r1 = ops::TensorListPopBack(s.WithOpName("tl1r1"),
tl1w2.output_handle, shape, DT_FLOAT)
.tensor;
Output gry1 = ops::Tanh(s.WithOpName("gry1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, gry1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListPushBack(s.WithOpName("tl1w3"), tl1.handle, allow2);
Output tl1r2 = ops::TensorListPopBack(s.WithOpName("tl1r2"),
@ -811,7 +811,7 @@ TEST_F(AutoMixedPrecisionTest, TensorListPushPop) {
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
@ -835,8 +835,8 @@ TEST_F(AutoMixedPrecisionTest, TensorListFromTensor) {
Output tl1r1 = ops::TensorListStack(s.WithOpName("tl1r1"), tl1.output_handle,
shape, DT_FLOAT)
.tensor;
Output gry1 = ops::Tanh(s.WithOpName("gry1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, gry1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
// This tests that a allow-painted object node (tl2) will force an unpainted
@ -863,7 +863,7 @@ TEST_F(AutoMixedPrecisionTest, TensorListFromTensor) {
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_HALF);
@ -902,8 +902,8 @@ TEST_F(AutoMixedPrecisionTest, TensorListPushBackBatchAndConcatLists) {
Output tl3r1 =
ops::TensorListPopBack(s.WithOpName("tl3r1"), tl3, shape, DT_FLOAT)
.tensor;
Output gry1 = ops::Tanh(s.WithOpName("gry1"), tl3r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, gry1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl3r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
@ -922,7 +922,7 @@ TEST_F(AutoMixedPrecisionTest, TensorListPushBackBatchAndConcatLists) {
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl3")->attr().at(type_key).type(), DT_HALF);
@ -967,22 +967,25 @@ TEST_F(AutoMixedPrecisionTest, TensorListThroughFunction) {
tensorflow::Input shape = {32, 32};
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output gry1 = ops::Tanh(s.WithOpName("gry1"), allow1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
auto tl1w1 = ops::TensorListPushBack(s.WithOpName("tl1w1"), tl1.handle, gry1);
auto _gry1 = tensorflow::ops::AsNodeOut(s, gry1);
auto tl1w1 =
ops::TensorListPushBack(s.WithOpName("tl1w1"), tl1.handle, infer1);
auto _infer1 = tensorflow::ops::AsNodeOut(s, infer1);
auto _tl1w1_handle = tensorflow::ops::AsNodeOut(s, tl1w1.output_handle);
auto builder =
tensorflow::NodeBuilder("Func1", "Func1", s.graph()->op_registry());
tensorflow::Node* func1_op;
TF_CHECK_OK(
builder.Input(_tl1w1_handle).Input(_gry1).Finalize(s.graph(), &func1_op));
TF_CHECK_OK(builder.Input(_tl1w1_handle)
.Input(_infer1)
.Finalize(s.graph(), &func1_op));
Output func1_handle(func1_op, 0);
Output tl1r1 = ops::TensorListPopBack(s.WithOpName("tl1r1"), func1_handle,
shape, DT_FLOAT)
.tensor;
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), {32, 32}, 8, DT_FLOAT);
auto tl2w1 = ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.handle, gry1);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.handle, infer1);
Output tl2r1 = ops::TensorListPopBack(s.WithOpName("tl2r1"),
tl2w1.output_handle, shape, DT_FLOAT)
.tensor;
@ -1004,7 +1007,7 @@ TEST_F(AutoMixedPrecisionTest, TensorListThroughFunction) {
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_HALF);
@ -1069,7 +1072,7 @@ TEST_F(AutoMixedPrecisionTest, BatchMatMul) {
}
TEST_F(AutoMixedPrecisionTest, EluOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Elu(scope, input);
@ -1077,7 +1080,7 @@ TEST_F(AutoMixedPrecisionTest, EluOp) {
}
TEST_F(AutoMixedPrecisionTest, ErfOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Erf(scope, input);
@ -1085,7 +1088,7 @@ TEST_F(AutoMixedPrecisionTest, ErfOp) {
}
TEST_F(AutoMixedPrecisionTest, ErfcOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Erfc(scope, input);
@ -1093,7 +1096,7 @@ TEST_F(AutoMixedPrecisionTest, ErfcOp) {
}
TEST_F(AutoMixedPrecisionTest, InvOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
0.01, 10, -1, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Inv(scope, input);
@ -1101,7 +1104,7 @@ TEST_F(AutoMixedPrecisionTest, InvOp) {
}
TEST_F(AutoMixedPrecisionTest, LogOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
0.01, 10, 1.0e-3, 2.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Log(scope, input);
@ -1109,7 +1112,7 @@ TEST_F(AutoMixedPrecisionTest, LogOp) {
}
TEST_F(AutoMixedPrecisionTest, Log1pOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-0.99, 9, 1.0e-3, 5.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Log1p(scope, input);
@ -1117,7 +1120,7 @@ TEST_F(AutoMixedPrecisionTest, Log1pOp) {
}
TEST_F(AutoMixedPrecisionTest, LogSoftmaxOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-8, 8, -1, 1.0e-2,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::LogSoftmax(scope, input);
@ -1125,7 +1128,7 @@ TEST_F(AutoMixedPrecisionTest, LogSoftmaxOp) {
}
TEST_F(AutoMixedPrecisionTest, ReciprocalOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
0.01, 10, -1, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Reciprocal(scope, input);
@ -1133,7 +1136,7 @@ TEST_F(AutoMixedPrecisionTest, ReciprocalOp) {
}
TEST_F(AutoMixedPrecisionTest, SigmoidOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Sigmoid(scope, input);
@ -1141,7 +1144,7 @@ TEST_F(AutoMixedPrecisionTest, SigmoidOp) {
}
TEST_F(AutoMixedPrecisionTest, SoftmaxOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-8, 8, 2.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Softmax(scope, input);
@ -1149,7 +1152,7 @@ TEST_F(AutoMixedPrecisionTest, SoftmaxOp) {
}
TEST_F(AutoMixedPrecisionTest, SoftplusOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Softplus(scope, input);
@ -1157,7 +1160,7 @@ TEST_F(AutoMixedPrecisionTest, SoftplusOp) {
}
TEST_F(AutoMixedPrecisionTest, SqrtOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
0, 10, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Sqrt(scope, input);
@ -1165,7 +1168,7 @@ TEST_F(AutoMixedPrecisionTest, SqrtOp) {
}
TEST_F(AutoMixedPrecisionTest, TanhOp) {
TestSimpleUnaryGrayOp(
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Tanh(scope, input);
@ -1229,16 +1232,16 @@ TEST_F(AutoMixedPrecisionMklTest, AlreadyBf16) {
TEST_F(AutoMixedPrecisionMklTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output blk1 = ops::Exp(s.WithOpName("blk1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), blk1);
Output gry1 = ops::Sqrt(s.WithOpName("gry1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), gry1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output blk2 = ops::Log(s.WithOpName("blk2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), blk2);
Output blk3 = ops::SparseMatMul(s.WithOpName("blk3"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), blk3);
Output deny2 = ops::Log(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
Output deny3 = ops::SparseMatMul(s.WithOpName("deny3"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny3);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
@ -1255,16 +1258,16 @@ TEST_F(AutoMixedPrecisionMklTest, Simple) {
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("blk2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk3")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("blk3")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny3")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny3")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
@ -1294,8 +1297,8 @@ TEST_F(AutoMixedPrecisionMklTest, TensorListSetGet) {
Output tl1r1 = ops::TensorListGetItem(s.WithOpName("tl1r1"), tl1rs, idx2,
shape, DT_FLOAT)
.item;
Output gry1 = ops::Mul(s.WithOpName("gry1"), tl1r1, tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), gry1, gry1);
Output infer1 = ops::Mul(s.WithOpName("infer1"), tl1r1, tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListSetItem(s.WithOpName("tl1w3"), tl1.handle, idx3, allow2);
Output tl1r2 =
@ -1335,7 +1338,7 @@ TEST_F(AutoMixedPrecisionMklTest, TensorListSetGet) {
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("gry1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(),
DT_BFLOAT16);

View File

@ -36,8 +36,8 @@ namespace internal {
// dynamically determined.
constexpr int64 kTensorMaxSize = 64;
// All the nodes that should be blacklisted and not swapped.
bool IsBlacklisted(const NodeDef& node) {
// All the nodes that should be denylisted and not swapped.
bool IsDenylisted(const NodeDef& node) {
return
// Collective ops should not be swapped.
IsCollective(node) ||
@ -94,8 +94,8 @@ Status IsNodeOutputPortHostFriendly(const GraphView& graph,
bool* is_candidate) {
*is_candidate = false;
// Make sure we are not a blacklisted op.
if (IsBlacklisted(node)) {
// Make sure we are not a denylisted op.
if (IsDenylisted(node)) {
return Status::OK();
}
@ -215,7 +215,7 @@ bool IsNodeInputPortHostFriendly(const NodeDef& node, int port_id) {
// Checks if a node is a candidate to pin to Host.
// The rough algorithm is as follows:
// 1] Check if node is blacklisted.
// 1] Check if node is denylisted.
// 2] Check if node can run on Host.
// 3] Check all input/outputs are Host "friendly" (atm, friendly means small,
// ints, and pinned to Host).
@ -230,7 +230,7 @@ Status IsNodeHostCandidate(const GraphView& graph, GraphProperties* properties,
}
// Skip these node types.
if (IsBlacklisted(node)) {
if (IsDenylisted(node)) {
return Status::OK();
}

View File

@ -64,7 +64,7 @@ GcsDnsCache::GcsDnsCache(Env* env, int64 refresh_rate_secs)
: env_(env), refresh_rate_secs_(refresh_rate_secs) {}
void GcsDnsCache::AnnotateRequest(HttpRequest* request) {
// TODO(saeta): Blacklist failing IP addresses.
// TODO(saeta): Denylist failing IP addresses.
mutex_lock l(mu_);
if (!started_) {
VLOG(1) << "Starting GCS DNS cache.";

View File

@ -33,7 +33,7 @@ namespace {
const char* const kProfilePrefix = "Profile:\n";
bool CreateRunMetadataNode(const string& name, NodeDef* def) {
// TODO(xpan): Better solution than blacklisting this 2 nodes. They
// TODO(xpan): Better solution than denylisting this 2 nodes. They
// actually cost some resources, maybe include them. Some nodes, such
// as _SOURCE appear in multiple devices, which breaks tfprof's assumption.
if (name == "RecvTensor" || name == "_SOURCE" ||

View File

@ -110,13 +110,13 @@ func generateFunctionsForOps(w io.Writer, ops *odpb.OpList, apimap *apiDefMap) e
if err := tmplHeader.Execute(w, thisPackage); err != nil {
return err
}
blacklist := map[string]bool{
denylist := map[string]bool{
"Const": true,
"PyFunc": true,
"PyFuncStateless": true,
}
for _, op := range ops.Op {
if blacklist[op.Name] {
if denylist[op.Name] {
continue
}
apidef, err := apimap.Get(op.Name)

View File

@ -18,7 +18,7 @@ namespace tflite {
const constexpr char* NnapiAccelerationTestParams::kAccelerationTestConfig =
R"(
## Every Test can be allowlisted or blacklisted using a regexp on its test_id
## Every Test can be allowlisted or denylisted using a regexp on its test_id
## Test_id
#
@ -28,8 +28,8 @@ const constexpr char* NnapiAccelerationTestParams::kAccelerationTestConfig =
# the ordinal is the position in the list of parameters generated by the
# cardinal product of all the different parameter sets
# Blacklist/Allowlist
# To blacklist an element simply add - before the test_id regex
# Denylist/Allowlist
# To denylist an element simply add - before the test_id regex
## Rules evaluation
#

View File

@ -159,10 +159,10 @@ if (delegate == nullptr) {
interpreter->ModifyGraphWithDelegate(delegate);
```
The delegate creation logic reads device's machine id (e.g. iPhone11,1)
to determine its Neural Engine availability. See the
The delegate creation logic reads device's machine id (e.g. iPhone11,1) to
determine its Neural Engine availability. See the
[code](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/experimental/delegates/coreml/coreml_delegate.mm)
for more detail. Alternatively, you can implement your own set of blacklist
for more detail. Alternatively, you can implement your own set of denylist
devices using other libraries such as
[DeviceKit](https://github.com/devicekit/DeviceKit).

View File

@ -46,7 +46,7 @@ void ReadAccelerationConfig(
auto first_sep_pos =
std::find(curr_config_line.begin(), curr_config_line.end(), ',');
bool is_blacklist = false;
bool is_denylist = false;
std::string key = curr_config_line;
std::string value{};
if (first_sep_pos != curr_config_line.end()) {
@ -54,13 +54,13 @@ void ReadAccelerationConfig(
value = std::string(first_sep_pos + 1, curr_config_line.end());
}
// Regexps starting with '-'' are blacklist ones.
// Regexps starting with '-'' are denylist ones.
if (key[0] == '-') {
key = key.substr(1);
is_blacklist = true;
is_denylist = true;
}
consumer(key, value, is_blacklist);
consumer(key, value, is_denylist);
}
}
}

View File

@ -39,15 +39,15 @@ template <typename T>
class ConfigurationEntry {
public:
ConfigurationEntry(const std::string& test_id_rex, T test_config,
bool is_blacklist)
bool is_denylist)
: test_id_rex_(test_id_rex),
test_config_(test_config),
is_blacklist_(is_blacklist) {}
is_denylist_(is_denylist) {}
bool Matches(const std::string& test_id) {
return RE2::FullMatch(test_id, test_id_rex_);
}
bool IsBlacklistEntry() const { return is_blacklist_; }
bool IsDenylistEntry() const { return is_denylist_; }
const T& TestConfig() const { return test_config_; }
const std::string& TestIdRex() const { return test_id_rex_; }
@ -55,7 +55,7 @@ class ConfigurationEntry {
private:
std::string test_id_rex_;
T test_config_;
bool is_blacklist_;
bool is_denylist_;
};
// Returns the acceleration test configuration for the given test id and
@ -71,9 +71,9 @@ absl::optional<T> GetAccelerationTestParam(std::string test_id) {
auto config = new std::vector<ConfigurationEntry<T>>();
auto consumer = [&config](std::string key, std::string value_str,
bool is_blacklist) mutable {
bool is_denylist) mutable {
T value = T::ParseConfigurationLine(value_str);
config->push_back(ConfigurationEntry<T>(key, value, is_blacklist));
config->push_back(ConfigurationEntry<T>(key, value, is_denylist));
};
ReadAccelerationConfig(T::kAccelerationTestConfig, consumer);
@ -91,7 +91,7 @@ absl::optional<T> GetAccelerationTestParam(std::string test_id) {
test_config->begin(), test_config->end(),
[&test_id](ConfigurationEntry<T> elem) { return elem.Matches(test_id); });
if (test_config_iter != test_config->end() &&
!test_config_iter->IsBlacklistEntry()) {
!test_config_iter->IsDenylistEntry()) {
return absl::optional<T>(test_config_iter->TestConfig());
} else {
return absl::optional<T>();

View File

@ -52,11 +52,11 @@ struct SimpleConfig {
class ReadAccelerationConfigTest : public ::testing::Test {
public:
std::unordered_map<std::string, SimpleConfig> allowlist_;
std::unordered_map<std::string, SimpleConfig> blacklist_;
std::unordered_map<std::string, SimpleConfig> denylist_;
std::function<void(std::string, std::string, bool)> consumer_ =
[this](std::string key, std::string value, bool is_blacklist) {
if (is_blacklist) {
blacklist_[key] = {value};
[this](std::string key, std::string value, bool is_denylist) {
if (is_denylist) {
denylist_[key] = {value};
} else {
allowlist_[key] = {value};
}
@ -67,13 +67,13 @@ TEST_F(ReadAccelerationConfigTest, ReadsAKeyOnlyLine) {
ReadAccelerationConfig("key", consumer_);
EXPECT_THAT(allowlist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(blacklist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsABlacklistKeyOnlyLine) {
TEST_F(ReadAccelerationConfigTest, ReadsADenylistKeyOnlyLine) {
ReadAccelerationConfig("-key", consumer_);
EXPECT_THAT(blacklist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_THAT(denylist_.find("key"), Not(Eq(allowlist_.end())));
EXPECT_TRUE(allowlist_.empty());
}
@ -81,13 +81,13 @@ TEST_F(ReadAccelerationConfigTest, ReadsAKeyValueLine) {
ReadAccelerationConfig("key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(blacklist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ReadsABlackListKeyValueLine) {
TEST_F(ReadAccelerationConfigTest, ReadsADenyListKeyValueLine) {
ReadAccelerationConfig("-key,value", consumer_);
EXPECT_THAT(blacklist_["key"].value, Eq("value"));
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
@ -95,13 +95,13 @@ TEST_F(ReadAccelerationConfigTest, KeysAreLeftTrimmed) {
ReadAccelerationConfig(" key,value", consumer_);
EXPECT_THAT(allowlist_["key"].value, Eq("value"));
EXPECT_TRUE(blacklist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, BlKeysAreLeftTrimmed) {
ReadAccelerationConfig(" -key,value", consumer_);
EXPECT_THAT(blacklist_["key"].value, Eq("value"));
EXPECT_THAT(denylist_["key"].value, Eq("value"));
EXPECT_TRUE(allowlist_.empty());
}
@ -109,14 +109,14 @@ TEST_F(ReadAccelerationConfigTest, IgnoresCommentedLines) {
ReadAccelerationConfig("#key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(blacklist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentCanHaveTrailingBlanks) {
ReadAccelerationConfig(" #key,value", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(blacklist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, CommentsAreOnlyForTheFullLine) {
@ -129,7 +129,7 @@ TEST_F(ReadAccelerationConfigTest, IgnoresEmptyLines) {
ReadAccelerationConfig("", consumer_);
EXPECT_TRUE(allowlist_.empty());
EXPECT_TRUE(blacklist_.empty());
EXPECT_TRUE(denylist_.empty());
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLines) {
@ -137,7 +137,7 @@ TEST_F(ReadAccelerationConfigTest, ParsesMultipleLines) {
EXPECT_THAT(allowlist_["key1"].value, Eq("value1"));
EXPECT_THAT(allowlist_["key2"].value, Eq("value2"));
EXPECT_THAT(blacklist_["key3"].value, Eq("value3"));
EXPECT_THAT(denylist_["key3"].value, Eq("value3"));
}
TEST_F(ReadAccelerationConfigTest, ParsesMultipleLinesWithCommentsAndSpaces) {
@ -177,7 +177,7 @@ TEST(GetAccelerationTestParam, SupportsWildcards) {
ASSERT_THAT(config_value_maybe.value().value, Eq("data-4"));
}
TEST(GetAccelerationTestParam, SupportBlacklist) {
TEST(GetAccelerationTestParam, SupportDenylist) {
const auto config_value_maybe =
GetAccelerationTestParam<SimpleConfig>("test-5");
ASSERT_FALSE(config_value_maybe.has_value());

View File

@ -796,7 +796,7 @@ TEST(OperatorKeyTest, TestFlexWithUnsupportedOp) {
EXPECT_EQ(key.version(), 1);
// While HashTableV2 is excluded from the allowlisted flex op list, eventually
// it won't be, and the following expectations will need to change as the op
// is explicitly blacklisted due to lack of asset support.
// is explicitly denylisted due to lack of asset support.
EXPECT_FALSE(key.is_flex_op());
EXPECT_FALSE(key.is_unsupported_flex_op());
}

View File

@ -150,32 +150,31 @@ EvaluationStageMetrics ImageClassificationStage::LatestMetrics() {
return metrics;
}
TfLiteStatus FilterBlackListedImages(const std::string& blacklist_file_path,
std::vector<ImageLabel>* image_labels) {
if (!blacklist_file_path.empty()) {
TfLiteStatus FilterDenyListedImages(const std::string& denylist_file_path,
std::vector<ImageLabel>* image_labels) {
if (!denylist_file_path.empty()) {
std::vector<std::string> lines;
if (!tflite::evaluation::ReadFileLines(blacklist_file_path, &lines)) {
LOG(ERROR) << "Could not read: " << blacklist_file_path;
if (!tflite::evaluation::ReadFileLines(denylist_file_path, &lines)) {
LOG(ERROR) << "Could not read: " << denylist_file_path;
return kTfLiteError;
}
std::vector<int> blacklist_ids;
blacklist_ids.reserve(lines.size());
// Populate blacklist_ids with indices of images.
std::transform(lines.begin(), lines.end(),
std::back_inserter(blacklist_ids),
std::vector<int> denylist_ids;
denylist_ids.reserve(lines.size());
// Populate denylist_ids with indices of images.
std::transform(lines.begin(), lines.end(), std::back_inserter(denylist_ids),
[](const std::string& val) { return std::stoi(val) - 1; });
std::vector<ImageLabel> filtered_images;
std::sort(blacklist_ids.begin(), blacklist_ids.end());
std::sort(denylist_ids.begin(), denylist_ids.end());
const size_t size_post_filtering =
image_labels->size() - blacklist_ids.size();
image_labels->size() - denylist_ids.size();
filtered_images.reserve(size_post_filtering);
int blacklist_index = 0;
int denylist_index = 0;
for (int image_index = 0; image_index < image_labels->size();
image_index++) {
if (blacklist_index < blacklist_ids.size() &&
blacklist_ids[blacklist_index] == image_index) {
blacklist_index++;
if (denylist_index < denylist_ids.size() &&
denylist_ids[denylist_index] == image_index) {
denylist_index++;
continue;
}
filtered_images.push_back((*image_labels)[image_index]);

View File

@ -80,10 +80,10 @@ struct ImageLabel {
std::string label;
};
// Reads a file containing newline-separated blacklisted image indices and
// Reads a file containing newline-separated denylisted image indices and
// filters them out from image_labels.
TfLiteStatus FilterBlackListedImages(const std::string& blacklist_file_path,
std::vector<ImageLabel>* image_labels);
TfLiteStatus FilterDenyListedImages(const std::string& denylist_file_path,
std::vector<ImageLabel>* image_labels);
} // namespace evaluation
} // namespace tflite

View File

@ -57,9 +57,9 @@ The binary takes the following parameters:
and the following optional parameters:
* `blacklist_file_path`: `string` \
Path to blacklist file. This file contains the indices of images that are
blacklisted for evaluation. 1762 images are blacklisted in ILSVRC dataset.
* `denylist_file_path`: `string` \
Path to denylist file. This file contains the indices of images that are
denylisted for evaluation. 1762 images are denylisted in ILSVRC dataset.
For details please refer to readme.txt of ILSVRC2014 devkit.
* `num_images`: `int` (default=0) \

View File

@ -35,7 +35,7 @@ constexpr char kGroundTruthImagesPathFlag[] = "ground_truth_images_path";
constexpr char kGroundTruthLabelsFlag[] = "ground_truth_labels";
constexpr char kOutputFilePathFlag[] = "output_file_path";
constexpr char kModelOutputLabelsFlag[] = "model_output_labels";
constexpr char kBlacklistFilePathFlag[] = "blacklist_file_path";
constexpr char kDenylistFilePathFlag[] = "denylist_file_path";
constexpr char kNumImagesFlag[] = "num_images";
constexpr char kInterpreterThreadsFlag[] = "num_interpreter_threads";
constexpr char kDelegateFlag[] = "delegate";
@ -64,7 +64,7 @@ class ImagenetClassification : public TaskExecutor {
std::string ground_truth_images_path_;
std::string ground_truth_labels_path_;
std::string model_output_labels_path_;
std::string blacklist_file_path_;
std::string denylist_file_path_;
std::string output_file_path_;
std::string delegate_;
int num_images_;
@ -90,10 +90,10 @@ std::vector<Flag> ImagenetClassification::GetFlags() {
"Path to ground truth labels, corresponding to alphabetical ordering "
"of ground truth images."),
tflite::Flag::CreateFlag(
kBlacklistFilePathFlag, &blacklist_file_path_,
"Path to blacklist file (optional) where each line is a single "
kDenylistFilePathFlag, &denylist_file_path_,
"Path to denylist file (optional) where each line is a single "
"integer that is "
"equal to index number of blacklisted image."),
"equal to index number of denylisted image."),
tflite::Flag::CreateFlag(kOutputFilePathFlag, &output_file_path_,
"File to output metrics proto to."),
tflite::Flag::CreateFlag(kNumImagesFlag, &num_images_,
@ -131,9 +131,8 @@ absl::optional<EvaluationStageMetrics> ImagenetClassification::RunImpl() {
image_labels.push_back({image_files[i], ground_truth_image_labels[i]});
}
// Filter out blacklisted/unwanted images.
if (FilterBlackListedImages(blacklist_file_path_, &image_labels) !=
kTfLiteOk) {
// Filter out denylisted/unwanted images.
if (FilterDenyListedImages(denylist_file_path_, &image_labels) != kTfLiteOk) {
return absl::nullopt;
}
if (num_images_ > 0) {

View File

@ -147,7 +147,7 @@ class QuantizationAwareTrainingMNISTTest(test_util.TensorFlowTestCase):
len(graph_def.node))
converter = trt_convert.TrtGraphConverter(
input_graph_def=graph_def,
nodes_blacklist=[OUTPUT_NODE_NAME],
nodes_denylist=[OUTPUT_NODE_NAME],
max_batch_size=max_batch_size,
precision_mode='INT8',
# There is a 2GB GPU memory limit for each test, so we set

View File

@ -432,7 +432,7 @@ class TrtGraphConverter(object):
input_saved_model_tags=None,
input_saved_model_signature_key=None,
input_graph_def=None,
nodes_blacklist=None,
nodes_denylist=None,
session_config=None,
max_batch_size=1,
max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
@ -452,7 +452,7 @@ class TrtGraphConverter(object):
input_graph_def: a GraphDef object containing a model to be transformed.
If set to None, the graph will be read from the SavedModel loaded from
input_saved_model_dir.
nodes_blacklist: list of node names to prevent the converter from
nodes_denylist: list of node names to prevent the converter from
touching.
session_config: the ConfigProto used to create a Session. It's also used
as a template to create a TRT-enabled ConfigProto for conversion. If not
@ -497,7 +497,7 @@ class TrtGraphConverter(object):
_check_trt_version_compatibility()
self._input_graph_def = input_graph_def
self._nodes_blacklist = nodes_blacklist
self._nodes_denylist = nodes_denylist
self._input_saved_model_dir = input_saved_model_dir
self._converted = False
@ -558,15 +558,15 @@ class TrtGraphConverter(object):
graph_id=b"tf_graph")
self._converted = True
def _add_nodes_blacklist(self):
if self._nodes_blacklist:
def _add_nodes_denylist(self):
if self._nodes_denylist:
collection_def = self._grappler_meta_graph_def.collection_def["train_op"]
blacklist = collection_def.node_list.value
for i in self._nodes_blacklist:
denylist = collection_def.node_list.value
for i in self._nodes_denylist:
if isinstance(i, ops.Tensor):
blacklist.append(_to_bytes(i.name))
denylist.append(_to_bytes(i.name))
else:
blacklist.append(_to_bytes(i))
denylist.append(_to_bytes(i))
def _convert_graph_def(self):
"""Convert the input GraphDef."""
@ -575,7 +575,7 @@ class TrtGraphConverter(object):
importer.import_graph_def(self._input_graph_def, name="")
self._grappler_meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
self._add_nodes_blacklist()
self._add_nodes_denylist()
self._run_conversion()
@ -629,7 +629,7 @@ class TrtGraphConverter(object):
self._grappler_meta_graph_def.collection_def[collection_key].CopyFrom(
input_meta_graph_def.collection_def[collection_key])
self._add_nodes_blacklist()
self._add_nodes_denylist()
# Copy other information.
self._grappler_meta_graph_def.meta_info_def.CopyFrom(
@ -1342,7 +1342,7 @@ def create_inference_graph(
input_saved_model_tags=input_saved_model_tags,
input_saved_model_signature_key=input_saved_model_signature_key,
input_graph_def=input_graph_def,
nodes_blacklist=outputs,
nodes_denylist=outputs,
session_config=session_config,
max_batch_size=max_batch_size,
max_workspace_size_bytes=max_workspace_size_bytes,

View File

@ -280,7 +280,7 @@ class TrtConvertTest(test_util.TensorFlowTestCase, parameterized.TestCase):
input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY,
input_graph_def=None
if input_saved_model_dir else self._GetGraphDefForV1(device),
nodes_blacklist=None if input_saved_model_dir else ["output"],
nodes_denylist=None if input_saved_model_dir else ["output"],
session_config=self._GetConfigProto(),
max_batch_size=max_batch_size,
max_workspace_size_bytes=TrtConvertTest._TRT_MAX_WORKSPACE_SIZE_BYTES,

View File

@ -44,7 +44,7 @@ _MAX_WARNING_LINES = 5
# Operations that indicate some error in the users graph. For example, XLA
# computation should not have any Placeholder op.
_BLACKLISTED_OPS = set([
_DENYLISTED_OPS = set([
'Placeholder',
])
@ -195,7 +195,7 @@ class XLACompileContext(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
"""Create op in XLACompileContext and notifies outer context recursively."""
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
if op.type in _DENYLISTED_OPS:
logging.error(
'Operation of type %s (%s) is not supported in XLA. Execution will '
'fail if this op is used in the graph. ', op.type, op.name)

View File

@ -18,7 +18,7 @@ See the [TFDBG](https://www.tensorflow.org/guide/debugger) guide.
@@add_debug_tensor_watch
@@watch_graph
@@watch_graph_with_blacklists
@@watch_graph_with_denylists
@@DebugTensorDatum
@@DebugDumpDir
@@load_tensor_from_event
@ -57,7 +57,7 @@ from tensorflow.python.debug.lib.debug_graphs import reconstruct_non_debug_graph
from tensorflow.python.debug.lib.debug_utils import add_debug_tensor_watch
from tensorflow.python.debug.lib.debug_utils import watch_graph
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_blacklists
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_denylists
from tensorflow.python.debug.wrappers.dumping_wrapper import DumpingDebugWrapperSession
from tensorflow.python.debug.wrappers.framework import WatchOptions

View File

@ -136,8 +136,8 @@ class DebugAnalyzer(object):
_TENSOR_NAME_COLUMN_HEAD = "Tensor name"
# Op types to be omitted when generating descriptions of graph structure.
_GRAPH_STRUCT_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
_GRAPH_STRUCT_OP_TYPE_DENYLIST = ("_Send", "_Recv", "_HostSend", "_HostRecv",
"_Retval")
def __init__(self, debug_dump, config):
"""DebugAnalyzer constructor.
@ -795,16 +795,16 @@ class DebugAnalyzer(object):
lines, font_attr_segs=font_attr_segs)
# List node inputs (non-control and control).
inputs = self._exclude_blacklisted_ops(
inputs = self._exclude_denylisted_ops(
self._debug_dump.node_inputs(node_name))
ctrl_inputs = self._exclude_blacklisted_ops(
ctrl_inputs = self._exclude_denylisted_ops(
self._debug_dump.node_inputs(node_name, is_control=True))
output.extend(self._format_neighbors("input", inputs, ctrl_inputs))
# List node output recipients (non-control and control).
recs = self._exclude_blacklisted_ops(
recs = self._exclude_denylisted_ops(
self._debug_dump.node_recipients(node_name))
ctrl_recs = self._exclude_blacklisted_ops(
ctrl_recs = self._exclude_denylisted_ops(
self._debug_dump.node_recipients(node_name, is_control=True))
output.extend(self._format_neighbors("recipient", recs, ctrl_recs))
@ -822,19 +822,20 @@ class DebugAnalyzer(object):
_add_main_menu(output, node_name=node_name, enable_node_info=False)
return output
def _exclude_blacklisted_ops(self, node_names):
"""Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_BLACKLIST.
def _exclude_denylisted_ops(self, node_names):
"""Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST.
Args:
node_names: An iterable of node or graph element names.
Returns:
A list of node names that are not blacklisted.
A list of node names that are not denylisted.
"""
return [node_name for node_name in node_names
if self._debug_dump.node_op_type(
debug_graphs.get_node_name(node_name)) not in
self._GRAPH_STRUCT_OP_TYPE_BLACKLIST]
return [
node_name for node_name in node_names
if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name))
not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST
]
def _render_node_traceback(self, node_name):
"""Render traceback of a node's creation in Python, if available.
@ -1401,13 +1402,13 @@ class DebugAnalyzer(object):
"""
# Make a shallow copy of the list because it may be extended later.
all_inputs = self._exclude_blacklisted_ops(
all_inputs = self._exclude_denylisted_ops(
copy.copy(tracker(node_name, is_control=False)))
is_ctrl = [False] * len(all_inputs)
if include_control:
# Sort control inputs or recipients in alphabetical order of the node
# names.
ctrl_inputs = self._exclude_blacklisted_ops(
ctrl_inputs = self._exclude_denylisted_ops(
sorted(tracker(node_name, is_control=True)))
all_inputs.extend(ctrl_inputs)
is_ctrl.extend([True] * len(ctrl_inputs))
@ -1440,7 +1441,7 @@ class DebugAnalyzer(object):
for i, inp in enumerate(all_inputs):
op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))
if op_type in self._GRAPH_STRUCT_OP_TYPE_BLACKLIST:
if op_type in self._GRAPH_STRUCT_OP_TYPE_DENYLIST:
continue
if is_ctrl[i]:

View File

@ -39,8 +39,7 @@ from tensorflow.python.training import gradient_descent
class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase):
_OP_TYPE_BLACKLIST = (
"_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
_OP_TYPE_DENYLIST = ("_Send", "_Recv", "_HostSend", "_HostRecv", "_Retval")
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
@ -60,10 +59,10 @@ class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase):
file_io.delete_recursively(self._dump_dir)
super(ReconstructNonDebugGraphTest, self).tearDown()
def _graphDefWithoutBlacklistedNodes(self, graph_def):
def _graphDefWithoutDenylistedNodes(self, graph_def):
output_graph_def = graph_pb2.GraphDef()
for node in graph_def.node:
if node.op not in self._OP_TYPE_BLACKLIST:
if node.op not in self._OP_TYPE_DENYLIST:
new_node = output_graph_def.node.add()
new_node.CopyFrom(node)
@ -110,16 +109,16 @@ class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase):
for i, non_debug_graph_def in enumerate(non_debug_graph_defs):
device_name = debug_graphs._infer_device_name(non_debug_graph_def)
test_util.assert_equal_graph_def(
self._graphDefWithoutBlacklistedNodes(reconstructed[device_name]),
self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))
self._graphDefWithoutDenylistedNodes(reconstructed[device_name]),
self._graphDefWithoutDenylistedNodes(non_debug_graph_def))
# Test debug_graphs.reconstruct_non_debug_graph_def.
reconstructed_again = (
debug_graphs.reconstruct_non_debug_graph_def(
run_metadata.partition_graphs[i]))
test_util.assert_equal_graph_def(
self._graphDefWithoutBlacklistedNodes(reconstructed_again),
self._graphDefWithoutBlacklistedNodes(non_debug_graph_def))
self._graphDefWithoutDenylistedNodes(reconstructed_again),
self._graphDefWithoutDenylistedNodes(non_debug_graph_def))
def testReconstructSimpleGraph(self):
with session.Session() as sess:

View File

@ -199,20 +199,20 @@ def watch_graph(run_options,
run_options.debug_options.reset_disk_byte_usage = reset_disk_byte_usage
def watch_graph_with_blacklists(run_options,
graph,
debug_ops="DebugIdentity",
debug_urls=None,
node_name_regex_blacklist=None,
op_type_regex_blacklist=None,
tensor_dtype_regex_blacklist=None,
tolerate_debug_op_creation_failures=False,
global_step=-1,
reset_disk_byte_usage=False):
"""Add debug tensor watches, blacklisting nodes and op types.
def watch_graph_with_denylists(run_options,
graph,
debug_ops="DebugIdentity",
debug_urls=None,
node_name_regex_denylist=None,
op_type_regex_denylist=None,
tensor_dtype_regex_denylist=None,
tolerate_debug_op_creation_failures=False,
global_step=-1,
reset_disk_byte_usage=False):
"""Add debug tensor watches, denylisting nodes and op types.
This is similar to `watch_graph()`, but the node names and op types are
blacklisted, instead of allowlisted.
denylisted, instead of allowlisted.
N.B.:
1. Under certain circumstances, the `Tensor` may not get actually watched
@ -225,28 +225,25 @@ def watch_graph_with_blacklists(run_options,
Args:
run_options: An instance of `config_pb2.RunOptions` to be modified.
graph: An instance of `ops.Graph`.
debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s) to use.
See the documentation of `watch_graph` for more details.
debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s) to use. See
the documentation of `watch_graph` for more details.
debug_urls: URL(s) to send debug values to, e.g.,
`file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`.
node_name_regex_blacklist: Regular-expression blacklist for node_name.
This should be a string, e.g., `"(weight_[0-9]+|bias_.*)"`.
op_type_regex_blacklist: Regular-expression blacklist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both node_name_regex_blacklist and op_type_regex_blacklist
are set, the two filtering operations will occur in a logical `OR`
relation. In other words, a node will be excluded if it hits either of
the two blacklists; a node will be included if and only if it hits
neither of the blacklists.
tensor_dtype_regex_blacklist: Regular-expression blacklist for Tensor
data type, e.g., `"^int.*"`.
This blacklist operates in logical `OR` relations to the two allowlists
above.
node_name_regex_denylist: Regular-expression denylist for node_name. This
should be a string, e.g., `"(weight_[0-9]+|bias_.*)"`.
op_type_regex_denylist: Regular-expression denylist for the op type of
nodes, e.g., `"(Variable|Add)"`. If both node_name_regex_denylist and
op_type_regex_denylist are set, the two filtering operations will occur in
a logical `OR` relation. In other words, a node will be excluded if it
hits either of the two denylists; a node will be included if and only if
it hits neither of the denylists.
tensor_dtype_regex_denylist: Regular-expression denylist for Tensor data
type, e.g., `"^int.*"`. This denylist operates in logical `OR` relations
to the two allowlists above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
global_step: (`int`) Optional global_step count for this debug tensor
watch.
global_step: (`int`) Optional global_step count for this debug tensor watch.
reset_disk_byte_usage: (`bool`) whether to reset the tracked disk byte
usage to zero (default: `False`).
"""
@ -254,12 +251,14 @@ def watch_graph_with_blacklists(run_options,
if isinstance(debug_ops, str):
debug_ops = [debug_ops]
node_name_pattern = (re.compile(node_name_regex_blacklist) if
node_name_regex_blacklist else None)
op_type_pattern = (re.compile(op_type_regex_blacklist) if
op_type_regex_blacklist else None)
tensor_dtype_pattern = (re.compile(tensor_dtype_regex_blacklist) if
tensor_dtype_regex_blacklist else None)
node_name_pattern = (
re.compile(node_name_regex_denylist)
if node_name_regex_denylist else None)
op_type_pattern = (
re.compile(op_type_regex_denylist) if op_type_regex_denylist else None)
tensor_dtype_pattern = (
re.compile(tensor_dtype_regex_denylist)
if tensor_dtype_regex_denylist else None)
ops = graph.get_operations()
for op in ops:

View File

@ -291,12 +291,12 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign"], node_names)
def testWatchGraph_nodeNameBlacklist(self):
debug_utils.watch_graph_with_blacklists(
def testWatchGraph_nodeNameDenylist(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_blacklist="(a1$|a1_init$|a1/.*|p1$)")
node_name_regex_denylist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
@ -305,37 +305,37 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
sorted(["b_init", "b", "b/Assign", "b/read", "c", "s"]),
sorted(node_names))
def testWatchGraph_opTypeBlacklist(self):
debug_utils.watch_graph_with_blacklists(
def testWatchGraph_opTypeDenylist(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_blacklist="(Variable|Identity|Assign|Const)")
op_type_regex_denylist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["p1", "s"]), sorted(node_names))
def testWatchGraph_nodeNameAndOpTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
def testWatchGraph_nodeNameAndOpTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_blacklist="p1$",
op_type_regex_blacklist="(Variable|Identity|Assign|Const)")
node_name_regex_denylist="p1$",
op_type_regex_denylist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["s"], node_names)
def testWatchGraph_tensorDTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
def testWatchGraph_tensorDTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_blacklist=".*_ref")
tensor_dtype_regex_denylist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
@ -346,13 +346,13 @@ class DebugUtilsTest(test_util.TensorFlowTestCase):
self.assertNotIn("b/Assign", node_names)
self.assertIn("s", node_names)
def testWatchGraph_nodeNameAndTensorDTypeBlacklists(self):
debug_utils.watch_graph_with_blacklists(
def testWatchGraph_nodeNameAndTensorDTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_blacklist="^s$",
tensor_dtype_regex_blacklist=".*_ref")
node_name_regex_denylist="^s$",
tensor_dtype_regex_denylist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,

View File

@ -588,10 +588,10 @@ class SessionDebugTestBase(test_util.TensorFlowTestCase):
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_blacklists(
debug_utils.watch_graph_with_denylists(
run_options,
sess.graph,
node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
node_name_regex_denylist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors

View File

@ -74,13 +74,13 @@ LEGACY_RANDOM_OPS = [
# random OpKernel instantiation is reused across multiple steps
# of the loop. Since legacy Random OpKernels have an internal rng state,
# automatic dependency tracking across loop steps would likely
# fix this race; and for that case this blacklist is problematic.
# fix this race; and for that case this denylist is problematic.
# However, since automatic dependency tracking inside while loops is not
# currently supported, and there are no other examples of OpKernel reuse
# (each OpKernel is associated with a unique op in graph mode),
# this blacklist has no effect on the aforementioned behavior.
# this denylist has no effect on the aforementioned behavior.
#
# TODO(ebrevdo,skyewm): Modify the check against this blacklist to
# TODO(ebrevdo,skyewm): Modify the check against this denylist to
# only occur when the op is inside a "variable initialization scope"; and
# add proper autodeps inside while_loops that respects this updated check.
"RandomUniform",
@ -104,7 +104,7 @@ _ORDER_INSENSITIVE_STATEFUL_OPS = [
]
# LINT.ThenChange(//tensorflow/core/grappler/optimizers/function_optimizer.cc)
_ALL_BLACKLISTED_OPS = (
_ALL_DENYLISTED_OPS = (
set(ASYNC_STATEFUL_OPS) | set(LEGACY_RANDOM_OPS)
| set(_ORDER_INSENSITIVE_STATEFUL_OPS))
@ -124,7 +124,7 @@ _ALLOWLIST_STATELESS_OPS = [
def op_is_stateful(op):
# pylint: disable=protected-access
return (op._is_stateful and op.type not in _ALL_BLACKLISTED_OPS) or (
return (op._is_stateful and op.type not in _ALL_DENYLISTED_OPS) or (
op.type in _ALLOWLIST_STATELESS_OPS)

View File

@ -711,12 +711,12 @@ class _ConverterData(object):
def __init__(self,
graph_def,
variable_names_allowlist=None,
variable_names_blacklist=None):
variable_names_denylist=None):
self._graph_def = graph_def
self._tensor_data = {}
self._build_node_defs_list()
self._variable_names_allowlist = variable_names_allowlist
self._variable_names_blacklist = variable_names_blacklist
self._variable_names_denylist = variable_names_denylist
@property
def graph_def(self):
@ -742,8 +742,8 @@ class _ConverterData(object):
"""Checks whether to convert the given variable name to a constant."""
return (self._variable_names_allowlist is None or
name in self._variable_names_allowlist) and (
self._variable_names_blacklist is None or
name not in self._variable_names_blacklist)
self._variable_names_denylist is None or
name not in self._variable_names_denylist)
def _build_node_defs_list(self):
"""Builds the list of NodeDefs in the GraphDef.
@ -777,7 +777,7 @@ class _FunctionConverterData(_ConverterData):
lower_control_flow,
aggressive_inlining,
variable_names_allowlist=None,
variable_names_blacklist=None):
variable_names_denylist=None):
"""Creates the conversion data for the given function.
Args:
@ -789,7 +789,7 @@ class _FunctionConverterData(_ConverterData):
properly connected to control outputs).
variable_names_allowlist: The set of variable names to convert (by
default, all variables are converted).
variable_names_blacklist: The set of variable names to omit converting to
variable_names_denylist: The set of variable names to omit converting to
constants.
"""
@ -800,7 +800,7 @@ class _FunctionConverterData(_ConverterData):
super(_FunctionConverterData, self).__init__(
graph_def,
variable_names_allowlist=variable_names_allowlist,
variable_names_blacklist=variable_names_blacklist)
variable_names_denylist=variable_names_denylist)
self._build_tensor_data()
def _build_tensor_data(self):
@ -850,12 +850,12 @@ class _SessionConverterData(_ConverterData):
graph_def,
output_node_names,
variable_names_allowlist=None,
variable_names_blacklist=None):
variable_names_denylist=None):
graph_def = graph_util.extract_sub_graph(graph_def, output_node_names)
super(_SessionConverterData, self).__init__(
graph_def,
variable_names_allowlist=variable_names_allowlist,
variable_names_blacklist=variable_names_blacklist)
variable_names_denylist=variable_names_denylist)
nodes_to_convert = []
tensor_names_to_convert = []
@ -1115,7 +1115,7 @@ def convert_variables_to_constants_from_session_graph(
graph_def,
output_node_names,
variable_names_allowlist=None,
variable_names_blacklist=None):
variable_names_denylist=None):
"""Replaces all the variables in a graph with constants of the same values.
This function works similarly to convert_variables_to_constants_v2, but it
@ -1131,7 +1131,7 @@ def convert_variables_to_constants_from_session_graph(
output_node_names: List of name strings for the result nodes of the graph.
variable_names_allowlist: The set of variable names to convert (by default,
all variables are converted).
variable_names_blacklist: The set of variable names to omit converting to
variable_names_denylist: The set of variable names to omit converting to
constants.
Returns:
@ -1143,5 +1143,5 @@ def convert_variables_to_constants_from_session_graph(
graph_def=graph_def,
output_node_names=output_node_names,
variable_names_allowlist=variable_names_allowlist,
variable_names_blacklist=variable_names_blacklist))
variable_names_denylist=variable_names_denylist))
return graph_def

View File

@ -594,7 +594,7 @@ class ConvertVariablesToConstantsSessionTest(test.TestCase):
output = self.evaluate(output_node)
self.assertNear(2.0, output, 0.00001)
def test_resource_variable_can_be_written_after_blacklisting(self):
def test_resource_variable_can_be_written_after_denylisting(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("", use_resource=True):
variable_node = variable_scope.get_variable(
@ -614,17 +614,17 @@ class ConvertVariablesToConstantsSessionTest(test.TestCase):
# Test variable name denylist. This should result in the variable
# not being a const. Furthermore, the paths that read from and assign
# to the blacklisted variable should continue to be valid.
constant_graph_def_with_blacklist = (
# to the denylisted variable should continue to be valid.
constant_graph_def_with_denylist = (
convert_to_constants
.convert_variables_to_constants_from_session_graph(
session=sess,
graph_def=variable_graph_def,
output_node_names=["output_node", initializer_name],
variable_names_blacklist=set(["variable_node"])))
variable_names_denylist=set(["variable_node"])))
variable_node = None
for node in constant_graph_def_with_blacklist.node:
for node in constant_graph_def_with_denylist.node:
if node.name == "variable_node":
variable_node = node
self.assertIsNotNone(variable_node)
@ -634,7 +634,7 @@ class ConvertVariablesToConstantsSessionTest(test.TestCase):
# variable is not, and that the graph can be executed and update the
# variable can be updated with each execution.
with ops.Graph().as_default():
_ = importer.import_graph_def(constant_graph_def_with_blacklist, name="")
_ = importer.import_graph_def(constant_graph_def_with_denylist, name="")
with session_lib.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
self.evaluate(sess.graph.get_operation_by_name(initializer_name))
@ -798,7 +798,7 @@ class ConvertVariablesToConstantsSessionTest(test.TestCase):
.convert_variables_to_constants_from_session_graph(
sess,
variable_graph_def, ["out"],
variable_names_blacklist=["y"]))
variable_names_denylist=["y"]))
self._assertGraphContains(
constant_graph_def, """
node {
@ -840,7 +840,7 @@ class ConvertVariablesToConstantsSessionTest(test.TestCase):
.convert_variables_to_constants_from_session_graph(
sess,
variable_graph_def, ["out"],
variable_names_blacklist=["y"]))
variable_names_denylist=["y"]))
self._assertGraphContains(
constant_graph_def, """
node {
@ -1086,7 +1086,7 @@ class ConvertVariablesToConstantsSessionTest(test.TestCase):
.convert_variables_to_constants_from_session_graph(
sess,
variable_graph_def, ["case/cond"],
variable_names_blacklist=["y"]))
variable_names_denylist=["y"]))
self._assertGraphContains(
constant_graph_def, """
node {name: "x" op: "Const"}

View File

@ -270,14 +270,14 @@ def convert_variables_to_constants(sess,
Raises:
RuntimeError: if a DT_RESOURCE op is found whose ancestor Variables are both
blacklisted AND whitelisted for freezing.
denylisted AND whitelisted for freezing.
"""
ret = convert_to_constants.convert_variables_to_constants_from_session_graph(
session=sess,
graph_def=input_graph_def,
output_node_names=output_node_names,
variable_names_allowlist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
variable_names_denylist=variable_names_blacklist)
# The previous code logic generated an empty versions field, we clear it here
# to maintain backwards compatibility.
ret.versions.Clear()

View File

@ -773,34 +773,34 @@ def assert_no_new_tensors(f):
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
for b in denylist:
if b is obj:
return "<test code>"
if obj is blacklist:
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
@ -810,10 +810,10 @@ def _find_reference_cycle(objects, idx):
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
@ -822,7 +822,7 @@ def _find_reference_cycle(objects, idx):
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
@ -832,21 +832,21 @@ def _find_reference_cycle(objects, idx):
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""

View File

@ -59,8 +59,8 @@ def _input(shape):
def _weight(shape):
"""Generates a weight of a given shape."""
# Note that the lambda is needed to allow construction inside loops.
return variables.Variable(
lambda: init_ops.glorot_uniform_initializer(seed=0)(shape))
return variables.Variable(lambda: init_ops.glorot_uniform_initializer(seed=0)
(shape))
def _bias(shape):
@ -204,11 +204,11 @@ def _make_node_with_color(color, input_tensor, name=None):
if color == 'w': # Allow node
weights = _weight(input_tensor.get_shape().as_list())
return math_ops.matmul(input_tensor, weights, name=name)
if color == 'g': # Gray node
if color == 'g': # Infer node
return math_ops.add(input_tensor, 0.1, name=name)
if color == 'c': # Clear node
return nn.relu(input_tensor, name=name)
if color == 'b': # Black node
if color == 'b': # Deny node
return math_ops.pow(math_ops.pow(input_tensor, 2.), 0.5, name=name)
raise ValueError('Invalid node color: ' + str(color))
@ -371,8 +371,8 @@ class AutoMixedPrecisionTest(test.TestCase, parameterized.TestCase):
The loop has different node colors in different sections of the graph. The
arguments must be strings where each character represents the color of a
node in that section of the graph: w = allow, g = gray, c = clear,
b = black. CAPITALIZED characters indicate that the node is expected to be
node in that section of the graph: w = allow, g = infer, c = clear,
b = deny. CAPITALIZED characters indicate that the node is expected to be
changed to DT_HALF during graph optimization.
inp -> loop [ body ] -> out.

View File

@ -84,7 +84,7 @@ def freeze_graph_with_def_protos(input_graph_def,
clear_devices,
initializer_nodes,
variable_names_whitelist="",
variable_names_blacklist="",
variable_names_denylist="",
input_meta_graph_def=None,
input_saved_model_dir=None,
saved_model_tags=None,
@ -107,7 +107,7 @@ def freeze_graph_with_def_protos(input_graph_def,
freezing.
variable_names_whitelist: The set of variable names to convert (optional, by
default, all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
variable_names_denylist: The set of variable names to omit converting
to constants (optional).
input_meta_graph_def: A `MetaGraphDef` (optional),
input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file
@ -213,9 +213,9 @@ def freeze_graph_with_def_protos(input_graph_def,
variable_names_whitelist = (
variable_names_whitelist.replace(" ", "").split(",")
if variable_names_whitelist else None)
variable_names_blacklist = (
variable_names_blacklist.replace(" ", "").split(",")
if variable_names_blacklist else None)
variable_names_denylist = (
variable_names_denylist.replace(" ", "").split(",")
if variable_names_denylist else None)
if input_meta_graph_def:
output_graph_def = graph_util.convert_variables_to_constants(
@ -223,14 +223,14 @@ def freeze_graph_with_def_protos(input_graph_def,
input_meta_graph_def.graph_def,
output_node_names.replace(" ", "").split(","),
variable_names_whitelist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
variable_names_blacklist=variable_names_denylist)
else:
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names.replace(" ", "").split(","),
variable_names_whitelist=variable_names_whitelist,
variable_names_blacklist=variable_names_blacklist)
variable_names_blacklist=variable_names_denylist)
# Write GraphDef to file if output path has been given.
if output_graph:
@ -294,7 +294,7 @@ def freeze_graph(input_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist="",
variable_names_blacklist="",
variable_names_denylist="",
input_meta_graph=None,
input_saved_model_dir=None,
saved_model_tags=tag_constants.SERVING,
@ -318,7 +318,7 @@ def freeze_graph(input_graph,
freezing.
variable_names_whitelist: The set of variable names to convert (optional, by
default, all variables are converted),
variable_names_blacklist: The set of variable names to omit converting
variable_names_denylist: The set of variable names to omit converting
to constants (optional).
input_meta_graph: A `MetaGraphDef` file to load (optional).
input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and
@ -354,7 +354,7 @@ def freeze_graph(input_graph,
clear_devices,
initializer_nodes,
variable_names_whitelist,
variable_names_blacklist,
variable_names_denylist,
input_meta_graph_def,
input_saved_model_dir,
[tag for tag in saved_model_tags.replace(" ", "").split(",") if tag],
@ -373,7 +373,7 @@ def main(unused_args, flags):
flags.input_checkpoint, flags.output_node_names,
flags.restore_op_name, flags.filename_tensor_name,
flags.output_graph, flags.clear_devices, flags.initializer_nodes,
flags.variable_names_whitelist, flags.variable_names_blacklist,
flags.variable_names_whitelist, flags.variable_names_denylist,
flags.input_meta_graph, flags.input_saved_model_dir,
flags.saved_model_tags, checkpoint_version)
@ -456,7 +456,7 @@ def run_main():
only those variables will be converted to constants.\
""")
parser.add_argument(
"--variable_names_blacklist",
"--variable_names_denylist",
type=str,
default="",
help="""\

View File

@ -58,8 +58,8 @@ _XLA_DEBUG_OPTIONS_URL = (
'tensorflow/compiler/xla/debug_options_flags.cc')
# Set of ops to blacklist.
_OP_BLACKLIST = set(['WriteFile', 'ReadFile', 'PrintV2'])
# Set of ops to denylist.
_OP_DENYLIST = set(['WriteFile', 'ReadFile', 'PrintV2'])
def _show_tag_sets(saved_model_dir):
@ -349,9 +349,9 @@ def get_signature_def_map(saved_model_dir, tag_set):
def scan_meta_graph_def(meta_graph_def):
"""Scans meta_graph_def and reports if there are ops on blacklist.
"""Scans meta_graph_def and reports if there are ops on denylist.
Print ops if they are on black list, or print success if no blacklisted ops
Print ops if they are on the denylist, or print success if no denylisted ops
found.
Args:
@ -359,13 +359,14 @@ def scan_meta_graph_def(meta_graph_def):
"""
all_ops_set = set(
meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
blacklisted_ops = _OP_BLACKLIST & all_ops_set
if blacklisted_ops:
denylisted_ops = _OP_DENYLIST & all_ops_set
if denylisted_ops:
# TODO(yifeif): print more warnings
print('MetaGraph with tag set %s contains the following blacklisted ops:' %
meta_graph_def.meta_info_def.tags, blacklisted_ops)
print(
'MetaGraph with tag set %s contains the following denylisted ops:' %
meta_graph_def.meta_info_def.tags, denylisted_ops)
else:
print('MetaGraph with tag set %s does not contain blacklisted ops.' %
print('MetaGraph with tag set %s does not contain denylisted ops.' %
meta_graph_def.meta_info_def.tags)
@ -957,7 +958,7 @@ def add_run_subparser(subparsers):
def add_scan_subparser(subparsers):
"""Add parser for `scan`."""
scan_msg = ('Usage example:\n'
'To scan for blacklisted ops in SavedModel:\n'
'To scan for denylisted ops in SavedModel:\n'
'$saved_model_cli scan --dir /tmp/saved_model\n'
'To scan a specific MetaGraph, pass in --tag_set\n')
parser_scan = subparsers.add_parser(

View File

@ -698,18 +698,18 @@ Defined Functions:
with captured_output() as (out, _):
saved_model_cli.scan(args)
output = out.getvalue().strip()
self.assertTrue('does not contain blacklisted ops' in output)
self.assertTrue('does not contain denylisted ops' in output)
def testScanCommandFoundBlacklistedOp(self):
def testScanCommandFoundDenylistedOp(self):
self.parser = saved_model_cli.create_parser()
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
args = self.parser.parse_args(
['scan', '--dir', base_path, '--tag_set', 'serve'])
op_blacklist = saved_model_cli._OP_BLACKLIST
saved_model_cli._OP_BLACKLIST = set(['VariableV2'])
op_denylist = saved_model_cli._OP_DENYLIST
saved_model_cli._OP_DENYLIST = set(['VariableV2'])
with captured_output() as (out, _):
saved_model_cli.scan(args)
saved_model_cli._OP_BLACKLIST = op_blacklist
saved_model_cli._OP_DENYLIST = op_denylist
output = out.getvalue().strip()
self.assertTrue('\'VariableV2\'' in output)

View File

@ -36,13 +36,13 @@ _SUPPORTED_SEQUENCE_COLUMNS = (fc._SequenceCategoricalColumn,
# For V2 columns, we support anything that inherits from CategoricalColumn
# other than those in the blacklist. User-provided columns that inherit from
# other than those in the denylist. User-provided columns that inherit from
# CategoricalColumn may or may not be compatible; it is up to the user to
# manage TPU compatibility for custom columns.
_SUPPORTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.CategoricalColumn,)
_BLACKLISTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.HashedCategoricalColumn,
fc_lib.BucketizedColumn,
fc_lib.CrossedColumn)
_DENYLISTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.HashedCategoricalColumn,
fc_lib.BucketizedColumn,
fc_lib.CrossedColumn)
_SUPPORTED_CATEGORICAL_COLUMNS = (fc._IdentityCategoricalColumn,
fc._VocabularyFileCategoricalColumn,
fc._VocabularyListCategoricalColumn,
@ -106,9 +106,9 @@ def embedding_column(categorical_column,
ValueError: if `initializer` is specified but not callable.
TypeError: if categorical_column is not a supported type.
"""
if isinstance(categorical_column, _BLACKLISTED_CATEGORICAL_COLUMNS_V2):
if isinstance(categorical_column, _DENYLISTED_CATEGORICAL_COLUMNS_V2):
raise TypeError('categorical_column for tpu '
' embedding_column was blacklisted type %s' %
' embedding_column was denylisted type %s' %
type(categorical_column))
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
raise TypeError(
@ -223,9 +223,9 @@ def shared_embedding_columns(categorical_columns,
or 0 for a sequence column.
"""
for categorical_column in categorical_columns:
if isinstance(categorical_column, _BLACKLISTED_CATEGORICAL_COLUMNS_V2):
if isinstance(categorical_column, _DENYLISTED_CATEGORICAL_COLUMNS_V2):
raise TypeError('categorical_column for tpu '
' embedding_column was blacklisted type %s' %
' embedding_column was denylisted type %s' %
type(categorical_column))
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
raise TypeError(

View File

@ -59,8 +59,8 @@ class EmbeddingColumnTest(test.TestCase):
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_blacklisted_column(self):
# HashedCategoricalColumn is blacklisted and so will raise an exception.
def test_denylisted_column(self):
# HashedCategoricalColumn is denylisted and so will raise an exception.
categorical_column = fc_lib.categorical_column_with_hash_bucket(
key='aaa', hash_bucket_size=3)
embedding_dimension = 2

View File

@ -57,7 +57,7 @@ ops.NotDifferentiable("TPUReplicatedInput")
# Operations that indicate some error in the users graph, e.g. a placeholder
# that's introduced outside of the infeed.
_BLACKLISTED_OPS = set([
_DENYLISTED_OPS = set([
"Placeholder",
])
@ -526,7 +526,7 @@ class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
if op.type in _DENYLISTED_OPS:
logging.error("Operation of type %s (%s) is not supported on the TPU. "
"Execution will fail if this op is used in the graph. " %
(op.type, op.name))
@ -1947,7 +1947,9 @@ def rewrite(computation,
# pylint: enable=indexing-exception
# Operations that indicate some error in the user's inference graph.
_BLACKLISTED_INFERENCE_OPS = set([
_DENYLISTED_INFERENCE_OPS = set([
"ReadVariableOp",
"AssignVariableOp",
"AssignAddVariableOp",
@ -1993,7 +1995,7 @@ class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if self._check_ops and op.type in _BLACKLISTED_INFERENCE_OPS:
if self._check_ops and op.type in _DENYLISTED_INFERENCE_OPS:
raise NotImplementedError(
"Operation of type %s (%s) is not supported on the TPU for inference."
" Execution will fail if this op is used in the graph. Make sure your"

View File

@ -124,10 +124,10 @@ def enable_mixed_precision_graph_rewrite(opt, loss_scale='dynamic'):
E.g. `ArgMax` and `Floor`.
* `AllowList`: Ops that are considered numerically safe for execution in
float16, and thus are always converted. E.g. `Conv2D`.
* `BlackList`: Ops that are numerically unsafe to execute in float16 and
* `DenyList`: Ops that are numerically unsafe to execute in float16 and
can negatively affect downstream nodes. E.g. `Softmax`.
* `GrayList`: Ops that are considered numerically safe for execution in
float16 unless downstream from a BlackList Op. E.g. `Add` and `AvgPool`.
float16 unless downstream from a DenyList Op. E.g. `Add` and `AvgPool`.
When this function is used, gradients should be computed and applied with the
returned optimizer, either by calling `opt.minimize()` or
@ -269,10 +269,10 @@ def enable_mixed_precision_graph_rewrite_v1(opt, loss_scale='dynamic'):
E.g. `ArgMax` and `Floor`.
* `AllowList`: Ops that are considered numerically safe for execution in
float16, and thus are always converted. E.g. `Conv2D`.
* `BlackList`: Ops that are numerically unsafe to execute in float16 and
* `DenyList`: Ops that are numerically unsafe to execute in float16 and
can negatively affect downstream nodes. E.g. `Softmax`.
* `GrayList`: Ops that are considered numerically safe for execution in
float16 unless downstream from a BlackList Op. E.g. `Add` and `AvgPool`.
float16 unless downstream from a DenyList Op. E.g. `Add` and `AvgPool`.
When this function is used, gradients should only be computed and applied
with the returned optimizer, either by calling `opt.minimize()` or

View File

@ -126,7 +126,7 @@ test_runner() {
# Run a suite of tests, print failure logs (if any), wall-time each test,
# and show the summary at the end.
#
# Usage: test_runner <TEST_DESC> <ALL_TESTS> <TEST_BLACKLIST> <LOGS_DIR>
# Usage: test_runner <TEST_DESC> <ALL_TESTS> <TEST_DENYLIST> <LOGS_DIR>
# e.g., test_runner "Tutorial test-on-install" \
# "test1 test2 test3" "test2 test3" "/tmp/log_dir"
@ -136,7 +136,7 @@ test_runner() {
TEST_DESC=$1
ALL_TESTS_STR=$2
TEST_BLACKLIST_SR=$3
TEST_DENYLIST_SR=$3
LOGS_DIR=$4
NUM_TESTS=$(echo "${ALL_TESTS_STR}" | wc -w)
@ -152,9 +152,9 @@ test_runner() {
((COUNTER++))
STAT_STR="(${COUNTER} / ${NUM_TESTS})"
if [[ "${TEST_BLACKLIST_STR}" == *"${CURR_TEST}"* ]]; then
if [[ "${TEST_DENYLIST_STR}" == *"${CURR_TEST}"* ]]; then
((SKIPPED_COUNTER++))
echo "${STAT_STR} Blacklisted ${TEST_DESC} SKIPPED: ${CURR_TEST}"
echo "${STAT_STR} Denylisted ${TEST_DESC} SKIPPED: ${CURR_TEST}"
continue
fi

View File

@ -24,19 +24,19 @@
# the Python binary path.
#
# This script obeys the following environment variables (if exists):
# TF_BUILD_INTEG_TEST_BLACKLIST: Force skipping of specified integration tests
# TF_BUILD_INTEG_TEST_DENYLIST: Force skipping of specified integration tests
# listed in INTEG_TESTS below.
#
# List of all integration tests to run, separated by spaces
INTEG_TESTS="ffmpeg_lib"
if [[ -z "${TF_BUILD_INTEG_TEST_BLACKLIST}" ]]; then
TF_BUILD_INTEG_TEST_BLACKLIST=""
if [[ -z "${TF_BUILD_INTEG_TEST_DENYLIST}" ]]; then
TF_BUILD_INTEG_TEST_DENYLIST=""
fi
echo ""
echo "=== Integration Tests ==="
echo "TF_BUILD_INTEG_TEST_BLACKLIST = \"${TF_BUILD_INTEG_TEST_BLACKLIST}\""
echo "TF_BUILD_INTEG_TEST_DENYLIST = \"${TF_BUILD_INTEG_TEST_DENYLIST}\""
# Timeout (in seconds) for each integration test
TIMEOUT=1800
@ -121,4 +121,4 @@ test_ffmpeg_lib() {
# Run the integration tests
test_runner "integration test-on-install" \
"${INTEG_TESTS}" "${TF_BUILD_INTEG_TEST_BLACKLIST}" "${LOGS_DIR}"
"${INTEG_TESTS}" "${TF_BUILD_INTEG_TEST_DENYLIST}" "${LOGS_DIR}"

View File

@ -25,7 +25,7 @@
# the Python binary path.
#
# The --gpu flag informs the script that this is a GPU build, so that the
# appropriate test blacklists can be applied accordingly.
# appropriate test denylists can be applied accordingly.
#
# The --mac flag informs the script that this is running on mac. Mac does not
# have flock, so we should skip using parallel_gpu_execute on mac.

View File

@ -28,19 +28,19 @@
# the Python binary path.
#
# This script obeys the following environment variables (if exists):
# TUT_TESTS_BLACKLIST: Force skipping of specified tutorial tests listed
# TF_BUILD_TUT_TEST_DENYLIST: Force skipping of specified tutorial tests listed
# in TUT_TESTS below.
#
# List of all tutorial tests to run, separated by spaces
TUT_TESTS="mnist_with_summaries word2vec"
if [[ -z "${TUT_TESTS_BLACKLIST}" ]]; then
TF_BUILD_TUT_TEST_BLACKLIST=""
if [[ -z "${TF_BUILD_TUT_TEST_DENYLIST}" ]]; then
TF_BUILD_TUT_TEST_DENYLIST=""
fi
echo ""
echo "=== Testing tutorials ==="
echo "TF_BUILD_TUT_TEST_BLACKLIST = \"${TF_BUILD_TUT_TEST_BLACKLIST}\""
echo "TF_BUILD_TUT_TEST_DENYLIST = \"${TF_BUILD_TUT_TEST_DENYLIST}\""
# Timeout (in seconds) for each tutorial test
TIMEOUT=1800
@ -269,4 +269,4 @@ test_ptb_word_lm() {
# Run the tutorial tests
test_runner "tutorial test-on-install" \
"${TUT_TESTS}" "${TF_BUILD_TUT_TEST_BLACKLIST}" "${LOGS_DIR}"
"${TUT_TESTS}" "${TF_BUILD_TUT_TEST_DENYLIST}" "${LOGS_DIR}"

View File

@ -23,7 +23,7 @@
# the Python binary path.
#
# The --gpu flag informs the script that this is a GPU build, so that the
# appropriate test blacklists can be applied accordingly.
# appropriate test denylists can be applied accordingly.
#
echo ""

View File

@ -355,7 +355,7 @@ do_external_licenses_check(){
EXTERNAL_LICENSES_CHECK_END_TIME=$(date +'%s')
# Blacklist
# Denylist
echo ${MISSING_LICENSES_FILE}
grep \
-e "@bazel_tools//third_party/" \

View File

@ -10,7 +10,7 @@
# Profiled execution.
profile=no
# Add files or directories to the blacklist. They should be base names, not
# Add files or directories to the denylist. They should be base names, not
# paths.
ignore=CVS

View File

@ -101,7 +101,7 @@ def traverse(root, visit):
is already in the stack.
Traversing system modules can take a long time, it is advisable to pass a
`visit` callable which blacklists such modules.
`visit` callable which excludes (denylists) such modules.
Args:
root: A python object with which to start the traversal.

View File

@ -41,7 +41,7 @@ def main():
# compile for C
return call([CPU_C_COMPILER] + compiler_flags)
# create a blacklist of folders that will be skipped when compiling with ComputeCpp
# create a denylist of folders that will be skipped when compiling with ComputeCpp
skip_extensions = [".cu.cc"]
skip_folders = ["tensorflow/compiler", "tensorflow/docs_src", "third_party", "external", "hexagon"]
skip_folders = [(folder + '/') for folder in skip_folders]

View File

@ -57,7 +57,7 @@ def main():
] + opt_flags
if (compiling_cpp == 1):
# create a blacklist of folders that will be skipped when compiling
# create a denylist of folders that will be skipped when compiling
# with triSYCL
skip_extensions = ['.cu.cc']
skip_folders = [