From 6bf093ca016a2b1caf40e30f7cd73809ab3257f4 Mon Sep 17 00:00:00 2001
From: Dandelion Mané
Date: Tue, 13 Jun 2017 17:42:43 -0700
Subject: [PATCH 001/180] Autogenerated Change: Change TensorBoard TAG to 55

PiperOrigin-RevId: 158924850
---
 tensorflow/tensorboard/TAG | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow/tensorboard/TAG b/tensorflow/tensorboard/TAG
index fb1e7bc8699..c3f407c0955 100644
--- a/tensorflow/tensorboard/TAG
+++ b/tensorflow/tensorboard/TAG
@@ -1 +1 @@
-54
+55

From 75145524ffed31bb749a49c3dba4518590767cf6 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 13 Jun 2017 17:49:59 -0700
Subject: [PATCH 002/180] Don't split basic blocks that lack a terminator.

This replicates, in ElementalIrEmitter::MakeRngElementGenerator, the idiom
used in ForLoop::Emit to handle the same situation.

PiperOrigin-RevId: 158925513
---
 .../xla/service/elemental_ir_emitter.cc    | 35 ++++++++++++++-----
 tensorflow/compiler/xla/tests/prng_test.cc | 11 ++++++
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index bea1da40446..dbc65e80eb5 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -588,20 +588,37 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
         llvm::Intrinsic::ctlz, {r, ir_builder_->getInt1(1)},
         {param_ir_type}, ir_builder_);
     auto in_block = ir_builder_->GetInsertBlock();
-    auto body_block = in_block->splitBasicBlock(
-        ir_builder_->GetInsertPoint(), "rng_body");
-    SetToFirstInsertPoint(body_block, ir_builder_);
-    auto out_block = body_block->splitBasicBlock(
-        ir_builder_->GetInsertPoint(), "rng_out");
+
+    // A terminator should be present iff we're emitting code
+    // into the middle (as opposed to the end) of a basic block.
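+    // llvm::BasicBlock::splitBasicBlock only works on blocks that
+    // already have a terminator, so the end-of-block case below
+    // creates fresh blocks and branches to them explicitly instead
+    // of splitting.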
+ CHECK_EQ(ir_builder_->GetInsertPoint() == in_block->end(), + in_block->getTerminator() == nullptr); + + llvm::BasicBlock* body_block; + llvm::BasicBlock* out_block; + + if (ir_builder_->GetInsertPoint() == in_block->end()) { + body_block = + llvm_ir::CreateBasicBlock(nullptr, "rng_body", ir_builder_); + out_block = + llvm_ir::CreateBasicBlock(nullptr, "rng_out", ir_builder_); + llvm::BranchInst::Create(body_block, in_block); + } else { + body_block = in_block->splitBasicBlock( + ir_builder_->GetInsertPoint(), "rng_body"); + out_block = body_block->splitBasicBlock( + ir_builder_->GetInsertPoint(), "rng_out"); + body_block->getTerminator()->eraseFromParent(); + } + SetToFirstInsertPoint(body_block, ir_builder_); auto random = ir_builder_->CreateAnd( ir_builder_->CreateZExtOrTrunc(get_next_i64(), param_ir_type), ir_builder_->CreateLShr(llvm::ConstantInt::get(param_ir_type, ~0), leading_zeros)); - llvm::ReplaceInstWithInst( - body_block->getTerminator(), - llvm::BranchInst::Create(out_block, body_block, - ir_builder_->CreateICmpULT(random, r))); + llvm::BranchInst::Create(out_block, body_block, + ir_builder_->CreateICmpULT(random, r), + body_block); SetToFirstInsertPoint(out_block, ir_builder_); return ir_builder_->CreateAdd( p, ir_builder_->CreateSelect( diff --git a/tensorflow/compiler/xla/tests/prng_test.cc b/tensorflow/compiler/xla/tests/prng_test.cc index 5117478bfd5..b77b8e2ee30 100644 --- a/tensorflow/compiler/xla/tests/prng_test.cc +++ b/tensorflow/compiler/xla/tests/prng_test.cc @@ -273,6 +273,17 @@ XLA_TEST_F(PrngTest, TenValuesN01) { // TODO(b/25995601): Test that resultant values are reasonable } +XLA_TEST_F(PrngTest, RngUniformCrash) { + ComputationBuilder builder(client_, TestName()); + + // This used to crash XLA during LLVM IR generation for CPUs. 
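+  // RngUniform over a bounded integral range is lowered to a
+  // rejection-sampling loop emitted at the end of a basic block,
+  // which at that point has no terminator; see the
+  // elemental_ir_emitter.cc change above.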
+ auto rng_uniform = builder.RngUniform(builder.ConstantR0(0), + builder.ConstantR0(1000 * 1000), + ShapeUtil::MakeShape(S32, {})); + SetSeed(0); + ExecuteAndTransferOrDie(&builder, /*arguments=*/{}); +} + } // namespace } // namespace xla From 5ccf1b28c1a65227bd9f76d9d5c1b3ed2862c977 Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Tue, 13 Jun 2017 18:13:34 -0700 Subject: [PATCH 003/180] Upgrade TensorBoard third party deps PiperOrigin-RevId: 158927523 --- third_party/js.bzl | 31 ++-- third_party/polymer.bzl | 314 ++++++++++++++++++++-------------------- third_party/typings.bzl | 220 ++++++++++++++-------------- 3 files changed, 283 insertions(+), 282 deletions(-) diff --git a/third_party/js.bzl b/third_party/js.bzl index 2d2339c95e5..46466c3f312 100644 --- a/third_party/js.bzl +++ b/third_party/js.bzl @@ -17,9 +17,11 @@ load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external") load("@io_bazel_rules_closure//closure:defs.bzl", "web_library_external") +def tensorboard_js_workspace(): + ############################################################################## # TensorBoard Build Tools -def tensorboard_js_workspace(): + filegroup_external( name = "org_nodejs", # MIT with portions licensed: @@ -67,7 +69,7 @@ def tensorboard_js_workspace(): "node.exe", ], ) - + filegroup_external( name = "com_microsoft_typescript", licenses = ["notice"], # Apache 2.0 @@ -151,19 +153,20 @@ def tensorboard_js_workspace(): # no @license header licenses = ["notice"], # MIT sha256_urls = { - "7aff264bd84c90bed3c72a4dc31db8c19151853c6df6980f52b01d3e9872c82d": [ - "http://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/build/three.js", - "https://raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/build/three.js", + "881cc79c84c34a1f61f8c8af0ee3f237d83a2eda3868720fdcb47bcacf8da44a": [ + "http://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/build/three.js", + "https://raw.githubusercontent.com/mrdoob/three.js/r77/build/three.js", ], - "0e98ded15bb7fe398a655667e76b39909d36c0973a8950d01c62f65f93161c27": [ - "http://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/examples/js/controls/OrbitControls.js", - "https://raw.githubusercontent.com/mrdoob/three.js/ad419d40bdaab80abbb34b8f359b4ee840033a02/examples/js/controls/OrbitControls.js", + "98b8b5954901025a98033c8bdd65969be1f30b59e11f823ec864253bb72f768d": [ + "http://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/examples/js/controls/OrbitControls.js", + "https://raw.githubusercontent.com/mrdoob/three.js/r77/examples/js/controls/OrbitControls.js", ], }, ) - + ############################################################################## # TensorBoard JavaScript Production Dependencies + web_library_external( name = "com_lodash", licenses = ["notice"], # MIT @@ -245,7 +248,7 @@ def tensorboard_js_workspace(): licenses = ["notice"], # MIT sha256_urls = { "633f2861a9a862b9cd7967e841e14dd3527912f209d6563595774fa31e3d84cb": [ - "http://mirror.bazel.build/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSES", + "http://mirror.bazel.build/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSE", "https://raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSE", ], "f138fce57f673ca8a633f4aee5ae5b6fcb6ad0de59069a42a74e996fd04d8fcc": [ @@ -260,9 +263,9 @@ def tensorboard_js_workspace(): # no @license header licenses = ["notice"], # BSD-3-Clause sha256_urls_extract = { - 
"b5fac5b296bc196e6aa7b59f9e33986fc44d23d59a0e211705187be9e35b943d": [ - "http://mirror.bazel.build/github.com/d3/d3/releases/download/v4.8.0/d3.zip", - "https://github.com/d3/d3/releases/download/v4.8.0/d3.zip", + "d858c0878af36bd00e2af6029029106328d408c2bff0a60a9d78c4e27f47b99a": [ + "http://mirror.bazel.build/github.com/d3/d3/releases/download/v4.9.1/d3.zip", + "https://github.com/d3/d3/releases/download/v4.9.1/d3.zip", ], }, # TODO(jart): Use srcs=["d3.js"] instead of this once supported. @@ -292,6 +295,7 @@ def tensorboard_js_workspace(): ############################################################################## # TensorBoard Testing Dependencies + web_library_external( name = "org_npmjs_registry_accessibility_developer_tools", licenses = ["notice"], # Apache License 2.0 @@ -417,4 +421,3 @@ def tensorboard_js_workspace(): path = "/test-fixture", exclude = ["test/**"], ) - diff --git a/third_party/polymer.bzl b/third_party/polymer.bzl index bd6e05803cf..812afbf3f7d 100644 --- a/third_party/polymer.bzl +++ b/third_party/polymer.bzl @@ -17,6 +17,23 @@ load("@io_bazel_rules_closure//closure:defs.bzl", "web_library_external") def tensorboard_polymer_workspace(): + web_library_external( + name = "org_polymer", + licenses = ["notice"], # BSD-3-Clause + sha256 = "2c38cf73bdd09e0f80a4b7210fd58f88a6bdd8624da73fb3e028e66a84c9e095", + strip_prefix = "polymer-1.8.1", + urls = [ + "http://mirror.bazel.build/github.com/polymer/polymer/archive/v1.8.1.tar.gz", + "https://github.com/polymer/polymer/archive/v1.8.1.tar.gz", + ], + path = "/polymer", + srcs = [ + "polymer.html", + "polymer-micro.html", + "polymer-mini.html", + ], + ) + web_library_external( name = "org_polymer_font_roboto", licenses = ["notice"], # BSD-3-Clause @@ -29,7 +46,7 @@ def tensorboard_polymer_workspace(): path = "/font-roboto", srcs = ["roboto.html"], ) - + web_library_external( name = "org_polymer_hydrolysis", licenses = ["notice"], # BSD-3-Clause @@ -47,35 +64,35 @@ def tensorboard_polymer_workspace(): ], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_a11y_announcer", licenses = ["notice"], # BSD-3-Clause - sha256 = "6bce143db7a374a68535ec8b861a5f30e81f2f1e4ee36a55bda2a891f6fd2818", + sha256 = "53114ceb57d9f33a7a8058488cf06450e48502e5d033adf51c91330f61620353", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-a11y-announcer/archive/v1.0.5.tar.gz", - "https://github.com/PolymerElements/iron-a11y-announcer/archive/v1.0.5.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-a11y-announcer/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-a11y-announcer/archive/v2.0.0.tar.gz", ], - strip_prefix = "iron-a11y-announcer-1.0.5", + strip_prefix = "iron-a11y-announcer-2.0.0", path = "/iron-a11y-announcer", srcs = ["iron-a11y-announcer.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_a11y_keys_behavior", licenses = ["notice"], # BSD-3-Clause - sha256 = "6823efc47a83208fd51d39c5a1d3eb0c0bebc705df1ce01310509da22a13ebd2", + sha256 = "09274155c8d537f8bb567b3be5e747253ef760995a59ee06cb0ab38e704212fb", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-a11y-keys-behavior/archive/v1.1.8.tar.gz", - "https://github.com/PolymerElements/iron-a11y-keys-behavior/archive/v1.1.8.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-a11y-keys-behavior/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-a11y-keys-behavior/archive/v2.0.0.tar.gz", ], - strip_prefix = 
"iron-a11y-keys-behavior-1.1.8", + strip_prefix = "iron-a11y-keys-behavior-2.0.0", path = "/iron-a11y-keys-behavior", srcs = ["iron-a11y-keys-behavior.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_ajax", licenses = ["notice"], # BSD-3-Clause @@ -95,7 +112,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_promise_polyfill", ], ) - + web_library_external( name = "org_polymer_iron_autogrow_textarea", licenses = ["notice"], # BSD-3-Clause @@ -115,7 +132,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_validatable_behavior", ], ) - + web_library_external( name = "org_polymer_iron_behaviors", licenses = ["notice"], # BSD-3-Clause @@ -135,7 +152,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_a11y_keys_behavior", ], ) - + web_library_external( name = "org_polymer_iron_checked_element_behavior", licenses = ["notice"], # BSD-3-Clause @@ -153,7 +170,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_validatable_behavior", ], ) - + web_library_external( name = "org_polymer_iron_component_page", licenses = ["notice"], # BSD-3-Clause @@ -178,16 +195,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_toolbar", ], ) - + web_library_external( name = "org_polymer_iron_collapse", licenses = ["notice"], # BSD-3-Clause - sha256 = "275808994a609a2f9923e2dd2db1957945ab141ba840eadc33f19e1f406d600e", + sha256 = "eb72f459a2a5adbcd922327eea02ed909e8056ad72fd8a32d04a14ce54b2e480", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-collapse/archive/v1.0.8.tar.gz", - "https://github.com/PolymerElements/iron-collapse/archive/v1.0.8.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-collapse/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-collapse/archive/v2.0.0.tar.gz", ], - strip_prefix = "iron-collapse-1.0.8", + strip_prefix = "iron-collapse-2.0.0", path = "/iron-collapse", srcs = ["iron-collapse.html"], deps = [ @@ -195,16 +212,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_resizable_behavior", ], ) - + web_library_external( name = "org_polymer_iron_demo_helpers", licenses = ["notice"], # BSD-3-Clause - sha256 = "aa7458492a6ac3d1f6344640a4c2ab07bce64e7ad0422b83b5d665707598cce6", + sha256 = "e196985cb7e50108283c3fc189a3a018e697b4648107d597df71a5c17f5ee907", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-demo-helpers/archive/v1.1.0.tar.gz", - "https://github.com/PolymerElements/iron-demo-helpers/archive/v1.1.0.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-demo-helpers/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-demo-helpers/archive/v2.0.0.tar.gz", ], - strip_prefix = "iron-demo-helpers-1.1.0", + strip_prefix = "iron-demo-helpers-2.0.0", path = "/iron-demo-helpers", srcs = [ "demo-pages-shared-styles.html", @@ -212,6 +229,7 @@ def tensorboard_polymer_workspace(): ], deps = [ "@org_polymer", + "@org_polymer_font_roboto", "@org_polymer_iron_flex_layout", "@org_polymer_iron_icons", "@org_polymer_marked_element", @@ -220,16 +238,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_prism_element", ], ) - + web_library_external( name = "org_polymer_iron_doc_viewer", licenses = ["notice"], # BSD-3-Clause - sha256 = "f0e9dfbbcd94d7e88ce82cb61e615406ace63c185fee9396f7f182206ca5cc9a", + sha256 = "5d487c99dd0cf626c800ae8667b0c8c88095f4482a68e837a1d3f58484ca8fb4", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-doc-viewer/archive/v1.0.12.tar.gz", - 
"https://github.com/PolymerElements/iron-doc-viewer/archive/v1.0.12.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-doc-viewer/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-doc-viewer/archive/v2.0.0.tar.gz", ], - strip_prefix = "iron-doc-viewer-1.0.12", + strip_prefix = "iron-doc-viewer-2.0.0", path = "/iron-doc-viewer", srcs = [ "iron-doc-property-styles.html", @@ -245,16 +263,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_prism_element", ], ) - + web_library_external( name = "org_polymer_iron_dropdown", licenses = ["notice"], # BSD-3-Clause - sha256 = "f7e4a31d096d10d8af1920397695cb17f3eb1cbe5e5ff91a861dabfcc085f376", + sha256 = "db9d6598157f8b114f1be1e6ed1c74917d4c37e660b2dda1e31f6873f1a33b80", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-dropdown/archive/v1.4.0.tar.gz", - "https://github.com/PolymerElements/iron-dropdown/archive/v1.4.0.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-dropdown/archive/v1.5.5.tar.gz", + "https://github.com/PolymerElements/iron-dropdown/archive/v1.5.5.tar.gz", ], - strip_prefix = "iron-dropdown-1.4.0", + strip_prefix = "iron-dropdown-1.5.5", path = "/iron-dropdown", srcs = [ "iron-dropdown.html", @@ -269,7 +287,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_neon_animation", ], ) - + web_library_external( name = "org_polymer_iron_fit_behavior", licenses = ["notice"], # BSD-3-Clause @@ -283,7 +301,7 @@ def tensorboard_polymer_workspace(): srcs = ["iron-fit-behavior.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_flex_layout", licenses = ["notice"], # BSD-3-Clause @@ -302,7 +320,7 @@ def tensorboard_polymer_workspace(): ], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_form_element_behavior", licenses = ["notice"], # BSD-3-Clause @@ -316,7 +334,7 @@ def tensorboard_polymer_workspace(): srcs = ["iron-form-element-behavior.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_icon", licenses = ["notice"], # BSD-3-Clause @@ -334,7 +352,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_meta", ], ) - + web_library_external( name = "org_polymer_iron_icons", licenses = ["notice"], # BSD-3-Clause @@ -363,16 +381,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_iconset_svg", ], ) - + web_library_external( name = "org_polymer_iron_iconset_svg", licenses = ["notice"], # BSD-3-Clause - sha256 = "7e3925b7e63a7d22524c4b43ce16ab80d06a576649644783643c11a003284368", + sha256 = "75cfb41e78f86ef6cb5d201ad12021785ef9e192b490ad46dcc15a9c19bdf71a", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-iconset-svg/archive/v1.1.0.tar.gz", - "https://github.com/PolymerElements/iron-iconset-svg/archive/v1.1.0.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-iconset-svg/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-iconset-svg/archive/v2.0.0.tar.gz", ], - strip_prefix = "iron-iconset-svg-1.1.0", + strip_prefix = "iron-iconset-svg-2.0.0", path = "/iron-iconset-svg", srcs = ["iron-iconset-svg.html"], deps = [ @@ -380,7 +398,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_meta", ], ) - + web_library_external( name = "org_polymer_iron_input", licenses = ["notice"], # BSD-3-Clause @@ -398,7 +416,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_validatable_behavior", ], ) - + web_library_external( name = "org_polymer_iron_list", licenses = ["notice"], # BSD-3-Clause @@ -417,16 
+435,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_scroll_target_behavior", ], ) - + web_library_external( name = "org_polymer_iron_menu_behavior", licenses = ["notice"], # BSD-3-Clause - sha256 = "ad27889343bc9a709258b073f69abc028bb1ffd3fdb975cd2d3939f7f5d7bb6c", + sha256 = "35d33d1ae55c6efaa0c3744ebe8a06cc0a8b2af9286dd8d36e20726a8540a11a", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-menu-behavior/archive/v1.1.10.tar.gz", - "https://github.com/PolymerElements/iron-menu-behavior/archive/v1.1.10.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-menu-behavior/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/iron-menu-behavior/archive/v2.0.0.tar.gz", ], - strip_prefix = "iron-menu-behavior-1.1.10", + strip_prefix = "iron-menu-behavior-2.0.0", path = "/iron-menu-behavior", srcs = [ "iron-menu-behavior.html", @@ -438,7 +456,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_selector", ], ) - + web_library_external( name = "org_polymer_iron_meta", licenses = ["notice"], # BSD-3-Clause @@ -452,7 +470,7 @@ def tensorboard_polymer_workspace(): srcs = ["iron-meta.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_overlay_behavior", licenses = ["notice"], # BSD-3-Clause @@ -476,7 +494,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_resizable_behavior", ], ) - + web_library_external( name = "org_polymer_iron_range_behavior", licenses = ["notice"], # BSD-3-Clause @@ -490,7 +508,7 @@ def tensorboard_polymer_workspace(): srcs = ["iron-range-behavior.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_resizable_behavior", licenses = ["notice"], # BSD-3-Clause @@ -504,21 +522,21 @@ def tensorboard_polymer_workspace(): srcs = ["iron-resizable-behavior.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_scroll_target_behavior", licenses = ["notice"], # BSD-3-Clause - sha256 = "d0de0c804b1ec91d814754144afd9da1cdb082690de88bd5e47fd5f41990746f", + sha256 = "7c6614c07d354375666ee96eea9f6d485dbf7898146a444ec26094a70d4e0afa", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/iron-scroll-target-behavior/archive/v1.0.3.tar.gz", - "https://github.com/PolymerElements/iron-scroll-target-behavior/archive/v1.0.3.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/iron-scroll-target-behavior/archive/v1.1.1.tar.gz", + "https://github.com/PolymerElements/iron-scroll-target-behavior/archive/v1.1.1.tar.gz", ], - strip_prefix = "iron-scroll-target-behavior-1.0.3", + strip_prefix = "iron-scroll-target-behavior-1.1.1", path = "/iron-scroll-target-behavior", srcs = ["iron-scroll-target-behavior.html"], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_selector", licenses = ["notice"], # BSD-3-Clause @@ -537,7 +555,7 @@ def tensorboard_polymer_workspace(): ], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_iron_validatable_behavior", licenses = ["notice"], # BSD-3-Clause @@ -554,7 +572,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_meta", ], ) - + web_library_external( name = "org_polymer_marked", licenses = ["notice"], # MIT @@ -567,16 +585,16 @@ def tensorboard_polymer_workspace(): path = "/marked", srcs = ["lib/marked.js"], ) - + web_library_external( name = "org_polymer_marked_element", licenses = ["notice"], # BSD-3-Clause - sha256 = "7547616df95f8b903757e6afbabfcdba5322c2bcec3f17c726b8bba5adf4bc5f", + sha256 = 
"2ac1f7fae0c1b656e671b2772492809714d836f27c9efa7f2c5fe077ee760f3c", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/marked-element/archive/v1.1.3.tar.gz", - "https://github.com/PolymerElements/marked-element/archive/v1.1.3.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/marked-element/archive/v2.1.0.tar.gz", + "https://github.com/PolymerElements/marked-element/archive/v2.1.0.tar.gz", ], - strip_prefix = "marked-element-1.1.3", + strip_prefix = "marked-element-2.1.0", path = "/marked-element", srcs = [ "marked-element.html", @@ -587,7 +605,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_marked", ], ) - + web_library_external( name = "org_polymer_neon_animation", licenses = ["notice"], # BSD-3-Clause @@ -636,7 +654,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_web_animations_js", ], ) - + web_library_external( name = "org_polymer_paper_behaviors", licenses = ["notice"], # BSD-3-Clause @@ -660,7 +678,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_ripple", ], ) - + web_library_external( name = "org_polymer_paper_button", licenses = ["notice"], # BSD-3-Clause @@ -680,16 +698,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_ripple", ], ) - + web_library_external( name = "org_polymer_paper_checkbox", licenses = ["notice"], # BSD-3-Clause - sha256 = "6828a6954a048b1230fbd2606faffbae950ba1d042175b96ec50ae355786a166", + sha256 = "0a291d0c64de1b6b807d66697bead9c66c0d7bc3c68b8037e6667f3d66a5904c", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-checkbox/archive/v1.4.0.tar.gz", - "https://github.com/PolymerElements/paper-checkbox/archive/v1.4.0.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-checkbox/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/paper-checkbox/archive/v2.0.0.tar.gz", ], - strip_prefix = "paper-checkbox-1.4.0", + strip_prefix = "paper-checkbox-2.0.0", path = "/paper-checkbox", srcs = ["paper-checkbox.html"], deps = [ @@ -698,16 +716,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_dialog", licenses = ["notice"], # BSD-3-Clause - sha256 = "c6a9709e7f528d03dcd574503c18b72d4751ca30017346d16e6a791d37ed9259", + sha256 = "cf60d3aa6ad57ba4eb8b1c16713e65057735eed94b009aeebdbcf3436c95a161", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-dialog/archive/v1.0.4.tar.gz", - "https://github.com/PolymerElements/paper-dialog/archive/v1.0.4.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-dialog/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/paper-dialog/archive/v2.0.0.tar.gz", ], - strip_prefix = "paper-dialog-1.0.4", + strip_prefix = "paper-dialog-2.0.0", path = "/paper-dialog", srcs = ["paper-dialog.html"], deps = [ @@ -716,16 +734,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_dialog_behavior", ], ) - + web_library_external( name = "org_polymer_paper_dialog_behavior", licenses = ["notice"], # BSD-3-Clause - sha256 = "a7e0e27ce63554bc14f384cf94bcfa24da8dc5f5120dfd565f45e166261aee40", + sha256 = "d78e4f7d008c22537a9255ccda1e919fddae5cc125ef26a66eb2c47f648c20ab", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-dialog-behavior/archive/v1.2.5.tar.gz", - "https://github.com/PolymerElements/paper-dialog-behavior/archive/v1.2.5.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-dialog-behavior/archive/v1.2.7.tar.gz", + 
"https://github.com/PolymerElements/paper-dialog-behavior/archive/v1.2.7.tar.gz", ], - strip_prefix = "paper-dialog-behavior-1.2.5", + strip_prefix = "paper-dialog-behavior-1.2.7", path = "/paper-dialog-behavior", srcs = [ "paper-dialog-behavior.html", @@ -739,7 +757,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_dialog_scrollable", licenses = ["notice"], # BSD-3-Clause @@ -758,7 +776,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_dropdown_menu", licenses = ["notice"], # BSD-3-Clause @@ -790,7 +808,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_header_panel", licenses = ["notice"], # BSD-3-Clause @@ -807,7 +825,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_flex_layout", ], ) - + web_library_external( name = "org_polymer_paper_icon_button", licenses = ["notice"], # BSD-3-Clause @@ -829,7 +847,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_input", licenses = ["notice"], # BSD-3-Clause @@ -860,7 +878,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_item", licenses = ["notice"], # BSD-3-Clause @@ -885,16 +903,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_listbox", licenses = ["notice"], # BSD-3-Clause - sha256 = "3cb35f4fe9a3f15185a9e91711dba8f27e9291c8cd371ebf1be21b8f1d5f65fb", + sha256 = "674992d882b18a0618fa697180f196dbc052fb2f5d9ce4e19026a918b568ffd6", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-listbox/archive/v1.1.2.tar.gz", - "https://github.com/PolymerElements/paper-listbox/archive/v1.1.2.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-listbox/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/paper-listbox/archive/v2.0.0.tar.gz", ], - strip_prefix = "paper-listbox-1.1.2", + strip_prefix = "paper-listbox-2.0.0", path = "/paper-listbox", srcs = ["paper-listbox.html"], deps = [ @@ -903,16 +921,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_material", licenses = ["notice"], # BSD-3-Clause - sha256 = "09f6c8bd6ddbea2be541dc86306efe41cdfb31bec0b69d35a5dc29772bbc8506", + sha256 = "913e9c63cf5c8286b0fab817079d7dc900a343d2c05809995d8d9ba0e41f8a29", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-material/archive/v1.0.6.tar.gz", - "https://github.com/PolymerElements/paper-material/archive/v1.0.6.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-material/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/paper-material/archive/v2.0.0.tar.gz", ], - strip_prefix = "paper-material-1.0.6", + strip_prefix = "paper-material-2.0.0", path = "/paper-material", srcs = [ "paper-material.html", @@ -923,7 +941,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_menu", licenses = ["notice"], # BSD-3-Clause @@ -948,7 +966,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_menu_button", licenses = ["notice"], # BSD-3-Clause @@ -972,7 +990,7 @@ def 
tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_progress", licenses = ["notice"], # BSD-3-Clause @@ -991,7 +1009,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_radio_button", licenses = ["notice"], # BSD-3-Clause @@ -1009,7 +1027,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_radio_group", licenses = ["notice"], # BSD-3-Clause @@ -1028,7 +1046,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_radio_button", ], ) - + web_library_external( name = "org_polymer_paper_ripple", licenses = ["notice"], # BSD-3-Clause @@ -1045,16 +1063,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_a11y_keys_behavior", ], ) - + web_library_external( name = "org_polymer_paper_slider", licenses = ["notice"], # BSD-3-Clause - sha256 = "08e7c541dbf5d2e959208810bfc03188e82ced87e4d30d325172967f67962c3c", + sha256 = "8bb4db532e8b11b11f78006d9aefa217841392c1aeb449ee2a12e3b56748b774", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-slider/archive/v1.0.10.tar.gz", - "https://github.com/PolymerElements/paper-slider/archive/v1.0.10.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-slider/archive/v1.0.14.tar.gz", + "https://github.com/PolymerElements/paper-slider/archive/v1.0.14.tar.gz", ], - strip_prefix = "paper-slider-1.0.10", + strip_prefix = "paper-slider-1.0.14", path = "/paper-slider", srcs = ["paper-slider.html"], deps = [ @@ -1069,7 +1087,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_spinner", licenses = ["notice"], # BSD-3-Clause @@ -1090,7 +1108,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_styles", licenses = ["notice"], # BSD-3-Clause @@ -1121,7 +1139,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_flex_layout", ], ) - + web_library_external( name = "org_polymer_paper_tabs", licenses = ["notice"], # BSD-3-Clause @@ -1150,16 +1168,16 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_toast", licenses = ["notice"], # BSD-3-Clause - sha256 = "55f623712ed1f2bae6d6fadc522a2458e083ccd44cc0a907672547e7b10758a9", + sha256 = "b1c677e1681ef8d3f688a83da8f7b263902f757f395a9354a1c35f93b9125b60", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/paper-toast/archive/v1.3.0.tar.gz", - "https://github.com/PolymerElements/paper-toast/archive/v1.3.0.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/paper-toast/archive/v2.0.0.tar.gz", + "https://github.com/PolymerElements/paper-toast/archive/v2.0.0.tar.gz", ], - strip_prefix = "paper-toast-1.3.0", + strip_prefix = "paper-toast-2.0.0", path = "/paper-toast", srcs = ["paper-toast.html"], deps = [ @@ -1168,7 +1186,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_iron_overlay_behavior", ], ) - + web_library_external( name = "org_polymer_paper_toggle_button", licenses = ["notice"], # BSD-3-Clause @@ -1187,7 +1205,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name = "org_polymer_paper_toolbar", licenses = ["notice"], # BSD-3-Clause @@ -1205,7 +1223,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_paper_styles", ], ) - + web_library_external( name 
= "org_polymer_paper_tooltip", licenses = ["notice"], # BSD-3-Clause @@ -1222,24 +1240,7 @@ def tensorboard_polymer_workspace(): "@org_polymer_neon_animation", ], ) - - web_library_external( - name = "org_polymer", - licenses = ["notice"], # BSD-3-Clause - sha256 = "07a9e62ffb52193da3af09adda2fbac5cc690439978520e2d03e783863f65f91", - strip_prefix = "polymer-1.7.0", - urls = [ - "http://mirror.bazel.build/github.com/polymer/polymer/archive/v1.7.0.tar.gz", - "https://github.com/polymer/polymer/archive/v1.7.0.tar.gz", - ], - path = "/polymer", - srcs = [ - "polymer.html", - "polymer-micro.html", - "polymer-mini.html", - ], - ) - + web_library_external( name = "org_polymer_prism", licenses = ["notice"], # MIT @@ -1255,27 +1256,28 @@ def tensorboard_polymer_workspace(): "themes/prism.css", ], ) - + web_library_external( name = "org_polymer_prism_element", licenses = ["notice"], # BSD-3-Clause - sha256 = "ad70bf9cd5bbdf525d465e1b0658867ab4022193eb9c74087a839044b46312b4", + sha256 = "c5c03c17520d4992a3576e397aa7a375e64c7f6794ec2af58031f47eef458945", urls = [ - "http://mirror.bazel.build/github.com/PolymerElements/prism-element/archive/1.0.4.tar.gz", - "https://github.com/PolymerElements/prism-element/archive/1.0.4.tar.gz", + "http://mirror.bazel.build/github.com/PolymerElements/prism-element/archive/v1.2.0.tar.gz", + "https://github.com/PolymerElements/prism-element/archive/v1.2.0.tar.gz", ], - strip_prefix = "prism-element-1.0.4", + strip_prefix = "prism-element-1.2.0", path = "/prism-element", srcs = [ "prism-highlighter.html", "prism-import.html", + "prism-theme-default.html", ], deps = [ "@org_polymer", "@org_polymer_prism", ], ) - + web_library_external( name = "org_polymer_promise_polyfill", licenses = ["notice"], # BSD-3-Clause @@ -1294,7 +1296,7 @@ def tensorboard_polymer_workspace(): ], deps = ["@org_polymer"], ) - + web_library_external( name = "org_polymer_web_animations_js", licenses = ["notice"], # BSD-3-Clause @@ -1307,16 +1309,16 @@ def tensorboard_polymer_workspace(): path = "/web-animations-js", srcs = ["web-animations-next-lite.min.js"], ) - + web_library_external( name = "org_polymer_webcomponentsjs", licenses = ["notice"], # BSD-3-Clause - sha256 = "138c43306ee0a6d699ddca9b3c6b0f4982974ea8b7bdad291ea7276c72301df9", + sha256 = "1f58decac693deb926e6b62b5dbd459fb7c2e961f7241e6e646d1cd9a60281d2", urls = [ - "http://mirror.bazel.build/github.com/webcomponents/webcomponentsjs/archive/v0.7.22.tar.gz", - "https://github.com/webcomponents/webcomponentsjs/archive/v0.7.22.tar.gz", + "http://mirror.bazel.build/github.com/webcomponents/webcomponentsjs/archive/v0.7.23.tar.gz", + "https://github.com/webcomponents/webcomponentsjs/archive/v0.7.23.tar.gz", ], - strip_prefix = "webcomponentsjs-0.7.22", + strip_prefix = "webcomponentsjs-0.7.23", path = "/webcomponentsjs", srcs = [ "CustomElements.js", diff --git a/third_party/typings.bzl b/third_party/typings.bzl index d0c9eddbb3f..e3931fa68a8 100644 --- a/third_party/typings.bzl +++ b/third_party/typings.bzl @@ -25,10 +25,6 @@ def tensorboard_typings_workspace(): "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/ebc69904eb78f94030d5d517b42db20867f679c0/chai/chai.d.ts", "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/ebc69904eb78f94030d5d517b42db20867f679c0/chai/chai.d.ts", ], - "177293828c7a206bf2a7f725753d51396d38668311aa37c96445f91bbf8128a7": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/6e2f2280ef16ef277049d0ce8583af167d586c59/d3/d3.d.ts", # v3 - 
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/6e2f2280ef16ef277049d0ce8583af167d586c59/d3/d3.d.ts", # v3 - ], "e4cd3d5de0eb3bc7b1063b50d336764a0ac82a658b39b5cf90511f489ffdee60": [ "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/efd40e67ff323f7147651bdbef03c03ead7b1675/lodash/lodash.d.ts", "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/efd40e67ff323f7147651bdbef03c03ead7b1675/lodash/lodash.d.ts", @@ -43,11 +39,11 @@ def tensorboard_typings_workspace(): ], "44eba36339bd1c0792072b7b204ee926fe5ffe1e9e2da916e67ac55548e3668a": [ "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/a872802c0c84ba98ff207d5e673a1fa867c67fd6/polymer/polymer.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/a872802c0c84ba98ff207d5e673a1fa867c67fd6/polymer/polymer.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/a872802c0c84ba98ff207d5e673a1fa867c67fd6/polymer/polymer.d.ts", # 2016-09-22 ], - "9453c3e6bae824e90758c3b38975c1ed77e6abd79bf513bcb08368fcdb14898e": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/f5407eba29c04fb8387c86df27512bd055b195d2/threejs/three.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/f5407eba29c04fb8387c86df27512bd055b195d2/threejs/three.d.ts", + "7ce67447146eb2b9e9cdaaf8bf45b3209865378022cc8acf86616d3be84f6481": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/8cb9ee3fdfe352cfef672bdfdb5f9c428f915e9f/threejs/three.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/8cb9ee3fdfe352cfef672bdfdb5f9c428f915e9f/threejs/three.d.ts", # r74 @ 2016-04-06 ], "691756a6eb455f340c9e834de0d49fff269e7b8c1799c2454465dcd6a4435b80": [ "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/46719185c564694c5583c4b7ad94dbb786ecad46/webcomponents.js/webcomponents.js.d.ts", @@ -55,311 +51,311 @@ def tensorboard_typings_workspace(): ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_array", licenses = ["notice"], # MIT sha256_urls = { "61e7abb7b1f01fbcb0cab8cf39003392f422566209edd681fbd070eaa84ca000": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-array/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-array/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-array/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-array/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_axis", licenses = ["notice"], # MIT sha256_urls = { "95f75c8dcc89850b2e72581d96a7b5f46ea4ac852f828893f141f14a597421f9": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-axis/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-axis/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-axis/index.d.ts", + 
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-axis/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_brush", licenses = ["notice"], # MIT sha256_urls = { "a2738e693ce8a8640c2d29001e77582c9c361fd23bda44db471629866b60ada7": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-brush/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-brush/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-brush/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-brush/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_chord", licenses = ["notice"], # MIT sha256_urls = { "c54d24756eb6d744b31e538ad9bab3a75f6d54e2288b29cc72338d4a057d3e83": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-chord/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-chord/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-chord/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-chord/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_collection", licenses = ["notice"], # MIT sha256_urls = { - "f987667167b1d2970911247e325eb1c37ca0823646f81ccec837ae59039822f7": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-collection/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-collection/index.d.ts", + "39e8599a768f45f80aa70ca3032f026111da50d409c7e39a2ef091667cc343d9": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-collection/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-collection/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_color", licenses = ["notice"], # MIT sha256_urls = { - "9580c81f38ddcce7be0ac9bd3d0d083adebc34e17441709f90b9e4dcd1c19a56": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-color/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-color/index.d.ts", + "6dd19edd11276476c5d535279237d1a009c1a733611cc44621a88fda1ca04377": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-color/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-color/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = 
"org_definitelytyped_types_d3_dispatch", licenses = ["notice"], # MIT sha256_urls = { - "169f80b4cceca8e2e9ed384d81a5db0624cc01a26451dfb5a7e0cec6ea9cfb06": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-dispatch/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-dispatch/index.d.ts", + "af1474301e594fcb4bbdb134361fb6d26c7b333386c3213821532acde59e61a3": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-dispatch/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-dispatch/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_drag", licenses = ["notice"], # MIT sha256_urls = { - "08d35d139dde58c2722be98d718d01204fd6167d310f09b379e832f3c741489d": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-drag/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-drag/index.d.ts", + "2f8248ae2bf33fb1d61bb1ea4271cb4bacfd9a9939dc8d7bde7ec8b66d4441ed": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-drag/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-drag/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_dsv", licenses = ["notice"], # MIT sha256_urls = { "62594d00cf9e4bb895339c8e56f64330e202a5eb2a0fa580a1f6e6336f2c93ce": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-dsv/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-dsv/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-dsv/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-dsv/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_ease", licenses = ["notice"], # MIT sha256_urls = { - "d1cf8f99b7bf758c2ba3c0a4ce553e151d4d9b4cf45a6e8bd0edec7ce90f725b": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-ease/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-ease/index.d.ts", + "d5a9be5316b2d1823a3faa7f75de1e2c2efda5c75f0631b44a0f7b69e11f3a90": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-ease/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-ease/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_force", licenses = ["notice"], # MIT sha256_urls = { "288421e2008668d2076a4684657dd3d29b992832ef02c552981eb94a91042553": [ - 
"http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-force/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-force/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-force/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-force/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_format", licenses = ["notice"], # MIT sha256_urls = { "b42cb17e580c1fd0b64d478f7bd80ca806efaefda24426a833cf1f30a7275bca": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-format/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-format/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-format/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-format/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_hierarchy", licenses = ["notice"], # MIT sha256_urls = { "a5683f5835d8716c6b89c075235078438cfab5897023ed720bfa492e244e969e": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-hierarchy/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-hierarchy/index.d.ts", + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-hierarchy/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-hierarchy/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_interpolate", licenses = ["notice"], # MIT sha256_urls = { - "590a71b741323ac3139b333ec8b743e24717fdd5b32bcff48ee521162a9dfe1c": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-interpolate/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-interpolate/index.d.ts", + "effeefea9ac02539def43d7b9aa2f39e8672c03aac9b407a61b09563ff141fad": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-interpolate/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-interpolate/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_path", licenses = ["notice"], # MIT sha256_urls = { - "96f35ba041bcaa265e2b373ee675177410d44d31c980e4f7fbeefd4bcba15b00": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-path/index.d.ts", - 
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-path/index.d.ts", + "deea4ab3654925d365dd1ffab69a2140808c6173e7f23c461ded2852c309eb9c": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-path/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-path/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_polygon", licenses = ["notice"], # MIT sha256_urls = { - "ce453451e8105cac6a4f4a4263ca2142ebb4bf442e342f470a81da691f220fcb": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-polygon/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-polygon/index.d.ts", + "ec7a42affe79c87066f14173fcbc8d8b5747f54bfbe0e60111e2786ee4d227bf": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-polygon/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-polygon/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_quadtree", licenses = ["notice"], # MIT sha256_urls = { - "238e278f1be5d6985a19800800cffee80f81199f71d848e3bbc288d1791a6f90": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-quadtree/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-quadtree/index.d.ts", + "2908631a7da3bfb0096e3b89f464b45390bbb31ec798d1b6c0898ff82e344560": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-quadtree/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-quadtree/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_queue", licenses = ["notice"], # MIT sha256_urls = { - "e6ae19aad83495475653578de64fb9d6bf9764eda6c84d70f7935ec84bcc482e": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-queue/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-queue/index.d.ts", + "4fc0503e3558d136b855335f36ea8984937ab63a2a28b8c7b293d35825388615": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-queue/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-queue/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_random", licenses = ["notice"], # MIT sha256_urls = { - "d31b92ed86c23ec0a4776f99fa81ff033c95b96c8304d8aa9baf3b94af779aa8": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-random/index.d.ts", - 
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-random/index.d.ts", + "5130e803ba26d2dc931ddd0fa574b5abbb0fc4486e7975f97a83c01630763676": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-random/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-random/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_request", licenses = ["notice"], # MIT sha256_urls = { - "44bb7b07d977028e6567540a3303b06fc9b33fb0960bc75c520e0733c840d89f": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-request/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-request/index.d.ts", + "fc2b7c2c05498011eb039825aab76a7916698fb3e7133e278fc92ae529ae99f0": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-request/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-request/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_scale", licenses = ["notice"], # MIT sha256_urls = { - "02ce7c644ba34bd1abb84da2e832f248b048b6a23812be4365bd837f186c9f1f": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-scale/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-scale/index.d.ts", + "ff3e2d2033a37d698c3bd2896ffd9dd4ceab1903d96aa90d388a6a2d14d8ee05": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-scale/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-scale/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_selection", licenses = ["notice"], # MIT sha256_urls = { - "699043ddb28dfa5e46d87bc6a24cfc6d604237f298259d3fb3c7066e05e8c86e": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-selection/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-selection/index.d.ts", + "47fae7c4bc425101490daae067727b74ee09e6c830331a4cf333cdb532a5d108": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-selection/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-selection/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_shape", licenses = ["notice"], # MIT sha256_urls = { - "62668a7aaaf6232762b544f9f89c0f557ca7cfb0cd343a358dda7ecbe26f5739": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-shape/index.d.ts", - 
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-shape/index.d.ts", + "7fec580ba54bc29417dc9030bb3731c9756a65c5e57dcce5a4f183fff7180cd8": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-shape/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-shape/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_time", licenses = ["notice"], # MIT sha256_urls = { - "0502490ce682fd9265fb1d5d693ce6cd82e3b05e5f5ee3433731266ecb03d5fc": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-time/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-time/index.d.ts", + "4b68f2a4ee428f21f2e7d706c0a64f628f0ff5f130cd9f023ab23a04a8fe31de": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-time/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-time/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_timer", licenses = ["notice"], # MIT sha256_urls = { - "6f191f9aea704aa64b1defa40dfdff1447a6e6bb815feff1660f894500a9c94d": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-timer/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-timer/index.d.ts", + "a196f42560be9fa1a77d473c0180f9f2f8d570ed0eee616aad0da94d90ef3661": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-timer/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-timer/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_transition", licenses = ["notice"], # MIT sha256_urls = { - "a0a7c0c9bfb5c7d6d9d22a8d16b4484b66d13f2ed226954037546cb3da4098ba": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-transition/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-transition/index.d.ts", + "10c6cf259d6f965014e75a63925f302911c5afb8581d6d63b0597544fe104bd7": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-transition/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-transition/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_voronoi", licenses = ["notice"], # MIT sha256_urls = { - "c6bd5f229f915151d0ef678fe50b1aa6a62334ea0a8c6fc0effbac9f7032efc7": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-voronoi/index.d.ts", - 
"https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-voronoi/index.d.ts", + "411482515e2ccda4659f7b3d2fbd3a7ef5ea2c7053eec62c95a174b68ad60c3d": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-voronoi/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-voronoi/index.d.ts", # 2017-06-08 ], }, ) - + filegroup_external( name = "org_definitelytyped_types_d3_zoom", licenses = ["notice"], # MIT sha256_urls = { - "a25dc17fbd304cf7a0e5e7bbb8339c930d464eb40c4d6e5f839ce9c0191f4110": [ - "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-zoom/index.d.ts", - "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/1550dfd1b8e38d9bf104b3fd16ea9bf98a2b358e/types/d3-zoom/index.d.ts", + "df0bedbb7711366a43418d6a3b47c4688ccb02a3d8ad0c2468cafcb6c2faa346": [ + "http://mirror.bazel.build/raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-zoom/index.d.ts", + "https://raw.githubusercontent.com/DefinitelyTyped/DefinitelyTyped/dc27c3788c00d279ae5ff61e8e2dfd568aae5e8e/types/d3-zoom/index.d.ts", # 2017-06-08 ], }, ) From 3c393ac87e5bae55bd6e2f7963ee9644782cfa9a Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 13 Jun 2017 18:28:27 -0700 Subject: [PATCH 004/180] [XLA] Add missing documentation for HLO Clamp operation. PiperOrigin-RevId: 158928626 --- .../performance/xla/operation_semantics.md | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tensorflow/docs_src/performance/xla/operation_semantics.md b/tensorflow/docs_src/performance/xla/operation_semantics.md index b970aa5f5fe..ed055691ce7 100644 --- a/tensorflow/docs_src/performance/xla/operation_semantics.md +++ b/tensorflow/docs_src/performance/xla/operation_semantics.md @@ -61,6 +61,42 @@ Invokes a computation with the given arguments. The arity and types of the `args` must match the parameters of the `computation`. It is allowed to have no `args`. +## Clamp + +See also +[`ComputationBuilder::Clamp`](https://www.tensorflow.org/code/tensorflow/compiler/xla/client/computation_builder.h). + +Clamps an operand to within the range between a minimum and maximum value. + + `Clamp(computation, args...)` + +| Arguments | Type | Semantics | +| ------------- | ----------------------- | -------------------------------- | +| `computation` | `Computation` | computation of type `T_0, T_1, | +: : : ..., T_N -> S` with N parameters : +: : : of arbitrary type : +| `operand` | `ComputationDataHandle` | array of type T | +| `min` | `ComputationDataHandle` | array of type T | +| `max` | `ComputationDataHandle` | array of type T | + +Given an operand and minimum and maximum values, returns the operand if it is in +the range between the minimum and maximum, else returns the minimum value if the +operand is below this range or the maximum value if the operand is above this +range. That is, `clamp(x, a, b) = max(min(x, a), b)`. + +All three arrays must be the same shape. Alternately, as a restricted form of +[broadcasting](broadcasting.md), `min` and/or `max` can be a scalar of type `T`. 
+
+Example with scalar `min` and `max`:
+
+```
+let operand: s32[3] = {-1, 5, 9};
+let min: s32 = 0;
+let max: s32 = 6;
+==>
+Clamp(operand, min, max) = s32[3]{0, 5, 6};
+```
+
 ## Collapse

 See also

From b6039c875290cdd5c9a62e01393b75b928827504 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 13 Jun 2017 18:48:53 -0700
Subject: [PATCH 005/180] We believe a fused version of batch_norm_op can
 speed the algorithm up.

This PR implements a new op, fused_batch_norm_op, in tf-xla and HLO.

This is the CPU implementation for batch norm training. This CL is big, but
much of the code is boilerplate.

PiperOrigin-RevId: 158930166
---
 .../xla/client/computation_builder.cc         |  26 ++
 .../compiler/xla/service/cpu/ir_emitter.cc    | 225 ++++++++++++++++++
 .../compiler/xla/service/cpu/ir_emitter.h     |   1 +
 .../compiler/xla/service/dfs_hlo_visitor.h    |   2 +
 .../service/dfs_hlo_visitor_with_default.h    |   5 +
 .../compiler/xla/service/hlo_cost_analysis.cc |   6 +
 .../compiler/xla/service/hlo_cost_analysis.h  |   1 +
 .../compiler/xla/service/hlo_graph_dumper.cc  |   4 +
 .../compiler/xla/service/hlo_instruction.cc   |  23 ++
 .../compiler/xla/service/hlo_instruction.h    |  25 ++
 tensorflow/compiler/xla/service/hlo_opcode.cc |   2 +
 tensorflow/compiler/xla/service/hlo_opcode.h  |   1 +
 .../xla/service/instruction_fusion.cc         |   1 +
 tensorflow/compiler/xla/service/service.cc    |   4 +
 .../compiler/xla/service/shape_inference.cc   | 103 ++++++++
 .../compiler/xla/service/shape_inference.h    |   7 +
 .../compiler/xla/service/user_computation.cc  |  79 ++++++
 .../compiler/xla/service/user_computation.h   |   4 +
 tensorflow/compiler/xla/xla_data.proto        |  11 +-
 19 files changed, 529 insertions(+), 1 deletion(-)

diff --git a/tensorflow/compiler/xla/client/computation_builder.cc b/tensorflow/compiler/xla/client/computation_builder.cc
index 4035e0f460e..6e95e623ba2 100644
--- a/tensorflow/compiler/xla/client/computation_builder.cc
+++ b/tensorflow/compiler/xla/client/computation_builder.cc
@@ -1407,6 +1407,32 @@ ComputationDataHandle ComputationBuilder::ReduceWindowWithGeneralPadding(
   return ParseOpResponse(s, &response);
 }

+ComputationDataHandle ComputationBuilder::BatchNormTraining(
+    const ComputationDataHandle& operand, const ComputationDataHandle& scale,
+    const ComputationDataHandle& offset, float epsilon, int64 feature_index) {
+  if (!first_error_.ok() || !PrepareComputation().ok()) {
+    return ComputationDataHandle();
+  }
+  BatchNormTrainingRequest request;
+  *request.mutable_operand() = operand;
+  *request.mutable_scale() = scale;
+  *request.mutable_offset() = offset;
+  request.set_epsilon(epsilon);
+  request.set_feature_index(feature_index);
+
+  OpRequest op_request;
+  *op_request.mutable_batch_norm_training_request() = request;
+  *op_request.mutable_computation() = computation_.handle();
+  AddOpMetadata(&op_request);
+
+  OpResponse response;
+
+  VLOG(2) << "making BatchNormTraining request";
+
+  Status s = client_->stub()->Op(&op_request, &response);
+  return ParseOpResponse(s, &response);
+}
+
 ComputationDataHandle ComputationBuilder::CrossReplicaSum(
     const ComputationDataHandle& operand) {
   if (!first_error_.ok() || !PrepareComputation().ok()) {
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index 7ad497ff1a2..94c10a64179 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -1039,6 +1039,231 @@ Status IrEmitter::HandleCrossReplicaSum(HloInstruction* crs) {
       "Cross replica sum not implemented on CPU.
See b/33011107."); } +// Fills up the free variables in 'index_with_free_var' with values from +// 'filler_index'. The size of free variables must be the same as the +// size of 'filler_index'. +// +// This is often used after dimension reduction, where +// 'index_with_free_var' has one or more dimensions reduced, which serves as +// free variables (represented as nullptr). For example, if we have a 4 +// dimensional input and index for the dimension being reduced is +// 2 (third dimension), we will have an index like [i, j, NULL, k] +// after reduced dimension. +// +// Here we fill up that free variable by 'filler_index', which contains +// the value in the reduced dimension. +static llvm_ir::IrArray::Index FillReducedDimensionIndex( + llvm_ir::IrArray::Index index_with_free_var, + llvm_ir::IrArray::Index filler_index) { + llvm_ir::IrArray::Index::const_iterator it = filler_index.begin(); + + for (size_t i = 0; i < index_with_free_var.size(); ++i) { + if (index_with_free_var[i] == nullptr) { + index_with_free_var[i] = *it++; + } + } + CHECK(filler_index.end() == it); + return index_with_free_var; +} + +Status IrEmitter::HandleBatchNormTraining(HloInstruction* batch_norm_training) { + // The output of BatchNormTraining is a tuple of three element: + // - An N-dimensional array containing normalized values. + // - A 1 dimensional array containing the mean value for each feature. + // - A 1 dimensional array containing the variance value for each feature. + HloInstruction* operand = batch_norm_training->operands()[0]; + HloInstruction* scale = batch_norm_training->operands()[1]; + HloInstruction* offset = batch_norm_training->operands()[2]; + float epsilon = batch_norm_training->epsilon(); + int64 feature_index = batch_norm_training->feature_index(); + TF_RET_CHECK(ShapeUtil::IsTuple(batch_norm_training->shape()) && + ShapeUtil::TupleElementCount(batch_norm_training->shape()) == 3); + + const Shape& output_shape = + ShapeUtil::GetTupleElementShape(batch_norm_training->shape(), 0); + const Shape& feature_shape = + ShapeUtil::GetTupleElementShape(batch_norm_training->shape(), 1); + + // Reduce vector of the non-feature dimensions. + std::vector dimensions_to_reduce; + + for (int64 i = 0; i < operand->shape().dimensions_size(); ++i) { + if (i != feature_index) { + dimensions_to_reduce.push_back(i); + } + } + + // Get the second and third allocations in the output tuple, which should be + // used to store the result of mean and variance value calculation. + TF_ASSIGN_OR_RETURN( + const BufferAllocation::Slice slice_mean, + assignment_.GetUniqueSlice(batch_norm_training, /*index=*/{1})); + TF_ASSIGN_OR_RETURN( + const BufferAllocation::Slice slice_var, + assignment_.GetUniqueSlice(batch_norm_training, /*index=*/{2})); + const int feature_count = output_shape.dimensions(feature_index); + const int size_in_elements = ShapeUtil::ElementsIn(output_shape); + TF_RET_CHECK(ShapeUtil::ElementsIn(operand->shape()) == size_in_elements); + const int elements_per_feature = size_in_elements / feature_count; + + llvm::Value* mean = EmitTempBufferPointer(slice_mean, feature_shape); + llvm_ir::IrArray mean_array(mean, feature_shape); + + llvm::Value* var = EmitTempBufferPointer(slice_var, feature_shape); + llvm_ir::IrArray var_array(var, feature_shape); + + // This loop calculates mean and variance for each feature. + // + // In theory this could be swapped by multi-output fusion. We will evaluate + // this when it's ready. 
+ // + // For variance calculation, we use a simplified formula so we can fuse the + // computation into the same loop to calculate mean: Var=E(X^2) - E(X)^2. + TF_RETURN_IF_ERROR( + llvm_ir::LoopEmitter( + [this, operand, dimensions_to_reduce, feature_shape, var_array, + elements_per_feature](const llvm_ir::IrArray::Index& index) { + PrimitiveType element_type = operand->shape().element_type(); + // Used to calculate E(X). + llvm::Value* sum_address = llvm_ir::EmitAllocaAtFunctionEntry( + llvm_ir::PrimitiveTypeToIrType(element_type, &ir_builder_), + "sum_address", &ir_builder_, + MinimumAlignmentForPrimitiveType(element_type)); + + // Used to calculate E(X^2). + llvm::Value* sum_square_address = + llvm_ir::EmitAllocaAtFunctionEntry( + llvm_ir::PrimitiveTypeToIrType(element_type, &ir_builder_), + "sum_square_address", &ir_builder_, + MinimumAlignmentForPrimitiveType(element_type)); + + ir_builder_.CreateStore( + llvm::ConstantFP::get(ir_builder_.getFloatTy(), 0.0), + sum_address); + + ir_builder_.CreateStore( + llvm::ConstantFP::get(ir_builder_.getFloatTy(), 0.0), + sum_square_address); + + llvm_ir::ForLoopNest loops(&ir_builder_); + + const llvm_ir::IrArray::Index reduced_dims_index = + loops.AddLoopsForShapeOnDimensions( + operand->shape(), dimensions_to_reduce, "reduction_dim"); + + SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), + &ir_builder_); + + llvm_ir::IrArray operand_array(GetIrArrayForOp(operand)); + llvm_ir::IrArray::Index input_index = + FillReducedDimensionIndex(reduced_dims_index, index); + llvm::Value* new_value = + operand_array.EmitReadArrayElement(input_index, &ir_builder_); + + llvm::Value* new_value_square = + ir_builder_.CreateFMul(new_value, new_value); + + llvm::Value* current_sum = ir_builder_.CreateLoad(sum_address); + llvm::Value* current_sum_square = + ir_builder_.CreateLoad(sum_square_address); + // Update sum. + ir_builder_.CreateStore( + ir_builder_.CreateFAdd(current_sum, new_value), sum_address); + + // Update sum square. + ir_builder_.CreateStore( + ir_builder_.CreateFAdd(current_sum_square, new_value_square), + sum_square_address); + + SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), + &ir_builder_); + + llvm::Value* sum = ir_builder_.CreateLoad(sum_address); + llvm::Value* elements_per_feature_value = llvm::ConstantFP::get( + ir_builder_.getFloatTy(), elements_per_feature); + llvm::Value* mean = + ir_builder_.CreateFDiv(sum, elements_per_feature_value); + llvm::Value* mean_square = ir_builder_.CreateFMul(mean, mean); + llvm::Value* sum_square = + ir_builder_.CreateLoad(sum_square_address); + + // Var=E(X^2) - E(X)^2. 
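+                // This follows from Var(X) = E[(X - E(X))^2]
+                //                          = E(X^2) - 2 E(X)^2 + E(X)^2
+                //                          = E(X^2) - E(X)^2.
+                // Note that this one-pass form can lose precision when
+                // E(X^2) and E(X)^2 are large and nearly equal.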
+ llvm::Value* var = ir_builder_.CreateFSub( + ir_builder_.CreateFDiv(sum_square, elements_per_feature_value), + mean_square); + + var_array.EmitWriteArrayElement(index, var, &ir_builder_); + return mean; + }, + mean_array, &ir_builder_) + .EmitLoop()); + + TF_ASSIGN_OR_RETURN(llvm::Value * target_address, + EmitTargetAddressForOp(batch_norm_training)); + + TF_ASSIGN_OR_RETURN( + const BufferAllocation::Slice slice, + assignment_.GetUniqueSlice(batch_norm_training, /*index=*/{0})); + + llvm::Value* normalized = EmitTempBufferPointer(slice, output_shape); + + llvm_ir::IrArray target_array(normalized, output_shape); + + AddAliasingInformationToIrArray(*batch_norm_training, &target_array); + + TF_RETURN_IF_ERROR( + llvm_ir::LoopEmitter( + [this, mean_array, var_array, epsilon, operand, dimensions_to_reduce, + feature_index, offset, scale](const llvm_ir::IrArray::Index& index) { + // The following logic normalizes the input value, scales and shifts + // it: + // + // normalized = (input - mean) / sqrt(variance + epsilon) + // result = normalized * scale + offset + + // Current index in the feature dimension. + llvm_ir::IrArray::Index feature_index_value(1, + index[feature_index]); + + llvm::Value* mean = mean_array.EmitReadArrayElement( + feature_index_value, &ir_builder_); + llvm::Value* var = var_array.EmitReadArrayElement( + feature_index_value, &ir_builder_); + + llvm_ir::IrArray operand_array(GetIrArrayForOp(operand)); + llvm::Value* input = + operand_array.EmitReadArrayElement(index, &ir_builder_); + + llvm::Value* variance_with_epsilon = ir_builder_.CreateFAdd( + var, llvm::ConstantFP::get(ir_builder_.getFloatTy(), epsilon)); + llvm::Function* func_llvm_sqrt = llvm::Intrinsic::getDeclaration( + module_, llvm::Intrinsic::sqrt, {ir_builder_.getFloatTy()}); + llvm::Value* variance_sqrt = + ir_builder_.CreateCall(func_llvm_sqrt, {variance_with_epsilon}); + llvm::Value* normalized = ir_builder_.CreateFDiv( + ir_builder_.CreateFSub(input, mean), variance_sqrt); + llvm_ir::IrArray offset_array(GetIrArrayForOp(offset)); + llvm::Value* offset = offset_array.EmitReadArrayElement( + feature_index_value, &ir_builder_); + llvm_ir::IrArray scale_array(GetIrArrayForOp(scale)); + llvm::Value* scale = scale_array.EmitReadArrayElement( + feature_index_value, &ir_builder_); + llvm::Value* result = ir_builder_.CreateFAdd( + ir_builder_.CreateFMul(normalized, scale), offset); + + return result; + }, + target_array, &ir_builder_) + .EmitLoop()); + + llvm_ir::EmitTuple( + llvm_ir::IrArray(target_address, batch_norm_training->shape()), + {normalized, mean, var}, &ir_builder_); + emitted_value_[batch_norm_training] = target_address; + + return Status::OK(); +} + Status IrEmitter::HandleParameter(HloInstruction* parameter) { VLOG(2) << "HandleParameter: " << parameter->ToString(); auto param_number = parameter->parameter_number(); diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.h b/tensorflow/compiler/xla/service/cpu/ir_emitter.h index ebb7296a075..4fb86434a17 100644 --- a/tensorflow/compiler/xla/service/cpu/ir_emitter.h +++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.h @@ -106,6 +106,7 @@ class IrEmitter : public DfsHloVisitorWithDefault { HloInstruction* rhs) override; Status HandleConvolution(HloInstruction* convolution, HloInstruction* lhs, HloInstruction* rhs, const Window& window) override; + Status HandleBatchNormTraining(HloInstruction* batch_norm_training) override; Status HandleCrossReplicaSum(HloInstruction* crs) override; Status HandleInfeed(HloInstruction* infeed) override; Status 
HandleOutfeed(HloInstruction* infeed) override; diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h index 78a398f8efa..bfa6f241a30 100644 --- a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h +++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h @@ -225,6 +225,8 @@ class DfsHloVisitor { virtual Status HandleRecv(HloInstruction* recv) = 0; + virtual Status HandleBatchNormTraining(HloInstruction* batchNormTraining) = 0; + // Invoked to inform the visitor that the traversal has completed, and that // the root was "root". virtual Status FinishVisit(HloInstruction* root) = 0; diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h index 6557c3aa8e6..1bcc03bae1a 100644 --- a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h +++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h @@ -50,6 +50,11 @@ class DfsHloVisitorWithDefault : public DfsHloVisitor { HloInstruction* rhs) override { return DefaultAction(hlo); } + + Status HandleBatchNormTraining(HloInstruction* hlo) override { + return DefaultAction(hlo); + } + Status HandleClamp(HloInstruction* clamp, HloInstruction* /*min*/, HloInstruction* /*arg*/, HloInstruction* /*max*/) override { diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.cc b/tensorflow/compiler/xla/service/hlo_cost_analysis.cc index 38cc74b0f1e..7f88474a274 100644 --- a/tensorflow/compiler/xla/service/hlo_cost_analysis.cc +++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.cc @@ -314,6 +314,12 @@ Status HloCostAnalysis::HandleReshape(HloInstruction* reshape) { return Status::OK(); } +Status HloCostAnalysis::HandleBatchNormTraining( + HloInstruction* batchNormTraining) { + // TODO(b/62294698): Implement cost analysis for batch-norm-learning. + return Status::OK(); +} + Status HloCostAnalysis::HandleTranspose(HloInstruction* transpose) { return Status::OK(); } diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.h b/tensorflow/compiler/xla/service/hlo_cost_analysis.h index b2c40f75ca4..30f553a81fc 100644 --- a/tensorflow/compiler/xla/service/hlo_cost_analysis.h +++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.h @@ -83,6 +83,7 @@ class HloCostAnalysis : public DfsHloVisitor { HloInstruction* init_value, tensorflow::gtl::ArraySlice dimensions, HloComputation* function_handle) override; + Status HandleBatchNormTraining(HloInstruction* batchNormTraining) override; Status HandleFusion(HloInstruction* fusion) override; Status HandleCall(HloInstruction* call) override; Status HandleCustomCall(HloInstruction* custom_call, diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc index eb2e5dfb37f..6abc733646c 100644 --- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc +++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc @@ -282,6 +282,10 @@ string InstructionSequenceGraph( // port for each parameter instruction. No need to emit anything in this // case. 
continue; + case HloOpcode::kBatchNormTraining: + StrAppend(&name, " feature_index=", instruction->feature_index()); + color = kPurple; + break; case HloOpcode::kReduce: StrAppend(&name, " dims=", Join(instruction->dimensions(), ",")); color = kPurple; diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc index ecbf1dd1e5d..9e020f93919 100644 --- a/tensorflow/compiler/xla/service/hlo_instruction.cc +++ b/tensorflow/compiler/xla/service/hlo_instruction.cc @@ -369,6 +369,22 @@ HloInstruction::CreateDynamicUpdateSlice(const Shape& shape, return instruction; } +/* static */ std::unique_ptr +HloInstruction::CreateBatchNormTraining(const Shape& shape, + HloInstruction* operand, + HloInstruction* scale, + HloInstruction* offset, float epsilon, + int64 feature_index) { + auto instruction = + WrapUnique(new HloInstruction(HloOpcode::kBatchNormTraining, shape)); + instruction->AppendOperand(operand); + instruction->AppendOperand(scale); + instruction->AppendOperand(offset); + instruction->epsilon_ = epsilon; + instruction->feature_index_ = feature_index; + return instruction; +} + /* static */ std::unique_ptr HloInstruction::CreateSelectAndScatter( const Shape& shape, HloInstruction* operand, HloComputation* select, @@ -841,6 +857,7 @@ std::unique_ptr HloInstruction::CloneWithNewOperands( case HloOpcode::kParameter: return CreateParameter(parameter_number_, shape, parameter_name_); // Unsupported ops for cloning. + case HloOpcode::kBatchNormTraining: case HloOpcode::kRecv: case HloOpcode::kSend: case HloOpcode::kUpdate: @@ -1138,6 +1155,10 @@ bool HloInstruction::Identical( // different HloComputations. ShapeUtil::Compatible(shape(), other.shape()); + case HloOpcode::kBatchNormTraining: + return feature_index() == other.feature_index() && + epsilon() == other.epsilon(); + // A constant is defined by the value in the literal. case HloOpcode::kConstant: return LiteralUtil::Equal(literal(), other.literal()); @@ -1733,6 +1754,8 @@ Status HloInstruction::Visit(DfsHloVisitor* visitor) { switch (opcode_) { case HloOpcode::kAbs: return visitor->HandleAbs(this, operands_[0]); + case HloOpcode::kBatchNormTraining: + return visitor->HandleBatchNormTraining(this); case HloOpcode::kSign: return visitor->HandleSign(this, operands_[0]); case HloOpcode::kConstant: diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h index 522414325e6..f98bafe81ea 100644 --- a/tensorflow/compiler/xla/service/hlo_instruction.h +++ b/tensorflow/compiler/xla/service/hlo_instruction.h @@ -208,6 +208,11 @@ class HloInstruction { const Shape& shape, HloInstruction* operand, HloInstruction* init_value, const Window& window, HloComputation* reduce_computation); + // Creates a batch-norm-training instruction. + static std::unique_ptr CreateBatchNormTraining( + const Shape& shape, HloInstruction* operand, HloInstruction* scale, + HloInstruction* offset, float epsilon, int64 feature_index); + // Creates a scatter computation that scatters the `source` array to the // selected indices of each window. static std::unique_ptr CreateSelectAndScatter( @@ -527,6 +532,18 @@ class HloInstruction { // Precondition: opcode() == HloOpcode::kSend or HloOpcode::kRecv int64 channel_id() const { return channel_id_; } + // Returns feature_index field associated with the instruction. The index + // represents the index of the feature dimension. 
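+  // For example, for an operand laid out as [batch, y, x, feature], the
+  // feature dimension is 3.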
+  //
+  // Precondition: opcode() == HloOpcode::kBatchNormTraining
+  int64 feature_index() const { return feature_index_; }
+
+  // Returns the epsilon value associated with the instruction. This is a
+  // small number added to the variance to avoid a divide-by-zero error.
+  //
+  // Precondition: opcode() == HloOpcode::kBatchNormTraining
+  float epsilon() const { return epsilon_; }
+
   // Returns the infeed configuration string. The infeed configuration includes
   // any metadata needed for the backend compiler (e.g., infeed buffer address)
   // and is target-dependent.
@@ -923,6 +940,14 @@ class HloInstruction {
   // Only present for kRng.
   RandomDistribution distribution_;

+  // A small float number added to the variance to avoid a divide-by-zero
+  // error. Only present for kBatchNormTraining.
+  float epsilon_;
+
+  // An integer value representing the index of the feature dimension.
+  // Only present for kBatchNormTraining.
+  int64 feature_index_;
+
   // Represents a unique identifier for each Send/Recv instruction pair.
   // Only present for kSend or kRecv.
   int64 channel_id_ = -1;
diff --git a/tensorflow/compiler/xla/service/hlo_opcode.cc b/tensorflow/compiler/xla/service/hlo_opcode.cc
index ceb0cdaa316..5bda6b6dabc 100644
--- a/tensorflow/compiler/xla/service/hlo_opcode.cc
+++ b/tensorflow/compiler/xla/service/hlo_opcode.cc
@@ -24,6 +24,8 @@ string HloOpcodeString(HloOpcode opcode) {
       return "abs";
     case HloOpcode::kAdd:
       return "add";
+    case HloOpcode::kBatchNormTraining:
+      return "batch-norm-training";
     case HloOpcode::kBitcast:
       return "bitcast";
     case HloOpcode::kBroadcast:
diff --git a/tensorflow/compiler/xla/service/hlo_opcode.h b/tensorflow/compiler/xla/service/hlo_opcode.h
index e2cdbfdfa7a..65aef63dcdd 100644
--- a/tensorflow/compiler/xla/service/hlo_opcode.h
+++ b/tensorflow/compiler/xla/service/hlo_opcode.h
@@ -30,6 +30,7 @@ namespace xla {
 enum class HloOpcode {
   kAbs,
   kAdd,
+  kBatchNormTraining,
   kBitcast,
   kBroadcast,
   kCall,
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc
index 721640cdbd8..06fa8bc6195 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion.cc
@@ -75,6 +75,7 @@ namespace xla {
       return false;

     // Expensive instructions.
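+    // BatchNormTraining lowers to reduction loops over all non-feature
+    // dimensions followed by an elementwise normalization pass, so
+    // duplicating it through fusion would be costly.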
+    case HloOpcode::kBatchNormTraining:
     case HloOpcode::kCall:
     case HloOpcode::kConvolution:
     case HloOpcode::kCrossReplicaSum:
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index 85ca7e4e59c..79bf679c5fd 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -1195,6 +1195,10 @@ tensorflow::Status Service::Op(const OpRequest* arg, OpResponse* result) {
   StatusOr<ComputationDataHandle> handle_status;

   switch (arg->op_case()) {
+    case OpRequest::kBatchNormTrainingRequest:
+      handle_status = computation->AddBatchNormTrainingInstruction(
+          arg->batch_norm_training_request());
+      break;
     case OpRequest::kBinaryOpRequest:
       handle_status =
           computation->AddBinaryInstruction(arg->binary_op_request());
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index afe1a54d3e2..670e1ca84af 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -754,6 +754,109 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
                               AsInt64Slice(arg_shape->dimensions()));
 }

+/* static */ StatusOr<Shape> ShapeInference::InferBatchNormTrainingShape(
+    const Shape& operand_shape, const Shape& offset_shape,
+    const Shape& scale_shape, int64 feature_index) {
+  TF_RETURN_IF_ERROR(
+      ExpectNotTupleOrOpaque(operand_shape, "operand of batch norm training"));
+  TF_RETURN_IF_ERROR(ExpectNotTupleOrOpaque(
+      offset_shape, "offset input of batch norm training"));
+  TF_RETURN_IF_ERROR(ExpectNotTupleOrOpaque(
+      scale_shape, "scale input of batch norm training"));
+
+  TF_RET_CHECK(ShapeUtil::ValidateShape(operand_shape) ==
+               tensorflow::Status::OK());
+  TF_RET_CHECK(ShapeUtil::ValidateShape(offset_shape) ==
+               tensorflow::Status::OK());
+  TF_RET_CHECK(ShapeUtil::ValidateShape(scale_shape) ==
+               tensorflow::Status::OK());
+
+  if (feature_index >= ShapeUtil::Rank(operand_shape)) {
+    return InvalidArgument(
+        "Expected feature_index of batch-norm-training to be "
+        "smaller than the rank of operand_shape; "
+        "got feature_index %lld, and rank %lld",
+        feature_index, ShapeUtil::Rank(operand_shape));
+  }
+
+  if (feature_index < 0) {
+    return InvalidArgument(
+        "Expected feature_index of batch-norm-training to "
+        "be a non-negative number, got %lld",
+        feature_index);
+  }
+
+  if (ShapeUtil::Rank(operand_shape) < 1) {
+    return InvalidArgument(
+        "Expected the rank of operand to "
+        "batch-norm-training to be at least 1; got %lld",
+        ShapeUtil::Rank(operand_shape));
+  }
+
+  if (ShapeUtil::Rank(offset_shape) != 1) {
+    return InvalidArgument(
+        "Offset input of batch-norm-training must have"
+        " rank 1, but has rank %lld.",
+        ShapeUtil::Rank(offset_shape));
+  }
+
+  if (ShapeUtil::Rank(scale_shape) != 1) {
+    return InvalidArgument(
+        "Scale input of batch-norm-training must have"
+        " rank 1, but has rank %lld.",
+        ShapeUtil::Rank(scale_shape));
+  }
+
+  if (!ShapeUtil::ElementIsFloating(operand_shape)) {
+    return InvalidArgument(
+        "The operand to batch-norm-training must have a floating point "
+        "element type, but the shape is %s",
+        PrimitiveType_Name(operand_shape.element_type()).c_str());
+  }
+
+  if (!ShapeUtil::SameElementType(offset_shape, operand_shape)) {
+    return InvalidArgument(
+        "The inputs should have the same element type for "
+        "batch-norm-training, but the shape of offset factor is %s "
+        "and the shape of operand is %s",
+        PrimitiveType_Name(offset_shape.element_type()).c_str(),
PrimitiveType_Name(operand_shape.element_type()).c_str()); + } + + if (!ShapeUtil::SameElementType(scale_shape, operand_shape)) { + return InvalidArgument( + "The inputs should have the same element type for batch-norm-training, " + "but the shape of scale factor is %s " + "and the shape of operand is %s", + PrimitiveType_Name(scale_shape.element_type()).c_str(), + PrimitiveType_Name(operand_shape.element_type()).c_str()); + } + + const int64 feature_count = operand_shape.dimensions(feature_index); + Shape output_shape_for_mean_and_var = + ShapeUtil::MakeShape(operand_shape.element_type(), {feature_count}); + + if (ShapeUtil::GetDimension(offset_shape, 0) != feature_count) { + return InvalidArgument( + "The size of offset factor should be the same as feature count," + "but the size of offset factor is %lld " + "and the feature count is %lld", + ShapeUtil::GetDimension(offset_shape, 0), feature_count); + } + + if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) { + return InvalidArgument( + "The size of scale factor should be the same as feature count," + "but the size of scale factor is %lld " + "and the feature count is %lld", + ShapeUtil::GetDimension(scale_shape, 0), feature_count); + } + + return ShapeUtil::MakeTupleShape({operand_shape, + output_shape_for_mean_and_var, + output_shape_for_mean_and_var}); +} + /* static */ StatusOr ShapeInference::InferConvolveShape( const Shape& lhs, const Shape& rhs, const Window& window, const ConvolutionDimensionNumbers& dnums) { diff --git a/tensorflow/compiler/xla/service/shape_inference.h b/tensorflow/compiler/xla/service/shape_inference.h index c2223423e92..8bd35851332 100644 --- a/tensorflow/compiler/xla/service/shape_inference.h +++ b/tensorflow/compiler/xla/service/shape_inference.h @@ -64,6 +64,13 @@ class ShapeInference { tensorflow::gtl::ArraySlice arg_shapes, const ProgramShape& to_apply); + // Infers the shape produced by InferBatchNormTraining with the given + // operands. + static StatusOr InferBatchNormTrainingShape(const Shape& operand_shape, + const Shape& offset_shape, + const Shape& scale_shape, + int64 feature_index); + // Infers the shape produced by applying the given convolutional // filter (rhs) to lhs in the way specified by the fields on window. 
static StatusOr InferConvolveShape( diff --git a/tensorflow/compiler/xla/service/user_computation.cc b/tensorflow/compiler/xla/service/user_computation.cc index b97823d2dc0..417ed584aaf 100644 --- a/tensorflow/compiler/xla/service/user_computation.cc +++ b/tensorflow/compiler/xla/service/user_computation.cc @@ -465,6 +465,45 @@ StatusOr UserComputation::AddReduceInstruction( return handle; } +StatusOr +UserComputation::AddBatchNormTrainingInstruction( + const BatchNormTrainingRequest& batch_norm_training_request) { + tensorflow::mutex_lock lock(mutex_); + + TF_ASSIGN_OR_RETURN(const OperationRequest* operand, + LookUpRequest(batch_norm_training_request.operand())); + + TF_ASSIGN_OR_RETURN(const OperationRequest* scale, + LookUpRequest(batch_norm_training_request.scale())); + + TF_ASSIGN_OR_RETURN(const OperationRequest* offset, + LookUpRequest(batch_norm_training_request.offset())); + + ComputationDataHandle handle = CreateComputationDataHandle(); + + OperationRequest& request = + (*session_computation_.mutable_requests())[handle.handle()]; + + TF_ASSIGN_OR_RETURN( + Shape inferred_shape, + ShapeInference::InferBatchNormTrainingShape( + operand->output_shape(), scale->output_shape(), + offset->output_shape(), batch_norm_training_request.feature_index())); + + *request.mutable_output_shape() = inferred_shape; + + *request.mutable_output_handle() = handle; + + *request.mutable_request()->mutable_batch_norm_training_request() = + batch_norm_training_request; + + VLOG(1) << "AddBatchNormTrainingInstruction (" << GetVersionedHandleInternal() + << "), data handle " << handle.handle() << ": " + << batch_norm_training_request.ShortDebugString(); + + return handle; +} + StatusOr UserComputation::AddReduceWindowInstruction( const ReduceWindowRequest& reduce_window_request, const UserComputation& to_apply_computation) { @@ -1555,6 +1594,19 @@ void ConstantVisitor(const SessionComputation& session_computation, break; } + case OpRequest::kBatchNormTrainingRequest: { + const BatchNormTrainingRequest& batch_norm_training_request = + request.request().batch_norm_training_request(); + ConstantVisitor(session_computation, + batch_norm_training_request.operand(), visited, + is_constant); + ConstantVisitor(session_computation, batch_norm_training_request.scale(), + visited, is_constant); + ConstantVisitor(session_computation, batch_norm_training_request.offset(), + visited, is_constant); + break; + } + case OpRequest::kBinaryOpRequest: { const BinaryOpRequest& binary_op_request = request.request().binary_op_request(); @@ -1963,6 +2015,16 @@ static void ForEachOperand( break; } + case OpRequest::kBatchNormTrainingRequest: { + const BatchNormTrainingRequest& batch_norm_training_request = + request.request().batch_norm_training_request(); + + apply(batch_norm_training_request.operand()); + apply(batch_norm_training_request.scale()); + apply(batch_norm_training_request.offset()); + break; + } + case OpRequest::kCrossReplicaSumRequest: { const CrossReplicaSumRequest& cross_replica_sum_request = request.request().cross_replica_sum_request(); @@ -2455,6 +2517,23 @@ void ComputationLowerer::Visit( break; } + case OpRequest::kBatchNormTrainingRequest: { + const BatchNormTrainingRequest& batch_norm_training_request = + request.request().batch_norm_training_request(); + HloInstruction* operand = + lookup_instruction(batch_norm_training_request.operand()); + HloInstruction* scale = + lookup_instruction(batch_norm_training_request.scale()); + HloInstruction* offset = + 
lookup_instruction(batch_norm_training_request.offset()); + + hlo_instruction = add_instruction(HloInstruction::CreateBatchNormTraining( + request.output_shape(), operand, scale, offset, + batch_norm_training_request.epsilon(), + batch_norm_training_request.feature_index())); + break; + } + case OpRequest::kBroadcastRequest: { const BroadcastRequest& broadcast_request = request.request().broadcast_request(); diff --git a/tensorflow/compiler/xla/service/user_computation.h b/tensorflow/compiler/xla/service/user_computation.h index fb5425ae61a..a8bedf20b53 100644 --- a/tensorflow/compiler/xla/service/user_computation.h +++ b/tensorflow/compiler/xla/service/user_computation.h @@ -84,6 +84,10 @@ class UserComputation { StatusOr AddUnaryInstruction( const UnaryOpRequest& unary_request); + // Enqueues a batch norm training instruction onto this user computation. + StatusOr AddBatchNormTrainingInstruction( + const BatchNormTrainingRequest& batch_norm_training_request); + // Enqueues a binary instruction onto this user computation. // Returns an error status if the operand indices are out of bounds. StatusOr AddBinaryInstruction( diff --git a/tensorflow/compiler/xla/xla_data.proto b/tensorflow/compiler/xla/xla_data.proto index 44a94e171fa..23ef79d0d75 100644 --- a/tensorflow/compiler/xla/xla_data.proto +++ b/tensorflow/compiler/xla/xla_data.proto @@ -462,6 +462,14 @@ message ReduceWindowRequest { ComputationHandle to_apply = 5; } +message BatchNormTrainingRequest { + ComputationDataHandle operand = 1; + ComputationDataHandle scale = 2; + ComputationDataHandle offset = 3; + float epsilon = 4; + int64 feature_index = 5; +} + message CrossReplicaSumRequest { ComputationDataHandle operand = 2; } @@ -759,7 +767,8 @@ message OpRequest { SendRequest send_request = 30; RecvRequest recv_request = 31; OutfeedRequest outfeed_request = 32; - // Next: 35 + BatchNormTrainingRequest batch_norm_training_request = 35; + // Next: 36 } } From 9263faf2a7c413e4f7a5fdc6a56152ec9c2c316b Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Tue, 13 Jun 2017 19:29:45 -0700 Subject: [PATCH 006/180] Use correct NumericJS in open source The GitHub source turns out to be very different from what's on the web server. Because the source code has a lot of tags in the comments, which are difficult for jsoup to escape, I had to add a basic minifier feature to Vulcanize. So as a bonus, this change makes the TensorBoard binary much smaller too. PiperOrigin-RevId: 158932524 --- .../components/tf_imports/dagre.html | 2 +- .../components/tf_imports/graphlib.html | 2 +- .../components/tf_imports/lodash.html | 2 +- .../components/tf_imports/numericjs.html | 2 +- .../tensorboard/vulcanize/Vulcanize.java | 29 ++++++++++++++++++- .../tensorboard/vulcanize/externs.js | 1 + third_party/js.bzl | 7 +++-- 7 files changed, 37 insertions(+), 8 deletions(-) diff --git a/tensorflow/tensorboard/components/tf_imports/dagre.html b/tensorflow/tensorboard/components/tf_imports/dagre.html index b90dc58e390..49522db38a6 100644 --- a/tensorflow/tensorboard/components/tf_imports/dagre.html +++ b/tensorflow/tensorboard/components/tf_imports/dagre.html @@ -42,4 +42,4 @@ THE SOFTWARE. - + diff --git a/tensorflow/tensorboard/components/tf_imports/graphlib.html b/tensorflow/tensorboard/components/tf_imports/graphlib.html index 664b855f17f..aa868b00b40 100644 --- a/tensorflow/tensorboard/components/tf_imports/graphlib.html +++ b/tensorflow/tensorboard/components/tf_imports/graphlib.html @@ -17,4 +17,4 @@ limitations under the License. 
- + diff --git a/tensorflow/tensorboard/components/tf_imports/lodash.html b/tensorflow/tensorboard/components/tf_imports/lodash.html index 65ff6a4b032..192e61cc99f 100644 --- a/tensorflow/tensorboard/components/tf_imports/lodash.html +++ b/tensorflow/tensorboard/components/tf_imports/lodash.html @@ -15,4 +15,4 @@ See the License for the specific language governing permissions and limitations under the License. --> - + diff --git a/tensorflow/tensorboard/components/tf_imports/numericjs.html b/tensorflow/tensorboard/components/tf_imports/numericjs.html index 81fa9491688..69e948eb99e 100644 --- a/tensorflow/tensorboard/components/tf_imports/numericjs.html +++ b/tensorflow/tensorboard/components/tf_imports/numericjs.html @@ -40,4 +40,4 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --> - + diff --git a/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/Vulcanize.java b/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/Vulcanize.java index 533907dd64d..80e48124eb6 100644 --- a/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/Vulcanize.java +++ b/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/Vulcanize.java @@ -24,6 +24,7 @@ import com.google.common.base.Joiner; import com.google.common.base.Optional; import com.google.common.base.Splitter; import com.google.common.collect.HashMultimap; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; @@ -259,12 +260,18 @@ public final class Vulcanize { path = me().lookup(Webpath.get(node.attr("src"))); script = new String(Files.readAllBytes(getWebfile(path)), UTF_8); } + boolean wantsMinify = getAttrTransitive(node, "jscomp-minify").isPresent(); if (node.attr("src").endsWith(".min.js") - || getAttrTransitive(node, "jscomp-nocompile").isPresent()) { + || getAttrTransitive(node, "jscomp-nocompile").isPresent() + || wantsMinify) { + if (wantsMinify) { + script = minify(path, script); + } Node newScript = new Element(Tag.valueOf("script"), node.baseUri(), node.attributes()) .appendChild(new DataNode(script, node.baseUri())) .removeAttr("src") + .removeAttr("jscomp-minify") .removeAttr("jscomp-nocompile"); if (firstCompiledScript != null) { firstCompiledScript.before(newScript); @@ -458,6 +465,26 @@ public final class Vulcanize { .appendChild(new DataNode(script, tag.baseUri()))); } + private static String minify(Webpath path, String script) { + CompilerOptions options = new CompilerOptions(); + options.skipAllCompilerPasses(); + options.setLanguageIn(CompilerOptions.LanguageMode.ECMASCRIPT_2016); + options.setLanguageOut(CompilerOptions.LanguageMode.ECMASCRIPT5); + options.setContinueAfterErrors(true); + CompilationLevel.SIMPLE_OPTIMIZATIONS.setOptionsForCompilationLevel(options); + if (testOnly) { + options.setPrettyPrint(true); + options.setGeneratePseudoNames(true); + } + Compiler compiler = new Compiler(); + compiler.disableThreads(); + compiler.compile( + ImmutableList.of(), + ImmutableList.of(SourceFile.fromCode(path.toString(), script)), + options); + return compiler.toSource(); + } + private static void handleLicense(String text) { if (legalese.add(CharMatcher.whitespace().removeFrom(text))) { licenses.add(CharMatcher.anyOf("\r\n").trimFrom(text)); diff --git a/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/externs.js b/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/externs.js 
index 2e56562c1c4..85d0ea80c07 100644 --- a/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/externs.js +++ b/tensorflow/tensorboard/java/org/tensorflow/tensorboard/vulcanize/externs.js @@ -20,6 +20,7 @@ /** @type {!Object} */ var _; /** @type {!Object} */ var d3; /** @type {!Object} */ var dagre; +/** @type {!Object} */ var numeric; /** @type {!Object} */ var weblas; /** @type {!Object} */ var graphlib; /** @type {!Object} */ var Plottable; diff --git a/third_party/js.bzl b/third_party/js.bzl index 46466c3f312..b5395a0b0c0 100644 --- a/third_party/js.bzl +++ b/third_party/js.bzl @@ -189,11 +189,12 @@ def tensorboard_js_workspace(): "http://mirror.bazel.build/raw.githubusercontent.com/sloisel/numeric/v1.2.6/license.txt", "https://raw.githubusercontent.com/sloisel/numeric/v1.2.6/license.txt", ], - "dfaca3b8485bee735788cc6eebca82ea25719adc1fb8911c7799c6bd5a95df3b": [ - "http://mirror.bazel.build/raw.githubusercontent.com/sloisel/numeric/v1.2.6/src/numeric.js", - "https://raw.githubusercontent.com/sloisel/numeric/v1.2.6/src/numeric.js", + "5dcaba2016fd237091e3a17b0dc272fb21f0e2b15d7628f95a0ad0cd4cdf4020": [ + "http://mirror.bazel.build/www.numericjs.com/lib/numeric-1.2.6.js", + "http://www.numericjs.com/lib/numeric-1.2.6.js", ], }, + rename = {"numeric-1.2.6.js": "numeric.js"}, ) filegroup_external( From 838571b0a05f38db0715a149b22094cc1cd6ff75 Mon Sep 17 00:00:00 2001 From: Vijay Vasudevan Date: Tue, 13 Jun 2017 19:37:52 -0700 Subject: [PATCH 007/180] python import_graph_def: keep track of multiple colocations and fill in the first non-empty device. Useful for visualizations, but correctness doesn't change. PiperOrigin-RevId: 158932961 --- tensorflow/python/framework/importer.py | 33 ++++++++++++------ tensorflow/python/framework/importer_test.py | 36 ++++++++++++++++++++ 2 files changed, 59 insertions(+), 10 deletions(-) diff --git a/tensorflow/python/framework/importer.py b/tensorflow/python/framework/importer.py index 025e2136206..5043e7285fe 100644 --- a/tensorflow/python/framework/importer.py +++ b/tensorflow/python/framework/importer.py @@ -18,6 +18,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +import collections import contextlib import copy @@ -311,9 +312,10 @@ def import_graph_def(graph_def, input_map=None, return_elements=None, compute_shapes=False, compute_device=False, op_def=op_def) - # Maps from a node to the op it is colocated with, if colocation + # Maps from a node to the ops it is colocated with, if colocation # is specified in the attributes. - colocation_pairs = {} + colocation_pairs = collections.defaultdict(list) + # 2. Add inputs to the operations. for node in graph_def.node: op = name_to_op[node.name] @@ -339,7 +341,7 @@ def import_graph_def(graph_def, input_map=None, return_elements=None, 'loc:@' + original_op.name)) if op_to_bind_to != node.name: # Keep track of this mapping for a later phase. - colocation_pairs[op] = original_op + colocation_pairs[op].append(original_op) # Don't apply this op's device function, # the colocation constraint will ensure # the proper device gets assigned at runtime. @@ -474,13 +476,24 @@ def import_graph_def(graph_def, input_map=None, return_elements=None, # The following loop populates the device field of ops that are # colocated with another op. This is implied by the colocation # attribute, but we propagate the device field for completeness. 
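  # Note that a single node may request colocation with several ops (e.g.
  # both 'loc:@A' and 'loc:@B'), which is why colocation_pairs maps each op
  # to a list of colocated ops.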
- for op, coloc_op in colocation_pairs.items(): - # If the colocation op has no device, even after a device - # application, there's nothing to do here. - if not coloc_op.device: - continue - coloc_device = pydev.DeviceSpec.from_string(coloc_op.device) - op._set_device(coloc_device) # pylint: disable=protected-access + for op, coloc_op_list in colocation_pairs.items(): + coloc_device = None + # Find any device in the list of colocated ops that have a + # device, if it exists. We assume that if multiple ops + # have devices, they refer to the same device. Otherwise, a + # runtime error will occur since the colocation property + # cannot be guaranteed. + # + # One possible improvement is to try to check for compatibility + # of all devices in this list at import time here, which would + # require implementing a compatibility function for device specs + # in python. + for coloc_op in coloc_op_list: + if coloc_op.device: + coloc_device = pydev.DeviceSpec.from_string(coloc_op.device) + break + if coloc_device: + op._set_device(coloc_device) # pylint: disable=protected-access # Treat unused input mappings as an error, because they are likely to be # due to a typo. diff --git a/tensorflow/python/framework/importer_test.py b/tensorflow/python/framework/importer_test.py index 7fdbcfd8561..5a683dc733e 100644 --- a/tensorflow/python/framework/importer_test.py +++ b/tensorflow/python/framework/importer_test.py @@ -682,6 +682,42 @@ class ImportGraphDefTest(test.TestCase): key: '_class' value { list { s: 'loc:@imported_graph/A' } } } }""", b.graph.as_graph_def()) + def testMultipleColocationWithDeviceFn(self): + original_graph_def = self._MakeGraphDef(""" + node { name: 'A' op: 'None'} + node { name: 'B' op: 'None'} + node { name: 'C' op: 'None' attr { + key: '_class' + value { list { s: 'loc:@A' s: 'loc:@B' } } + } }""") + + # A device function that places "B" on a device, and "A" is empty. + # + # B and C should contain "/device:B". A will not right now. But + # because of the colocation property, at runtime it would be + # placed with B and C. + def CustomDeviceFn(op): + if "B" in op.name: + return "/device:B:0" + return "" + + with ops.Graph().as_default(): + with ops.device(CustomDeviceFn): + c, = importer.import_graph_def( + original_graph_def, return_elements=["C"], name="imported_graph") + + self.assertProtoEqualsVersion(""" + node { name: 'imported_graph/A' op: 'None' } + node { name: 'imported_graph/B' op: 'None' device: "/device:B:0" } + node { name: 'imported_graph/C' op: 'None' device: "/device:B:0" + attr { + key: '_class' value { + list { s: 'loc:@imported_graph/A' + s: 'loc:@imported_graph/B' } + } + } + }""", c.graph.as_graph_def()) + def testNamePrefixColocationAttrsMultipleImport(self): original_graph_def = self._MakeGraphDef(""" node { name: 'A' op: 'None' } From 6ffa51f1e0e76c87ee2164a8d421279768372501 Mon Sep 17 00:00:00 2001 From: Yao Zhang Date: Tue, 13 Jun 2017 19:47:59 -0700 Subject: [PATCH 008/180] Add a standalone cost analysis tool. Improved logging. 
PiperOrigin-RevId: 158933442 --- .../core/grappler/clusters/single_machine.cc | 9 +++- .../costs/measuring_cost_estimator.cc | 1 + .../grappler/costs/op_level_cost_estimator.cc | 6 ++- .../core/grappler/costs/virtual_placer.cc | 2 + .../core/grappler/grappler_item_builder.h | 2 +- tensorflow/python/BUILD | 13 +++++ tensorflow/python/grappler/cost_analyzer.cc | 11 +++-- tensorflow/python/grappler/cost_analyzer.h | 4 +- tensorflow/python/grappler/cost_analyzer.i | 12 +++-- tensorflow/python/grappler/cost_analyzer.py | 17 +++++-- .../python/grappler/cost_analyzer_tool.py | 49 +++++++++++++++++++ 11 files changed, 110 insertions(+), 16 deletions(-) create mode 100644 tensorflow/python/grappler/cost_analyzer_tool.py diff --git a/tensorflow/core/grappler/clusters/single_machine.cc b/tensorflow/core/grappler/clusters/single_machine.cc index 22ccf5208c1..b9ed7c0590c 100644 --- a/tensorflow/core/grappler/clusters/single_machine.cc +++ b/tensorflow/core/grappler/clusters/single_machine.cc @@ -36,6 +36,8 @@ SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus) num_gpus_(num_gpus), expected_init_time_s_(0), closing_(false) { + VLOG(1) << "Number of CPU cores: " << num_cpu_cores + << " Number of GPUs: " << num_gpus; thread_pool_.reset(new thread::ThreadPool( Env::Default(), SanitizeThreadSuffix("single_machine"), 2)); @@ -73,9 +75,12 @@ Status SingleMachine::Provision() { DeviceProperties attr = GetLocalCPUInfo(); devices_["/job:localhost/replica:0/task:0/cpu:0"] = GetLocalCPUInfo(); + VLOG(1) << "Number of GPUs: " << num_gpus_; for (int i = 0; i < num_gpus_; ++i) { - devices_[strings::StrCat("/job:localhost/replica:0/task:0/gpu:", i)] = - GetLocalGPUInfo(i); + string device_name = + strings::StrCat("/job:localhost/replica:0/task:0/gpu:", i); + VLOG(1) << "Adding GPU device " << device_name; + devices_[device_name] = GetLocalGPUInfo(i); } return Status::OK(); } diff --git a/tensorflow/core/grappler/costs/measuring_cost_estimator.cc b/tensorflow/core/grappler/costs/measuring_cost_estimator.cc index e4a0d6f1b86..8fd1801863a 100644 --- a/tensorflow/core/grappler/costs/measuring_cost_estimator.cc +++ b/tensorflow/core/grappler/costs/measuring_cost_estimator.cc @@ -101,6 +101,7 @@ Status MeasuringCostEstimator::PredictCosts(const GraphDef& optimized_graph, } // Run "measurement_steps_" and measure the time. 
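+  // Depending on measurement_threads_, the runs below are either scheduled
+  // on a thread pool or executed serially on the calling thread.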
+ VLOG(1) << "Number of measurement steps: " << measurement_steps_; if (measurement_threads_ > 0) { for (int i = 0; i < measurement_steps_; ++i) { thread_pool_->Schedule([i, &measurement_fn]() { measurement_fn(i); }); diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc index d8b8a12eb29..ba6686e7df9 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc @@ -314,6 +314,8 @@ std::pair OpLevelCostEstimator::GetDeviceInfo( bandwidth = 100; } } + VLOG(1) << "Device: " << device.type() << " GFLOPS: " << gflops + << " Bandwidth: " << bandwidth; return std::make_pair(gflops, bandwidth); } @@ -461,7 +463,7 @@ int64 OpLevelCostEstimator::CountConv2DOperations( ops *= conv_dims.kx * conv_dims.ky; ops *= conv_dims.iz * conv_dims.oz; ops *= kOpsPerMac; - VLOG(1) << "Operations for Conv2D" << ops; + VLOG(1) << "Operations for Conv2D " << ops; if (conv_info != nullptr) { *conv_info = conv_dims; @@ -679,7 +681,7 @@ int64 OpLevelCostEstimator::CountConv2DBackPropInputOperations( ops *= conv_dims.iz * conv_dims.oz; ops *= kOpsPerMac; - VLOG(1) << "Operations for Conv2DBackPropInput" << ops; + VLOG(1) << "Operations for Conv2DBackPropInput " << ops; if (returned_conv_dims != nullptr) { *returned_conv_dims = conv_dims; diff --git a/tensorflow/core/grappler/costs/virtual_placer.cc b/tensorflow/core/grappler/costs/virtual_placer.cc index 0291bd04909..a2d463e7652 100644 --- a/tensorflow/core/grappler/costs/virtual_placer.cc +++ b/tensorflow/core/grappler/costs/virtual_placer.cc @@ -36,6 +36,7 @@ VirtualPlacer::VirtualPlacer(const Cluster* cluster) { } else { default_device_ = devices_.begin()->first; + VLOG(1) << "Number of devices: " << devices_.size(); for (const auto& device : devices_) { if (str_util::Lowercase(device.first).find("gpu") != string::npos) { default_device_ = device.first; @@ -47,6 +48,7 @@ VirtualPlacer::VirtualPlacer(const Cluster* cluster) { const DeviceProperties& VirtualPlacer::get_device(const NodeDef& node) const { string device = get_canonical_device_name(node); + VLOG(3) << "Device name: " << device; auto it = devices_.find(device); DCHECK(it != devices_.end()); return it->second; diff --git a/tensorflow/core/grappler/grappler_item_builder.h b/tensorflow/core/grappler/grappler_item_builder.h index 3aa1d2027f5..7135c83801a 100644 --- a/tensorflow/core/grappler/grappler_item_builder.h +++ b/tensorflow/core/grappler/grappler_item_builder.h @@ -31,7 +31,7 @@ struct ItemConfig { : ignore_user_placement(true), ignore_colocation(true), placeholder_unknown_output_shape_dim(-1), - apply_optimizations(true), + apply_optimizations(false), inline_functions(true) {} // If true, ignore all user specified node placement. 
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index 29b5a2574c8..c6b98e686ff 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -3832,6 +3832,19 @@ py_library( deps = [":pywrap_tensorflow_internal"], ) +py_binary( + name = "cost_analyzer_tool", + srcs = [ + "grappler/cost_analyzer_tool.py", + ], + srcs_version = "PY2AND3", + deps = [ + ":cost_analyzer", + ":framework_for_generated_wrappers", + "//tensorflow/core:protos_all_py", + ], +) + py_test( name = "cost_analyzer_test", size = "small", diff --git a/tensorflow/python/grappler/cost_analyzer.cc b/tensorflow/python/grappler/cost_analyzer.cc index 29976b79495..88bf900dca6 100644 --- a/tensorflow/python/grappler/cost_analyzer.cc +++ b/tensorflow/python/grappler/cost_analyzer.cc @@ -30,11 +30,11 @@ CostAnalyzer::CostAnalyzer(const GrapplerItem& item, Cluster* cluster, analytical_estimator_(cluster, false), suffix_(suffix) {} -Status CostAnalyzer::GenerateReport(std::ostream& os) { +Status CostAnalyzer::GenerateReport(std::ostream& os, bool per_node_report) { GatherCosts(); PreprocessCosts(); AnalyzeCosts(); - PrintAnalysis(os); + PrintAnalysis(os, per_node_report); return Status::OK(); } @@ -158,7 +158,7 @@ void CostAnalyzer::AnalyzeCosts() { } } -void CostAnalyzer::PrintAnalysis(std::ostream& os) const { +void CostAnalyzer::PrintAnalysis(std::ostream& os, bool per_node_report) const { os << std::endl; os << std::left << std::setw(50) << "Total time measured in ns (serialized): " << std::right @@ -225,6 +225,11 @@ void CostAnalyzer::PrintAnalysis(std::ostream& os) const { os << std::endl; } os << std::endl; + + if (per_node_report) { + os << "Below is the per-node report:" << std::endl; + os << op_perf_.DebugString(); + } } } // end namespace grappler diff --git a/tensorflow/python/grappler/cost_analyzer.h b/tensorflow/python/grappler/cost_analyzer.h index 3700bf5fb37..0e860e0fee9 100644 --- a/tensorflow/python/grappler/cost_analyzer.h +++ b/tensorflow/python/grappler/cost_analyzer.h @@ -50,7 +50,7 @@ class CostAnalyzer { public: explicit CostAnalyzer(const GrapplerItem& item, Cluster* cluster, const string& suffix); - Status GenerateReport(std::ostream& os); + Status GenerateReport(std::ostream& os, bool per_node_report); private: void PredictCosts(CostEstimator* cost_estimator, CostGraphDef* cost_graph, @@ -59,7 +59,7 @@ class CostAnalyzer { void PreprocessCosts(); void AnalyzeCosts(); void SortOpsByTime(std::map ops); - void PrintAnalysis(std::ostream& os) const; + void PrintAnalysis(std::ostream& os, bool per_node_report) const; const GrapplerItem* item_; MeasuringCostEstimator measure_estimator_; diff --git a/tensorflow/python/grappler/cost_analyzer.i b/tensorflow/python/grappler/cost_analyzer.i index a51d8673c99..6066b6131ff 100644 --- a/tensorflow/python/grappler/cost_analyzer.i +++ b/tensorflow/python/grappler/cost_analyzer.i @@ -42,8 +42,10 @@ limitations under the License. 
%} %{ -string GenerateCostReport(const tensorflow::MetaGraphDef& metagraph) { +string GenerateCostReport(const tensorflow::MetaGraphDef& metagraph, bool +per_node_report) { tensorflow::grappler::ItemConfig cfg; + cfg.apply_optimizations = false; std::unique_ptr item = tensorflow::grappler::GrapplerItemFromMetaGraphDef("metagraph", metagraph, cfg); @@ -53,16 +55,20 @@ string GenerateCostReport(const tensorflow::MetaGraphDef& metagraph) { int num_cpu_cores = tensorflow::grappler::GetNumAvailableLogicalCPUCores(); int num_gpus = tensorflow::grappler::GetNumAvailableGPUs(); tensorflow::grappler::SingleMachine cluster(timeout_s, num_cpu_cores, num_gpus); + cluster.SetNumWarmupSteps(10); + cluster.AllowSoftPlacement(true); + cluster.DisableDetailedStats(false); TF_CHECK_OK(cluster.Provision()); string suffix; tensorflow::grappler::CostAnalyzer analyzer(*item, &cluster, suffix); std::stringstream os; - analyzer.GenerateReport(os); + analyzer.GenerateReport(os, per_node_report); return os.str(); } %} -string GenerateCostReport(const tensorflow::MetaGraphDef& metagraph); +string GenerateCostReport(const tensorflow::MetaGraphDef& metagraph, bool +per_node_report); diff --git a/tensorflow/python/grappler/cost_analyzer.py b/tensorflow/python/grappler/cost_analyzer.py index d16614c7c75..75c21e57271 100644 --- a/tensorflow/python/grappler/cost_analyzer.py +++ b/tensorflow/python/grappler/cost_analyzer.py @@ -22,8 +22,19 @@ from tensorflow.python import pywrap_tensorflow as tf_wrap from tensorflow.python.framework import errors -def GenerateCostReport(metagraph): - """Analyze the cost of each TensorFlow operation in the provided metagraph.""" +def GenerateCostReport(metagraph, per_node_report=False): + """Analyze the cost of each TensorFlow op and node in the provided metagraph. + + Args: + metagraph: An TensorFlow MetaGraphDef. + per_node_report: by default the report contains stats aggregated on a per op + type basis, setting per_node_report to True adds results for each + individual node to the report. + + Returns: + A string of cost report. + """ with errors.raise_exception_on_not_ok_status(): - ret_from_swig = tf_wrap.GenerateCostReport(metagraph.SerializeToString()) + ret_from_swig = tf_wrap.GenerateCostReport(metagraph.SerializeToString(), + per_node_report) return ret_from_swig diff --git a/tensorflow/python/grappler/cost_analyzer_tool.py b/tensorflow/python/grappler/cost_analyzer_tool.py new file mode 100644 index 00000000000..80c8970c0bb --- /dev/null +++ b/tensorflow/python/grappler/cost_analyzer_tool.py @@ -0,0 +1,49 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# =============================================================================
+"""A tool for cost analysis."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+
+from tensorflow.core.protobuf import meta_graph_pb2
+from tensorflow.python.grappler import cost_analyzer
+from tensorflow.python.platform import app
+
+
+def main(_):
+  with open(FLAGS.input, "rb") as input_file:  # binary mode for proto parsing
+    metagraph = meta_graph_pb2.MetaGraphDef()
+    metagraph.ParseFromString(input_file.read())
+
+  report = cost_analyzer.GenerateCostReport(metagraph, FLAGS.per_node_report)
+  print(report)
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "--input", type=str, default=None, help="Input .meta file path.")
+  parser.add_argument(
+      "--per_node_report",
+      action="store_true",
+      help="Generate per-node report. By default the report contains stats "
+      "aggregated on a per-op-type basis; per_node_report adds results "
+      "for each individual node to the report.")
+  FLAGS, unparsed = parser.parse_known_args()
+  app.run(main=main, argv=[sys.argv[0]] + unparsed)

From 3df6e18b59b63ea4f5b68ba8c8ec878940a1ada1 Mon Sep 17 00:00:00 2001
From: Francois Chollet
Date: Tue, 13 Jun 2017 20:31:22 -0700
Subject: [PATCH 009/180] Fix reset_uids Keras layers utility

PiperOrigin-RevId: 158935673
---
 tensorflow/contrib/keras/python/keras/backend.py      | 7 ++++---
 tensorflow/contrib/keras/python/keras/backend_test.py | 5 ++++-
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/tensorflow/contrib/keras/python/keras/backend.py b/tensorflow/contrib/keras/python/keras/backend.py
index b7adf9461a8..9f02fc0958e 100644
--- a/tensorflow/contrib/keras/python/keras/backend.py
+++ b/tensorflow/contrib/keras/python/keras/backend.py
@@ -269,9 +269,10 @@ def get_uid(prefix=''):
 
 
 def reset_uids():
-  layer_name_uids_collection = ops.get_collection_ref('LAYER_NAME_UIDS')
-  if layer_name_uids_collection:
-    layer_name_uids_collection.pop()
+  per_graph_layer_name_uids = tf_base_layers.PER_GRAPH_LAYER_NAME_UIDS
+  keys = list(per_graph_layer_name_uids.keys())
+  for key in keys:
+    del per_graph_layer_name_uids[key]
 
 
 def clear_session():
diff --git a/tensorflow/contrib/keras/python/keras/backend_test.py b/tensorflow/contrib/keras/python/keras/backend_test.py
index 2da5aee58e5..a2bc95e4a10 100644
--- a/tensorflow/contrib/keras/python/keras/backend_test.py
+++ b/tensorflow/contrib/keras/python/keras/backend_test.py
@@ -105,10 +105,13 @@ class BackendUtilsTest(test.TestCase):
       self.assertEqual(keras.backend.image_data_format(), image_data_format)
       keras.backend.set_image_data_format('channels_last')
 
-  def test_get_uid(self):
+  def test_get_reset_uids(self):
     self.assertEqual(keras.backend.get_uid('foo'), 1)
     self.assertEqual(keras.backend.get_uid('foo'), 2)
 
+    keras.backend.reset_uids()
+    self.assertEqual(keras.backend.get_uid('foo'), 1)
+
 
 class BackendVariableTest(test.TestCase):
 

From e05f78a9b688a8ae37b1a03bfc4459e18e3b88e4 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 13 Jun 2017 21:48:01 -0700
Subject: [PATCH 010/180] After synchronizing CUDA device, check for errors.
PiperOrigin-RevId: 158939543 --- tensorflow/stream_executor/cuda/cuda_driver.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tensorflow/stream_executor/cuda/cuda_driver.cc b/tensorflow/stream_executor/cuda/cuda_driver.cc index 76778dbeece..b02aefe3e87 100644 --- a/tensorflow/stream_executor/cuda/cuda_driver.cc +++ b/tensorflow/stream_executor/cuda/cuda_driver.cc @@ -15,6 +15,7 @@ limitations under the License. #include "tensorflow/stream_executor/cuda/cuda_driver.h" +#include #include #include #include @@ -1093,12 +1094,19 @@ CUDADriver::ContextGetSharedMemConfig(CudaContext* context) { /* static */ bool CUDADriver::SynchronizeContext(CudaContext* context) { ScopedActivateContext activation{context}; - CUresult res = cuCtxSynchronize(); + const CUresult res = cuCtxSynchronize(); if (res != CUDA_SUCCESS) { LOG(ERROR) << "could not synchronize on CUDA context: " << ToString(res) << " :: " << port::CurrentStackTrace(); return false; } + const auto cudart_error = cudaPeekAtLastError(); + if (cudart_error != cudaSuccess) { + LOG(ERROR) << "could not synchronize on CUDA context: " + << cudaGetErrorString(cudart_error) + << " :: " << port::CurrentStackTrace(); + return false; + } return true; } From 4f2ed15a0f6acc82ea8764171c366c4de8fb1513 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 13 Jun 2017 22:47:54 -0700 Subject: [PATCH 011/180] Automated g4 rollback of changelist 158939543 PiperOrigin-RevId: 158942736 --- tensorflow/stream_executor/cuda/cuda_driver.cc | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tensorflow/stream_executor/cuda/cuda_driver.cc b/tensorflow/stream_executor/cuda/cuda_driver.cc index b02aefe3e87..76778dbeece 100644 --- a/tensorflow/stream_executor/cuda/cuda_driver.cc +++ b/tensorflow/stream_executor/cuda/cuda_driver.cc @@ -15,7 +15,6 @@ limitations under the License. #include "tensorflow/stream_executor/cuda/cuda_driver.h" -#include #include #include #include @@ -1094,19 +1093,12 @@ CUDADriver::ContextGetSharedMemConfig(CudaContext* context) { /* static */ bool CUDADriver::SynchronizeContext(CudaContext* context) { ScopedActivateContext activation{context}; - const CUresult res = cuCtxSynchronize(); + CUresult res = cuCtxSynchronize(); if (res != CUDA_SUCCESS) { LOG(ERROR) << "could not synchronize on CUDA context: " << ToString(res) << " :: " << port::CurrentStackTrace(); return false; } - const auto cudart_error = cudaPeekAtLastError(); - if (cudart_error != cudaSuccess) { - LOG(ERROR) << "could not synchronize on CUDA context: " - << cudaGetErrorString(cudart_error) - << " :: " << port::CurrentStackTrace(); - return false; - } return true; } From f4ccadc2d4cea60001733dfa7626e8e13a85ba7c Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Tue, 13 Jun 2017 22:57:03 -0700 Subject: [PATCH 012/180] Refactor histogram compression into new module This will be useful for pluginification, so we can access this function independently of the EventAccumulator. 
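For example, once the function lives in its own module, a plugin can compress
a histogram without touching the event-processing machinery at all. A minimal
sketch, using only APIs introduced in this change (bps defaults to
NORMAL_HISTOGRAM_BPS):

    import tensorflow as tf
    from tensorflow.tensorboard.plugins.distributions import compressor

    proto = tf.HistogramProto(
        min=1, max=2, num=3, sum=4, sum_squares=5,
        bucket_limit=[1, 2, 3], bucket=[0, 3, 0])
    # Yields one CompressedHistogramValue(basis_point, value) per
    # compression point.
    for basis_point, value in compressor.CompressHistogram(proto):
      print(basis_point, value)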
PiperOrigin-RevId: 158943129 --- .../backend/event_processing/BUILD | 2 + .../event_processing/event_accumulator.py | 74 ++---------- .../event_accumulator_test.py | 59 +-------- .../tensorboard/plugins/distributions/BUILD | 19 +++ .../plugins/distributions/compressor.py | 88 ++++++++++++++ .../plugins/distributions/compressor_test.py | 113 ++++++++++++++++++ 6 files changed, 235 insertions(+), 120 deletions(-) create mode 100644 tensorflow/tensorboard/plugins/distributions/compressor.py create mode 100644 tensorflow/tensorboard/plugins/distributions/compressor_test.py diff --git a/tensorflow/tensorboard/backend/event_processing/BUILD b/tensorflow/tensorboard/backend/event_processing/BUILD index 9c9ca29be2d..c5601438974 100644 --- a/tensorflow/tensorboard/backend/event_processing/BUILD +++ b/tensorflow/tensorboard/backend/event_processing/BUILD @@ -83,6 +83,7 @@ py_library( ":plugin_asset_util", ":reservoir", "//tensorflow:tensorflow_py", + "//tensorflow/tensorboard/plugins/distributions:compressor", ], ) @@ -94,6 +95,7 @@ py_test( deps = [ ":event_accumulator", "//tensorflow:tensorflow_py", + "//tensorflow/tensorboard/plugins/distributions:compressor", ], ) diff --git a/tensorflow/tensorboard/backend/event_processing/event_accumulator.py b/tensorflow/tensorboard/backend/event_processing/event_accumulator.py index 1562f0f8339..59b925cccda 100644 --- a/tensorflow/tensorboard/backend/event_processing/event_accumulator.py +++ b/tensorflow/tensorboard/backend/event_processing/event_accumulator.py @@ -22,13 +22,13 @@ import os import re import threading -import numpy as np import tensorflow as tf from tensorflow.tensorboard.backend.event_processing import directory_watcher from tensorflow.tensorboard.backend.event_processing import event_file_loader from tensorflow.tensorboard.backend.event_processing import plugin_asset_util from tensorflow.tensorboard.backend.event_processing import reservoir +from tensorflow.tensorboard.plugins.distributions import compressor namedtuple = collections.namedtuple ScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value']) @@ -41,9 +41,6 @@ CompressedHistogramEvent = namedtuple('CompressedHistogramEvent', ['wall_time', 'step', 'compressed_histogram_values']) -CompressedHistogramValue = namedtuple('CompressedHistogramValue', - ['basis_point', 'value']) - HistogramEvent = namedtuple('HistogramEvent', ['wall_time', 'step', 'histogram_value']) @@ -640,8 +637,15 @@ class EventAccumulator(object): histo = self._ConvertHistogramProtoToTuple(histo) histo_ev = HistogramEvent(wall_time, step, histo) self._histograms.AddItem(tag, histo_ev) - self._compressed_histograms.AddItem( - tag, histo_ev, lambda x: _CompressHistogram(x, self._compression_bps)) + self._compressed_histograms.AddItem(tag, histo_ev, self._CompressHistogram) + + def _CompressHistogram(self, histo_ev): + """Callback for _ProcessHistogram.""" + return CompressedHistogramEvent( + histo_ev.wall_time, + histo_ev.step, + compressor.CompressHistogram( + histo_ev.histogram_value, self._compression_bps)) def _ProcessImage(self, tag, wall_time, step, image): """Processes an image by adding it to accumulated state.""" @@ -791,61 +795,3 @@ def _ParseFileVersion(file_version): ('Invalid event.proto file_version. Defaulting to use of ' 'out-of-order event.step logic for purging expired events.')) return -1 - - -def _CompressHistogram(histo_ev, bps): - """Creates fixed size histogram by adding compression to accumulated state. 
- - This routine transforms a histogram at a particular step by linearly - interpolating its variable number of buckets to represent their cumulative - weight at a constant number of compression points. This significantly reduces - the size of the histogram and makes it suitable for a two-dimensional area - plot where the output of this routine constitutes the ranges for a single x - coordinate. - - Args: - histo_ev: A HistogramEvent namedtuple. - bps: Compression points represented in basis points, 1/100ths of a percent. - - Returns: - CompressedHistogramEvent namedtuple. - """ - # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc - histo = histo_ev.histogram_value - if not histo.num: - return CompressedHistogramEvent( - histo_ev.wall_time, - histo_ev.step, - [CompressedHistogramValue(b, 0.0) for b in bps]) - bucket = np.array(histo.bucket) - weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum() - values = [] - j = 0 - while j < len(bps): - i = np.searchsorted(weights, bps[j], side='right') - while i < len(weights): - cumsum = weights[i] - cumsum_prev = weights[i - 1] if i > 0 else 0.0 - if cumsum == cumsum_prev: # prevent remap divide by zero - i += 1 - continue - if not i or not cumsum_prev: - lhs = histo.min - else: - lhs = max(histo.bucket_limit[i - 1], histo.min) - rhs = min(histo.bucket_limit[i], histo.max) - weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs) - values.append(CompressedHistogramValue(bps[j], weight)) - j += 1 - break - else: - break - while j < len(bps): - values.append(CompressedHistogramValue(bps[j], histo.max)) - j += 1 - return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step, values) - - -def _Remap(x, x0, x1, y0, y1): - """Linearly map from [x0, x1] unto [y0, y1].""" - return y0 + (x - x0) * float(y1 - y0) / (x1 - x0) diff --git a/tensorflow/tensorboard/backend/event_processing/event_accumulator_test.py b/tensorflow/tensorboard/backend/event_processing/event_accumulator_test.py index 4ce766f4204..516704c700e 100644 --- a/tensorflow/tensorboard/backend/event_processing/event_accumulator_test.py +++ b/tensorflow/tensorboard/backend/event_processing/event_accumulator_test.py @@ -25,6 +25,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.tensorboard.backend.event_processing import event_accumulator as ea +from tensorflow.tensorboard.plugins.distributions import compressor class _EventGenerator(object): @@ -406,7 +407,7 @@ class MockingEventAccumulatorTest(EventAccumulatorTest): # Create the expected values after compressing hst1 expected_vals1 = [ - ea.CompressedHistogramValue(bp, val) + compressor.CompressedHistogramValue(bp, val) for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5), (7500, 1.75 ), (10000, 2.0)] ] @@ -416,7 +417,7 @@ class MockingEventAccumulatorTest(EventAccumulatorTest): # Create the expected values after compressing hst2 expected_vals2 = [ - ea.CompressedHistogramValue(bp, val) + compressor.CompressedHistogramValue(bp, val) for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3), @@ -427,60 +428,6 @@ class MockingEventAccumulatorTest(EventAccumulatorTest): wall_time=2, step=12, compressed_histogram_values=expected_vals2) self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2]) - def testCompressedHistogramsWithEmptyHistogram(self): - """Tests that empty histograms compressed properly in EventAccumulator.""" - gen = _EventGenerator(self) - acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000)) - - 
gen.AddHistogram( - 'hst1', - wall_time=1, - step=10, - hmin=None, - hmax=None, - hnum=0, - hsum=0, - hsum_squares=0, - hbucket_limit=[1, 2, 3], - hbucket=[0, 0, 0]) - acc.Reload() - - # Create the expected values after compressing hst1 - expected_vals1 = [ - ea.CompressedHistogramValue(bp, val) - for bp, val in [(0, 0.0), (2500, 0), (5000, 0), (7500, 0), (10000, 0)] - ] - expected_cmphst1 = ea.CompressedHistogramEvent( - wall_time=1, step=10, compressed_histogram_values=expected_vals1) - self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1]) - - def testCompressHistogram_uglyHistogram(self): - bps = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000) - histogram_values = ea.HistogramValue( - min=0.0, - max=1.0, - num=960.0, - sum=64.0, - sum_squares=64.0, - bucket_limit=[ - 0.0, 1e-12, 0.917246389039776, 1.0089710279437536, - 1.7976931348623157e+308 - ], - bucket=[0.0, 896.0, 0.0, 64.0, 0.0]) - histogram_event = ea.HistogramEvent(0, 0, histogram_values) - compressed_event = ea._CompressHistogram(histogram_event, bps) - vals = compressed_event.compressed_histogram_values - self.assertEquals(tuple(v.basis_point for v in vals), bps) - self.assertAlmostEqual(vals[0].value, 0.0) - self.assertAlmostEqual(vals[1].value, 7.157142857142856e-14) - self.assertAlmostEqual(vals[2].value, 1.7003571428571426e-13) - self.assertAlmostEqual(vals[3].value, 3.305357142857143e-13) - self.assertAlmostEqual(vals[4].value, 5.357142857142857e-13) - self.assertAlmostEqual(vals[5].value, 7.408928571428571e-13) - self.assertAlmostEqual(vals[6].value, 9.013928571428571e-13) - self.assertAlmostEqual(vals[7].value, 9.998571428571429e-13) - self.assertAlmostEqual(vals[8].value, 1.0) - def testImages(self): """Tests 2 images inserted/accessed in EventAccumulator.""" gen = _EventGenerator(self) diff --git a/tensorflow/tensorboard/plugins/distributions/BUILD b/tensorflow/tensorboard/plugins/distributions/BUILD index 3ce765020e0..a8b131c350b 100644 --- a/tensorflow/tensorboard/plugins/distributions/BUILD +++ b/tensorflow/tensorboard/plugins/distributions/BUILD @@ -41,6 +41,25 @@ py_test( ], ) +py_library( + name = "compressor", + srcs = ["compressor.py"], + srcs_version = "PY2AND3", + visibility = ["//visibility:public"], + deps = ["//third_party/py/numpy"], +) + +py_test( + name = "compressor_test", + size = "small", + srcs = ["compressor_test.py"], + srcs_version = "PY2AND3", + deps = [ + ":compressor", + "//tensorflow:tensorflow_py", + ], +) + filegroup( name = "all_files", srcs = glob(["**"]), diff --git a/tensorflow/tensorboard/plugins/distributions/compressor.py b/tensorflow/tensorboard/plugins/distributions/compressor.py new file mode 100644 index 00000000000..dd9d00f5cda --- /dev/null +++ b/tensorflow/tensorboard/plugins/distributions/compressor.py @@ -0,0 +1,88 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Package for histogram compression.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import numpy as np + +# Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf) +# naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev, +# and then the long tail. +NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000) + + +CompressedHistogramValue = collections.namedtuple('CompressedHistogramValue', + ['basis_point', 'value']) + + +def CompressHistogram(histo, bps=NORMAL_HISTOGRAM_BPS): + """Creates fixed size histogram by adding compression to accumulated state. + + This routine transforms a histogram at a particular step by linearly + interpolating its variable number of buckets to represent their cumulative + weight at a constant number of compression points. This significantly reduces + the size of the histogram and makes it suitable for a two-dimensional area + plot where the output of this routine constitutes the ranges for a single x + coordinate. + + Args: + histo: A HistogramProto object. + bps: Compression points represented in basis points, 1/100ths of a percent. + Defaults to normal distribution. + + Returns: + List of values for each basis point. + """ + # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc + if not histo.num: + return [CompressedHistogramValue(b, 0.0) for b in bps] + bucket = np.array(histo.bucket) + bucket_limit = list(histo.bucket_limit) + weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum() + values = [] + j = 0 + while j < len(bps): + i = np.searchsorted(weights, bps[j], side='right') + while i < len(weights): + cumsum = weights[i] + cumsum_prev = weights[i - 1] if i > 0 else 0.0 + if cumsum == cumsum_prev: # prevent remap divide by zero + i += 1 + continue + if not i or not cumsum_prev: + lhs = histo.min + else: + lhs = max(bucket_limit[i - 1], histo.min) + rhs = min(bucket_limit[i], histo.max) + weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs) + values.append(CompressedHistogramValue(bps[j], weight)) + j += 1 + break + else: + break + while j < len(bps): + values.append(CompressedHistogramValue(bps[j], histo.max)) + j += 1 + return values + + +def _Remap(x, x0, x1, y0, y1): + """Linearly map from [x0, x1] unto [y0, y1].""" + return y0 + (x - x0) * float(y1 - y0) / (x1 - x0) diff --git a/tensorflow/tensorboard/plugins/distributions/compressor_test.py b/tensorflow/tensorboard/plugins/distributions/compressor_test.py new file mode 100644 index 00000000000..92e6408954b --- /dev/null +++ b/tensorflow/tensorboard/plugins/distributions/compressor_test.py @@ -0,0 +1,113 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
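To make the interpolation concrete, trace testExample below at basis point
2500: with bucket=[0, 3, 0] the cumulative weights are (0, 10000, 10000), so
the basis point falls in the middle bucket with lhs = min = 1 and
rhs = bucket_limit[1] = 2, giving

    _Remap(2500, 0, 10000, 1, 2) = 1 + (2500 - 0) * (2 - 1) / (10000 - 0)
                                 = 1.25

which is exactly the (2500, 1.25) pair the test asserts.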
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +from tensorflow.tensorboard.plugins.distributions import compressor + + +def _make_expected_value(*values): + return [compressor.CompressedHistogramValue(bp, val) for bp, val in values] + + +class HistcompTest(tf.test.TestCase): + + def testExample(self): + bps = (0, 2500, 5000, 7500, 10000) + proto = tf.HistogramProto( + min=1, + max=2, + num=3, + sum=4, + sum_squares=5, + bucket_limit=[1, 2, 3], + bucket=[0, 3, 0]) + self.assertEqual( + _make_expected_value( + (0, 1.0), + (2500, 1.25), + (5000, 1.5), + (7500, 1.75), + (10000, 2.0)), + compressor.CompressHistogram(proto, bps)) + + def testAnotherExample(self): + bps = (0, 2500, 5000, 7500, 10000) + proto = tf.HistogramProto( + min=-2, + max=3, + num=4, + sum=5, + sum_squares=6, + bucket_limit=[2, 3, 4], + bucket=[1, 3, 0]) + self.assertEqual( + _make_expected_value( + (0, -2), + (2500, 2), + (5000, 2 + 1 / 3), + (7500, 2 + 2 / 3), + (10000, 3)), + compressor.CompressHistogram(proto, bps)) + + def testEmpty(self): + bps = (0, 2500, 5000, 7500, 10000) + proto = tf.HistogramProto( + min=None, + max=None, + num=0, + sum=0, + sum_squares=0, + bucket_limit=[1, 2, 3], + bucket=[0, 0, 0]) + self.assertEqual( + _make_expected_value( + (0, 0), + (2500, 0), + (5000, 0), + (7500, 0), + (10000, 0)), + compressor.CompressHistogram(proto, bps)) + + def testUgly(self): + bps = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000) + proto = tf.HistogramProto( + min=0.0, + max=1.0, + num=960.0, + sum=64.0, + sum_squares=64.0, + bucket_limit=[0.0, 1e-12, 0.917246389039776, 1.0089710279437536, + 1.7976931348623157e+308], + bucket=[0.0, 896.0, 0.0, 64.0, 0.0]) + vals = compressor.CompressHistogram(proto, bps) + self.assertEquals(tuple(v.basis_point for v in vals), bps) + self.assertAlmostEqual(vals[0].value, 0.0) + self.assertAlmostEqual(vals[1].value, 7.157142857142856e-14) + self.assertAlmostEqual(vals[2].value, 1.7003571428571426e-13) + self.assertAlmostEqual(vals[3].value, 3.305357142857143e-13) + self.assertAlmostEqual(vals[4].value, 5.357142857142857e-13) + self.assertAlmostEqual(vals[5].value, 7.408928571428571e-13) + self.assertAlmostEqual(vals[6].value, 9.013928571428571e-13) + self.assertAlmostEqual(vals[7].value, 9.998571428571429e-13) + self.assertAlmostEqual(vals[8].value, 1.0) + + +if __name__ == '__main__': + tf.test.main() From f0f34a2096c8cab9d7a2100016e54d5133bbc017 Mon Sep 17 00:00:00 2001 From: Jonathan Hseu Date: Tue, 13 Jun 2017 23:35:30 -0700 Subject: [PATCH 013/180] Restore the new xla header only libs. PiperOrigin-RevId: 158945196 --- tensorflow/compiler/jit/BUILD | 28 ++++++++++++---------------- tensorflow/compiler/xla/BUILD | 33 +++++++++++++++------------------ 2 files changed, 27 insertions(+), 34 deletions(-) diff --git a/tensorflow/compiler/jit/BUILD b/tensorflow/compiler/jit/BUILD index da1c50b0a79..306e704415b 100644 --- a/tensorflow/compiler/jit/BUILD +++ b/tensorflow/compiler/jit/BUILD @@ -22,22 +22,6 @@ load("//tensorflow:tensorflow.bzl", "cc_header_only_library") load("//tensorflow:tensorflow.bzl", "tf_kernel_library") load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda") -# TODO(jhseu): Fix this target. -# -# This target can be used by XLA device plugins to prevent circular -# dependencies, and provides access to all of the required headers -# for building a device library. 
-#cc_header_only_library( -# name = "xla_jit_headers_lib", -# visibility = ["//visibility:public"], -# deps = [ -# ":xla_cpu_device", -# ":xla_cpu_jit", -# ":xla_gpu_device", -# ":xla_gpu_jit", -# ], -#) - # Target that bundles up the XLA CPU and GPU JIT devices. cc_library( name = "jit", @@ -285,3 +269,15 @@ filegroup( ), visibility = ["//tensorflow:__subpackages__"], ) + +# This target can be used by XLA device plugins to prevent circular dependencies, and provides access to all of the required headers for building a device library. +cc_header_only_library( + name = "xla_jit_headers_lib", + visibility = ["//visibility:public"], + deps = [ + ":xla_cpu_device", + ":xla_cpu_jit", + ":xla_gpu_device", + ":xla_gpu_jit", + ], +) diff --git a/tensorflow/compiler/xla/BUILD b/tensorflow/compiler/xla/BUILD index 4ce8532dc7e..cde790c0aed 100644 --- a/tensorflow/compiler/xla/BUILD +++ b/tensorflow/compiler/xla/BUILD @@ -46,24 +46,6 @@ xla_proto_library( ], ) -# TODO(jhseu): Restore -# This is a headers target that extra XLA devices can use to prevent -# circular dependencies. Devices that are compiled as separate shared -# objects can also use it to prevent linking of library code. -#cc_header_only_library( -# name = "xla_headers_lib", -# visibility = ["//visibility:public"], -# deps = [ -# "//tensorflow/compiler/xla:xla_data_proto", -# "//tensorflow/compiler/xla:xla_proto", -# "//tensorflow/compiler/xla/client:client_library", -# "//tensorflow/compiler/xla/legacy_flags:layout_util_flags", -# "//tensorflow/compiler/xla/service:hlo", -# "//tensorflow/core:framework_headers_lib", -# "//tensorflow/core:stream_executor_headers_lib", -# ], -#) - cc_library( name = "test", testonly = 1, @@ -602,3 +584,18 @@ filegroup( ), visibility = ["//tensorflow:__subpackages__"], ) + +# This is a headers target that extra XLA devices can use to prevent circular dependencies. Devices that are compiled as separate shared objects can also use it to prevent linking of library code. +cc_header_only_library( + name = "xla_headers_lib", + visibility = ["//visibility:public"], + deps = [ + ":xla_data_proto", + ":xla_proto", + "//tensorflow/compiler/xla/client:client_library", + "//tensorflow/compiler/xla/legacy_flags:layout_util_flags", + "//tensorflow/compiler/xla/service:hlo", + "//tensorflow/core:framework_headers_lib", + "//tensorflow/core:stream_executor_headers_lib", + ], +) From 5cb84a73356d698ed1ed5f0d2e8ea38075b029ca Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 14 Jun 2017 00:34:27 -0700 Subject: [PATCH 014/180] Fixed links in readme. PiperOrigin-RevId: 158948423 --- tensorflow/examples/learn/README.md | 41 ++++++++++++++++------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/tensorflow/examples/learn/README.md b/tensorflow/examples/learn/README.md index 37157fc2967..e5b34292b1a 100644 --- a/tensorflow/examples/learn/README.md +++ b/tensorflow/examples/learn/README.md @@ -1,33 +1,36 @@ -# TF Learn Examples +# Estimator Examples -Learn is a high-level API for TensorFlow that allows you to create, -train, and use deep learning models easily. See the [Quickstart tutorial](https://www.tensorflow.org/get_started/tflearn) +TensorFlow Estimators are a high-level API for TensorFlow that allows you to +create, train, and use deep learning models easily. + +See the [Quickstart tutorial](https://www.tensorflow.org/get_started/tflearn) for an introduction to the API. -To run most of these examples, you need to install the `scikit learn` library (`sudo pip install sklearn`). 
-Some examples use the `pandas` library for data processing (`sudo pip install pandas`). +To run most of these examples, you need to install the `scikit learn` library +(`sudo pip install sklearn`). Some examples use the `pandas` library for data +processing (`sudo pip install pandas`). ## Basics -* [Deep Neural Network Regression with Boston Data](boston.py) -* [Deep Neural Network Classification with Iris Data](iris.py) -* [Building a Custom Model](iris_custom_model.py) -* [Building a Model Using Different GPU Configurations](iris_run_config.py) +* [Deep Neural Network Regression with Boston Data]( https://www.tensorflow.org/code/tensorflow/examples/learn/boston.py) +* [Deep Neural Network Classification with Iris Data]( https://www.tensorflow.org/code/tensorflow/examples/learn/iris.py) +* [Building a Custom Model]( https://www.tensorflow.org/code/tensorflow/examples/learn/iris_custom_model.py) +* [Building a Model Using Different GPU Configurations]( https://www.tensorflow.org/code/tensorflow/examples/learn/iris_run_config.py) ## Techniques -* [Improving Performance Using Early Stopping with Iris Data](iris_val_based_early_stopping.py) -* [Using skflow with Pipeline](iris_with_pipeline.py) -* [Deep Neural Network with Customized Decay Function](iris_custom_decay_dnn.py) +* [Improving Performance Using Early Stopping with Iris Data]( https://www.tensorflow.org/code/tensorflow/examples/learn/iris_val_based_early_stopping.py) +* [Using skflow with Pipeline]( https://www.tensorflow.org/code/tensorflow/examples/learn/iris_with_pipeline.py) +* [Deep Neural Network with Customized Decay Function]( https://www.tensorflow.org/code/tensorflow/examples/learn/iris_custom_decay_dnn.py) ## Specialized Models -* [Building a Random Forest Model](random_forest_mnist.py) -* [Building a Wide & Deep Model](wide_n_deep_tutorial.py) -* [Building a Residual Network Model](resnet.py) +* [Building a Random Forest Model]( https://www.tensorflow.org/code/tensorflow/examples/learn/random_forest_mnist.py) +* [Building a Wide & Deep Model]( https://www.tensorflow.org/code/tensorflow/examples/learn/wide_n_deep_tutorial.py) +* [Building a Residual Network Model]( https://www.tensorflow.org/code/tensorflow/examples/learn/resnet.py) ## Text classification -* [Text Classification Using Recurrent Neural Networks on Words](text_classification.py) -* [Text Classification Using Convolutional Neural Networks on Words](text_classification_cnn.py) -* [Text Classification Using Recurrent Neural Networks on Characters](text_classification_character_rnn.py) -* [Text Classification Using Convolutional Neural Networks on Characters](text_classification_character_cnn.py) +* [Text Classification Using Recurrent Neural Networks on Words]( https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification.py) +* [Text Classification Using Convolutional Neural Networks on Words]( https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification_cnn.py) +* [Text Classification Using Recurrent Neural Networks on Characters]( https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification_character_rnn.py) +* [Text Classification Using Convolutional Neural Networks on Characters]( https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification_character_cnn.py) From 97905a642ef76a2318d39ce21da1cef82e22aa4e Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 14 Jun 2017 00:49:52 -0700 Subject: [PATCH 015/180] Remove margin around trace viewer. 
PiperOrigin-RevId: 158949251 --- .../components/tf_trace_viewer/tf-trace-viewer.html | 7 +++++++ tensorflow/tensorboard/components/trace_viewer.html | 9 +++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/tensorflow/tensorboard/components/tf_trace_viewer/tf-trace-viewer.html b/tensorflow/tensorboard/components/tf_trace_viewer/tf-trace-viewer.html index a7b0b2cd730..cd65d729a45 100644 --- a/tensorflow/tensorboard/components/tf_trace_viewer/tf-trace-viewer.html +++ b/tensorflow/tensorboard/components/tf_trace_viewer/tf-trace-viewer.html @@ -23,6 +23,13 @@ tf-trace-viewer is the frontend entry point for Trace Viewer on TensorBoard. The server serves the trace viewer app at a separate endpoint. TensorBoard dashboard would integrate trace viewer app using iframe. --> + - - - - - - - - - - diff --git a/tensorflow/tensorboard/components/tf_dashboard_common/BUILD b/tensorflow/tensorboard/components/tf_dashboard_common/BUILD index 7471da3144a..3d6094deeae 100644 --- a/tensorflow/tensorboard/components/tf_dashboard_common/BUILD +++ b/tensorflow/tensorboard/components/tf_dashboard_common/BUILD @@ -33,6 +33,7 @@ ts_web_library( ], path = "/tf-dashboard-common", deps = [ + "//tensorflow/tensorboard/components/tf_color_scale", "//tensorflow/tensorboard/components/tf_imports:d3", "//tensorflow/tensorboard/components/tf_imports:lodash", "//tensorflow/tensorboard/components/tf_imports:polymer", diff --git a/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox-demo.html b/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox-demo.html index d0f5aa6f27d..93898b152e4 100644 --- a/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox-demo.html +++ b/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox-demo.html @@ -18,7 +18,6 @@ limitations under the License. - @@ -39,14 +38,8 @@ function random() { id="multiCheckbox" names="[[names]]" tooltips="[[_tooltips]]" - class-scale="[[classScale]]" highlights="[[highlights]]" > - @@ -66,7 +59,6 @@ function random() { tooltips: Object, autoGenerateTooltips: {value: true}, _tooltips: Object, - classScale: Function, highlights: Array, }, observers: [ diff --git a/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.ts b/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.ts index 4b38d82b14e..453f2a4153f 100644 --- a/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.ts +++ b/tensorflow/tensorboard/components/tf_dashboard_common/tf-multi-checkbox.ts @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +import {runsColorScale} from '../tf-color-scale/colorScale'; import * as storage from '../tf-storage/storage'; Polymer({ @@ -47,10 +48,6 @@ Polymer({ notify: true, computed: 'computeOutSelected(namesMatchingRegex.*, runSelectionState.*)' }, - colorScale: { - type: Object, - observer: 'synchronizeColors', - }, // map from run name to css class maxRunsToEnableByDefault: { // When TB first loads, if it has k or fewer runs, they are all enabled // by default. If there are more, then they are all disabled. 
@@ -127,32 +124,26 @@ Polymer({ }); }, synchronizeColors: function(e) { - if (!this.colorScale) return; - this._setIsolatorIcon(); - var checkboxes = - Array.prototype.slice.call(this.querySelectorAll('paper-checkbox')); - var scale = this.colorScale; - checkboxes.forEach(function(p) { - var color = scale.scale(p.name); + const checkboxes = this.querySelectorAll('paper-checkbox'); + checkboxes.forEach(p => { + const color = runsColorScale(p.name); p.customStyle['--paper-checkbox-checked-color'] = color; p.customStyle['--paper-checkbox-checked-ink-color'] = color; p.customStyle['--paper-checkbox-unchecked-color'] = color; p.customStyle['--paper-checkbox-unchecked-ink-color'] = color; }); - var buttons = - Array.prototype.slice.call(this.querySelectorAll('.isolator')); - buttons.forEach(function(p) { - var color = scale.scale(p.name); + const buttons = this.querySelectorAll('.isolator'); + buttons.forEach(p => { + const color = runsColorScale(p.name); p.style['color'] = color; }); // The updateStyles call fails silently if the browser doesn't have focus, // e.g. if TensorBoard was opened into a new tab that isn't visible. // So we wait for requestAnimationFrame. - var _this = this; - window.requestAnimationFrame(function() { - _this.updateStyles(); + window.requestAnimationFrame(() => { + this.updateStyles(); }); }, _isolateRun: function(e) { diff --git a/tensorflow/tensorboard/components/tf_dashboard_common/tf-panes-helper.html b/tensorflow/tensorboard/components/tf_dashboard_common/tf-panes-helper.html index 155259d3294..32d79c649c5 100644 --- a/tensorflow/tensorboard/components/tf_dashboard_common/tf-panes-helper.html +++ b/tensorflow/tensorboard/components/tf_dashboard_common/tf-panes-helper.html @@ -181,6 +181,7 @@ downloadLinkUrlFunction property to an appropriate value. diff --git a/tensorflow/tensorboard/components/tf_histogram_dashboard/tf-histogram-dashboard.html b/tensorflow/tensorboard/components/tf_histogram_dashboard/tf-histogram-dashboard.html index 1821ce3b6f3..bf6d90fa432 100644 --- a/tensorflow/tensorboard/components/tf_histogram_dashboard/tf-histogram-dashboard.html +++ b/tensorflow/tensorboard/components/tf_histogram_dashboard/tf-histogram-dashboard.html @@ -46,20 +46,11 @@ contains vz-histogram-timeseries embedded inside tf-panes-helper's. -->