From 9e147c44b2ab803b14a418af235e54679b4e03a0 Mon Sep 17 00:00:00 2001 From: Sanders Kleinfeld Date: Fri, 12 Aug 2016 18:11:32 -0400 Subject: [PATCH 1/6] Tutorial updates for r0.10 release (#3634) * New tutorial on tf.contrib.learn monitors. * Code and data for tf.contrib.learn monitors tutorial. From c34142d60b9f3dacb0f835c5b1c8ec52a87fb62c Mon Sep 17 00:00:00 2001 From: Derek Murray Date: Sun, 14 Aug 2016 16:48:50 -0700 Subject: [PATCH 2/6] Automated rollback of change 127668670 (#3770) Change: 128801919 From f646559f5d7f15f2403c472d1b031a3fbc981fa1 Mon Sep 17 00:00:00 2001 From: Longqi Yang Date: Mon, 15 Aug 2016 11:16:36 -0700 Subject: [PATCH 3/6] Correct typo in word2vec.py (#3822) --- tensorflow/models/embedding/word2vec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/models/embedding/word2vec.py b/tensorflow/models/embedding/word2vec.py index 97b38b7f475..5a4b256ac28 100644 --- a/tensorflow/models/embedding/word2vec.py +++ b/tensorflow/models/embedding/word2vec.py @@ -248,7 +248,7 @@ class Word2Vec(object): true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b # Sampled logits: [batch_size, num_sampled] - # We replicate sampled noise lables for all examples in the batch + # We replicate sampled noise labels for all examples in the batch # using the matmul. sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples]) sampled_logits = tf.matmul(example_emb, From 6a25a23e851a78b04fb098429804c13571b38ea7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Man=C3=A9?= Date: Tue, 16 Aug 2016 10:27:55 -0700 Subject: [PATCH 4/6] Pull in TensorBoard v25 from master --- tensorflow/tensorboard/TAG | 2 +- tensorflow/tensorboard/backend/handler.py | 102 +- tensorflow/tensorboard/backend/server.py | 9 +- tensorflow/tensorboard/backend/server_test.py | 86 +- tensorflow/tensorboard/bower.json | 30 +- tensorflow/tensorboard/bower/BUILD | 1 - tensorflow/tensorboard/components/BUILD | 5 +- .../components/tf-backend/backend.ts | 116 +- .../tf-backend/test/backendTests.ts | 128 +- .../components/tf-backend/test/index.html | 1 + .../components/tf-backend/tf-backend.html | 1 + .../tf-dashboard-common/dashboard-style.html | 8 +- .../tf-distribution-chart.html | 26 +- .../tf-distribution-dashboard.html | 19 +- .../tf-event-dashboard.html | 19 +- .../components/tf-globals/globals.ts | 3 +- .../components/tf-graph-app/demo/graph.pbtxt | 90 + .../components/tf-graph-app/demo/index.html | 28 + .../components/tf-graph-app/index.html | 14 + .../components/tf-graph-app/tf-graph-app.html | 16 +- .../tf-graph-info/tf-node-info.html | 6 +- .../tf-histogram-dashboard.html | 202 +++ .../tf-option-selector.html | 77 + .../tf-tensorboard/tf-tensorboard.html | 11 + .../vz-histogram-timeseries/demo/index.html | 67 + .../vz-histogram-timeseries/index.html | 14 + .../vz-histogram-timeseries.html | 272 ++- .../vz-line-chart/vz-line-chart.html | 6 +- .../components/vz-line-chart/vz-line-chart.ts | 4 + .../tensorboard/dist/tf-tensorboard.html | 1493 ++++++++++++++--- .../tensorboard/gulp_tasks/vulcanize.js | 4 +- tensorflow/tensorboard/tensorboard.py | 2 +- 32 files changed, 2357 insertions(+), 505 deletions(-) create mode 100644 tensorflow/tensorboard/components/tf-graph-app/demo/graph.pbtxt create mode 100644 tensorflow/tensorboard/components/tf-graph-app/demo/index.html create mode 100644 tensorflow/tensorboard/components/tf-graph-app/index.html create mode 100644 tensorflow/tensorboard/components/tf-histogram-dashboard/tf-histogram-dashboard.html create mode 100644 
tensorflow/tensorboard/components/tf-option-selector/tf-option-selector.html create mode 100644 tensorflow/tensorboard/components/vz-histogram-timeseries/demo/index.html create mode 100644 tensorflow/tensorboard/components/vz-histogram-timeseries/index.html diff --git a/tensorflow/tensorboard/TAG b/tensorflow/tensorboard/TAG index 409940768f2..7273c0fa8c5 100644 --- a/tensorflow/tensorboard/TAG +++ b/tensorflow/tensorboard/TAG @@ -1 +1 @@ -23 +25 diff --git a/tensorflow/tensorboard/backend/handler.py b/tensorflow/tensorboard/backend/handler.py index cd9e46270b6..a8ecf73a5b2 100644 --- a/tensorflow/tensorboard/backend/handler.py +++ b/tensorflow/tensorboard/backend/handler.py @@ -29,6 +29,7 @@ import imghdr import json import mimetypes import os +import re from six import BytesIO from six.moves import BaseHTTPServer @@ -65,6 +66,11 @@ _IMGHDR_TO_MIMETYPE = { } _DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream' +# Allows *, gzip or x-gzip, but forbid gzip;q=0 +# https://tools.ietf.org/html/rfc7231#section-5.3.4 +_ALLOWS_GZIP_PATTERN = re.compile( + r'(?:^|,|\s)(?:(?:x-)?gzip|\*)(?!;q=0)(?:\s|,|$)') + def _content_type_for_image(encoded_image_string): image_type = imghdr.what(None, encoded_image_string) @@ -91,6 +97,10 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): # How many samples to include in sampling API calls by default. DEFAULT_SAMPLE_COUNT = 10 + # NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all + # responses using send_header. + protocol_version = 'HTTP/1.1' + def __init__(self, multiplexer, *args): self._multiplexer = multiplexer BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args) @@ -162,25 +172,54 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): prefix = os.path.commonprefix([base, absolute_path]) return prefix == base + def _respond(self, content, content_type, code=200, encoding=None): + """Sends HTTP response. + + All text responses are assumed to be utf-8 unless specified otherwise. + + Args: + content: The content to respond with, which is converted to bytes. + content_type: The mime type of the content. + code: The numeric HTTP status code to use. + encoding: The encoding if any (not sanity checked.) + """ + content = compat.as_bytes(content) + self.send_response(code) + if content_type.startswith(('text/', 'application/json')): + if 'charset=' not in content_type: + content_type += '; charset=utf-8' + self.send_header('Content-Type', content_type) + self.send_header('Content-Length', len(content)) + if encoding: + self.send_header('Content-Encoding', encoding) + self.end_headers() + self.wfile.write(content) + + def _is_gzip_accepted(self): + """Returns true if Accept-Encoding contains gzip.""" + accept_encoding = self.headers.get('Accept-Encoding', '') + return _ALLOWS_GZIP_PATTERN.search(accept_encoding) is not None + def _send_gzip_response(self, content, content_type, code=200): """Writes the given content as gzip response using the given content type. + If the HTTP client does not accept gzip encoding, then the response will be + sent uncompressed. + Args: content: The content to respond with. content_type: The mime type of the content. code: The numeric HTTP status code to use. 
""" - out = BytesIO() - f = gzip.GzipFile(fileobj=out, mode='wb') - f.write(compat.as_bytes(content)) - f.close() - gzip_content = out.getvalue() - self.send_response(code) - self.send_header('Content-Type', content_type) - self.send_header('Content-Length', len(gzip_content)) - self.send_header('Content-Encoding', 'gzip') - self.end_headers() - self.wfile.write(gzip_content) + encoding = None + if self._is_gzip_accepted(): + out = BytesIO() + f = gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3) + f.write(compat.as_bytes(content)) + f.close() + content = out.getvalue() + encoding = 'gzip' + self._respond(content, content_type, code, encoding) def _send_json_response(self, obj, code=200): """Writes out the given object as JSON using the given HTTP status code. @@ -191,14 +230,8 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): obj: The object to respond with. code: The numeric HTTP status code to use. """ - - output = json.dumps(json_util.WrapSpecialFloats(obj)) - - self.send_response(code) - self.send_header('Content-Type', 'application/json') - self.send_header('Content-Length', len(output)) - self.end_headers() - self.wfile.write(compat.as_bytes(output)) + content = json.dumps(json_util.WrapSpecialFloats(obj)) + self._respond(content, 'application/json', code) def _send_csv_response(self, serialized_csv, code=200): """Writes out the given string, which represents CSV data. @@ -210,12 +243,7 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): serialized_csv: A string containing some CSV data. code: The numeric HTTP status code to use. """ - - self.send_response(code) - self.send_header('Content-Type', 'text/csv') - self.send_header('Content-Length', len(serialized_csv)) - self.end_headers() - self.wfile.write(serialized_csv) + self._respond(serialized_csv, 'text/csv', code) def _serve_scalars(self, query_params): """Given a tag and single run, return array of ScalarEvents. @@ -372,12 +400,7 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): image = self._multiplexer.Images(run, tag)[index] encoded_image_string = image.encoded_image_string content_type = _content_type_for_image(encoded_image_string) - - self.send_response(200) - self.send_header('Content-Type', content_type) - self.send_header('Content-Length', len(encoded_image_string)) - self.end_headers() - self.wfile.write(encoded_image_string) + self._respond(encoded_image_string, content_type) def _query_for_individual_image(self, run, tag, index): """Builds a URL for accessing the specified image. @@ -429,12 +452,7 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): audio = self._multiplexer.Audio(run, tag)[index] encoded_audio_string = audio.encoded_audio_string content_type = audio.content_type - - self.send_response(200) - self.send_header('Content-Type', content_type) - self.send_header('Content-Length', len(encoded_audio_string)) - self.end_headers() - self.wfile.write(encoded_audio_string) + self._respond(encoded_audio_string, content_type) def _query_for_individual_audio(self, run, tag, index): """Builds a URL for accessing the specified audio. 
@@ -523,13 +541,9 @@ class TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler): logging.info('path %s not found, sending 404', path) self.send_error(404) return - - self.send_response(200) - - mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream' - self.send_header('Content-Type', mimetype) - self.end_headers() - self.wfile.write(contents) + mimetype, encoding = mimetypes.guess_type(path) + mimetype = mimetype or 'application/octet-stream' + self._respond(contents, mimetype, encoding=encoding) def do_GET(self): # pylint: disable=invalid-name """Handler for all get requests.""" diff --git a/tensorflow/tensorboard/backend/server.py b/tensorflow/tensorboard/backend/server.py index 796a96a584c..ba7200fc2ca 100644 --- a/tensorflow/tensorboard/backend/server.py +++ b/tensorflow/tensorboard/backend/server.py @@ -41,7 +41,7 @@ TENSORBOARD_SIZE_GUIDANCE = { event_accumulator.IMAGES: 4, event_accumulator.AUDIO: 4, event_accumulator.SCALARS: 1000, - event_accumulator.HISTOGRAMS: 1, + event_accumulator.HISTOGRAMS: 50, } @@ -80,11 +80,8 @@ def ParseEventFilesSpec(logdir): else: run_name = None path = specification - - if not os.path.isabs(path) and not gcs.IsGCSPath(path): - # Create absolute path out of relative one. - path = os.path.join(os.path.realpath('.'), path) - + if not gcs.IsGCSPath(path): + path = os.path.realpath(os.path.expanduser(path)) files[path] = run_name return files diff --git a/tensorflow/tensorboard/backend/server_test.py b/tensorflow/tensorboard/backend/server_test.py index e5c8f90c178..8e564342a26 100644 --- a/tensorflow/tensorboard/backend/server_test.py +++ b/tensorflow/tensorboard/backend/server_test.py @@ -64,9 +64,9 @@ class TensorboardServerTest(tf.test.TestCase): self._server.shutdown() self._server.server_close() - def _get(self, path): + def _get(self, path, headers={}): """Perform a GET request for the given path.""" - self._connection.request('GET', path) + self._connection.request('GET', path, None, headers) return self._connection.getresponse() def _getJson(self, path): @@ -76,18 +76,6 @@ class TensorboardServerTest(tf.test.TestCase): self.assertEqual(response.status, 200) return json.loads(response.read().decode('utf-8')) - def _decodeResponse(self, response): - """Decompresses (if necessary) the response from the server.""" - encoding = response.getheader('Content-Encoding') - content = response.read() - if encoding in ('gzip', 'x-gzip', 'deflate'): - if encoding == 'deflate': - data = BytesIO(zlib.decompress(content)) - else: - data = gzip.GzipFile('', 'rb', 9, BytesIO(content)) - content = data.read() - return content - def testBasicStartup(self): """Start the server up and then shut it down immediately.""" pass @@ -180,8 +168,7 @@ class TensorboardServerTest(tf.test.TestCase): response = self._get('/data/graph?run=run1&limit_attr_size=1024' '&large_attrs_key=_very_large_attrs') self.assertEqual(response.status, 200) - # Decompress (unzip) the response, since graphs come gzipped. - graph_pbtxt = self._decodeResponse(response) + graph_pbtxt = response.read() # Parse the graph from pbtxt into a graph message. 
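The handler changes above funnel every response through a new _respond helper, which always writes an accurate Content-Length (required now that protocol_version is 'HTTP/1.1') and appends a UTF-8 charset to text and JSON content types, while _send_gzip_response now compresses only when the client's Accept-Encoding matches _ALLOWS_GZIP_PATTERN. A minimal standalone sketch of that negotiation follows; it reuses the same regex, but the maybe_gzip helper is illustrative and not part of TensorBoard.

import gzip
import re
from io import BytesIO

# Same pattern as handler.py: accept "gzip", "x-gzip" or "*", but not "gzip;q=0".
_ALLOWS_GZIP_PATTERN = re.compile(
    r'(?:^|,|\s)(?:(?:x-)?gzip|\*)(?!;q=0)(?:\s|,|$)')

def maybe_gzip(content, accept_encoding):
    """Returns (body, encoding); body is gzipped only if the client allows it."""
    if not accept_encoding or _ALLOWS_GZIP_PATTERN.search(accept_encoding) is None:
        return content, None
    out = BytesIO()
    with gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3) as f:
        f.write(content)
    return out.getvalue(), 'gzip'

assert maybe_gzip(b'{}', 'gzip, deflate')[1] == 'gzip'
assert maybe_gzip(b'{}', '*')[1] == 'gzip'
assert maybe_gzip(b'{}', 'gzip;q=0')[1] is None   # explicit opt-out is honored
assert maybe_gzip(b'{}', 'doodle')[1] is None     # unknown encodings stay uncompressed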
graph = tf.GraphDef() graph = text_format.Parse(graph_pbtxt, graph) @@ -194,12 +181,40 @@ class TensorboardServerTest(tf.test.TestCase): self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s, [b'very_large_attr']) + def testAcceptGzip_compressesResponse(self): + response = self._get('/data/graph?run=run1&limit_attr_size=1024' + '&large_attrs_key=_very_large_attrs', + {'Accept-Encoding': 'gzip'}) + self.assertEqual(response.status, 200) + self.assertEqual(response.getheader('Content-Encoding'), 'gzip') + pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read() + graph = text_format.Parse(pbtxt, tf.GraphDef()) + self.assertEqual(len(graph.node), 2) + + def testAcceptAnyEncoding_compressesResponse(self): + response = self._get('/data/graph?run=run1&limit_attr_size=1024' + '&large_attrs_key=_very_large_attrs', + {'Accept-Encoding': '*'}) + self.assertEqual(response.status, 200) + self.assertEqual(response.getheader('Content-Encoding'), 'gzip') + pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read() + graph = text_format.Parse(pbtxt, tf.GraphDef()) + self.assertEqual(len(graph.node), 2) + + def testAcceptDoodleEncoding_doesNotCompressResponse(self): + response = self._get('/data/graph?run=run1&limit_attr_size=1024' + '&large_attrs_key=_very_large_attrs', + {'Accept-Encoding': 'doodle'}) + self.assertEqual(response.status, 200) + self.assertIsNone(response.getheader('Content-Encoding')) + graph = text_format.Parse(response.read(), tf.GraphDef()) + self.assertEqual(len(graph.node), 2) + def testRunMetadata(self): """Test retrieving the run metadata information.""" response = self._get('/data/run_metadata?run=run1&tag=test%20run') self.assertEqual(response.status, 200) - # Decompress (unzip) the response, since run outputs come gzipped. - run_metadata_pbtxt = self._decodeResponse(response) + run_metadata_pbtxt = response.read() # Parse from pbtxt into a message. run_metadata = tf.RunMetadata() text_format.Parse(run_metadata_pbtxt, run_metadata) @@ -283,11 +298,46 @@ class TensorboardServerTest(tf.test.TestCase): class ParseEventFilesSpecTest(tf.test.TestCase): + def testRunName(self): + logdir_string = 'lol:/cat' + expected = {'/cat': 'lol'} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + + def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self): + logdir_string = '/lol:/cat' + expected = {'/lol:/cat': None} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + + def testMultipleDirectories(self): + logdir_string = '/a,/b' + expected = {'/a': None, '/b': None} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + + def testNormalizesPaths(self): + logdir_string = '/lol/.//cat/../cat' + expected = {'/lol/cat': None} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + + def testAbsolutifies(self): + logdir_string = 'lol/cat' + expected = {os.path.realpath('lol/cat'): None} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + def testRespectsGCSPath(self): logdir_string = 'gs://foo/path' expected = {'gs://foo/path': None} self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + def testDoesNotExpandUserInGCSPath(self): + logdir_string = 'gs://~/foo/path' + expected = {'gs://~/foo/path': None} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + + def testDoesNotNormalizeGCSPath(self): + logdir_string = 'gs://foo/./path//..' 
+ expected = {'gs://foo/./path//..': None} + self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected) + class TensorBoardAssetsTest(tf.test.TestCase): diff --git a/tensorflow/tensorboard/bower.json b/tensorflow/tensorboard/bower.json index 26d3d541656..20095d92750 100644 --- a/tensorflow/tensorboard/bower.json +++ b/tensorflow/tensorboard/bower.json @@ -10,7 +10,6 @@ "iron-flex-layout", "iron-form-element-behavior", "iron-icon", - "iron-icons", "iron-iconset-svg", "iron-input", "iron-menu-behavior", @@ -40,8 +39,8 @@ "iron-a11y-announcer": "PolymerElements/iron-a11y-announcer#1.0.4", "iron-a11y-keys-behavior": "PolymerElements/iron-a11y-keys-behavior#1.1.2", "iron-ajax": "PolymerElements/iron-ajax#1.2.0", - "iron-autogrow-textarea": "PolymerElements/iron-autogrow-textarea#1.0.11", - "iron-behaviors": "PolymerElements/iron-behaviors#1.0.16", + "iron-autogrow-textarea": "PolymerElements/iron-autogrow-textarea#1.0.12", + "iron-behaviors": "PolymerElements/iron-behaviors#1.0.17", "iron-checked-element-behavior": "PolymerElements/iron-checked-element-behavior#1.0.4", "iron-collapse": "PolymerElements/iron-collapse#1.0.8", "iron-dropdown": "PolymerElements/iron-dropdown#1.4.0", @@ -49,16 +48,15 @@ "iron-flex-layout": "PolymerElements/iron-flex-layout#1.3.0", "iron-form-element-behavior": "PolymerElements/iron-form-element-behavior#1.0.6", "iron-icon": "PolymerElements/iron-icon#1.0.8", - "iron-icons": "PolymerElements/iron-icons#1.1.3", "iron-iconset-svg": "PolymerElements/iron-iconset-svg#1.0.9", - "iron-input": "PolymerElements/iron-input#1.0.7", + "iron-input": "PolymerElements/iron-input#1.0.10", "iron-list": "PolymerElements/iron-list#1.1.7", "iron-menu-behavior": "PolymerElements/iron-menu-behavior#1.1.8", "iron-meta": "PolymerElements/iron-meta#1.1.1", "iron-overlay-behavior": "PolymerElements/iron-overlay-behavior#1.7.6", "iron-range-behavior": "PolymerElements/iron-range-behavior#1.0.4", "iron-resizable-behavior": "PolymerElements/iron-resizable-behavior#1.0.3", - "iron-selector": "PolymerElements/iron-selector#1.2.4", + "iron-selector": "PolymerElements/iron-selector#1.5.2", "iron-validatable-behavior": "PolymerElements/iron-validatable-behavior#1.1.1", "lodash": "3.8.0", "neon-animation": "PolymerElements/neon-animation#1.2.2", @@ -67,14 +65,14 @@ "paper-checkbox": "PolymerElements/paper-checkbox#1.1.3", "paper-dialog": "PolymerElements/paper-dialog#1.0.4", "paper-dialog-behavior": "PolymerElements/paper-dialog-behavior#1.2.5", - "paper-dropdown-menu": "PolymerElements/paper-dropdown-menu#1.1.3", + "paper-dropdown-menu": "PolymerElements/paper-dropdown-menu#1.3.2", "paper-header-panel": "PolymerElements/paper-header-panel#1.1.4", "paper-icon-button": "PolymerElements/paper-icon-button#1.1.1", - "paper-input": "PolymerElements/paper-input#1.1.5", + "paper-input": "PolymerElements/paper-input#1.1.14", "paper-item": "PolymerElements/paper-item#1.1.4", "paper-material": "PolymerElements/paper-material#1.0.6", "paper-menu": "PolymerElements/paper-menu#1.2.2", - "paper-menu-button": "PolymerElements/paper-menu-button#1.2.0", + "paper-menu-button": "PolymerElements/paper-menu-button#1.5.0", "paper-progress": "PolymerElements/paper-progress#1.0.9", "paper-radio-button": "PolymerElements/paper-radio-button#1.1.2", "paper-radio-group": "PolymerElements/paper-radio-group#1.0.9", @@ -116,8 +114,8 @@ "iron-a11y-announcer": "1.0.4", "iron-a11y-keys-behavior": "1.1.2", "iron-ajax": "1.2.0", - "iron-autogrow-textarea": "1.0.11", - "iron-behaviors": "1.0.16", + "iron-autogrow-textarea": 
"1.0.12", + "iron-behaviors": "1.0.17", "iron-checked-element-behavior": "1.0.4", "iron-collapse": "1.0.8", "iron-dropdown": "1.4.0", @@ -127,14 +125,14 @@ "iron-icon": "1.0.8", "iron-icons": "1.1.3", "iron-iconset-svg": "1.0.9", - "iron-input": "1.0.7", + "iron-input": "1.0.10", "iron-list": "1.1.7", "iron-menu-behavior": "1.1.8", "iron-meta": "1.1.1", "iron-overlay-behavior": "1.7.6", "iron-range-behavior": "1.0.4", "iron-resizable-behavior": "1.0.3", - "iron-selector": "1.2.4", + "iron-selector": "1.5.2", "iron-validatable-behavior": "1.1.1", "lodash": "3.8.0", "neon-animation": "1.2.2", @@ -143,14 +141,14 @@ "paper-checkbox": "1.1.3", "paper-dialog": "1.0.4", "paper-dialog-behavior": "1.2.5", - "paper-dropdown-menu": "1.1.3", + "paper-dropdown-menu": "1.3.2", "paper-header-panel": "1.1.4", "paper-icon-button": "1.1.1", - "paper-input": "1.1.5", + "paper-input": "1.1.14", "paper-item": "1.1.4", "paper-material": "1.0.6", "paper-menu": "1.2.2", - "paper-menu-button": "1.2.0", + "paper-menu-button": "1.5.0", "paper-progress": "1.0.9", "paper-radio-button": "1.1.2", "paper-radio-group": "1.0.9", diff --git a/tensorflow/tensorboard/bower/BUILD b/tensorflow/tensorboard/bower/BUILD index 90d9910205b..4b43b558443 100644 --- a/tensorflow/tensorboard/bower/BUILD +++ b/tensorflow/tensorboard/bower/BUILD @@ -22,7 +22,6 @@ filegroup( "@iron_flex_layout//:iron_flex_layout", "@iron_form_element_behavior//:iron_form_element_behavior", "@iron_icon//:iron_icon", - "@iron_icons//:iron_icons", "@iron_iconset_svg//:iron_iconset_svg", "@iron_input//:iron_input", "@iron_list//:iron_list", diff --git a/tensorflow/tensorboard/components/BUILD b/tensorflow/tensorboard/components/BUILD index 970d9ee0ed4..faa8f23390f 100644 --- a/tensorflow/tensorboard/components/BUILD +++ b/tensorflow/tensorboard/components/BUILD @@ -7,7 +7,10 @@ exports_files(["LICENSE"]) filegroup( name = "all_files", srcs = glob( - ["tf-*/**/*", "vz-*/**/*"], + [ + "tf-*/**/*", + "vz-*/**/*", + ], exclude = [ "**/tf_model_zoo/*", "**/METADATA", diff --git a/tensorflow/tensorboard/components/tf-backend/backend.ts b/tensorflow/tensorboard/components/tf-backend/backend.ts index f23d7946e2a..5c75d790106 100644 --- a/tensorflow/tensorboard/components/tf-backend/backend.ts +++ b/tensorflow/tensorboard/components/tf-backend/backend.ts @@ -182,11 +182,16 @@ module TF.Backend { let url = this.router.histograms(tag, run); p = this.requestManager.request(url); return p.then(map(detupler(createHistogram))).then(function(histos) { + // Get the minimum and maximum values across all histograms so that the + // visualization is aligned for all timesteps. + let min = d3.min(histos, d => d.min); + let max = d3.max(histos, d => d.max); + return histos.map(function(histo, i) { return { wall_time: histo.wall_time, step: histo.step, - bins: convertBins(histo) + bins: convertBins(histo, min, max) }; }); }); @@ -254,11 +259,65 @@ module TF.Backend { } /** Given a RunToTag, return sorted array of all runs */ - export function getRuns(r: RunToTag): string[] { return _.keys(r).sort(); } + export function getRuns(r: RunToTag): string[] { + return _.keys(r).sort(compareTagNames); + } /** Given a RunToTag, return array of all tags (sorted + dedup'd) */ export function getTags(r: RunToTag): string[] { - return _.union.apply(null, _.values(r)).sort(); + return _.union.apply(null, _.values(r)).sort(compareTagNames); + } + + /** Compares tag names asciinumerically broken into components. 
*/ + export function compareTagNames(a, b: string): number { + let ai = 0; + let bi = 0; + while (true) { + if (ai === a.length) return bi === b.length ? 0 : -1; + if (bi === b.length) return 1; + if (isDigit(a[ai]) && isDigit(b[bi])) { + let ais = ai; + let bis = bi; + ai = consumeNumber(a, ai + 1); + bi = consumeNumber(b, bi + 1); + let an = parseFloat(a.slice(ais, ai)); + let bn = parseFloat(b.slice(bis, bi)); + if (an < bn) return -1; + if (an > bn) return 1; + continue; + } + if (isBreak(a[ai])) { + if (!isBreak(b[bi])) return -1; + } else if (isBreak(b[bi])) { + return 1; + } else if (a[ai] < b[bi]) { + return -1; + } else if (a[ai] > b[bi]) { + return 1; + } + ai++; + bi++; + } + } + + function consumeNumber(s: string, i: number): number { + let decimal = false; + for (; i < s.length; i++) { + if (isDigit(s[i])) continue; + if (!decimal && s[i] === '.') { + decimal = true; + continue; + } + break; + } + return i; + } + + function isDigit(c: string): boolean { return '0' <= c && c <= '9'; } + + function isBreak(c: string): boolean { + // TODO(jart): Remove underscore when people stop using it like a slash. + return c === '/' || c === '_' || isDigit(c); } /** @@ -313,34 +372,59 @@ module TF.Backend { * Takes histogram data as stored by tensorboard backend and converts it to * the standard d3 histogram data format to make it more compatible and easier * to visualize. When visualizing histograms, having the left edge and width - * makes things quite a bit easier. + * makes things quite a bit easier. The bins are also converted to have an + * uniform width, what makes the visualization easier to understand. * * @param histogram A histogram from tensorboard backend. + * @param min The leftmost edge. The binning will start on it. + * @param max The rightmost edge. The binning will end on it. + * @param numBins The number of bins of the converted data. The default of 30 + * is a sensible default, using more starts to get artifacts because the event + * data is stored in buckets, and you start being able to see the aliased + * borders between each bucket. * @return A histogram bin. Each bin has an x (left edge), a dx (width), * and a y (count). * * If given rightedges are inclusive, then these left edges (x) are exclusive. */ - export function convertBins(histogram: Histogram) { + export function convertBins( + histogram: Histogram, min: number, max: number, numBins = 30) { if (histogram.bucketRightEdges.length !== histogram.bucketCounts.length) { throw(new Error('Edges and counts are of different lengths.')); } - var previousRightEdge = histogram.min; - return histogram.bucketRightEdges.map(function( - rightEdge: number, i: number) { + let binWidth = (max - min) / numBins; + let bucketLeft = min; // Use the min as the starting point for the bins. + let bucketPos = 0; + return d3.range(min, max, binWidth).map(function(binLeft) { + let binRight = binLeft + binWidth; - // Use the previous bin's rightEdge as the new leftEdge - var left = previousRightEdge; + // Take the count of each existing bucket, multiply it by the proportion + // of overlap with the new bin, then sum and store as the count for the + // new bin. If no overlap, will add to zero, if 100% overlap, will include + // the full count into new bin. + let binY = 0; + while (bucketPos < histogram.bucketRightEdges.length) { + // Clip the right edge because right-most edge can be infinite-sized. 
+ let bucketRight = Math.min(max, histogram.bucketRightEdges[bucketPos]); - // We need to clip the rightEdge because right-most edge can be - // infinite-sized - var right = Math.min(histogram.max, rightEdge); + let intersect = + Math.min(bucketRight, binRight) - Math.max(bucketLeft, binLeft); + let count = (intersect / (bucketRight - bucketLeft)) * + histogram.bucketCounts[bucketPos]; - // Store rightEdgeValue for next iteration - previousRightEdge = rightEdge; + binY += intersect > 0 ? count : 0; - return {x: left, dx: right - left, y: histogram.bucketCounts[i]}; + // If bucketRight is bigger than binRight, than this bin is finished and + // there is data for the next bin, so don't increment bucketPos. + if (bucketRight > binRight) { + break; + } + bucketLeft = Math.max(min, bucketRight); + bucketPos++; + }; + + return {x: binLeft, dx: binWidth, y: binY}; }); } diff --git a/tensorflow/tensorboard/components/tf-backend/test/backendTests.ts b/tensorflow/tensorboard/components/tf-backend/test/backendTests.ts index b8f15ab4c1c..15cdd0bf95e 100644 --- a/tensorflow/tensorboard/components/tf-backend/test/backendTests.ts +++ b/tensorflow/tensorboard/components/tf-backend/test/backendTests.ts @@ -191,13 +191,16 @@ module TF.Backend { it('Throws and error if the inputs are of different lengths', function() { assert.throws(function() { convertBins( - {bucketRightEdges: [0], bucketCounts: [1, 2], min: 1, max: 2}); + {bucketRightEdges: [0], bucketCounts: [1, 2], min: 1, max: 2}, 1, 2, + 2); }, 'Edges and counts are of different lengths.'); }); it('Handles data with no bins', function() { assert.deepEqual( - convertBins({bucketRightEdges: [], bucketCounts: [], min: 0, max: 0}), + convertBins( + {bucketRightEdges: [], bucketCounts: [], min: 0, max: 0}, 0, 0, + 0), []); }); @@ -205,12 +208,14 @@ module TF.Backend { let counts = [1]; let rightEdges = [1.21e-12]; let histogram = [{x: 1.1e-12, dx: 1.21e-12 - 1.1e-12, y: 1}]; - let newHistogram = convertBins({ - bucketRightEdges: rightEdges, - bucketCounts: counts, - min: 1.1e-12, - max: 1.21e-12 - }); + let newHistogram = convertBins( + { + bucketRightEdges: rightEdges, + bucketCounts: counts, + min: 1.1e-12, + max: 1.21e-12 + }, + 1.1e-12, 1.21e-12, 1); assertHistogramEquality(newHistogram, histogram); }); @@ -218,15 +223,17 @@ module TF.Backend { let counts = [1, 2]; let rightEdges = [1.1e-12, 1.21e-12]; let histogram = [ - {x: 1.0e-12, dx: 1.1e-12 - 1.0e-12, y: 1}, - {x: 1.1e-12, dx: 1.21e-12 - 1.1e-12, y: 2} + {x: 1.0e-12, dx: 1.05e-13, y: 1.09090909090909}, + {x: 1.105e-12, dx: 1.05e-13, y: 1.9090909090909} ]; - let newHistogram = convertBins({ - bucketRightEdges: rightEdges, - bucketCounts: counts, - min: 1.0e-12, - max: 1.21e-12 - }); + let newHistogram = convertBins( + { + bucketRightEdges: rightEdges, + bucketCounts: counts, + min: 1.0e-12, + max: 1.21e-12 + }, + 1.0e-12, 1.21e-12, 2); assertHistogramEquality(newHistogram, histogram); }); @@ -236,15 +243,17 @@ module TF.Backend { let counts = [1, 2]; let rightEdges = [-1.0e-12, 1.0e-12]; let histogram = [ - {x: -1.1e-12, dx: 1.1e-12 - 1.0e-12, y: 1}, - {x: -1.0e-12, dx: 2.0e-12, y: 2} + {x: -1.1e-12, dx: 1.05e-12, y: 1.95}, + {x: -0.5e-13, dx: 1.05e-12, y: 1.05} ]; - let newHistogram = convertBins({ - bucketRightEdges: rightEdges, - bucketCounts: counts, - min: -1.1e-12, - max: 1.0e-12 - }); + let newHistogram = convertBins( + { + bucketRightEdges: rightEdges, + bucketCounts: counts, + min: -1.1e-12, + max: 1.0e-12 + }, + -1.1e-12, 1.0e-12, 2); assertHistogramEquality(newHistogram, histogram); }); 
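The convertBins rewrite above resamples TensorBoard's variable-width histogram buckets onto a fixed number of uniform bins, splitting each bucket's count in proportion to how much of the bucket overlaps each bin, and the caller now passes a min and max computed across all of a run's histograms so every timestep is binned over the same range. A rough Python paraphrase of that math, for illustration only (the real implementation is the TypeScript above):

def convert_bins(right_edges, counts, lo, hi, num_bins=30):
    """Rebins bucketed histogram data onto num_bins uniform-width bins.

    Each bucket's count is distributed across the new bins in proportion to
    overlap; the right-most edge is clipped to `hi` because it can be
    effectively infinite.
    """
    if len(right_edges) != len(counts):
        raise ValueError('Edges and counts are of different lengths.')
    if not counts:
        return []
    bin_width = (hi - lo) / num_bins
    bins = [{'x': lo + i * bin_width, 'dx': bin_width, 'y': 0.0}
            for i in range(num_bins)]
    bucket_left = lo
    for right_edge, count in zip(right_edges, counts):
        bucket_right = min(hi, right_edge)
        width = bucket_right - bucket_left
        for b in bins:
            overlap = (min(bucket_right, b['x'] + b['dx'])
                       - max(bucket_left, b['x']))
            if width > 0 and overlap > 0:
                b['y'] += count * overlap / width
        bucket_left = max(lo, bucket_right)
    return bins

# To align every timestep of a run to one range, as backend.ts now does with
# d3.min/d3.max over the run's histograms (histos is a hypothetical list of
# dicts carrying 'min' and 'max'):
#   lo = min(h['min'] for h in histos); hi = max(h['max'] for h in histos)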
@@ -253,16 +262,71 @@ module TF.Backend { let counts = [1, 2, 3]; let rightEdges = [0, 1.0e-12, 1.0e14]; let histogram = [ - {x: -1.0e-12, dx: 1.0e-12, y: 1}, {x: 0, dx: 1.0e-12, y: 2}, - {x: 1.0e-12, dx: 1.1e-12 - 1.0e-12, y: 3} + {x: -1.0e-12, dx: 0.7e-12, y: 0.7}, + {x: -0.3e-12, dx: 0.7e-12, y: 1.1}, + {x: 0.4e-12, dx: 0.7e-12, y: 4.2} ]; - let newHistogram = convertBins({ - bucketRightEdges: rightEdges, - bucketCounts: counts, - min: -1.0e-12, - max: 1.1e-12 - }); + let newHistogram = convertBins( + { + bucketRightEdges: rightEdges, + bucketCounts: counts, + min: -1.0e-12, + max: 1.1e-12 + }, + -1.0e-12, 1.1e-12, 3); assertHistogramEquality(newHistogram, histogram); }); }); + + describe('sortTagNames', () => { + + let sortTagNames = (a) => a.sort(compareTagNames); + + it('is asciibetical', () => { + assert.deepEqual(sortTagNames(['a', 'b']), ['a', 'b']); + assert.deepEqual(sortTagNames(['a', 'B']), ['B', 'a']); + }); + + it('sorts integer portions', () => { + assert.deepEqual(['03', '1'].sort(), ['03', '1']); + assert.deepEqual(sortTagNames(['03', '1']), ['1', '03']); + assert.deepEqual(sortTagNames(['a03', 'a1']), ['a1', 'a03']); + assert.deepEqual(sortTagNames(['a03', 'b1']), ['a03', 'b1']); + assert.deepEqual(sortTagNames(['x0a03', 'x0a1']), ['x0a1', 'x0a03']); + assert.deepEqual(sortTagNames(['a/b/03', 'a/b/1']), ['a/b/1', 'a/b/03']); + }); + + it('sorts floating point portions', () => { + assert.deepEqual(sortTagNames(['a0.1', 'a0.01']), ['a0.01', 'a0.1']); + }); + + it('is componentized by slash', () => { + assert.deepEqual(['a+/a', 'a/a', 'ab/a'].sort(), ['a+/a', 'a/a', 'ab/a']); + assert.deepEqual( + sortTagNames(['a+/a', 'a/a', 'ab/a']), ['a/a', 'a+/a', 'ab/a']); + }); + + it('is componentized by underscore', () => { + assert.deepEqual( + sortTagNames(['a+_a', 'a_a', 'ab_a']), ['a_a', 'a+_a', 'ab_a']); + assert.deepEqual( + sortTagNames(['a+/a', 'a_a', 'ab_a']), ['a_a', 'a+/a', 'ab_a']); + }); + + it('is componentized by number boundaries', () => { + assert.deepEqual( + sortTagNames(['a+0a', 'a0a', 'ab0a']), ['a0a', 'a+0a', 'ab0a']); + }); + + it('empty comes first', () => { + assert.deepEqual( + sortTagNames(['a', '//', '/', '']), ['', '/', '//', 'a']); + }); + + it('decimal parsed correctly', () => { + assert.deepEqual(sortTagNames(['0.2', '0.03']), ['0.03', '0.2']); + assert.deepEqual(sortTagNames(['0..2', '0..03']), ['0..2', '0..03']); + assert.deepEqual(sortTagNames(['.2', '.03']), ['.2', '.03']); + }); + }); } diff --git a/tensorflow/tensorboard/components/tf-backend/test/index.html b/tensorflow/tensorboard/components/tf-backend/test/index.html index ad92f694029..c97873f46ab 100644 --- a/tensorflow/tensorboard/components/tf-backend/test/index.html +++ b/tensorflow/tensorboard/components/tf-backend/test/index.html @@ -19,6 +19,7 @@ limitations under the License. 
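getRuns and getTags now sort with compareTagNames, the asciinumeric comparison added above: runs of digits (with at most one decimal point) are compared as numbers, while '/', '_', and digit boundaries act as component separators, so 'a/b/1' sorts before 'a/b/03' and the empty string sorts first. A Python paraphrase of that comparator, for illustration and assuming the same semantics as the TypeScript:

import functools

def _is_digit(c):
    return '0' <= c <= '9'

def _is_break(c):
    # '/', '_' and digit boundaries separate components.
    return c in ('/', '_') or _is_digit(c)

def _consume_number(s, i):
    # Advance past further digits and at most one decimal point.
    decimal = False
    while i < len(s):
        if _is_digit(s[i]):
            i += 1
        elif not decimal and s[i] == '.':
            decimal = True
            i += 1
        else:
            break
    return i

def compare_tag_names(a, b):
    ai = bi = 0
    while True:
        if ai == len(a):
            return 0 if bi == len(b) else -1
        if bi == len(b):
            return 1
        if _is_digit(a[ai]) and _is_digit(b[bi]):
            ais, bis = ai, bi
            ai = _consume_number(a, ai + 1)
            bi = _consume_number(b, bi + 1)
            an, bn = float(a[ais:ai]), float(b[bis:bi])
            if an != bn:
                return -1 if an < bn else 1
            continue
        if _is_break(a[ai]):
            if not _is_break(b[bi]):
                return -1
        elif _is_break(b[bi]):
            return 1
        elif a[ai] != b[bi]:
            return -1 if a[ai] < b[bi] else 1
        ai += 1
        bi += 1

# e.g. sorted(['a03', 'a1'], key=functools.cmp_to_key(compare_tag_names))
# -> ['a1', 'a03'], matching the sortTagNames tests above.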
+ diff --git a/tensorflow/tensorboard/components/tf-backend/tf-backend.html b/tensorflow/tensorboard/components/tf-backend/tf-backend.html index 4f6a1680e19..bc4f16a5f70 100644 --- a/tensorflow/tensorboard/components/tf-backend/tf-backend.html +++ b/tensorflow/tensorboard/components/tf-backend/tf-backend.html @@ -1,5 +1,6 @@ + diff --git a/tensorflow/tensorboard/components/tf-dashboard-common/dashboard-style.html b/tensorflow/tensorboard/components/tf-dashboard-common/dashboard-style.html index 095a6cf9a59..3884a26694e 100644 --- a/tensorflow/tensorboard/components/tf-dashboard-common/dashboard-style.html +++ b/tensorflow/tensorboard/components/tf-dashboard-common/dashboard-style.html @@ -16,19 +16,23 @@ position: relative; } - .card .card-title { + .card .card-title, .card .card-subtitle { flex-grow: 0; flex-shrink: 0; - margin-bottom: 10px; font-size: 14px; text-overflow: ellipsis; overflow: hidden; } + .card .card-subtitle { + font-size: 12px; + } + .card .card-content { flex-grow: 1; flex-shrink: 1; display: flex; + margin-top: 10px; } .card .card-bottom-row { position: absolute; diff --git a/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-chart.html b/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-chart.html index d58520a8a7c..e5459a8276b 100644 --- a/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-chart.html +++ b/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-chart.html @@ -36,10 +36,11 @@ selectedRuns: Array, xType: String, dataProvider: Function, - _initialized: Boolean, + _attached: Boolean, + _makeChartAsyncCallbackId: { type: Number, value: null } }, observers: [ - "_makeChart(tag, dataProvider, xType, colorScale, _initialized)", + "_makeChart(tag, dataProvider, xType, colorScale, _attached)", "_changeRuns(_chart, selectedRuns.*)" ], _changeRuns: function(chart) { @@ -55,23 +56,26 @@ reload: function() { this._chart.reload(); }, - _makeChart: function(tag, dataProvider, xType, colorScale, _initialized) { - if (!_initialized) { - return; + _makeChart: function(tag, dataProvider, xType, colorScale, _attached) { + if (this._makeChartAsyncCallbackId === null) { + this.cancelAsync(this._makeChartAsyncCallbackId); } - if (this._chart) this._chart.destroy(); - var chart = new TF.DistributionChart(tag, dataProvider, xType, colorScale); - var svg = d3.select(this.$.chartsvg); - this.async(function() { + + this._makeChartAsyncCallbackId = this.async(function() { + this._makeChartAsyncCallbackId = null; + if (!_attached) return; + if (this._chart) this._chart.destroy(); + var chart = new TF.DistributionChart(tag, dataProvider, xType, colorScale); + var svg = d3.select(this.$.chartsvg); chart.renderTo(svg); this._chart = chart; }, 350); }, attached: function() { - this._initialized = true; + this._attached = true; }, detached: function() { - this._initialized = false; + this._attached = false; } }); diff --git a/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-dashboard.html b/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-dashboard.html index 962dcdef133..6cd938ac527 100644 --- a/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-dashboard.html +++ b/tensorflow/tensorboard/components/tf-distribution-dashboard/tf-distribution-dashboard.html @@ -1,6 +1,6 @@ - + @@ -47,10 +47,15 @@ tf-collapsable-panes. > @@ -230,6 +238,9 @@ allows the user to toggle between various dashboards. 
_modeIsDistributions: function(mode) { return mode === "distributions"; }, + _modeIsHistograms: function(mode) { + return mode === "histograms"; + }, selectedDashboard: function() { var dashboard = this.$$("#" + this.mode); if (dashboard == null) { diff --git a/tensorflow/tensorboard/components/vz-histogram-timeseries/demo/index.html b/tensorflow/tensorboard/components/vz-histogram-timeseries/demo/index.html new file mode 100644 index 00000000000..56fb436b98d --- /dev/null +++ b/tensorflow/tensorboard/components/vz-histogram-timeseries/demo/index.html @@ -0,0 +1,67 @@ + + + + + + vz-histogram-timeseries demo + + + + + + + + +

(vz-histogram-timeseries demo sections: "vz-histogram-timeseries mode", "vz-histogram-timeseries axis")
+ + + + + + + diff --git a/tensorflow/tensorboard/components/vz-histogram-timeseries/index.html b/tensorflow/tensorboard/components/vz-histogram-timeseries/index.html new file mode 100644 index 00000000000..483a746c489 --- /dev/null +++ b/tensorflow/tensorboard/components/vz-histogram-timeseries/index.html @@ -0,0 +1,14 @@ + + + + + vz-histogram-timeseries + + + + + + + + + diff --git a/tensorflow/tensorboard/components/vz-histogram-timeseries/vz-histogram-timeseries.html b/tensorflow/tensorboard/components/vz-histogram-timeseries/vz-histogram-timeseries.html index dc1fd4a2954..5b91df77d02 100644 --- a/tensorflow/tensorboard/components/vz-histogram-timeseries/vz-histogram-timeseries.html +++ b/tensorflow/tensorboard/components/vz-histogram-timeseries/vz-histogram-timeseries.html @@ -1,15 +1,45 @@ + + - diff --git a/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.html b/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.html index be2045ae9c9..ed88750b53c 100644 --- a/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.html +++ b/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.html @@ -225,11 +225,11 @@ smoothing. this.scopeSubtree(this.$.chartsvg, true); }, _makeChart: function(xType, colorScale, _attached) { - if (this._makeChartAsyncHandle === null) { + if (this._makeChartAsyncCallbackId === null) { this.cancelAsync(this._makeChartAsyncCallbackId); } - this._makeChartAsyncHandle = this.async(function() { + this._makeChartAsyncCallbackId = this.async(function() { this._makeChartAsyncCallbackId = null; if (!this._attached) return; if (this._chart) this._chart.destroy(); @@ -238,7 +238,7 @@ smoothing. var svg = d3.select(this.$.chartsvg); chart.renderTo(svg); this._chart = chart; - }.bind(this), 350); + }, 350); }, _reloadFromCache: function() { if(this._chart) { diff --git a/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.ts b/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.ts index 4e5d219f1c1..503b88e04cf 100644 --- a/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.ts +++ b/tensorflow/tensorboard/components/vz-line-chart/vz-line-chart.ts @@ -419,6 +419,10 @@ module VZ { this.datasets = names.map((r) => this.getDataset(r)); this.datasets.forEach((d) => d.onUpdate(this.onDatasetChanged)); this.linePlot.datasets(this.datasets); + + if (this.smoothingEnabled) { + this.smoothLinePlot.datasets(this.datasets); + } } /** diff --git a/tensorflow/tensorboard/dist/tf-tensorboard.html b/tensorflow/tensorboard/dist/tf-tensorboard.html index 8fe8e895ac0..169fbc9a5fd 100644 --- a/tensorflow/tensorboard/dist/tf-tensorboard.html +++ b/tensorflow/tensorboard/dist/tf-tensorboard.html @@ -99,7 +99,7 @@ var TF; var Globals; (function (Globals) { // The names of TensorBoard tabs. - Globals.TABS = ['events', 'images', 'audio', 'graphs', 'histograms']; + Globals.TABS = ['events', 'images', 'audio', 'graphs', 'distributions', 'histograms']; // If true, TensorBoard stores its hash in the URI state. // If false, tab switching in TensorBoard will not update location hash, // because hash updates interfere with wct_tests. @@ -526,8 +526,8 @@ var TF;
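For completeness, a client-side sketch of the content negotiation exercised by the new testAcceptGzip_compressesResponse test in server_test.py above, written for Python 3 and assuming a TensorBoard backend on localhost:6006 (host and port are placeholders):

import gzip
import http.client

conn = http.client.HTTPConnection('localhost', 6006)
conn.request('GET',
             '/data/graph?run=run1&limit_attr_size=1024'
             '&large_attrs_key=_very_large_attrs',
             headers={'Accept-Encoding': 'gzip'})
resp = conn.getresponse()
body = resp.read()
if resp.getheader('Content-Encoding') == 'gzip':
    body = gzip.decompress(body)   # graph comes back as pbtxt once decompressed
print(resp.status, len(body))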