diff --git a/configure b/configure index ee1967f9374..b68a3d23818 100755 --- a/configure +++ b/configure @@ -145,7 +145,7 @@ while [ "$TF_NEED_CUDA" == "" ]; do done export TF_NEED_CUDA -export TF_NEED_SYCL +export TF_NEED_OPENCL if [[ "$TF_NEED_CUDA" == "0" ]] && [[ "$TF_NEED_OPENCL" == "0" ]]; then echo "Configuration finished" bazel_clean_and_fetch @@ -465,7 +465,6 @@ while true; do COMPUTECPP_TOOLKIT_PATH="" done -export TF_NEED_OPENCL # end of if "$TF_NEED_OPENCL" == "1" fi diff --git a/tensorflow/examples/udacity/3_regularization.ipynb b/tensorflow/examples/udacity/3_regularization.ipynb index 2658c00b6c7..5dc6f148611 100644 --- a/tensorflow/examples/udacity/3_regularization.ipynb +++ b/tensorflow/examples/udacity/3_regularization.ipynb @@ -60,7 +60,7 @@ "colab_type": "text" }, "source": [ - "First reload the data we generated in _notmist.ipynb_." + "First reload the data we generated in `1_notmnist.ipynb`." ] }, { diff --git a/tensorflow/g3doc/api_docs/python/array_ops.md b/tensorflow/g3doc/api_docs/python/array_ops.md index 460787399c8..c2ee073f833 100644 --- a/tensorflow/g3doc/api_docs/python/array_ops.md +++ b/tensorflow/g3doc/api_docs/python/array_ops.md @@ -2373,7 +2373,7 @@ The attr `block_size` indicates the input block size and how the data is moved. * Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size` - * The width the output tensor is `input_depth * block_size`, whereas the + * The width the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`. * The depth of the input tensor must be divisible by `block_size * block_size`. diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md index 03dc6bb3b0d..ef74b4d54a4 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md @@ -10,7 +10,7 @@ The attr `block_size` indicates the input block size and how the data is moved. * Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size` - * The width the output tensor is `input_depth * block_size`, whereas the + * The width the output tensor is `input_width * block_size`, whereas the height is `input_height * block_size`. * The depth of the input tensor must be divisible by `block_size * block_size`. 
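The hunks above correct the `depth_to_space` width formula: the output width depends on `input_width`, not `input_depth`. A minimal sketch of the corrected shape relationship, assuming the TF 1.x-era top-level `tf.depth_to_space` symbol documented here (newer releases expose the same op as `tf.nn.depth_to_space`):

```python
import tensorflow as tf

block_size = 2
# Input is [batch, height, width, depth]; depth must be divisible by block_size ** 2.
x = tf.zeros([1, 3, 3, 8])
y = tf.depth_to_space(x, block_size)

# Width and height grow by block_size, while depth shrinks by block_size ** 2.
print(y.get_shape())  # (1, 6, 6, 2)
```
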
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md index 788d2066ad7..8dc62c4c18c 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md @@ -1,4 +1,185 @@ +- - - + +#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom} + +Copies the content of the specified message into the current message. + +The method clears the current message and then merges the specified +message using MergeFrom. + +##### Args: + + +* `other_msg`: Message to copy into the current one. + + +- - - + +#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors} + +Finds required fields which are not initialized. + +##### Returns: + + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". + + +- - - + +#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized} + +Checks if all required fields of a message are set. + +##### Args: + + +* `errors`: A list which, if provided, will be populated with the field + paths of all missing required fields. + +##### Returns: + + True iff the specified message has all required fields set. + + +- - - + +#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString} + +Parse serialized protocol buffer data into this message. + +Like MergeFromString(), except we clear the object first and +do not return the value that MergeFromString returns. + + +- - - + +#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent} + +Sets the _cached_byte_size_dirty bit to true, +and propagates this to our listener iff this was a state change. 
+ + +- - - + +#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof} + +Returns the name of the currently set field inside a oneof, or None. + + +- - - + +#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__} + + + + - - - #### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__} @@ -6,3 +187,66 @@ Support the pickle protocol. +- - - + +#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__} + +Support the pickle protocol. + + +- - - + +#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata} + +Magic attribute generated for "run_metadata" proto field. + + +- - - + +#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag} + +Magic attribute generated for "tag" proto field. + + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md new file mode 100644 index 00000000000..bf17320a5a3 --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md @@ -0,0 +1,17 @@ +### `tf.merge_all_summaries(*args, **kwargs)` {#merge_all_summaries} + +Merges all summaries collected in the default graph. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.merge_all. + + Args: + key: `GraphKey` used to collect the summaries. Defaults to + `GraphKeys.SUMMARIES`. + + Returns: + If no summaries were collected, returns None. Otherwise returns a scalar + `Tensor` of type `string` containing the serialized `Summary` protocol + buffer resulting from the merging. + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md new file mode 100644 index 00000000000..6220d3641bc --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md @@ -0,0 +1,49 @@ +### `tf.image_summary(*args, **kwargs)` {#image_summary} + +Outputs a `Summary` protocol buffer with images. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.image. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs. + + The summary has up to `max_images` summary values containing images. 
The + images are built from `tensor` which must be 4-D with shape `[batch_size, + height, width, channels]` and where `channels` can be: + + * 1: `tensor` is interpreted as Grayscale. + * 3: `tensor` is interpreted as RGB. + * 4: `tensor` is interpreted as RGBA. + + The images have the same number of channels as the input tensor. For float + input, the values are normalized one image at a time to fit in the range + `[0, 255]`. `uint8` values are unchanged. The op uses two different + normalization algorithms: + + * If the input values are all positive, they are rescaled so the largest one + is 255. + + * If any input value is negative, the values are shifted so input value 0.0 + is at 127. They are then rescaled so that either the smallest value is 0, + or the largest one is 255. + + The `tag` argument is a scalar `Tensor` of type `string`. It is used to + build the `tag` of the summary values: + + * If `max_images` is 1, the summary value tag is '*tag*/image'. + * If `max_images` is greater than 1, the summary value tags are + generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. + + Args: + tag: A scalar `Tensor` of type `string`. Used to build the `tag` + of the summary values. + tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, + width, channels]` where `channels` is 1, 3, or 4. + max_images: Max number of batch elements to generate images for. + collections: Optional list of ops.GraphKeys. The collections to add the + summary to. Defaults to [ops.GraphKeys.SUMMARIES] + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md index 19532f7cc33..bce704ef4f2 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md @@ -1,4 +1,185 @@ +- - - + +#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize} + + + + +- - - + +#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear} + + + + +- - - + +#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension} + + + + +- - - + +#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField} + + + + +- - - + +#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom} + +Copies the content of the specified message into the current message. + +The method clears the current message and then merges the specified +message using MergeFrom. + +##### Args: + + +* `other_msg`: Message to copy into the current one. + + +- - - + +#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields} + + + + +- - - + +#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors} + +Finds required fields which are not initialized. + +##### Returns: + + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". 
+ + +- - - + +#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString} + + + + +- - - + +#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension} + + + + +- - - + +#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField} + + + + +- - - + +#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized} + +Checks if all required fields of a message are set. + +##### Args: + + +* `errors`: A list which, if provided, will be populated with the field + paths of all missing required fields. + +##### Returns: + + True iff the specified message has all required fields set. + + +- - - + +#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields} + + + + +- - - + +#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom} + + + + +- - - + +#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString} + + + + +- - - + +#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString} + +Parse serialized protocol buffer data into this message. + +Like MergeFromString(), except we clear the object first and +do not return the value that MergeFromString returns. + + +- - - + +#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension} + + + + +- - - + +#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString} + + + + +- - - + +#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString} + + + + +- - - + +#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent} + +Sets the _cached_byte_size_dirty bit to true, +and propagates this to our listener iff this was a state change. + + +- - - + +#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof} + +Returns the name of the currently set field inside a oneof, or None. + + +- - - + +#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__} + + + + - - - #### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__} @@ -6,3 +187,59 @@ Support the pickle protocol. +- - - + +#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__} + +Support the pickle protocol. + + +- - - + +#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__} + + + + +- - - + +#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint} + +Magic attribute generated for "type_hint" proto field. 
+ + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md index 6dba0e4f1ec..ec995fd99c9 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md @@ -173,125 +173,6 @@ Checks that for all elements of farray1 and farray2 * `err`: a float value. -- - - - -#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween} - -Asserts that value is between minv and maxv (inclusive). - - -- - - - -#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails} - -Asserts a shell command fails and the error matches a regex in a list. - -##### Args: - - -* `command`: List or string representing the command to run. -* `regexes`: the list of regular expression strings. -* `env`: Dictionary of environment variable settings. -* `close_fds`: Whether or not to close all open fd's in the child after - forking. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds} - -Asserts that a shell command succeeds (i.e. exits with code 0). - -##### Args: - - -* `command`: List or string representing the command to run. -* `regexes`: List of regular expression byte strings that match success. -* `env`: Dictionary of environment variable settings. -* `close_fds`: Whether or not to close all open fd's in the child after - forking. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence} - -Assert that "container" contains "subsequence" as an exact subsequence. - -Asserts that "container" contains all the elements of "subsequence", in -order, and without other elements interspersed. For example, [1, 2, 3] is an -exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0]. - -##### Args: - - -* `container`: the list we're testing for subsequence inclusion. -* `subsequence`: the list we hope will be an exact subsequence of container. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder} - -Asserts that the strings provided are found in the target in order. - -This may be useful for checking HTML output. - -##### Args: - - -* `strings`: A list of strings, such as [ 'fox', 'dog' ] -* `target`: A target string in which to look for the strings, such as - 'The quick brown fox jumped over the lazy dog'. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence} - -Assert that "container" contains "subsequence" as a subsequence. - -Asserts that "container" contains all the elements of "subsequence", in -order, but possibly with other elements interspersed. For example, [1, 2, 3] -is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0]. - -##### Args: - - -* `container`: the list we're testing for subsequence inclusion. -* `subsequence`: the list we hope will be a subsequence of container. -* `msg`: Optional message to report on failure. 
- - -- - - - -#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset} - -Checks whether actual iterable is a superset of expected iterable. - - -- - - - -#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual} - -An unordered sequence specific comparison. - -Equivalent to assertItemsEqual(). This method is a compatibility layer -for Python 3k, since 2to3 does not convert assertItemsEqual() calls into -assertCountEqual() calls. - -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `msg`: The message to be printed if the test fails. - - - - - #### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual} @@ -314,48 +195,9 @@ Checks whether actual is a superset of expected. - - - -#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual} - -Raises AssertionError if a and b are not equal dictionaries. - -##### Args: +#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual} -* `a`: A dict, the expected value. -* `b`: A dict, the actual value. -* `msg`: An optional str, the associated message. - -##### Raises: - - -* `AssertionError`: if the dictionaries are not equal. - - -- - - - -#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty} - -Assert that an object has zero length. - -##### Args: - - -* `container`: Anything that implements the collections.Sized interface. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith} - -Assert that actual.endswith(expected_end) is True. - -##### Args: - - -* `actual`: str -* `expected_end`: str -* `msg`: Optional message to report on failure. - - - @@ -440,11 +282,10 @@ Included for symmetry with assertIsNone. - - - -#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual} +#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual} -An unordered sequence specific comparison. - -It asserts that actual_seq and expected_seq have the same element counts. +An unordered sequence specific comparison. It asserts that +actual_seq and expected_seq have the same element counts. Equivalent to:: self.assertEqual(Counter(iter(actual_seq)), @@ -457,30 +298,6 @@ Asserts that each element has the same count in both sequences. - [0, 1, 1] and [1, 0, 1] compare equal. - [0, 0, 1] and [0, 1] compare unequal. -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `msg`: The message to be printed if the test fails. - - -- - - - -#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual} - -Asserts that the JSON objects defined in two strings are equal. - -A summary of the differences will be included in the failure message -using assertSameStructure. - -##### Args: - - -* `first`: A string contining JSON to decode and compare to second. -* `second`: A string contining JSON to decode and compare to first. -* `msg`: Additional text to include in the failure message. - - - - @@ -550,13 +367,6 @@ if not. * `msg`: An optional string message to append to the failure message. 
-- - - - -#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements} - -Checks whether actual iterable and expected iterable are disjoint. - - - - - #### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual} @@ -587,33 +397,6 @@ as significant digits (measured from the most signficant digit). Objects that are equal automatically fail. -- - - - -#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty} - -Assert that an object has non-zero length. - -##### Args: - - -* `container`: Anything that implements the collections.Sized interface. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith} - -Assert that actual.endswith(unexpected_end) is False. - -##### Args: - - -* `actual`: str -* `unexpected_end`: str -* `msg`: Optional message to report on failure. - - - - - #### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual} @@ -651,20 +434,6 @@ Included for symmetry with assertIsInstance. Fail the test if the text matches the regular expression. -- - - - -#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith} - -Assert that actual.startswith(unexpected_start) is False. - -##### Args: - - -* `actual`: str -* `unexpected_start`: str -* `msg`: Optional message to report on failure. - - - - - #### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals} @@ -739,38 +508,6 @@ Asserts that the message in a raised exception matches a regexp. * `kwargs`: Extra kwargs. -- - - - -#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch} - -Asserts that the message in a raised exception equals the given string. - -Unlike assertRaisesRegexp, this method takes a literal string, not -a regular expression. - -with self.assertRaisesWithLiteralMatch(ExType, 'message'): - DoSomething() - -##### Args: - - -* `expected_exception`: Exception class expected to be raised. -* `expected_exception_message`: String message expected in the raised - exception. For a raise exception e, expected_exception_message must - equal str(e). -* `callable_obj`: Function to be called, or None to return a context. -* `args`: Extra args. -* `kwargs`: Extra kwargs. - -##### Returns: - - A context manager if callable_obj is None. Otherwise, None. - -##### Raises: - - self.failureException if callable_obj does not raise a macthing exception. - - - - - #### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch} @@ -795,71 +532,6 @@ predicate search. exception. -- - - - -#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch} - -Asserts that the message in a raised exception matches the given regexp. - -This is just a wrapper around assertRaisesRegexp. Please use -assertRaisesRegexp instead of assertRaisesWithRegexpMatch. - -##### Args: - - -* `expected_exception`: Exception class expected to be raised. -* `expected_regexp`: Regexp (re pattern object or string) expected to be - found in error message. 
-* `callable_obj`: Function to be called, or None to return a context. -* `args`: Extra args. -* `kwargs`: Extra keyword args. - -##### Returns: - - A context manager if callable_obj is None. Otherwise, None. - -##### Raises: - - self.failureException if callable_obj does not raise a macthing exception. - - -- - - - -#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch} - -Asserts that at least one regex in regexes matches str. - - If possible you should use assertRegexpMatches, which is a simpler - version of this method. assertRegexpMatches takes a single regular - expression (a string or re compiled object) instead of a list. - - Notes: - 1. This function uses substring matching, i.e. the matching - succeeds if *any* substring of the error message matches *any* - regex in the list. This is more convenient for the user than - full-string matching. - - 2. If regexes is the empty list, the matching will always fail. - - 3. Use regexes=[''] for a regex that will always pass. - - 4. '.' matches any single character *except* the newline. To - match any character, use '(.| -)'. - - 5. '^' matches the beginning of each line, not just the beginning - of the string. Similarly, '$' matches the end of each line. - - 6. An exception will be thrown if regexes contains an invalid - regex. - - Args: - actual_str: The string we try to match with the items in regexes. - regexes: The regular expressions we want to match against str. - See "Notes" above for detailed notes on how this is interpreted. - message: The message to be printed if the test fails. - - - - - #### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches} @@ -867,79 +539,6 @@ Asserts that at least one regex in regexes matches str. Fail the test unless the text matches the regular expression. -- - - - -#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements} - -Assert that two sequences have the same elements (in any order). - -This method, unlike assertItemsEqual, doesn't care about any -duplicates in the expected and actual sequences. - - >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1]) - # Doesn't raise an AssertionError - -If possible, you should use assertItemsEqual instead of -assertSameElements. - -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `msg`: The message to be printed if the test fails. - - -- - - - -#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure} - -Asserts that two values contain the same structural content. - -The two arguments should be data trees consisting of trees of dicts and -lists. They will be deeply compared by walking into the contents of dicts -and lists; other items will be compared using the == operator. -If the two structures differ in content, the failure message will indicate -the location within the structures where the first difference is found. -This may be helpful when comparing large structures. - -##### Args: - - -* `a`: The first structure to compare. -* `b`: The second structure to compare. -* `aname`: Variable name to use for the first structure in assertion messages. -* `bname`: Variable name to use for the second structure. -* `msg`: Additional text to include in the failure message. 
- - -- - - - -#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual} - -An approximate equality assertion for ordered sequences. - -Fail if the two sequences are unequal as determined by their value -differences rounded to the given number of decimal places (default 7) and -comparing to zero, or by comparing that the difference between each value -in the two sequences is more than the given delta. - -Note that decimal places (from zero) are usually not the same as significant -digits (measured from the most signficant digit). - -If the two sequences compare equal then they will automatically compare -almost equal. - -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `places`: The number of decimal places to compare. -* `msg`: The message to be printed if the test fails. -* `delta`: The OK difference between compared values. - - - - - #### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual} @@ -960,26 +559,6 @@ which can be indexed, has a length, and has an equality operator. differences. -- - - - -#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith} - -An equality assertion for the beginning of ordered sequences. - -If prefix is an empty sequence, it will raise an error unless whole is also -an empty sequence. - -If prefix is not a sequence, it will raise an error if the first element of -whole does not match. - -##### Args: - - -* `prefix`: A sequence expected at the beginning of the whole parameter. -* `whole`: The sequence in which to look for prefix. -* `msg`: Optional message to report on failure. - - - - - #### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual} @@ -1031,51 +610,6 @@ Assert that actual.startswith(expected_start) is True. * `msg`: Optional message to report on failure. -- - - - -#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered} - -Asserts that total ordering has been implemented correctly. - -For example, say you have a class A that compares only on its attribute x. -Comparators other than __lt__ are omitted for brevity. - -class A(object): - def __init__(self, x, y): - self.x = x - self.y = y - - def __hash__(self): - return hash(self.x) - - def __lt__(self, other): - try: - return self.x < other.x - except AttributeError: - return NotImplemented - -assertTotallyOrdered will check that instances can be ordered correctly. -For example, - -self.assertTotallyOrdered( - [None], # None should come before everything else. - [1], # Integers sort earlier. - [A(1, 'a')], - [A(2, 'b')], # 2 is after 1. - [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant. - [A(4, 'z')], - ['foo']) # Strings sort last. - -##### Args: - - -* `*groups`: A list of groups of elements. Each group of elements is a list - of objects that are equal. The elements in each group must be less than - the elements in the group after it. For example, these groups are - totally ordered: [None], [1], [2, 2], [3]. -* `**kwargs`: optional msg keyword argument can be passed. - - - - - #### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue} @@ -1098,13 +632,6 @@ A tuple-specific equality assertion. differences. 
-- - - - -#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual} - -Asserts that urls are equal, ignoring ordering of query params. - - - - - #### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_} @@ -1166,9 +693,9 @@ tearDown. - - - -#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail} +#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail} -Fail immediately with the given message, optionally prefixed. +Fail immediately, with the given message. - - - @@ -1220,13 +747,6 @@ Fail immediately with the given message, optionally prefixed. -- - - - -#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties} - -Return any properties that the user has recorded. - - - - - #### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir} @@ -1249,20 +769,6 @@ pollute each others environment. -- - - - -#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty} - -Record an arbitrary property for later use. - -##### Args: - - -* `property_name`: str, name of property to record; must be a valid XML - attribute name -* `property_value`: value of property; must be valid XML attribute value - - - - - #### `tf.test.TestCase.run(result=None)` {#TestCase.run} @@ -1288,18 +794,11 @@ Hook method for setting up class fixture before running tests in the class. #### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription} -Format both the test method name and the first line of its docstring. +Returns a one-line description of the test, or None if no +description has been provided. -If no docstring is given, only returns the method name. - -This method overrides unittest.TestCase.shortDescription(), which -only returns the first line of the docstring, obscuring the name -of the test upon failure. - -##### Returns: - - -* `desc`: A short description of a test method. +The default implementation of this method returns the first line of +the specified test method's docstring. - - - diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md new file mode 100644 index 00000000000..3ffd9260c7b --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md @@ -0,0 +1,22 @@ +### `tf.scalar_summary(*args, **kwargs)` {#scalar_summary} + +Outputs a `Summary` protocol buffer with scalar values. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported. + + The input `tags` and `values` must have the same shape. The generated + summary has a summary value for each tag-value pair in `tags` and `values`. + + Args: + tags: A `string` `Tensor`. Tags for the summaries. + values: A real numeric Tensor. Values for the summaries. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
+ diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md new file mode 100644 index 00000000000..3cfd7103d7e --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md @@ -0,0 +1,4 @@ +#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension} + + + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md new file mode 100644 index 00000000000..570d7b712c6 --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md @@ -0,0 +1,26 @@ +### `tf.histogram_summary(*args, **kwargs)` {#histogram_summary} + +Outputs a `Summary` protocol buffer with a histogram. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope. + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + + This op reports an `InvalidArgument` error if any value is not finite. + + Args: + tag: A `string` `Tensor`. 0-D. Tag to use for the summary value. + values: A real numeric `Tensor`. Any shape. Values to use to + build the histogram. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md new file mode 100644 index 00000000000..ccb984f5abe --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md @@ -0,0 +1,27 @@ +### `tf.merge_summary(*args, **kwargs)` {#merge_summary} + +Merges summaries. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.merge. + + This op creates a + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + protocol buffer that contains the union of all the values in the input + summaries. + + When the Op is run, it reports an `InvalidArgument` error if multiple values + in the summaries to merge use the same tag. + + Args: + inputs: A list of `string` `Tensor` objects containing serialized `Summary` + protocol buffers. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer resulting from the merging. 
+ diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md new file mode 100644 index 00000000000..24a3b3f10c3 --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md @@ -0,0 +1,4 @@ +#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString} + + + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md new file mode 100644 index 00000000000..f2d0c042d77 --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md @@ -0,0 +1,4 @@ +#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension} + + + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md new file mode 100644 index 00000000000..e9bdda200f9 --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md @@ -0,0 +1,207 @@ + +- - - + +#### `tf.train.SummaryWriter.__init__(*args, **kwargs)` {#SummaryWriter.__init__} + +Creates a `SummaryWriter` and an event file. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.FileWriter. The interface and behavior is the same; this is just a rename. + + This class is deprecated, and should be replaced with tf.summary.FileWriter. + + On construction the summary writer creates a new event file in `logdir`. + This event file will contain `Event` protocol buffers constructed when you + call one of the following functions: `add_summary()`, `add_session_log()`, + `add_event()`, or `add_graph()`. + + If you pass a `Graph` to the constructor it is added to + the event file. (This is equivalent to calling `add_graph()` later). + + TensorBoard will pick the graph from the file and display it graphically so + you can interactively explore the graph you built. You will usually pass + the graph from the session in which you launched it: + + ```python + ...create a graph... + # Launch the graph in a session. + sess = tf.Session() + # Create a summary writer, add the 'graph' to the event file. + writer = tf.train.SummaryWriter(, sess.graph) + ``` + + The other arguments to the constructor control the asynchronous writes to + the event file: + + * `flush_secs`: How often, in seconds, to flush the added summaries + and events to disk. + * `max_queue`: Maximum number of summaries or events pending to be + written to disk before one of the 'add' calls block. + + Args: + logdir: A string. Directory where event file will be written. + graph: A `Graph` object, such as `sess.graph`. + max_queue: Integer. Size of the queue for pending events and summaries. + flush_secs: Number. How often, in seconds, to flush the + pending events and summaries to disk. + graph_def: DEPRECATED: Use the `graph` argument instead. + + +- - - + +#### `tf.train.SummaryWriter.add_event(event)` {#SummaryWriter.add_event} + +Adds an event to the event file. + +##### Args: + + +* `event`: An `Event` protocol buffer. 
+ + +- - - + +#### `tf.train.SummaryWriter.add_graph(graph, global_step=None, graph_def=None)` {#SummaryWriter.add_graph} + +Adds a `Graph` to the event file. + +The graph described by the protocol buffer will be displayed by +TensorBoard. Most users pass a graph in the constructor instead. + +##### Args: + + +* `graph`: A `Graph` object, such as `sess.graph`. +* `global_step`: Number. Optional global step counter to record with the + graph. +* `graph_def`: DEPRECATED. Use the `graph` parameter instead. + +##### Raises: + + +* `ValueError`: If both graph and graph_def are passed to the method. + + +- - - + +#### `tf.train.SummaryWriter.add_meta_graph(meta_graph_def, global_step=None)` {#SummaryWriter.add_meta_graph} + +Adds a `MetaGraphDef` to the event file. + +The `MetaGraphDef` allows running the given graph via +`saver.import_meta_graph()`. + +##### Args: + + +* `meta_graph_def`: A `MetaGraphDef` object, often as retured by + `saver.export_meta_graph()`. +* `global_step`: Number. Optional global step counter to record with the + graph. + +##### Raises: + + +* `TypeError`: If both `meta_graph_def` is not an instance of `MetaGraphDef`. + + +- - - + +#### `tf.train.SummaryWriter.add_run_metadata(run_metadata, tag, global_step=None)` {#SummaryWriter.add_run_metadata} + +Adds a metadata information for a single session.run() call. + +##### Args: + + +* `run_metadata`: A `RunMetadata` protobuf object. +* `tag`: The tag name for this metadata. +* `global_step`: Number. Optional global step counter to record with the + StepStats. + +##### Raises: + + +* `ValueError`: If the provided tag was already used for this type of event. + + +- - - + +#### `tf.train.SummaryWriter.add_session_log(session_log, global_step=None)` {#SummaryWriter.add_session_log} + +Adds a `SessionLog` protocol buffer to the event file. + +This method wraps the provided session in an `Event` protocol buffer +and adds it to the event file. + +##### Args: + + +* `session_log`: A `SessionLog` protocol buffer. +* `global_step`: Number. Optional global step value to record with the + summary. + + +- - - + +#### `tf.train.SummaryWriter.add_summary(summary, global_step=None)` {#SummaryWriter.add_summary} + +Adds a `Summary` protocol buffer to the event file. + +This method wraps the provided summary in an `Event` protocol buffer +and adds it to the event file. + +You can pass the result of evaluating any summary op, using +[`Session.run()`](client.md#Session.run) or +[`Tensor.eval()`](framework.md#Tensor.eval), to this +function. Alternatively, you can pass a `tf.Summary` protocol +buffer that you populate with your own data. The latter is +commonly done to report evaluation results in event files. + +##### Args: + + +* `summary`: A `Summary` protocol buffer, optionally serialized as a string. +* `global_step`: Number. Optional global step value to record with the + summary. + + +- - - + +#### `tf.train.SummaryWriter.close()` {#SummaryWriter.close} + +Flushes the event file to disk and close the file. + +Call this method when you do not need the summary writer anymore. + + +- - - + +#### `tf.train.SummaryWriter.flush()` {#SummaryWriter.flush} + +Flushes the event file to disk. + +Call this method to make sure that all pending events have been written to +disk. + + +- - - + +#### `tf.train.SummaryWriter.get_logdir()` {#SummaryWriter.get_logdir} + +Returns the directory where event file will be written. + + +- - - + +#### `tf.train.SummaryWriter.reopen()` {#SummaryWriter.reopen} + +Reopens the EventFileWriter. 
+ +Can be called after `close()` to add more events in the same directory. +The events will go into a new events file. + +Does nothing if the EventFileWriter was not closed. + + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md new file mode 100644 index 00000000000..c5830ab5504 --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md @@ -0,0 +1,37 @@ +### `tf.audio_summary(*args, **kwargs)` {#audio_summary} + +Outputs a `Summary` protocol buffer with audio. (deprecated) + +THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30. +Instructions for updating: +Please switch to tf.summary.audio. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. + + The summary has up to `max_outputs` summary values containing audio. The + audio is built from `tensor` which must be 3-D with shape `[batch_size, + frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are + assumed to be in the range of `[-1.0, 1.0]` with a sample rate of + `sample_rate`. + + The `tag` argument is a scalar `Tensor` of type `string`. It is used to + build the `tag` of the summary values: + + * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. + * If `max_outputs` is greater than 1, the summary value tags are + generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. + + Args: + tag: A scalar `Tensor` of type `string`. Used to build the `tag` + of the summary values. + tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]` + or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`. + sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the + signal in hertz. + max_outputs: Max number of batch elements to generate audio for. + collections: Optional list of ops.GraphKeys. The collections to add the + summary to. Defaults to [ops.GraphKeys.SUMMARIES] + name: A name for the operation (optional). + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md new file mode 100644 index 00000000000..613f4ebd73d --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md @@ -0,0 +1,4 @@ +#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString} + + + diff --git a/tensorflow/g3doc/api_docs/python/summary.md b/tensorflow/g3doc/api_docs/python/summary.md index 8d344036dbc..be029f42906 100644 --- a/tensorflow/g3doc/api_docs/python/summary.md +++ b/tensorflow/g3doc/api_docs/python/summary.md @@ -485,6 +485,187 @@ metadata is stored in its NodeDef. This method retrieves the description. 
### `class tf.summary.SummaryDescription` {#SummaryDescription} +- - - + +#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize} + + + + +- - - + +#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear} + + + + +- - - + +#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension} + + + + +- - - + +#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField} + + + + +- - - + +#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom} + +Copies the content of the specified message into the current message. + +The method clears the current message and then merges the specified +message using MergeFrom. + +##### Args: + + +* `other_msg`: Message to copy into the current one. + + +- - - + +#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields} + + + + +- - - + +#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors} + +Finds required fields which are not initialized. + +##### Returns: + + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". + + +- - - + +#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString} + + + + +- - - + +#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension} + + + + +- - - + +#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField} + + + + +- - - + +#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized} + +Checks if all required fields of a message are set. + +##### Args: + + +* `errors`: A list which, if provided, will be populated with the field + paths of all missing required fields. + +##### Returns: + + True iff the specified message has all required fields set. + + +- - - + +#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields} + + + + +- - - + +#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom} + + + + +- - - + +#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString} + + + + +- - - + +#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString} + +Parse serialized protocol buffer data into this message. + +Like MergeFromString(), except we clear the object first and +do not return the value that MergeFromString returns. + + +- - - + +#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension} + + + + +- - - + +#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString} + + + + +- - - + +#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString} + + + + +- - - + +#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent} + +Sets the _cached_byte_size_dirty bit to true, +and propagates this to our listener iff this was a state change. + + +- - - + +#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof} + +Returns the name of the currently set field inside a oneof, or None. 
+ + +- - - + +#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__} + + + + - - - #### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__} @@ -492,12 +673,249 @@ metadata is stored in its NodeDef. This method retrieves the description. Support the pickle protocol. +- - - + +#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__} + +Support the pickle protocol. + + +- - - + +#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__} + + + + +- - - + +#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__} + + + + +- - - + +#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint} + +Magic attribute generated for "type_hint" proto field. + + - - - ### `class tf.summary.TaggedRunMetadata` {#TaggedRunMetadata} +- - - + +#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom} + +Copies the content of the specified message into the current message. + +The method clears the current message and then merges the specified +message using MergeFrom. + +##### Args: + + +* `other_msg`: Message to copy into the current one. + + +- - - + +#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors} + +Finds required fields which are not initialized. + +##### Returns: + + A list of strings. Each string is a path to an uninitialized field from + the top-level message, e.g. "foo.bar[5].baz". + + +- - - + +#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized} + +Checks if all required fields of a message are set. + +##### Args: + + +* `errors`: A list which, if provided, will be populated with the field + paths of all missing required fields. + +##### Returns: + + True iff the specified message has all required fields set. 
+ + +- - - + +#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString} + +Parse serialized protocol buffer data into this message. + +Like MergeFromString(), except we clear the object first and +do not return the value that MergeFromString returns. + + +- - - + +#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent} + +Sets the _cached_byte_size_dirty bit to true, +and propagates this to our listener iff this was a state change. + + +- - - + +#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof} + +Returns the name of the currently set field inside a oneof, or None. + + +- - - + +#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__} + + + + - - - #### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__} @@ -505,4 +923,67 @@ Support the pickle protocol. Support the pickle protocol. +- - - + +#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__} + +Support the pickle protocol. + + +- - - + +#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__} + + + + +- - - + +#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata} + +Magic attribute generated for "run_metadata" proto field. + + +- - - + +#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag} + +Magic attribute generated for "tag" proto field. + + diff --git a/tensorflow/g3doc/api_docs/python/test.md b/tensorflow/g3doc/api_docs/python/test.md index ade1478781a..fb7252625b3 100644 --- a/tensorflow/g3doc/api_docs/python/test.md +++ b/tensorflow/g3doc/api_docs/python/test.md @@ -213,125 +213,6 @@ Checks that for all elements of farray1 and farray2 * `err`: a float value. -- - - - -#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween} - -Asserts that value is between minv and maxv (inclusive). - - -- - - - -#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails} - -Asserts a shell command fails and the error matches a regex in a list. - -##### Args: - - -* `command`: List or string representing the command to run. 
-* `regexes`: the list of regular expression strings. -* `env`: Dictionary of environment variable settings. -* `close_fds`: Whether or not to close all open fd's in the child after - forking. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds} - -Asserts that a shell command succeeds (i.e. exits with code 0). - -##### Args: - - -* `command`: List or string representing the command to run. -* `regexes`: List of regular expression byte strings that match success. -* `env`: Dictionary of environment variable settings. -* `close_fds`: Whether or not to close all open fd's in the child after - forking. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence} - -Assert that "container" contains "subsequence" as an exact subsequence. - -Asserts that "container" contains all the elements of "subsequence", in -order, and without other elements interspersed. For example, [1, 2, 3] is an -exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0]. - -##### Args: - - -* `container`: the list we're testing for subsequence inclusion. -* `subsequence`: the list we hope will be an exact subsequence of container. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder} - -Asserts that the strings provided are found in the target in order. - -This may be useful for checking HTML output. - -##### Args: - - -* `strings`: A list of strings, such as [ 'fox', 'dog' ] -* `target`: A target string in which to look for the strings, such as - 'The quick brown fox jumped over the lazy dog'. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence} - -Assert that "container" contains "subsequence" as a subsequence. - -Asserts that "container" contains all the elements of "subsequence", in -order, but possibly with other elements interspersed. For example, [1, 2, 3] -is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0]. - -##### Args: - - -* `container`: the list we're testing for subsequence inclusion. -* `subsequence`: the list we hope will be a subsequence of container. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset} - -Checks whether actual iterable is a superset of expected iterable. - - -- - - - -#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual} - -An unordered sequence specific comparison. - -Equivalent to assertItemsEqual(). This method is a compatibility layer -for Python 3k, since 2to3 does not convert assertItemsEqual() calls into -assertCountEqual() calls. - -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `msg`: The message to be printed if the test fails. - - - - - #### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual} @@ -354,48 +235,9 @@ Checks whether actual is a superset of expected. 
- - - -#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual} - -Raises AssertionError if a and b are not equal dictionaries. - -##### Args: +#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual} -* `a`: A dict, the expected value. -* `b`: A dict, the actual value. -* `msg`: An optional str, the associated message. - -##### Raises: - - -* `AssertionError`: if the dictionaries are not equal. - - -- - - - -#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty} - -Assert that an object has zero length. - -##### Args: - - -* `container`: Anything that implements the collections.Sized interface. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith} - -Assert that actual.endswith(expected_end) is True. - -##### Args: - - -* `actual`: str -* `expected_end`: str -* `msg`: Optional message to report on failure. - - - @@ -480,11 +322,10 @@ Included for symmetry with assertIsNone. - - - -#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual} +#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual} -An unordered sequence specific comparison. - -It asserts that actual_seq and expected_seq have the same element counts. +An unordered sequence specific comparison. It asserts that +actual_seq and expected_seq have the same element counts. Equivalent to:: self.assertEqual(Counter(iter(actual_seq)), @@ -497,30 +338,6 @@ Asserts that each element has the same count in both sequences. - [0, 1, 1] and [1, 0, 1] compare equal. - [0, 0, 1] and [0, 1] compare unequal. -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `msg`: The message to be printed if the test fails. - - -- - - - -#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual} - -Asserts that the JSON objects defined in two strings are equal. - -A summary of the differences will be included in the failure message -using assertSameStructure. - -##### Args: - - -* `first`: A string contining JSON to decode and compare to second. -* `second`: A string contining JSON to decode and compare to first. -* `msg`: Additional text to include in the failure message. - - - - @@ -590,13 +407,6 @@ if not. * `msg`: An optional string message to append to the failure message. -- - - - -#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements} - -Checks whether actual iterable and expected iterable are disjoint. - - - - - #### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual} @@ -627,33 +437,6 @@ as significant digits (measured from the most signficant digit). Objects that are equal automatically fail. -- - - - -#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty} - -Assert that an object has non-zero length. - -##### Args: - - -* `container`: Anything that implements the collections.Sized interface. -* `msg`: Optional message to report on failure. - - -- - - - -#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith} - -Assert that actual.endswith(unexpected_end) is False. 
- -##### Args: - - -* `actual`: str -* `unexpected_end`: str -* `msg`: Optional message to report on failure. - - - - - #### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual} @@ -691,20 +474,6 @@ Included for symmetry with assertIsInstance. Fail the test if the text matches the regular expression. -- - - - -#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith} - -Assert that actual.startswith(unexpected_start) is False. - -##### Args: - - -* `actual`: str -* `unexpected_start`: str -* `msg`: Optional message to report on failure. - - - - - #### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals} @@ -779,38 +548,6 @@ Asserts that the message in a raised exception matches a regexp. * `kwargs`: Extra kwargs. -- - - - -#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch} - -Asserts that the message in a raised exception equals the given string. - -Unlike assertRaisesRegexp, this method takes a literal string, not -a regular expression. - -with self.assertRaisesWithLiteralMatch(ExType, 'message'): - DoSomething() - -##### Args: - - -* `expected_exception`: Exception class expected to be raised. -* `expected_exception_message`: String message expected in the raised - exception. For a raise exception e, expected_exception_message must - equal str(e). -* `callable_obj`: Function to be called, or None to return a context. -* `args`: Extra args. -* `kwargs`: Extra kwargs. - -##### Returns: - - A context manager if callable_obj is None. Otherwise, None. - -##### Raises: - - self.failureException if callable_obj does not raise a macthing exception. - - - - - #### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch} @@ -835,71 +572,6 @@ predicate search. exception. -- - - - -#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch} - -Asserts that the message in a raised exception matches the given regexp. - -This is just a wrapper around assertRaisesRegexp. Please use -assertRaisesRegexp instead of assertRaisesWithRegexpMatch. - -##### Args: - - -* `expected_exception`: Exception class expected to be raised. -* `expected_regexp`: Regexp (re pattern object or string) expected to be - found in error message. -* `callable_obj`: Function to be called, or None to return a context. -* `args`: Extra args. -* `kwargs`: Extra keyword args. - -##### Returns: - - A context manager if callable_obj is None. Otherwise, None. - -##### Raises: - - self.failureException if callable_obj does not raise a macthing exception. - - -- - - - -#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch} - -Asserts that at least one regex in regexes matches str. - - If possible you should use assertRegexpMatches, which is a simpler - version of this method. assertRegexpMatches takes a single regular - expression (a string or re compiled object) instead of a list. - - Notes: - 1. This function uses substring matching, i.e. the matching - succeeds if *any* substring of the error message matches *any* - regex in the list. This is more convenient for the user than - full-string matching. - - 2. 
If regexes is the empty list, the matching will always fail. - - 3. Use regexes=[''] for a regex that will always pass. - - 4. '.' matches any single character *except* the newline. To - match any character, use '(.| -)'. - - 5. '^' matches the beginning of each line, not just the beginning - of the string. Similarly, '$' matches the end of each line. - - 6. An exception will be thrown if regexes contains an invalid - regex. - - Args: - actual_str: The string we try to match with the items in regexes. - regexes: The regular expressions we want to match against str. - See "Notes" above for detailed notes on how this is interpreted. - message: The message to be printed if the test fails. - - - - - #### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches} @@ -907,79 +579,6 @@ Asserts that at least one regex in regexes matches str. Fail the test unless the text matches the regular expression. -- - - - -#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements} - -Assert that two sequences have the same elements (in any order). - -This method, unlike assertItemsEqual, doesn't care about any -duplicates in the expected and actual sequences. - - >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1]) - # Doesn't raise an AssertionError - -If possible, you should use assertItemsEqual instead of -assertSameElements. - -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `msg`: The message to be printed if the test fails. - - -- - - - -#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure} - -Asserts that two values contain the same structural content. - -The two arguments should be data trees consisting of trees of dicts and -lists. They will be deeply compared by walking into the contents of dicts -and lists; other items will be compared using the == operator. -If the two structures differ in content, the failure message will indicate -the location within the structures where the first difference is found. -This may be helpful when comparing large structures. - -##### Args: - - -* `a`: The first structure to compare. -* `b`: The second structure to compare. -* `aname`: Variable name to use for the first structure in assertion messages. -* `bname`: Variable name to use for the second structure. -* `msg`: Additional text to include in the failure message. - - -- - - - -#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual} - -An approximate equality assertion for ordered sequences. - -Fail if the two sequences are unequal as determined by their value -differences rounded to the given number of decimal places (default 7) and -comparing to zero, or by comparing that the difference between each value -in the two sequences is more than the given delta. - -Note that decimal places (from zero) are usually not the same as significant -digits (measured from the most signficant digit). - -If the two sequences compare equal then they will automatically compare -almost equal. - -##### Args: - - -* `expected_seq`: A sequence containing elements we are expecting. -* `actual_seq`: The sequence that we are testing. -* `places`: The number of decimal places to compare. -* `msg`: The message to be printed if the test fails. -* `delta`: The OK difference between compared values. 
- - - - - #### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual} @@ -1000,26 +599,6 @@ which can be indexed, has a length, and has an equality operator. differences. -- - - - -#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith} - -An equality assertion for the beginning of ordered sequences. - -If prefix is an empty sequence, it will raise an error unless whole is also -an empty sequence. - -If prefix is not a sequence, it will raise an error if the first element of -whole does not match. - -##### Args: - - -* `prefix`: A sequence expected at the beginning of the whole parameter. -* `whole`: The sequence in which to look for prefix. -* `msg`: Optional message to report on failure. - - - - - #### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual} @@ -1071,51 +650,6 @@ Assert that actual.startswith(expected_start) is True. * `msg`: Optional message to report on failure. -- - - - -#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered} - -Asserts that total ordering has been implemented correctly. - -For example, say you have a class A that compares only on its attribute x. -Comparators other than __lt__ are omitted for brevity. - -class A(object): - def __init__(self, x, y): - self.x = x - self.y = y - - def __hash__(self): - return hash(self.x) - - def __lt__(self, other): - try: - return self.x < other.x - except AttributeError: - return NotImplemented - -assertTotallyOrdered will check that instances can be ordered correctly. -For example, - -self.assertTotallyOrdered( - [None], # None should come before everything else. - [1], # Integers sort earlier. - [A(1, 'a')], - [A(2, 'b')], # 2 is after 1. - [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant. - [A(4, 'z')], - ['foo']) # Strings sort last. - -##### Args: - - -* `*groups`: A list of groups of elements. Each group of elements is a list - of objects that are equal. The elements in each group must be less than - the elements in the group after it. For example, these groups are - totally ordered: [None], [1], [2, 2], [3]. -* `**kwargs`: optional msg keyword argument can be passed. - - - - - #### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue} @@ -1138,13 +672,6 @@ A tuple-specific equality assertion. differences. -- - - - -#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual} - -Asserts that urls are equal, ignoring ordering of query params. - - - - - #### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_} @@ -1206,9 +733,9 @@ tearDown. - - - -#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail} +#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail} -Fail immediately with the given message, optionally prefixed. +Fail immediately, with the given message. - - - @@ -1260,13 +787,6 @@ Fail immediately with the given message, optionally prefixed. -- - - - -#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties} - -Return any properties that the user has recorded. - - - - - #### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir} @@ -1289,20 +809,6 @@ pollute each others environment. -- - - - -#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty} - -Record an arbitrary property for later use. 
- -##### Args: - - -* `property_name`: str, name of property to record; must be a valid XML - attribute name -* `property_value`: value of property; must be valid XML attribute value - - - - - #### `tf.test.TestCase.run(result=None)` {#TestCase.run} @@ -1328,18 +834,11 @@ Hook method for setting up class fixture before running tests in the class. #### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription} -Format both the test method name and the first line of its docstring. +Returns a one-line description of the test, or None if no +description has been provided. -If no docstring is given, only returns the method name. - -This method overrides unittest.TestCase.shortDescription(), which -only returns the first line of the docstring, obscuring the name -of the test upon failure. - -##### Returns: - - -* `desc`: A short description of a test method. +The default implementation of this method returns the first line of +the specified test method's docstring. - - - diff --git a/tensorflow/python/training/supervisor.py b/tensorflow/python/training/supervisor.py index eb8efd17e25..c3088b9fefc 100644 --- a/tensorflow/python/training/supervisor.py +++ b/tensorflow/python/training/supervisor.py @@ -152,7 +152,7 @@ class Supervisor(object): ... sv = Supervisor(logdir='/tmp/mydir') with sv.managed_session(FLAGS.master) as sess: - sv.loop(60, print_loss, (sess)) + sv.loop(60, print_loss, (sess, )) while not sv.should_stop(): sess.run(my_train_op) ``` diff --git a/tensorflow/tools/ci_build/builds/android_nightly.sh b/tensorflow/tools/ci_build/builds/android_full.sh old mode 100644 new mode 100755 similarity index 100% rename from tensorflow/tools/ci_build/builds/android_nightly.sh rename to tensorflow/tools/ci_build/builds/android_full.sh diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh index 50d61d8c31d..5521a52ef67 100755 --- a/tensorflow/tools/ci_build/ci_parameterized_build.sh +++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh @@ -18,7 +18,7 @@ # ci_parameterized_build.sh # # The script obeys the following required environment variables: -# TF_BUILD_CONTAINER_TYPE: (CPU | GPU | ANDROID | ANDROID_NIGHTLY) +# TF_BUILD_CONTAINER_TYPE: (CPU | GPU | ANDROID | ANDROID_FULL) # TF_BUILD_PYTHON_VERSION: (PYTHON2 | PYTHON3 | PYTHON3.5) # TF_BUILD_IS_PIP: (NO_PIP | PIP | BOTH) # @@ -127,7 +127,7 @@ PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh" PIP_TEST_TUTORIALS_FLAG="--test_tutorials" PIP_INTEGRATION_TESTS_FLAG="--integration_tests" ANDROID_CMD="${CI_BUILD_DIR}/builds/android.sh" -ANDROID_NIGHTLY_CMD="${CI_BUILD_DIR}/builds/android_nightly.sh" +ANDROID_FULL_CMD="${CI_BUILD_DIR}/builds/android_full.sh" TF_GPU_COUNT=${TF_GPU_COUNT:-8} PARALLEL_GPU_TEST_CMD='//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute' @@ -237,7 +237,7 @@ elif [[ ${CTYPE} == "gpu" ]]; then echo "" fi fi -elif [[ ${CTYPE} == "android" ]] || [[ ${CTYPE} == "android_nightly" ]]; then +elif [[ ${CTYPE} == "android" ]] || [[ ${CTYPE} == "android_full" ]]; then : else die "Unrecognized value in TF_BUILD_CONTAINER_TYPE: "\ @@ -344,9 +344,9 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] || elif [[ ${CTYPE} == "android" ]]; then # Run android specific script for android build. NO_PIP_MAIN_CMD="${ANDROID_CMD} ${OPT_FLAG} " - elif [[ ${CTYPE} == "android_nightly" ]]; then - # Run android specific script for android nightly build. 
-    NO_PIP_MAIN_CMD="${ANDROID_NIGHTLY_CMD} ${OPT_FLAG} "
+  elif [[ ${CTYPE} == "android_full" ]]; then
+    # Run android specific script for full android build.
+    NO_PIP_MAIN_CMD="${ANDROID_FULL_CMD} ${OPT_FLAG} "
   fi
 fi
 
@@ -501,6 +515,13 @@ fi
 
 chmod +x ${TMP_SCRIPT}
 
+# Map TF_BUILD container types to containers we actually have.
+if [[ "${CTYPE}" == "android_full" ]]; then
+  CONTAINER="android"
+else
+  CONTAINER=${CTYPE}
+fi
+
 FAILURE=0
 if [[ ! -z "${TF_BUILD_DRY_RUN}" ]] && [[ ${TF_BUILD_DRY_RUN} != "0" ]]; then
   # Do a dry run: just print the final command
@@ -508,7 +515,7 @@ if [[ ! -z "${TF_BUILD_DRY_RUN}" ]] && [[ ${TF_BUILD_DRY_RUN} != "0" ]]; then
 else
   # Actually run the command
   if [[ "${DO_DOCKER}" == "1" ]]; then
-    ${DOCKER_MAIN_CMD} ${CTYPE} ${DOCKERFILE_FLAG} /tmp/tf_build.sh
+    ${DOCKER_MAIN_CMD} ${CONTAINER} ${DOCKERFILE_FLAG} /tmp/tf_build.sh
   else
     ${TMP_SCRIPT}
   fi
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index 0a0a438284c..b99dbe954e4 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -58,14 +58,15 @@ py_binary(
         "//tensorflow/contrib/specs:all_files",
         "//tensorflow/contrib/tensor_forest:all_files",
         "//tensorflow/contrib/tensor_forest/hybrid:all_files",
-        "//tensorflow/examples/tutorials/mnist:package",
         "//tensorflow/python:util_example_parser_configuration",
         "//tensorflow/python/debug:all_files",
         "//tensorflow/python/saved_model:all_files",
         "//tensorflow/python/tools:all_files",
-        # The following two targets have an issue when archiving them into
-        # the python zip, exclude them for now.
+        # The following target has an issue when archiving it into the python
+        # zip; exclude it for now.
         # "//tensorflow/tensorboard",
+        # This package does not build. Exclude it on Windows for now.
+        # "//tensorflow/examples/tutorials/mnist:package",
     ],
     srcs_version = "PY2AND3",
     deps = ["//tensorflow:tensorflow_py"],
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 79149b9a021..01f54e70b8d 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -323,7 +323,7 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
       name = "zlib_archive",
       urls = [
           "http://bazel-mirror.storage.googleapis.com/zlib.net/zlib-1.2.8.tar.gz",
-          "http://zlib.net/zlib-1.2.8.tar.gz",
+          "http://zlib.net/fossils/zlib-1.2.8.tar.gz",
       ],
       sha256 = "36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d",
       strip_prefix = "zlib-1.2.8",
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 02b8796f337..c94f67bc947 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -147,6 +147,42 @@ def _cudnn_install_basedir(repository_ctx):
   return cudnn_install_path
 
 
+def _matches_version(environ_version, detected_version):
+  """Checks whether the user-specified version matches the detected version.
+
+  This function performs a weak matching so that if the user specifies only the
+  major or major and minor versions, the versions are still considered matching
+  if the version parts match. To illustrate:
+
+    environ_version  detected_version  result
+    -----------------------------------------
+    5.1.3            5.1.3             True
+    5.1              5.1.3             True
+    5                5.1               True
+    5.1.3            5.1               False
+    5.2.3            5.1.3             False
+
+  Args:
+    environ_version: The version specified by the user via environment
+      variables.
+    detected_version: The version autodetected from the CUDA installation on
+      the system.
+
+  Returns: True if user-specified version matches detected version and False
+    otherwise.
+  """
+  environ_version_parts = environ_version.split(".")
+  detected_version_parts = detected_version.split(".")
+  if len(detected_version_parts) < len(environ_version_parts):
+    return False
+  for i, part in enumerate(detected_version_parts):
+    if i >= len(environ_version_parts):
+      break
+    if part != environ_version_parts[i]:
+      return False
+  return True
+
+
 _NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
 
 
@@ -179,28 +215,70 @@ def _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value):
   # Parse the CUDA version from the line containing the CUDA version.
   prefix_removed = version_line.replace(_NVCC_VERSION_PREFIX, '')
   parts = prefix_removed.split(",")
-  if len(parts) != 2 or len(parts[0]) == 0:
+  if len(parts) != 2 or len(parts[0]) < 2:
     auto_configure_fail(
         "Could not parse CUDA version from nvcc --version. Got: %s" %
         result.stdout)
-  version = parts[0].strip()
+  full_version = parts[1].strip()
+  if full_version.startswith('V'):
+    full_version = full_version[1:]
 
   # Check whether TF_CUDA_VERSION was set by the user and fail if it does not
   # match the detected version.
   environ_version = ""
   if _TF_CUDA_VERSION in repository_ctx.os.environ:
     environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
-  if environ_version and version != environ_version:
+  if environ_version and not _matches_version(environ_version, full_version):
     auto_configure_fail(
         ("CUDA version detected from nvcc (%s) does not match " +
-         "TF_CUDA_VERSION (%s)") % (version, environ_version))
+         "TF_CUDA_VERSION (%s)") % (full_version, environ_version))
+
+  # We only use the version consisting of the major and minor version numbers.
+  version_parts = full_version.split('.')
+  if len(version_parts) < 2:
+    auto_configure_fail("CUDA version detected from nvcc (%s) is incomplete." % full_version)
   if cpu_value == "Windows":
-    version = "64_" + version.replace(".", "")
+    version = "64_%s%s" % (version_parts[0], version_parts[1])
+  else:
+    version = "%s.%s" % (version_parts[0], version_parts[1])
 
   return version
 
 
 _DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
+_DEFINE_CUDNN_MINOR = "#define CUDNN_MINOR"
+_DEFINE_CUDNN_PATCHLEVEL = "#define CUDNN_PATCHLEVEL"
+
+
+def _find_cuda_define(repository_ctx, cudnn_install_basedir, define):
+  """Returns the value of a #define in cudnn.h.
+
+  Greps through cudnn.h and returns the value of the specified #define. If the
+  #define is not found, an error is raised.
+
+  Args:
+    repository_ctx: The repository context.
+    cudnn_install_basedir: The install directory for cuDNN on the system.
+    define: The #define to search for.
+
+  Returns:
+    The value of the #define found in cudnn.h.
+  """
+  # Find cudnn.h and grep for the line defining the requested macro.
+  cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
+                                     cudnn_install_basedir)
+  if not cudnn_h_path.exists:
+    auto_configure_fail("Cannot find cudnn.h at %s" % str(cudnn_h_path))
+  result = repository_ctx.execute(["grep", "-E", define, str(cudnn_h_path)])
+  if result.stderr:
+    auto_configure_fail("Error reading %s: %s" %
+                        (str(cudnn_h_path), result.stderr))
+
+  # Parse the value from the line defining the macro.
+  lines = result.stdout.splitlines()
+  if len(lines) == 0 or lines[0].find(define) == -1:
+    auto_configure_fail("Cannot find line containing '%s' in %s" %
+                        (define, str(cudnn_h_path)))
+  return lines[0].replace(define, "").strip()
 
 
 def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
@@ -214,34 +292,30 @@ def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
   Returns:
     A string containing the version of cuDNN.
   """
-  # Find cudnn.h and grep for the line defining CUDNN_MAJOR.
-  cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
-                                     cudnn_install_basedir)
-  if not cudnn_h_path.exists:
-    auto_configure_fail("Cannot find cudnn.h at %s" % str(cudnn_h_path))
-  result = repository_ctx.execute([
-      "grep", "-E", _DEFINE_CUDNN_MAJOR, str(cudnn_h_path)])
-  if result.stderr:
-    auto_configure_fail("Error reading %s: %s" %
-                        (result.stderr, str(cudnn_h_path)))
-
-  # Parse the cuDNN major version from the line defining CUDNN_MAJOR
-  lines = result.stdout.splitlines()
-  if len(lines) == 0 or lines[0].find(_DEFINE_CUDNN_MAJOR) == -1:
-    auto_configure_fail("Cannot find line containing '%s' in %s" %
-                        (_DEFINE_CUDNN_MAJOR, str(cudnn_h_path)))
-  version = lines[0].replace(_DEFINE_CUDNN_MAJOR, "").strip()
+  major_version = _find_cuda_define(repository_ctx, cudnn_install_basedir,
+                                    _DEFINE_CUDNN_MAJOR)
+  minor_version = _find_cuda_define(repository_ctx, cudnn_install_basedir,
+                                    _DEFINE_CUDNN_MINOR)
+  patch_version = _find_cuda_define(repository_ctx, cudnn_install_basedir,
+                                    _DEFINE_CUDNN_PATCHLEVEL)
+  full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
 
   # Check whether TF_CUDNN_VERSION was set by the user and fail if it does not
   # match the detected version.
   environ_version = ""
   if _TF_CUDNN_VERSION in repository_ctx.os.environ:
     environ_version = repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
-  if environ_version and version != environ_version:
+  if environ_version and not _matches_version(environ_version, full_version):
+    cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
+                                       cudnn_install_basedir)
     auto_configure_fail(
         ("cuDNN version detected from %s (%s) does not match " +
-         "TF_CUDNN_VERSION (%s)") % (str(cudnn_h_path), version, environ_version))
+         "TF_CUDNN_VERSION (%s)") %
+        (str(cudnn_h_path), full_version, environ_version))
+  # We only use the major version since we use the libcudnn libraries that are
+  # only versioned with the major version (e.g. libcudnn.so.5).
+  version = major_version
 
   if cpu_value == "Windows":
     version = "64_" + version
 
   return version
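
The weak version matching that `_matches_version` introduces in `cuda_configure.bzl` treats the user-supplied `TF_CUDA_VERSION` or `TF_CUDNN_VERSION` value as a per-part prefix of the detected version. A minimal sketch of the same rule in plain Python follows; the `matches_version` name and the assertions are illustrative only and are not part of the patch.

```python
def matches_version(environ_version, detected_version):
    """True if the user-specified version is a per-part prefix of the detected one."""
    environ_parts = environ_version.split(".")
    detected_parts = detected_version.split(".")
    # A user spec that is more specific than what was detected cannot match.
    if len(detected_parts) < len(environ_parts):
        return False
    # Compare part by part; detected parts beyond the user spec are ignored.
    return all(d == e for d, e in zip(detected_parts, environ_parts))

# Mirrors the table in the _matches_version docstring.
assert matches_version("5.1.3", "5.1.3")
assert matches_version("5.1", "5.1.3")
assert matches_version("5", "5.1")
assert not matches_version("5.1.3", "5.1")
assert not matches_version("5.2.3", "5.1.3")
```

Accepting a prefix lets a user set, say, `TF_CUDA_VERSION=8.0` and still match a detected `8.0.44` toolkit, while a genuinely different release is rejected.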
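
The new `_find_cuda_define` helper greps `cudnn.h` for a `#define` line and returns whatever follows the macro name; `_cudnn_version` then joins the major, minor, and patchlevel values into a full version string for the `TF_CUDNN_VERSION` check but keeps only the major component for the library name. The sketch below shows that flow with a regex over an in-memory header instead of `repository_ctx.execute(["grep", ...])`; the sample header text and the `find_define` helper are hypothetical.

```python
import re

# Stand-in for the contents of <cudnn_install_basedir>/include/cudnn.h.
SAMPLE_CUDNN_H = """
#define CUDNN_MAJOR 5
#define CUDNN_MINOR 1
#define CUDNN_PATCHLEVEL 10
"""

def find_define(header_text, name):
    """Returns the value of '#define <name> <value>', or raises if it is absent."""
    match = re.search(r"^#define\s+%s\s+(\S+)" % re.escape(name),
                      header_text, re.MULTILINE)
    if match is None:
        raise ValueError("Cannot find line containing '#define %s'" % name)
    return match.group(1)

major = find_define(SAMPLE_CUDNN_H, "CUDNN_MAJOR")
minor = find_define(SAMPLE_CUDNN_H, "CUDNN_MINOR")
patch = find_define(SAMPLE_CUDNN_H, "CUDNN_PATCHLEVEL")
full_version = "%s.%s.%s" % (major, minor, patch)  # "5.1.10"

# Only the major version is used downstream because the shipped library is
# versioned that way, e.g. libcudnn.so.5.
print(full_version, "-> libcudnn.so." + major)
```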