Merge changes from github.
Change: 143922699

parent: bbdf8811ce
commit: efd40e5b73

Files touched:
  WORKSPACE
  configure
  tensorflow/contrib
  tensorflow/examples/android/res/layout
  tensorflow/g3doc/api_docs/python (array_ops.md, summary.md, test.md,
    functions_and_classes/shard0-shard7, shard9)
  tensorflow/python
  tensorflow/tensorflow.bzl
  tensorflow/tools/ci_build
  tensorflow/workspace.bzl

WORKSPACE (12 lines changed)
@@ -1,5 +1,12 @@
 workspace(name = "org_tensorflow")
 
+load("//tensorflow:workspace.bzl", "check_version", "tf_workspace")
+
+# We must check the bazel version before trying to parse any other BUILD files,
+# in case the parsing of those build files depends on the bazel version we
+# require here.
+check_version("0.4.2")
+
 # Uncomment and update the paths in these entries to build the Android demo.
 #android_sdk_repository(
 #    name = "androidsdk",
@@ -15,13 +22,8 @@ workspace(name = "org_tensorflow")
 #    api_level=21)
 
 # Please add all new TensorFlow dependencies in workspace.bzl.
-load("//tensorflow:workspace.bzl", "tf_workspace")
 tf_workspace()
 
-# Specify the minimum required bazel version.
-load("//tensorflow:tensorflow.bzl", "check_version")
-check_version("0.4.2")
-
 new_http_archive(
     name = "inception5h",
     build_file = "models.BUILD",

configure (6 changes, vendored)
@@ -289,7 +289,11 @@ while true; do
     echo "libcudnn.dylib resolves to libcudnn${TF_CUDNN_EXT}"
   fi
 else
-  TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
+  if [ "$OSNAME" == "Darwin" ]; then
+    TF_CUDNN_EXT=".${TF_CUDNN_VERSION}.dylib"
+  else
+    TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
+  fi
 fi
 
 if is_windows; then
@@ -763,7 +763,7 @@ class EstimatorTest(test.TestCase):
       def __init__(self):
        self.run_count = 0
 
-      def before_run(self, run_context):
+      def after_run(self, run_context, run_values):
        self.run_count += 1
 
    est = learn.Estimator(model_fn=linear_model_fn)
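The hunk above moves the test hook's counting from `before_run` to `after_run`. As a rough illustration (a hypothetical hook named `RunCounterHook`, not the test's code), `after_run` fires only once a `session.run()` call has finished and also receives the run results:

```python
import tensorflow as tf

class RunCounterHook(tf.train.SessionRunHook):
  """Counts session.run() calls that have completed (illustrative only)."""

  def __init__(self):
    self.run_count = 0

  def after_run(self, run_context, run_values):
    # Unlike before_run, this fires after the run has finished, so the
    # counter only reflects completed calls.
    self.run_count += 1

c = tf.constant(1)
hook = RunCounterHook()
with tf.train.MonitoredSession(hooks=[hook]) as sess:
  sess.run(c)
print(hook.run_count)  # 1
```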
@@ -260,9 +260,10 @@ class StopAfterNEvalsHook(session_run_hook.SessionRunHook):
     """
     # The number of evals to run for.
     self._num_evals = num_evals
+    self._evals_completed = None
 
-  def begin(self):
-    self._evals_completed = get_or_create_eval_step()
+  def _set_evals_completed_tensor(self, updated_eval_step):
+    self._evals_completed = updated_eval_step
 
   def before_run(self, run_context):
     return session_run_hook.SessionRunArgs({

@@ -388,9 +389,16 @@ def evaluate_once(checkpoint_path,
   """
   eval_step = get_or_create_eval_step()
 
+  # Prepare the run hooks.
+  hooks = hooks or []
+
   if eval_ops is not None:
     update_eval_step = state_ops.assign_add(eval_step, 1)
 
+    for h in hooks:
+      if isinstance(h, StopAfterNEvalsHook):
+        h._set_evals_completed_tensor(update_eval_step)  # pylint: disable=protected-access
+
     if isinstance(eval_ops, dict):
       eval_ops['update_eval_step'] = update_eval_step
     elif isinstance(eval_ops, (tuple, list)):

@@ -408,9 +416,6 @@ def evaluate_once(checkpoint_path,
       master=master,
       config=config)
 
-  # Prepare the run hooks.
-  hooks = hooks or []
-
   final_ops_hook = basic_session_run_hooks.FinalOpsHook(
       final_ops, final_ops_feed_dict)
   hooks.append(final_ops_hook)

@@ -489,9 +494,16 @@ def evaluate_repeatedly(checkpoint_dir,
   """
   eval_step = get_or_create_eval_step()
 
+  # Prepare the run hooks.
+  hooks = hooks or []
+
   if eval_ops is not None:
     update_eval_step = state_ops.assign_add(eval_step, 1)
 
+    for h in hooks:
+      if isinstance(h, StopAfterNEvalsHook):
+        h._set_evals_completed_tensor(update_eval_step)  # pylint: disable=protected-access
+
     if isinstance(eval_ops, dict):
       eval_ops['update_eval_step'] = update_eval_step
     elif isinstance(eval_ops, (tuple, list)):

@@ -499,9 +511,6 @@ def evaluate_repeatedly(checkpoint_dir,
   else:
     eval_ops = [eval_ops, update_eval_step]
 
-  # Prepare the run hooks.
-  hooks = hooks or []
-
   final_ops_hook = basic_session_run_hooks.FinalOpsHook(
       final_ops, final_ops_feed_dict)
   hooks.append(final_ops_hook)
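The evaluation hunks above prepare the hooks earlier and have the loop hand its `update_eval_step` op to any `StopAfterNEvalsHook`, instead of the hook building its own counter in `begin()`. A simplified sketch of that wiring, using an invented `StopAfterN` stand-in class rather than the real library code:

```python
import tensorflow as tf

class StopAfterN(tf.train.SessionRunHook):
  """Illustrative stand-in for StopAfterNEvalsHook."""

  def __init__(self, num_evals):
    self._num_evals = num_evals
    self._evals_completed = None  # wired in by the evaluation loop below

  def _set_evals_completed_tensor(self, updated_eval_step):
    self._evals_completed = updated_eval_step

  def before_run(self, run_context):
    return tf.train.SessionRunArgs({'evals_completed': self._evals_completed})

  def after_run(self, run_context, run_values):
    if run_values.results['evals_completed'] >= self._num_evals:
      run_context.request_stop()

# The evaluation driver owns the counter and hands its update op to the hook.
eval_step = tf.get_variable('eval_step', [], tf.int64,
                            initializer=tf.zeros_initializer(),
                            trainable=False)
update_eval_step = tf.assign_add(eval_step, 1)

hooks = [StopAfterN(num_evals=100)]
for h in hooks:
  if isinstance(h, StopAfterN):
    h._set_evals_completed_tensor(update_eval_step)
```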
@@ -30,7 +30,7 @@
     android:layout_alignParentTop="true" />
 
   <org.tensorflow.demo.OverlayView
-      android:id="@+id/overlay_debug"
+      android:id="@+id/debug_overlay"
      android:layout_width="match_parent"
      android:layout_height="match_parent"
      android:layout_alignParentBottom="true" />
@@ -2209,7 +2209,7 @@ The attr `block_size` indicates the input block size and how the data is moved.
 
   * Chunks of data of size `block_size * block_size` from depth are rearranged
     into non-overlapping blocks of size `block_size x block_size`
-  * The width the output tensor is `input_depth * block_size`, whereas the
+  * The width the output tensor is `input_width * block_size`, whereas the
     height is `input_height * block_size`.
   * The depth of the input tensor must be divisible by
     `block_size * block_size`.
@@ -10,7 +10,7 @@ The attr `block_size` indicates the input block size and how the data is moved.
 
   * Chunks of data of size `block_size * block_size` from depth are rearranged
     into non-overlapping blocks of size `block_size x block_size`
-  * The width the output tensor is `input_depth * block_size`, whereas the
+  * The width the output tensor is `input_width * block_size`, whereas the
     height is `input_height * block_size`.
   * The depth of the input tensor must be divisible by
     `block_size * block_size`.
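Both copies of this doc page get the same fix: the output width of `depth_to_space` comes from `input_width`, not `input_depth`. A quick sanity check of the corrected shape rule using the public op (not part of this diff):

```python
import numpy as np
import tensorflow as tf

# [batch=1, height=1, width=2, depth=4] with block_size=2 ->
# height: 1 * 2 = 2, width: 2 * 2 = 4, depth: 4 / (2 * 2) = 1.
x = tf.constant(np.arange(8).reshape([1, 1, 2, 4]), dtype=tf.float32)
y = tf.depth_to_space(x, block_size=2)

with tf.Session() as sess:
  print(sess.run(y).shape)  # (1, 2, 4, 1)
```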
@@ -1,4 +1,185 @@
+- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+  A list of strings. Each string is a path to an uninitialized field from
+  the top-level message, e.g. "foo.bar[5].baz".
+
+- - -
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+  paths of all missing required fields.
+
+##### Returns:
+
+  True iff the specified message has all required fields set.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
 - - -
 
 #### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
@@ -6,3 +187,66 @@
 Support the pickle protocol.
 
+- - -
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+- - -
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+- - -
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
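The listing above is the standard generated protobuf API; only `tag` and `run_metadata` are specific to this message. A small sketch of how a few of those methods fit together, assuming the class is reachable as `tf.summary.TaggedRunMetadata` as the headings indicate:

```python
import tensorflow as tf

# Build a message, serialize it, and parse it back.
trm = tf.summary.TaggedRunMetadata(tag='step_100', run_metadata=b'')
data = trm.SerializeToString()

restored = tf.summary.TaggedRunMetadata.FromString(data)
print(restored.tag)              # 'step_100'
print(restored.IsInitialized())  # True
```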
@@ -0,0 +1,17 @@
+### `tf.merge_all_summaries(*args, **kwargs)` {#merge_all_summaries}
+
+Merges all summaries collected in the default graph. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge_all.
+
+Args:
+  key: `GraphKey` used to collect the summaries. Defaults to
+    `GraphKeys.SUMMARIES`.
+
+Returns:
+  If no summaries were collected, returns None. Otherwise returns a scalar
+  `Tensor` of type `string` containing the serialized `Summary` protocol
+  buffer resulting from the merging.
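A minimal before/after sketch of the rename this notice asks for (illustrative, with an invented scalar summary):

```python
import tensorflow as tf

tf.summary.scalar('loss', tf.constant(0.25))

# Deprecated (removed after 2016-11-30):
#   summary_op = tf.merge_all_summaries()
# Replacement:
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
  serialized = sess.run(summary_op)  # serialized `Summary` protocol buffer
```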
@@ -0,0 +1,49 @@
+### `tf.image_summary(*args, **kwargs)` {#image_summary}
+
+Outputs a `Summary` protocol buffer with images. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.image. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs.
+
+The summary has up to `max_images` summary values containing images. The
+images are built from `tensor` which must be 4-D with shape `[batch_size,
+height, width, channels]` and where `channels` can be:
+
+* 1: `tensor` is interpreted as Grayscale.
+* 3: `tensor` is interpreted as RGB.
+* 4: `tensor` is interpreted as RGBA.
+
+The images have the same number of channels as the input tensor. For float
+input, the values are normalized one image at a time to fit in the range
+`[0, 255]`. `uint8` values are unchanged. The op uses two different
+normalization algorithms:
+
+* If the input values are all positive, they are rescaled so the largest one
+  is 255.
+
+* If any input value is negative, the values are shifted so input value 0.0
+  is at 127. They are then rescaled so that either the smallest value is 0,
+  or the largest one is 255.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_images` is 1, the summary value tag is '*tag*/image'.
+* If `max_images` is greater than 1, the summary value tags are
+  generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+
+Args:
+  tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+    of the summary values.
+  tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
+    width, channels]` where `channels` is 1, 3, or 4.
+  max_images: Max number of batch elements to generate images for.
+  collections: Optional list of ops.GraphKeys. The collections to add the
+    summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+  name: A name for the operation (optional).
+
+Returns:
+  A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+  buffer.
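A minimal migration sketch for this deprecation; the tag becomes an op name and `max_images` becomes `max_outputs` (the `images` tensor here is a placeholder value for illustration):

```python
import tensorflow as tf

images = tf.zeros([8, 64, 64, 3])  # [batch, height, width, channels]

# Deprecated:
#   summary_op = tf.image_summary('inputs', images, max_images=4)
# Replacement:
summary_op = tf.summary.image('inputs', images, max_outputs=4)
```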
@@ -1,4 +1,185 @@
+- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+- - -
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+- - -
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+- - -
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+- - -
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+- - -
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+- - -
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+  A list of strings. Each string is a path to an uninitialized field from
+  the top-level message, e.g. "foo.bar[5].baz".
+
+- - -
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+- - -
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+- - -
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+- - -
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+  paths of all missing required fields.
+
+##### Returns:
+
+  True iff the specified message has all required fields set.
+
+- - -
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+- - -
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+- - -
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+- - -
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+- - -
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+- - -
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+- - -
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+- - -
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+- - -
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+- - -
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+- - -
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
 - - -
 
 #### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
@@ -6,3 +187,59 @@
 Support the pickle protocol.
 
+- - -
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+- - -
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+- - -
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+- - -
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+- - -
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+- - -
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+- - -
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+- - -
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
@@ -173,125 +173,6 @@ Checks that for all elements of farray1 and farray2
 * <b>`err`</b>: a float value.
 
 
-- - -
-
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
-  forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
-  forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
-  'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
 - - -
 
 #### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
@@ -314,48 +195,9 @@ Checks whether actual is a superset of expected.
 
 - - -
 
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
 
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
 - - -
@@ -440,11 +282,10 @@ Included for symmetry with assertIsNone.
 
 - - -
 
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
 
-An unordered sequence specific comparison.
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
 
 Equivalent to::
 
     self.assertEqual(Counter(iter(actual_seq)),
@@ -457,30 +298,6 @@ Asserts that each element has the same count in both sequences.
 - [0, 1, 1] and [1, 0, 1] compare equal.
 - [0, 0, 1] and [0, 1] compare unequal.
 
-##### Args:
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-* <b>`first`</b>: A string contining JSON to decode and compare to second.
-* <b>`second`</b>: A string contining JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
 - - -
@@ -550,13 +367,6 @@ if not.
 * <b>`msg`</b>: An optional string message to append to the failure message.
 
 
-- - -
-
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
 - - -
@@ -587,33 +397,6 @@ as significant digits (measured from the most signficant digit).
 Objects that are equal automatically fail.
 
 
-- - -
-
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
 - - -
 
 #### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
@@ -651,20 +434,6 @@ Included for symmetry with assertIsInstance.
 Fail the test if the text matches the regular expression.
 
 
-- - -
-
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
 - - -
 
 #### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
@@ -739,38 +508,6 @@ Asserts that the message in a raised exception matches a regexp.
 * <b>`kwargs`</b>: Extra kwargs.
 
 
-- - -
-
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-  with self.assertRaisesWithLiteralMatch(ExType, 'message'):
-    DoSomething()
-
-##### Args:
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
-  exception. For a raise exception e, expected_exception_message must
-  equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
-  A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
-  self.failureException if callable_obj does not raise a macthing exception.
-
 - - -
 
 #### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
@@ -795,71 +532,6 @@ predicate search.
 exception.
 
 
-- - -
-
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
-  found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
-  A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
-  self.failureException if callable_obj does not raise a macthing exception.
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
-If possible you should use assertRegexpMatches, which is a simpler
-version of this method. assertRegexpMatches takes a single regular
-expression (a string or re compiled object) instead of a list.
-
-Notes:
-1. This function uses substring matching, i.e. the matching
-   succeeds if *any* substring of the error message matches *any*
-   regex in the list. This is more convenient for the user than
-   full-string matching.
-
-2. If regexes is the empty list, the matching will always fail.
-
-3. Use regexes=[''] for a regex that will always pass.
-
-4. '.' matches any single character *except* the newline. To
-   match any character, use '(.|
-   )'.
-
-5. '^' matches the beginning of each line, not just the beginning
-   of the string. Similarly, '$' matches the end of each line.
-
-6. An exception will be thrown if regexes contains an invalid
-   regex.
-
-Args:
-  actual_str: The string we try to match with the items in regexes.
-  regexes: The regular expressions we want to match against str.
-    See "Notes" above for detailed notes on how this is interpreted.
-  message: The message to be printed if the test fails.
-
 - - -
 
 #### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
@@ -867,79 +539,6 @@ Asserts that at least one regex in regexes matches str.
 Fail the test unless the text matches the regular expression.
 
 
-- - -
-
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
-  >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
-  # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most signficant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
 - - -
 
 #### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
@@ -960,26 +559,6 @@ which can be indexed, has a length, and has an equality operator.
 differences.
 
 
-- - -
-
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
 - - -
 
 #### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
@@ -1031,51 +610,6 @@ Assert that actual.startswith(expected_start) is True.
 * <b>`msg`</b>: Optional message to report on failure.
 
 
-- - -
-
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-  class A(object):
-    def __init__(self, x, y):
-      self.x = x
-      self.y = y
-
-    def __hash__(self):
-      return hash(self.x)
-
-    def __lt__(self, other):
-      try:
-        return self.x < other.x
-      except AttributeError:
-        return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-  self.assertTotallyOrdered(
-    [None],  # None should come before everything else.
-    [1],  # Integers sort earlier.
-    [A(1, 'a')],
-    [A(2, 'b')],  # 2 is after 1.
-    [A(3, 'c'), A(3, 'd')],  # The second argument is irrelevant.
-    [A(4, 'z')],
-    ['foo'])  # Strings sort last.
-
-##### Args:
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
-  of objects that are equal. The elements in each group must be less than
-  the elements in the group after it. For example, these groups are
-  totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
 - - -
 
 #### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
@@ -1098,13 +632,6 @@ A tuple-specific equality assertion.
 differences.
 
 
-- - -
-
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
 - - -
 
 #### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
@@ -1166,9 +693,9 @@ tearDown.
 
 - - -
 
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
 
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
 
 - - -
@@ -1220,13 +747,6 @@ Fail immediately with the given message, optionally prefixed.
 
 
 
-- - -
-
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
 - - -
 
 #### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
@@ -1249,20 +769,6 @@ pollute each others environment.
 
 
 
-- - -
-
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
-  attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
 - - -
 
 #### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1288,18 +794,11 @@ Hook method for setting up class fixture before running tests in the class.
 
 #### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
 
-Format both the test method name and the first line of its docstring.
+Returns a one-line description of the test, or None if no
+description has been provided.
 
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
 
 - - -
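Even after this trim, `tf.test.TestCase` keeps the familiar unittest-style surface. A small, hypothetical test using two of the methods still documented above (`assertItemsEqual` and `get_temp_dir`):

```python
import tensorflow as tf

class ExampleTest(tf.test.TestCase):

  def testItemsEqualAndTempDir(self):
    # assertItemsEqual compares element counts, ignoring order.
    self.assertItemsEqual([0, 1, 1], [1, 0, 1])
    # Each test gets its own scratch directory path.
    self.assertTrue(self.get_temp_dir())

if __name__ == '__main__':
  tf.test.main()
```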
@@ -0,0 +1,22 @@
+### `tf.scalar_summary(*args, **kwargs)` {#scalar_summary}
+
+Outputs a `Summary` protocol buffer with scalar values. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
+
+The input `tags` and `values` must have the same shape. The generated
+summary has a summary value for each tag-value pair in `tags` and `values`.
+
+Args:
+  tags: A `string` `Tensor`. Tags for the summaries.
+  values: A real numeric Tensor. Values for the summaries.
+  collections: Optional list of graph collections keys. The new summary op is
+    added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+  name: A name for the operation (optional).
+
+Returns:
+  A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+  buffer.
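A minimal migration sketch for this deprecation; the tag string becomes the op name and a single scalar tensor is passed rather than parallel `tags`/`values` tensors:

```python
import tensorflow as tf

loss = tf.constant(0.25)

# Deprecated:
#   summary_op = tf.scalar_summary('loss', loss)
# Replacement:
summary_op = tf.summary.scalar('loss', loss)
```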
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
@@ -0,0 +1,26 @@
+### `tf.histogram_summary(*args, **kwargs)` {#histogram_summary}
+
+Outputs a `Summary` protocol buffer with a histogram. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
+
+The generated
+[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+has one summary value containing a histogram for `values`.
+
+This op reports an `InvalidArgument` error if any value is not finite.
+
+Args:
+  tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
+  values: A real numeric `Tensor`. Any shape. Values to use to
+    build the histogram.
+  collections: Optional list of graph collections keys. The new summary op is
+    added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+  name: A name for the operation (optional).
+
+Returns:
+  A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+  buffer.
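A minimal migration sketch for this deprecation (the `activations` tensor is just an illustrative value):

```python
import tensorflow as tf

activations = tf.random_normal([1000])

# Deprecated:
#   summary_op = tf.histogram_summary('activations', activations)
# Replacement (the name is scoped and de-duplicated automatically):
summary_op = tf.summary.histogram('activations', activations)
```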
@@ -0,0 +1,27 @@
### `tf.merge_summary(*args, **kwargs)` {#merge_summary}

Merges summaries. (deprecated)

THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.merge.

This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.

When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.

Args:
  inputs: A list of `string` `Tensor` objects containing serialized `Summary`
    protocol buffers.
  collections: Optional list of graph collections keys. The new summary op is
    added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
  name: A name for the operation (optional).

Returns:
  A scalar `Tensor` of type `string`. The serialized `Summary` protocol
  buffer resulting from the merging.
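A small sketch of the suggested replacement, assuming the TF 1.x API; `x` and the summary names are illustrative:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, name='x')
s1 = tf.summary.scalar('x_value', x)
s2 = tf.summary.histogram('x_hist', x)

# Deprecated: merged = tf.merge_summary([s1, s2])
merged = tf.summary.merge([s1, s2])
# Or gather every summary registered in the SUMMARIES collection:
merged_all = tf.summary.merge_all()
```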
@@ -0,0 +1,4 @@
#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
@@ -0,0 +1,4 @@
#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
@@ -0,0 +1,207 @@

- - -

#### `tf.train.SummaryWriter.__init__(*args, **kwargs)` {#SummaryWriter.__init__}

Creates a `SummaryWriter` and an event file. (deprecated)

THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.FileWriter. The interface and behavior is the same; this is just a rename.

This class is deprecated, and should be replaced with tf.summary.FileWriter.

On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.

If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).

TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:

```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.train.SummaryWriter(<some-directory>, sess.graph)
```

The other arguments to the constructor control the asynchronous writes to
the event file:

* `flush_secs`: How often, in seconds, to flush the added summaries
  and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
  written to disk before one of the 'add' calls block.

Args:
  logdir: A string. Directory where event file will be written.
  graph: A `Graph` object, such as `sess.graph`.
  max_queue: Integer. Size of the queue for pending events and summaries.
  flush_secs: Number. How often, in seconds, to flush the
    pending events and summaries to disk.
  graph_def: DEPRECATED: Use the `graph` argument instead.

- - -

#### `tf.train.SummaryWriter.add_event(event)` {#SummaryWriter.add_event}

Adds an event to the event file.

##### Args:

* <b>`event`</b>: An `Event` protocol buffer.

- - -

#### `tf.train.SummaryWriter.add_graph(graph, global_step=None, graph_def=None)` {#SummaryWriter.add_graph}

Adds a `Graph` to the event file.

The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.

##### Args:

* <b>`graph`</b>: A `Graph` object, such as `sess.graph`.
* <b>`global_step`</b>: Number. Optional global step counter to record with the
  graph.
* <b>`graph_def`</b>: DEPRECATED. Use the `graph` parameter instead.

##### Raises:

* <b>`ValueError`</b>: If both graph and graph_def are passed to the method.

- - -

#### `tf.train.SummaryWriter.add_meta_graph(meta_graph_def, global_step=None)` {#SummaryWriter.add_meta_graph}

Adds a `MetaGraphDef` to the event file.

The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.

##### Args:

* <b>`meta_graph_def`</b>: A `MetaGraphDef` object, often as returned by
  `saver.export_meta_graph()`.
* <b>`global_step`</b>: Number. Optional global step counter to record with the
  graph.

##### Raises:

* <b>`TypeError`</b>: If `meta_graph_def` is not an instance of `MetaGraphDef`.

- - -

#### `tf.train.SummaryWriter.add_run_metadata(run_metadata, tag, global_step=None)` {#SummaryWriter.add_run_metadata}

Adds metadata information for a single session.run() call.

##### Args:

* <b>`run_metadata`</b>: A `RunMetadata` protobuf object.
* <b>`tag`</b>: The tag name for this metadata.
* <b>`global_step`</b>: Number. Optional global step counter to record with the
  StepStats.

##### Raises:

* <b>`ValueError`</b>: If the provided tag was already used for this type of event.

- - -

#### `tf.train.SummaryWriter.add_session_log(session_log, global_step=None)` {#SummaryWriter.add_session_log}

Adds a `SessionLog` protocol buffer to the event file.

This method wraps the provided session log in an `Event` protocol buffer
and adds it to the event file.

##### Args:

* <b>`session_log`</b>: A `SessionLog` protocol buffer.
* <b>`global_step`</b>: Number. Optional global step value to record with the
  summary.

- - -

#### `tf.train.SummaryWriter.add_summary(summary, global_step=None)` {#SummaryWriter.add_summary}

Adds a `Summary` protocol buffer to the event file.

This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.

You can pass the result of evaluating any summary op, using
[`Session.run()`](client.md#Session.run) or
[`Tensor.eval()`](framework.md#Tensor.eval), to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.

##### Args:

* <b>`summary`</b>: A `Summary` protocol buffer, optionally serialized as a string.
* <b>`global_step`</b>: Number. Optional global step value to record with the
  summary.

- - -

#### `tf.train.SummaryWriter.close()` {#SummaryWriter.close}

Flushes the event file to disk and closes the file.

Call this method when you do not need the summary writer anymore.

- - -

#### `tf.train.SummaryWriter.flush()` {#SummaryWriter.flush}

Flushes the event file to disk.

Call this method to make sure that all pending events have been written to
disk.

- - -

#### `tf.train.SummaryWriter.get_logdir()` {#SummaryWriter.get_logdir}

Returns the directory where event file will be written.

- - -

#### `tf.train.SummaryWriter.reopen()` {#SummaryWriter.reopen}

Reopens the EventFileWriter.

Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.

Does nothing if the EventFileWriter was not closed.
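Since the notice above describes `tf.summary.FileWriter` as a pure rename of this class, here is a hedged end-to-end sketch of the replacement; the log directory and summary name are assumptions, not taken from this commit:

```python
import tensorflow as tf

x = tf.constant(3.0, name='x')
tf.summary.scalar('x_value', x)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    # Same constructor arguments and methods as tf.train.SummaryWriter.
    writer = tf.summary.FileWriter('/tmp/filewriter_example', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.flush()
    writer.close()
```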
@@ -0,0 +1,37 @@
### `tf.audio_summary(*args, **kwargs)` {#audio_summary}

Outputs a `Summary` protocol buffer with audio. (deprecated)

THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.audio. Note that tf.summary.audio uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in.

The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.

The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:

* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
  generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

Args:
  tag: A scalar `Tensor` of type `string`. Used to build the `tag`
    of the summary values.
  tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
    or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
  sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
    signal in hertz.
  max_outputs: Max number of batch elements to generate audio for.
  collections: Optional list of ops.GraphKeys. The collections to add the
    summary to. Defaults to [ops.GraphKeys.SUMMARIES]
  name: A name for the operation (optional).

Returns:
  A scalar `Tensor` of type `string`. The serialized `Summary` protocol
  buffer.
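A hedged sketch of the audio variant under the same assumptions (TF 1.x API, fabricated waveform data):

```python
import tensorflow as tf

# Two fake one-second mono clips at 16 kHz, values already in [-1.0, 1.0].
waveforms = tf.random_uniform([2, 16000], minval=-1.0, maxval=1.0)

# Deprecated: tf.audio_summary('speech', waveforms, sample_rate=16000)
audio_op = tf.summary.audio('speech', waveforms, sample_rate=16000, max_outputs=2)
```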
@@ -0,0 +1,4 @@
#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
@@ -485,6 +485,187 @@ metadata is stored in its NodeDef. This method retrieves the description.
### `class tf.summary.SummaryDescription` {#SummaryDescription}

- - -

#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}

- - -

#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}

- - -

#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}

- - -

#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}

- - -

#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}

Copies the content of the specified message into the current message.

The method clears the current message and then merges the specified
message using MergeFrom.

##### Args:

* <b>`other_msg`</b>: Message to copy into the current one.

- - -

#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}

- - -

#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}

Finds required fields which are not initialized.

##### Returns:

  A list of strings. Each string is a path to an uninitialized field from
  the top-level message, e.g. "foo.bar[5].baz".

- - -

#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}

- - -

#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}

- - -

#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}

- - -

#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}

Checks if all required fields of a message are set.

##### Args:

* <b>`errors`</b>: A list which, if provided, will be populated with the field
  paths of all missing required fields.

##### Returns:

  True iff the specified message has all required fields set.

- - -

#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}

- - -

#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}

- - -

#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}

- - -

#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}

Parse serialized protocol buffer data into this message.

Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.

- - -

#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}

- - -

#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}

- - -

#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}

- - -

#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}

Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.

- - -

#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}

Returns the name of the currently set field inside a oneof, or None.

- - -

#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}

- - -

#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}

- - -

#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
@@ -492,12 +673,249 @@
Support the pickle protocol.

- - -

#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}

- - -

#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}

- - -

#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}

- - -

#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}

- - -

#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}

Support the pickle protocol.

- - -

#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}

- - -

#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}

- - -

#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}

Magic attribute generated for "type_hint" proto field.

- - -

### `class tf.summary.TaggedRunMetadata` {#TaggedRunMetadata}

- - -

#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}

- - -

#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}

- - -

#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}

- - -

#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}

- - -

#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}

Copies the content of the specified message into the current message.

The method clears the current message and then merges the specified
message using MergeFrom.

##### Args:

* <b>`other_msg`</b>: Message to copy into the current one.

- - -

#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}

- - -

#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}

Finds required fields which are not initialized.

##### Returns:

  A list of strings. Each string is a path to an uninitialized field from
  the top-level message, e.g. "foo.bar[5].baz".

- - -

#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}

- - -

#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}

- - -

#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}

- - -

#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}

Checks if all required fields of a message are set.

##### Args:

* <b>`errors`</b>: A list which, if provided, will be populated with the field
  paths of all missing required fields.

##### Returns:

  True iff the specified message has all required fields set.

- - -

#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}

- - -

#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}

- - -

#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}

- - -

#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}

Parse serialized protocol buffer data into this message.

Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.

- - -

#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}

- - -

#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}

- - -

#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}

- - -

#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}

Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.

- - -

#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}

Returns the name of the currently set field inside a oneof, or None.

- - -

#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}

- - -

#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}

- - -

#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
@@ -505,4 +923,67 @@
Support the pickle protocol.

- - -

#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}

- - -

#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}

- - -

#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}

- - -

#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}

- - -

#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}

Support the pickle protocol.

- - -

#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}

- - -

#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}

- - -

#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}

Magic attribute generated for "run_metadata" proto field.

- - -

#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}

Magic attribute generated for "tag" proto field.
@@ -213,125 +213,6 @@ Checks that for all elements of farray1 and farray2
* <b>`err`</b>: a float value.

- - -

#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}

Asserts that value is between minv and maxv (inclusive).

- - -

#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}

Asserts a shell command fails and the error matches a regex in a list.

##### Args:

* <b>`command`</b>: List or string representing the command to run.
* <b>`regexes`</b>: the list of regular expression strings.
* <b>`env`</b>: Dictionary of environment variable settings.
* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
  forking.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}

Asserts that a shell command succeeds (i.e. exits with code 0).

##### Args:

* <b>`command`</b>: List or string representing the command to run.
* <b>`regexes`</b>: List of regular expression byte strings that match success.
* <b>`env`</b>: Dictionary of environment variable settings.
* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
  forking.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}

Assert that "container" contains "subsequence" as an exact subsequence.

Asserts that "container" contains all the elements of "subsequence", in
order, and without other elements interspersed. For example, [1, 2, 3] is an
exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].

##### Args:

* <b>`container`</b>: the list we're testing for subsequence inclusion.
* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}

Asserts that the strings provided are found in the target in order.

This may be useful for checking HTML output.

##### Args:

* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
* <b>`target`</b>: A target string in which to look for the strings, such as
  'The quick brown fox jumped over the lazy dog'.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}

Assert that "container" contains "subsequence" as a subsequence.

Asserts that "container" contains all the elements of "subsequence", in
order, but possibly with other elements interspersed. For example, [1, 2, 3]
is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].

##### Args:

* <b>`container`</b>: the list we're testing for subsequence inclusion.
* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}

Checks whether actual iterable is a superset of expected iterable.

- - -

#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}

An unordered sequence specific comparison.

Equivalent to assertItemsEqual(). This method is a compatibility layer
for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
assertCountEqual() calls.

##### Args:

* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
* <b>`actual_seq`</b>: The sequence that we are testing.
* <b>`msg`</b>: The message to be printed if the test fails.

- - -

#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
@@ -354,48 +235,9 @@ Checks whether actual is a superset of expected.

- - -

-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}

Raises AssertionError if a and b are not equal dictionaries.

##### Args:

* <b>`a`</b>: A dict, the expected value.
* <b>`b`</b>: A dict, the actual value.
* <b>`msg`</b>: An optional str, the associated message.

##### Raises:

* <b>`AssertionError`</b>: if the dictionaries are not equal.

- - -

#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}

Assert that an object has zero length.

##### Args:

* <b>`container`</b>: Anything that implements the collections.Sized interface.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}

Assert that actual.endswith(expected_end) is True.

##### Args:

* <b>`actual`</b>: str
* <b>`expected_end`</b>: str
* <b>`msg`</b>: Optional message to report on failure.

- - -
@@ -480,11 +322,10 @@ Included for symmetry with assertIsNone.

- - -

-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}

-An unordered sequence specific comparison.
-
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.

Equivalent to::

    self.assertEqual(Counter(iter(actual_seq)),
@@ -497,30 +338,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.

##### Args:

* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
* <b>`actual_seq`</b>: The sequence that we are testing.
* <b>`msg`</b>: The message to be printed if the test fails.

- - -

#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}

Asserts that the JSON objects defined in two strings are equal.

A summary of the differences will be included in the failure message
using assertSameStructure.

##### Args:

* <b>`first`</b>: A string containing JSON to decode and compare to second.
* <b>`second`</b>: A string containing JSON to decode and compare to first.
* <b>`msg`</b>: Additional text to include in the failure message.

- - -
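As a side note on the `assertItemsEqual` semantics restated above (same element counts, order ignored), here is a minimal hypothetical Python 2 `unittest` sketch; the test class is illustrative and not part of the TensorFlow test suite:

```python
import unittest


class ItemsEqualExample(unittest.TestCase):
  """Hypothetical test; on Python 3 the method is called assertCountEqual."""

  def test_counts_matter_but_order_does_not(self):
    self.assertItemsEqual([0, 1, 1], [1, 0, 1])  # passes: same element counts
    with self.assertRaises(AssertionError):
      self.assertItemsEqual([0, 0, 1], [0, 1])   # fails: counts differ


if __name__ == '__main__':
  unittest.main()
```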
@@ -590,13 +407,6 @@ if not.
* <b>`msg`</b>: An optional string message to append to the failure message.

- - -

#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}

Checks whether actual iterable and expected iterable are disjoint.

- - -
@@ -627,33 +437,6 @@ as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.

- - -

#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}

Assert that an object has non-zero length.

##### Args:

* <b>`container`</b>: Anything that implements the collections.Sized interface.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}

Assert that actual.endswith(unexpected_end) is False.

##### Args:

* <b>`actual`</b>: str
* <b>`unexpected_end`</b>: str
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
@@ -691,20 +474,6 @@ Included for symmetry with assertIsInstance.
Fail the test if the text matches the regular expression.

- - -

#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}

Assert that actual.startswith(unexpected_start) is False.

##### Args:

* <b>`actual`</b>: str
* <b>`unexpected_start`</b>: str
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
@@ -779,38 +548,6 @@ Asserts that the message in a raised exception matches a regexp.
* <b>`kwargs`</b>: Extra kwargs.

- - -

#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}

Asserts that the message in a raised exception equals the given string.

Unlike assertRaisesRegexp, this method takes a literal string, not
a regular expression.

    with self.assertRaisesWithLiteralMatch(ExType, 'message'):
      DoSomething()

##### Args:

* <b>`expected_exception`</b>: Exception class expected to be raised.
* <b>`expected_exception_message`</b>: String message expected in the raised
  exception. For a raised exception e, expected_exception_message must
  equal str(e).
* <b>`callable_obj`</b>: Function to be called, or None to return a context.
* <b>`args`</b>: Extra args.
* <b>`kwargs`</b>: Extra kwargs.

##### Returns:

  A context manager if callable_obj is None. Otherwise, None.

##### Raises:

  self.failureException if callable_obj does not raise a matching exception.

- - -

#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
@@ -835,71 +572,6 @@ predicate search.
exception.

- - -

#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}

Asserts that the message in a raised exception matches the given regexp.

This is just a wrapper around assertRaisesRegexp. Please use
assertRaisesRegexp instead of assertRaisesWithRegexpMatch.

##### Args:

* <b>`expected_exception`</b>: Exception class expected to be raised.
* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
  found in error message.
* <b>`callable_obj`</b>: Function to be called, or None to return a context.
* <b>`args`</b>: Extra args.
* <b>`kwargs`</b>: Extra keyword args.

##### Returns:

  A context manager if callable_obj is None. Otherwise, None.

##### Raises:

  self.failureException if callable_obj does not raise a matching exception.

- - -

#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}

Asserts that at least one regex in regexes matches str.

If possible you should use assertRegexpMatches, which is a simpler
version of this method. assertRegexpMatches takes a single regular
expression (a string or re compiled object) instead of a list.

Notes:
1. This function uses substring matching, i.e. the matching
   succeeds if *any* substring of the error message matches *any*
   regex in the list. This is more convenient for the user than
   full-string matching.

2. If regexes is the empty list, the matching will always fail.

3. Use regexes=[''] for a regex that will always pass.

4. '.' matches any single character *except* the newline. To
   match any character, use '(.|\n)'.

5. '^' matches the beginning of each line, not just the beginning
   of the string. Similarly, '$' matches the end of each line.

6. An exception will be thrown if regexes contains an invalid
   regex.

Args:
  actual_str: The string we try to match with the items in regexes.
  regexes: The regular expressions we want to match against str.
    See "Notes" above for detailed notes on how this is interpreted.
  message: The message to be printed if the test fails.

- - -

#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
@@ -907,79 +579,6 @@ Asserts that at least one regex in regexes matches str.
Fail the test unless the text matches the regular expression.

- - -

#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}

Assert that two sequences have the same elements (in any order).

This method, unlike assertItemsEqual, doesn't care about any
duplicates in the expected and actual sequences.

    >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
    # Doesn't raise an AssertionError

If possible, you should use assertItemsEqual instead of
assertSameElements.

##### Args:

* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
* <b>`actual_seq`</b>: The sequence that we are testing.
* <b>`msg`</b>: The message to be printed if the test fails.

- - -

#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}

Asserts that two values contain the same structural content.

The two arguments should be data trees consisting of trees of dicts and
lists. They will be deeply compared by walking into the contents of dicts
and lists; other items will be compared using the == operator.
If the two structures differ in content, the failure message will indicate
the location within the structures where the first difference is found.
This may be helpful when comparing large structures.

##### Args:

* <b>`a`</b>: The first structure to compare.
* <b>`b`</b>: The second structure to compare.
* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
* <b>`bname`</b>: Variable name to use for the second structure.
* <b>`msg`</b>: Additional text to include in the failure message.

- - -

#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}

An approximate equality assertion for ordered sequences.

Fail if the two sequences are unequal as determined by their value
differences rounded to the given number of decimal places (default 7) and
comparing to zero, or by comparing that the difference between each value
in the two sequences is more than the given delta.

Note that decimal places (from zero) are usually not the same as significant
digits (measured from the most significant digit).

If the two sequences compare equal then they will automatically compare
almost equal.

##### Args:

* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
* <b>`actual_seq`</b>: The sequence that we are testing.
* <b>`places`</b>: The number of decimal places to compare.
* <b>`msg`</b>: The message to be printed if the test fails.
* <b>`delta`</b>: The OK difference between compared values.

- - -

#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
@@ -1000,26 +599,6 @@ which can be indexed, has a length, and has an equality operator.
differences.

- - -

#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}

An equality assertion for the beginning of ordered sequences.

If prefix is an empty sequence, it will raise an error unless whole is also
an empty sequence.

If prefix is not a sequence, it will raise an error if the first element of
whole does not match.

##### Args:

* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
* <b>`whole`</b>: The sequence in which to look for prefix.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
@@ -1071,51 +650,6 @@ Assert that actual.startswith(expected_start) is True.
* <b>`msg`</b>: Optional message to report on failure.

- - -

#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}

Asserts that total ordering has been implemented correctly.

For example, say you have a class A that compares only on its attribute x.
Comparators other than __lt__ are omitted for brevity.

    class A(object):
      def __init__(self, x, y):
        self.x = x
        self.y = y

      def __hash__(self):
        return hash(self.x)

      def __lt__(self, other):
        try:
          return self.x < other.x
        except AttributeError:
          return NotImplemented

assertTotallyOrdered will check that instances can be ordered correctly.
For example,

    self.assertTotallyOrdered(
        [None],  # None should come before everything else.
        [1],  # Integers sort earlier.
        [A(1, 'a')],
        [A(2, 'b')],  # 2 is after 1.
        [A(3, 'c'), A(3, 'd')],  # The second argument is irrelevant.
        [A(4, 'z')],
        ['foo'])  # Strings sort last.

##### Args:

* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
  of objects that are equal. The elements in each group must be less than
  the elements in the group after it. For example, these groups are
  totally ordered: [None], [1], [2, 2], [3].
* <b>`**kwargs`</b>: optional msg keyword argument can be passed.

- - -

#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
@@ -1138,13 +672,6 @@ A tuple-specific equality assertion.
differences.

- - -

#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}

Asserts that urls are equal, ignoring ordering of query params.

- - -

#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
@@ -1206,9 +733,9 @@ tearDown.

- - -

-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}

-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.

- - -
@@ -1260,13 +787,6 @@ Fail immediately with the given message, optionally prefixed.

- - -

#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}

Return any properties that the user has recorded.

- - -

#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
@@ -1289,20 +809,6 @@ pollute each others environment.

- - -

#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}

Record an arbitrary property for later use.

##### Args:

* <b>`property_name`</b>: str, name of property to record; must be a valid XML
  attribute name
* <b>`property_value`</b>: value of property; must be valid XML attribute value

- - -

#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
||||||
|
|
||||||
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
|
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
|
||||||
@ -1328,18 +834,11 @@ Hook method for setting up class fixture before running tests in the class.
|
|||||||
|
|
||||||
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
|
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
|
||||||
|
|
||||||
Format both the test method name and the first line of its docstring.
|
Returns a one-line description of the test, or None if no
|
||||||
|
description has been provided.
|
||||||
|
|
||||||
If no docstring is given, only returns the method name.
|
The default implementation of this method returns the first line of
|
||||||
|
the specified test method's docstring.
|
||||||
This method overrides unittest.TestCase.shortDescription(), which
|
|
||||||
only returns the first line of the docstring, obscuring the name
|
|
||||||
of the test upon failure.
|
|
||||||
|
|
||||||
##### Returns:
|
|
||||||
|
|
||||||
|
|
||||||
* <b>`desc`</b>: A short description of a test method.
|
|
||||||
|
|
||||||
|
|
||||||
- - -
|
- - -
|
||||||
|
@ -67,7 +67,7 @@ class _RNNCell(object):
|
|||||||
of operators that allow add dropouts, projections, or embeddings for inputs.
|
of operators that allow add dropouts, projections, or embeddings for inputs.
|
||||||
Constructing multi-layer cells is supported by the class `MultiRNNCell`,
|
Constructing multi-layer cells is supported by the class `MultiRNNCell`,
|
||||||
or by calling the `rnn` ops several times. Every `RNNCell` must have the
|
or by calling the `rnn` ops several times. Every `RNNCell` must have the
|
||||||
properties below and and implement `__call__` with the following signature.
|
properties below and implement `__call__` with the following signature.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __call__(self, inputs, state, scope=None):
|
def __call__(self, inputs, state, scope=None):
|
||||||
|
@@ -329,27 +329,20 @@ class CheckpointSaverHookTest(test.TestCase):
     with session_lib.Session() as sess:
       sess.run(self.scaffold.init_op)
       mon_sess = monitored_session._HookedSession(sess, [hook])
-      mon_sess.run(self.train_op)
-      mon_sess.run(self.train_op)
-      # Not saved
+      mon_sess.run(self.train_op)  # Saved.
+      mon_sess.run(self.train_op)  # Not saved.
       self.assertEqual(1,
                        checkpoint_utils.load_variable(self.model_dir,
                                                       self.global_step.name))
       time.sleep(2.5)
-      mon_sess.run(self.train_op)
-      # saved
-      self.assertEqual(3,
-                       checkpoint_utils.load_variable(self.model_dir,
-                                                      self.global_step.name))
-      mon_sess.run(self.train_op)
-      mon_sess.run(self.train_op)
-      # Not saved
+      mon_sess.run(self.train_op)  # Saved.
+      mon_sess.run(self.train_op)  # Not saved.
+      mon_sess.run(self.train_op)  # Not saved.
       self.assertEqual(3,
                        checkpoint_utils.load_variable(self.model_dir,
                                                       self.global_step.name))
       time.sleep(2.5)
-      mon_sess.run(self.train_op)
-      # saved
+      mon_sess.run(self.train_op)  # Saved.
       self.assertEqual(6,
                        checkpoint_utils.load_variable(self.model_dir,
                                                       self.global_step.name))
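For context on what the test above exercises, here is a hedged usage sketch of `CheckpointSaverHook` with `save_secs`, assuming the public TF 1.x `tf.train` API; the checkpoint directory and step counts are placeholders:

```python
import tensorflow as tf

global_step = tf.contrib.framework.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)

scaffold = tf.train.Scaffold()
# Save at most once every 2 seconds, the save_secs behaviour asserted above.
hook = tf.train.CheckpointSaverHook(
    checkpoint_dir='/tmp/ckpt_example', save_secs=2, scaffold=scaffold)

with tf.train.MonitoredSession(
    session_creator=tf.train.ChiefSessionCreator(scaffold=scaffold),
    hooks=[hook]) as sess:
  for _ in range(5):
    sess.run(train_op)
```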
@@ -34,7 +34,6 @@ from tensorflow.python.summary import summary as _summary
 from tensorflow.python.training import coordinator
 from tensorflow.python.training import saver as saver_mod
 from tensorflow.python.training import session_manager as session_manager_mod
-from tensorflow.python.training import summary_io
 from tensorflow.python.training import training_util
@@ -341,7 +340,7 @@ class Supervisor(object):
       self._save_path = os.path.join(self._logdir, checkpoint_basename)
     if summary_writer is Supervisor.USE_DEFAULT:
       if self._logdir:
-        self._summary_writer = summary_io.SummaryWriter(self._logdir)
+        self._summary_writer = _summary.FileWriter(self._logdir)
     else:
       self._summary_writer = summary_writer
     self._graph_added_to_summary = False
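On the public API side, the same move corresponds to replacing the deprecated `SummaryWriter` with `tf.summary.FileWriter`; a minimal hedged sketch (the log directory is illustrative):

import tensorflow as tf

logdir = "/tmp/tf_logs"  # illustrative path

# Old style (deprecated): writer = tf.train.SummaryWriter(logdir)
writer = tf.summary.FileWriter(logdir, graph=tf.get_default_graph())
writer.add_summary(
    tf.Summary(value=[tf.Summary.Value(tag="loss", simple_value=0.5)]),
    global_step=0)
writer.close()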
@@ -1,41 +1,11 @@
 # -*- Python -*-

-# Parse the bazel version string from `native.bazel_version`.
-def _parse_bazel_version(bazel_version):
-  # Remove commit from version.
-  version = bazel_version.split(" ", 1)[0]
-
-  # Split into (release, date) parts and only return the release
-  # as a tuple of integers.
-  parts = version.split('-', 1)
-
-  # Turn "release" into a tuple of strings
-  version_tuple = ()
-  for number in parts[0].split('.'):
-    version_tuple += (str(number),)
-  return version_tuple
-
 # Given a source file, generate a test name.
 # i.e. "common_runtime/direct_session_test.cc" becomes
 # "common_runtime_direct_session_test"
 def src_to_test_name(src):
   return src.replace("/", "_").split(".")[0]

-# Check that a specific bazel version is being used.
-def check_version(bazel_version):
-  if "bazel_version" not in dir(native):
-    fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" % bazel_version)
-  elif not native.bazel_version:
-    print("\nCurrent Bazel is not a release version, cannot check for compatibility.")
-    print("Make sure that you are running at least Bazel %s.\n" % bazel_version)
-  else:
-    current_bazel_version = _parse_bazel_version(native.bazel_version)
-    minimum_bazel_version = _parse_bazel_version(bazel_version)
-    if minimum_bazel_version > current_bazel_version:
-      fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
-          native.bazel_version, bazel_version))
-  pass
-
 # Return the options to use for a C++ library or binary build.
 # Uses the ":optmode" config_setting to pick the options.
 load(
@@ -21,4 +21,10 @@ source "${SCRIPT_DIR}/builds_common.sh"
 configure_android_workspace

 CPUS=armeabi-v7a,x86_64
-bazel build -c opt --fat_apk_cpu=${CPUS} //tensorflow/examples/android:tensorflow_demo
+
+# Enable sandboxing so that zip archives don't get incorrectly packaged
+# in assets/ dir (see https://github.com/bazelbuild/bazel/issues/2334)
+# TODO(gunan): remove extra flags once sandboxing is enabled for all builds.
+bazel --bazelrc=/dev/null build -c opt --fat_apk_cpu=${CPUS} \
+  --spawn_strategy=sandboxed --genrule_strategy=sandboxed \
+  //tensorflow/examples/android:tensorflow_demo
@@ -51,8 +51,12 @@ do
 done

 # Build Jar and also demo containing native libs for all architectures.
+# Enable sandboxing so that zip archives don't get incorrectly packaged
+# in assets/ dir (see https://github.com/bazelbuild/bazel/issues/2334)
+# TODO(gunan): remove extra flags once sandboxing is enabled for all builds.
 echo "========== Building TensorFlow Android Jar and Demo =========="
-bazel build -c opt --fat_apk_cpu=${CPUS} \
+bazel --bazelrc=/dev/null build -c opt --fat_apk_cpu=${CPUS} \
+  --spawn_strategy=sandboxed --genrule_strategy=sandboxed \
   //tensorflow/contrib/android:android_tensorflow_inference_java \
   //tensorflow/examples/android:tensorflow_demo

@@ -346,7 +346,7 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
     NO_PIP_MAIN_CMD="${ANDROID_CMD} ${OPT_FLAG} "
   elif [[ ${CTYPE} == "android_full" ]]; then
     # Run android specific script for full android build.
-    NO_PIP_MAIN_CMD="${ANDROID_full_CMD} ${OPT_FLAG} "
+    NO_PIP_MAIN_CMD="${ANDROID_FULL_CMD} ${OPT_FLAG} "
   fi

 fi
@@ -4,6 +4,36 @@ load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
 load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")

+# Parse the bazel version string from `native.bazel_version`.
+def _parse_bazel_version(bazel_version):
+  # Remove commit from version.
+  version = bazel_version.split(" ", 1)[0]
+
+  # Split into (release, date) parts and only return the release
+  # as a tuple of integers.
+  parts = version.split('-', 1)
+
+  # Turn "release" into a tuple of strings
+  version_tuple = ()
+  for number in parts[0].split('.'):
+    version_tuple += (str(number),)
+  return version_tuple
+
+# Check that a specific bazel version is being used.
+def check_version(bazel_version):
+  if "bazel_version" not in dir(native):
+    fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" % bazel_version)
+  elif not native.bazel_version:
+    print("\nCurrent Bazel is not a release version, cannot check for compatibility.")
+    print("Make sure that you are running at least Bazel %s.\n" % bazel_version)
+  else:
+    current_bazel_version = _parse_bazel_version(native.bazel_version)
+    minimum_bazel_version = _parse_bazel_version(bazel_version)
+    if minimum_bazel_version > current_bazel_version:
+      fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
+          native.bazel_version, bazel_version))
+  pass
+
 # If TensorFlow is linked as a submodule.
 # path_prefix and tf_repo_name are no longer used.
 def tf_workspace(path_prefix = "", tf_repo_name = ""):
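Since `_parse_bazel_version` is plain string handling, it can be exercised outside Bazel with ordinary Python (a standalone copy for illustration; `native.bazel_version` itself only exists during Starlark evaluation):

def _parse_bazel_version(bazel_version):
  # Drop any commit suffix, then split the release part into a tuple of strings.
  version = bazel_version.split(" ", 1)[0]
  parts = version.split('-', 1)
  version_tuple = ()
  for number in parts[0].split('.'):
    version_tuple += (str(number),)
  return version_tuple

print(_parse_bazel_version("0.4.3- (@non-git)"))                       # ('0', '4', '3')
print(_parse_bazel_version("0.4.2") <= _parse_bazel_version("0.4.3"))  # True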