diff --git a/.bazelrc b/.bazelrc
index 1a9c46362e5..d4d7ad61867 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -105,9 +105,6 @@ build --define=PREFIX=/usr
build --define=LIBDIR=$(PREFIX)/lib
build --define=INCLUDEDIR=$(PREFIX)/include
-# Disable MKL-DNN contraction kernels by default.
-build --define=tensorflow_mkldnn_contraction_kernel=0
-
# Default options should come above this line
# Options from ./configure
diff --git a/.github/ISSUE_TEMPLATE/00-bug-performance-issue.md b/.github/ISSUE_TEMPLATE/00-bug-performance-issue.md
index 34ba4cf9601..d562ced6f3a 100644
--- a/.github/ISSUE_TEMPLATE/00-bug-performance-issue.md
+++ b/.github/ISSUE_TEMPLATE/00-bug-performance-issue.md
@@ -18,10 +18,11 @@ about: Use this template for reporting a bug or a performance issue.
- CUDA/cuDNN version:
- GPU model and memory:
-
-You can collect some of this information using our environment capture [script](https://github.com/tensorflow/tensorflow/tree/master/tools/tf_env_collect.sh)
-You can also obtain the TensorFlow version with
-python -c "import tensorflow as tf; print(tf.GIT_VERSION, tf.VERSION)"
+You can collect some of this information using our environment capture
+[script](https://github.com/tensorflow/tensorflow/tree/master/tools/tf_env_collect.sh).
+You can also obtain the TensorFlow version with:
+1. TF 1.0: `python -c "import tensorflow as tf; print(tf.GIT_VERSION, tf.VERSION)"`
+2. TF 2.0: `python -c "import tensorflow as tf; print(tf.version.GIT_VERSION, tf.version.VERSION)"`
**Describe the current behavior**
diff --git a/.github/ISSUE_TEMPLATE/20-documentation-issue.md b/.github/ISSUE_TEMPLATE/20-documentation-issue.md
index 7123ca6d6c5..7f4a1f1b5b0 100644
--- a/.github/ISSUE_TEMPLATE/20-documentation-issue.md
+++ b/.github/ISSUE_TEMPLATE/20-documentation-issue.md
@@ -1,17 +1,55 @@
---
name: Documentation Issue
-about: Use this template for documentation related issues
+about: Use this template for documentation related issues
+labels: 'type:docs'
---
-Please make sure that this is a documentation issue. As per our [GitHub Policy](https://github.com/tensorflow/tensorflow/blob/master/ISSUES.md), we only address code/doc bugs, performance issues, feature requests and build/installation issues on GitHub. tag:doc_template
+Thank you for submitting a TensorFlow documentation issue. Per our GitHub
+policy, we only address code/doc bugs, performance issues, feature requests, and
+build/installation issues on GitHub.
+The TensorFlow docs are open source! To get involved, read the documentation
+contributor guide: https://www.tensorflow.org/community/contribute/docs
-**System information**
-- TensorFlow version:
-- Doc Link:
+## URL(s) with the issue:
+Please provide a link to the documentation entry, for example:
+https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/MyMethod
-**Describe the documentation issue**
+## Description of issue (what needs changing):
-**We welcome contributions by users. Will you be able to update submit a PR (use the [doc style guide](https://www.tensorflow.org/community/documentation)) to fix the doc Issue?**
+### Clear description
+
+For example, why should someone use this method? How is it useful?
+
+### Correct links
+
+Is the link to the source code correct?
+
+### Parameters defined
+
+Are all parameters defined and formatted correctly?
+
+### Returns defined
+
+Are return values defined?
+
+### Raises listed and defined
+
+Are the errors defined? For example,
+https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_file#raises
+
+### Usage example
+
+Is there a usage example?
+
+### Request visuals, if applicable
+
+Are there currently visuals? If not, would adding them clarify the content?
+
+### Submit a pull request?
+
+Are you planning to also submit a pull request to fix the issue? See the docs
+contributor guide: https://www.tensorflow.org/community/contribute/docs and the
+docs style guide: https://www.tensorflow.org/community/contribute/docs_style
diff --git a/.gitignore b/.gitignore
index e1d352c238a..99ba9312a92 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,15 +20,8 @@ tensorflow/contrib/cmake/_build/
[Bb]uild/
/tensorflow/core/util/version_info.cc
/tensorflow/python/framework/fast_tensor_util.cpp
-Pods
-Podfile.lock
-*.pbxproj
-*.xcworkspacedata
-/tensorflow/lite/tools/make/downloads/**
/tensorflow/lite/gen/**
-/tensorflow/lite/examples/ios/simple/data/*.txt
-/tensorflow/lite/examples/ios/simple/data/*.tflite
-xcuserdata/**
+/tensorflow/lite/tools/make/downloads/**
/api_init_files_list.txt
/estimator_api_init_files_list.txt
*.whl
@@ -39,3 +32,14 @@ xcuserdata/**
*.iml
local.properties
gradleBuild
+
+# iOS
+*.pbxproj
+*.xcworkspace
+/*.podspec
+/tensorflow/lite/**/[ios|objc|swift]*/BUILD
+/tensorflow/lite/examples/ios/simple/data/*.tflite
+/tensorflow/lite/examples/ios/simple/data/*.txt
+Podfile.lock
+Pods
+xcuserdata
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index b3d84ad8c94..04cd8cb65ef 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -32,7 +32,7 @@ https://github.com/tensorflow/tensorflow/tree/master/tools/tf_env_collect.sh
You can obtain the TensorFlow version with:
```bash
-python -c "import tensorflow as tf; print(tf.GIT_VERSION, tf.VERSION)"
+python -c "import tensorflow as tf; print(tf.version.GIT_VERSION, tf.version.VERSION)"
```
### Describe the problem
diff --git a/LICENSE b/LICENSE
index 4862420c023..12763eca4c2 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright 2018 The TensorFlow Authors. All rights reserved.
+Copyright 2019 The TensorFlow Authors. All rights reserved.
Apache License
Version 2.0, January 2004
diff --git a/README.md b/README.md
index 96a8ecf4f69..ec5e9af58d8 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-

+
-----------------
@@ -25,7 +25,7 @@ networks research. The system is general enough to be applicable in a wide
variety of other domains, as well.
TensorFlow provides stable Python and C APIs as well as non-guaranteed backwards
-compatible API's for C++, Go, Java, JavaScript and Swift.
+compatible APIs for C++, Go, Java, JavaScript, and Swift.
Keep up to date with release announcements and security updates by
subscribing to
@@ -50,10 +50,10 @@ instructions, and how to build from source.*
People who are a little more adventurous can also try our nightly binaries:
-**Nightly pip packages**
-* We are pleased to announce that TensorFlow now offers nightly pip packages
-under the [tf-nightly](https://pypi.python.org/pypi/tf-nightly) and
-[tf-nightly-gpu](https://pypi.python.org/pypi/tf-nightly-gpu) project on pypi.
+**Nightly pip packages**
+
+We are pleased to announce that TensorFlow now offers nightly pip packages
+under the [tf-nightly](https://pypi.python.org/pypi/tf-nightly) and
+[tf-nightly-gpu](https://pypi.python.org/pypi/tf-nightly-gpu) projects on PyPI.
Simply run `pip install tf-nightly` or `pip install tf-nightly-gpu` in a clean
environment to install the nightly TensorFlow build. We support CPU and GPU
packages on Linux, Mac, and Windows.
@@ -85,7 +85,7 @@ guidelines](CONTRIBUTING.md). This project adheres to TensorFlow's
uphold this code.**
**We use [GitHub issues](https://github.com/tensorflow/tensorflow/issues) for
-tracking requests and bugs, so please see
+tracking requests and bugs; please see
[TensorFlow Discuss](https://groups.google.com/a/tensorflow.org/forum/#!forum/discuss)
for general questions and discussion, and please direct specific questions to
[Stack Overflow](https://stackoverflow.com/questions/tagged/tensorflow).**
@@ -114,15 +114,16 @@ The TensorFlow project strives to abide by generally accepted best practices in
### Community Supported Builds
-Build Type | Status | Artifacts
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------
-**IBM s390x** | [](http://ibmz-ci.osuosl.org/job/TensorFlow_IBMZ_CI/) | TBA
-**Linux ppc64le CPU** Nightly | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Build/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Nightly_Artifact/)
-**Linux ppc64le CPU** Stable Release | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/)
-**Linux ppc64le GPU** Nightly | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Build/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Nightly_Artifact/)
-**Linux ppc64le GPU** Stable Release | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/)
-**Linux CPU with Intel® MKL-DNN** Nightly | [](https://tensorflow-ci.intel.com/job/tensorflow-mkl-linux-cpu/) | [Nightly](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-whl-nightly/)
-**Linux CPU with Intel® MKL-DNN** Python 2.7 **Linux CPU with Intel® MKL-DNN** Python 3.4 **Linux CPU with Intel® MKL-DNN** Python 3.5 **Linux CPU with Intel® MKL-DNN** Python 3.6 | [](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-release-whl/lastStableBuild) | [1.12.0 py2.7](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp27-cp27mu-linux_x86_64.whl) [1.12.0 py3.4](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp34-cp34m-linux_x86_64.whl) [1.12.0 py3.5](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp35-cp35m-linux_x86_64.whl) [1.12.0 py3.6](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp36-cp36m-linux_x86_64.whl)
+Build Type | Status | Artifacts
+--------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------
+**IBM s390x** | [](http://ibmz-ci.osuosl.org/job/TensorFlow_IBMZ_CI/) | TBA
+**Linux ppc64le CPU** Nightly | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Build/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Nightly_Artifact/)
+**Linux ppc64le CPU** Stable Release | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/)
+**Linux ppc64le GPU** Nightly | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Build/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Nightly_Artifact/)
+**Linux ppc64le GPU** Stable Release | [](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/)
+**Linux CPU with Intel® MKL-DNN** Nightly | [](https://tensorflow-ci.intel.com/job/tensorflow-mkl-linux-cpu/) | [Nightly](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-whl-nightly/)
+**Linux CPU with Intel® MKL-DNN** **Supports Python 2.7, 3.4, 3.5, and 3.6** | [](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-release-whl/lastStableBuild) | [1.13.1 pypi](https://pypi.org/project/intel-tensorflow/)
+**Red Hat® Enterprise Linux® 7.6 CPU & GPU** Python 2.7, 3.6 | [](https://jenkins-tensorflow.apps.ci.centos.org/job/tensorflow-rhel7-3.6/2/) | [1.13.1 pypi](https://tensorflow.pypi.thoth-station.ninja/index/)
## For more information
diff --git a/RELEASE.md b/RELEASE.md
index 0a56e690987..c2c50c590ba 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,3 +1,212 @@
+# Release 1.12.2
+
+## Bug Fixes and Other Changes
+
+* Fixes a potential security vulnerability where carefully crafted GIF images
+ can produce a null pointer dereference during decoding.
+
+# Release 1.13.0
+
+## Major Features and Improvements
+
+* TensorFlow Lite has moved from contrib to core. This means that Python modules are under `tf.lite` and source code is now under `tensorflow/lite` rather than `tensorflow/contrib/lite`.
+* TensorFlow GPU binaries are now built against CUDA 10 and TensorRT 5.0.
+*   Support for Python 3.7 on all operating systems.
+* Moved NCCL to core.
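+
+As a minimal sketch (not part of the release notes) of the `tf.lite` move noted
+in the first item above, assuming the TF 1.13 1.x-style Python API; the tensor
+names are illustrative only:
+
+```python
+import tensorflow as tf
+
+# TensorFlow Lite now lives in core: tf.lite replaces tf.contrib.lite.
+x = tf.placeholder(tf.float32, [1, 4], name="x")
+y = tf.identity(x * 2.0, name="y")
+with tf.Session() as sess:
+    converter = tf.lite.TFLiteConverter.from_session(sess, [x], [y])
+    tflite_model = converter.convert()
+print(len(tflite_model), "bytes of TFLite flatbuffer")
+```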
+
+## Behavioral changes
+
+* Disallow conversion of python floating types to uint32/64 (matching behavior of other integer types) in `tf.constant`.
+* Make the `gain` argument of convolutional orthogonal initializers (`convolutional_delta_orthogonal`, `convolutional_orthogonal_1D`, `convolutional_orthogonal_2D`, `convolutional_orthogonal_3D`) have consistent behavior with the `tf.initializers.orthogonal` initializer, i.e. scale the output l2-norm by `gain` and NOT by `sqrt(gain)`. (Note that these functions are currently in `tf.contrib` which is not guaranteed backward compatible).
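+
+For illustration only, a minimal sketch of the first behavioral change above,
+assuming TF 1.13 semantics:
+
+```python
+import tensorflow as tf
+
+tf.constant(1, dtype=tf.uint32)      # integer literals are still accepted
+# tf.constant(1.5, dtype=tf.uint32)  # now raises an error instead of silently
+#                                    # converting the Python float
+```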
+
+## Bug Fixes and Other Changes
+
+* Documentation
+ * Update the doc with the details about the rounding mode used in
+ quantize_and_dequantize_v2.
+ * Clarify that tensorflow::port::InitMain() _should_ be called before
+ using the TensorFlow library. Programs failing to do this are not
+ portable to all platforms.
+* Deprecations and Symbol renames.
+ * Removing deprecations for the following endpoints: `tf.acos`,
+ `tf.acosh`, `tf.add`, `tf.as_string`, `tf.asin`, `tf.asinh`, `tf.atan`,
+ `tf.atan2`, `tf.atanh`, `tf.cos`, `tf.cosh`, `tf.equal`, `tf.exp`,
+ `tf.floor`, `tf.greater`, `tf.greater_equal`, `tf.less`,
+        `tf.less_equal`, `tf.log`, `tf.log1p`, `tf.logical_and`,
+ `tf.logical_not`, `tf.logical_or`, `tf.maximum`, `tf.minimum`,
+ `tf.not_equal`, `tf.sin`, `tf.sinh`, `tf.tan`
+ * Deprecate `tf.data.Dataset.shard`.
+ * Deprecate `saved_model.loader.load` which is replaced by
+ `saved_model.load` and `saved_model.main_op`, which will be replaced by
+ `saved_model.main_op` in V2.
+    *   Deprecate `tf.QUANTIZED_DTYPES`. The official new symbol is
+        `tf.dtypes.QUANTIZED_DTYPES`.
+ * Update sklearn imports for deprecated packages.
+ * Deprecate `Variable.count_up_to` and `tf.count_up_to` in favor of
+ `Dataset.range`.
+ * Export `confusion_matrix` op as `tf.math.confusion_matrix` instead of
+ `tf.train.confusion_matrix`.
+ * Add `tf.dtypes.` endpoint for every constant in dtypes.py. Moving
+ endpoints in versions.py to corresponding endpoints in `tf.sysconfig.`
+ and `tf.version.`. Moving all constants under `tf.saved_model`
+ submodules to `tf.saved_model` module. New endpoints are added in V1 and
+ V2 but existing endpoint removals are only applied in V2.
+ * Deprecates behavior where device assignment overrides collocation
+ constraints inside a collocation context manager.
+* Keras & Python API
+ * Add to Keras functionality analogous to
+ `tf.register_tensor_conversion_function`.
+ * Subclassed Keras models can now be saved through
+ `tf.contrib.saved_model.save_keras_model`.
+ * `LinearOperator.matmul` now returns a new `LinearOperator`.
+* New ops and improved op functionality
+ * Add a Nearest Neighbor Resize op.
+    *   Add an `ignore_unknown` argument to `parse_values` which suppresses
+        ValueError for unknown hyperparameter types. Such hyperparameters are
+        ignored.
+    *   Add `tf.linalg.matvec` convenience function.
+    *   `tf.einsum()` raises `ValueError` for unsupported equations like
+ `"ii->"`.
+ * Add DCT-I and IDCT-I in `tf.signal.dct` and `tf.signal.idct`.
+ * Add LU decomposition op.
+ * Add quantile loss to gradient boosted trees in estimator.
+ * Add `round_mode` to `QuantizeAndDequantizeV2` op to select rounding
+ algorithm.
+ * Add `unicode_encode`, `unicode_decode`, `unicode_decode_with_offsets`,
+ `unicode_split`, `unicode_split_with_offset`, and `unicode_transcode`
+ ops. Amongst other things, this Op adds the ability to encode, decode,
+ and transcode a variety of input text encoding formats into the main
+ Unicode encodings (UTF-8, UTF-16-BE, UTF-32-BE)
+ * Add "unit" attribute to the substr op, which allows obtaining the
+ substring of a string containing unicode characters.
+ * Broadcasting support for Ragged Tensors.
+ * `SpaceToDepth` supports uint8 data type.
+ * Support multi-label quantile regression in estimator.
+    *   We now use "div" as the default partition_strategy in
+        `tf.nn.safe_embedding_lookup_sparse`, `tf.nn.sampled_softmax` and
+        `tf.nn.nce_loss`.
+* Performance
+ * Improve performance of GPU cumsum/cumprod by up to 300x.
+ * Added support for weight decay in most TPU embedding optimizers,
+ including AdamW and MomentumW.
+* TensorFlow 2.0 Development
+    *   Add a command line tool to convert to TF 2.0, `tf_upgrade_v2`.
+ * Merge `tf.spectral` into `tf.signal` for TensorFlow 2.0.
+    *   Change the default recurrent activation function for LSTM from
+        'hard_sigmoid' to 'sigmoid' in 2.0. Historically the recurrent
+        activation was 'hard_sigmoid' since it is faster than 'sigmoid'. With
+        the new unified backend between CPU and GPU modes, and since the CuDNN
+        kernel uses sigmoid, we change the default for CPU mode to sigmoid as
+        well. With that, the default LSTM will be compatible with both the CPU
+        and GPU kernels. This enables users with GPUs to use the CuDNN kernel
+        by default and get a 10x performance boost in training. Note that this
+        is a checkpoint-breaking change. Users who want to use their 1.x
+        pre-trained checkpoints should construct the layer with
+        LSTM(recurrent_activation='hard_sigmoid') to fall back to 1.x behavior
+        (see the sketch at the end of this list).
+* TensorFlow Lite
+ * Move from `tensorflow/contrib/lite` to `tensorflow/lite`.
+    *   Add an experimental Java API for injecting TensorFlow Lite delegates.
+ * Add support for strings in TensorFlow Lite Java API.
+* `tf.contrib`:
+ * Add Apache Ignite Filesystem plugin to support accessing Apache IGFS.
+ * Dropout now takes `rate` argument, `keep_prob` is deprecated.
+    *   References to `tf.contrib.estimator` were changed to `tf.estimator`:
+ * `tf.contrib.estimator.BaselineEstimator` with
+ `tf.estimator.BaselineEstimator`
+ * `tf.contrib.estimator.DNNLinearCombinedEstimator` with
+ `tf.estimator.DNNLinearCombinedEstimator`
+ * `tf.contrib.estimator.DNNEstimator` with `tf.estimator.DNNEstimator`
+ * `tf.contrib.estimator.LinearEstimator` with
+ `tf.estimator.LinearEstimator`
+        *   `tf.contrib.estimator.InMemoryEvaluatorHook` with
+            `tf.estimator.experimental.InMemoryEvaluatorHook`.
+ * `tf.contrib.estimator.make_stop_at_checkpoint_step_hook` with
+ `tf.estimator.experimental.make_stop_at_checkpoint_step_hook`.
+    *   Expose `tf.distribute.Strategy` as the new name for
+        `tf.contrib.distribute.DistributionStrategy`.
+ * Migrate linear optimizer from contrib to core.
+    *   Move `tf.contrib.signal` to `tf.signal` (preserving aliases in
+        `tf.contrib.signal`).
+ * Users of `tf.contrib.estimator.export_all_saved_models` and related
+ should switch to
+ `tf.estimator.Estimator.experimental_export_all_saved_models`.
+* tf.data:
+ * Add `tf.data.experimental.StatsOptions()`, to configure options to
+ collect statistics from `tf.data.Dataset` pipeline using
+ `StatsAggregator`. Add nested option, `experimental_stats` (which takes
+        a `tf.data.experimental.StatsOptions` object), to `tf.data.Options`.
+        Deprecates `tf.data.experimental.set_stats_aggregator`.
+ * Performance optimizations:
+ * Add `tf.data.experimental.OptimizationOptions()`, to configure options
+ to enable `tf.data` performance optimizations. Add nested option,
+ `experimental_optimization` (which takes a
+ `tf.data.experimental.OptimizationOptions` object), to
+ `tf.data.Options`. Remove performance optimization options from
+ `tf.data.Options`, and add them under
+ `tf.data.experimental.OptimizationOptions` instead.
+ * Enable `map_and_batch_fusion` and `noop_elimination` optimizations by
+ default. They can be disabled by configuring
+ `tf.data.experimental.OptimizationOptions` to set `map_and_batch =
+ False` or `noop_elimination = False` respectively. To disable all
+ default optimizations, set `apply_default_optimizations = False`.
+ * Support parallel map in `map_and_filter_fusion`.
+ * Disable static optimizations for input pipelines that use non-resource
+ `tf.Variable`s.
+ * Add NUMA-aware MapAndBatch dataset.
+ * Deprecate `tf.data.Dataset.make_one_shot_iterator()` in V1, removed it
+        from V2, and added `tf.compat.v1.data.make_one_shot_iterator()`.
+ * Deprecate `tf.data.Dataset.make_initializable_iterator()` in V1, removed
+ it from V2, and added `tf.compat.v1.data.make_initializable_iterator()`.
+ * Enable nested dataset support in core `tf.data` transformations.
+ * For `tf.data.Dataset` implementers: Added
+        `tf.data.Dataset._element_structure` property to replace
+ `Dataset.output_{types,shapes,classes}`.
+ * Make `num_parallel_calls` of `tf.data.Dataset.interleave` and
+ `tf.data.Dataset.map` work in Eager mode.
+* Toolchains
+ * Fixed OpenSSL compatibility by avoiding `EVP_MD_CTX_destroy`.
+ * Added bounds checking to printing deprecation warnings.
+ * Upgraded CUDA dependency to 10.0
+ * To build with Android NDK r14b, add "#include " to
+ android-ndk-r14b/platforms/android-14/arch-*/usr/include/linux/futex.h
+ * Removed `:android_tensorflow_lib_selective_registration*` targets, use
+ `:android_tensorflow_lib_lite*` targets instead.
+* XLA
+ * Move `RoundToEven` function to xla/client/lib/math.h.
+ * A new environment variable `TF_XLA_DEBUG_OPTIONS_PASSTHROUGH` set to "1"
+ or "true" allows the debug options passed within an XRTCompile op to be
+ passed directly to the XLA compilation backend. If such variable is not
+ set (service side), only a restricted set will be passed through.
+    *   Allow the XRTCompile op to return the ProgramShape resulting from the XLA
+ compilation as a second return argument.
+ * XLA HLO graphs can now be rendered as SVG/HTML.
+* Estimator
+    *   Replace all occurrences of `tf.contrib.estimator.BaselineEstimator` with
+ `tf.estimator.BaselineEstimator`
+    *   Replace all occurrences of
+ `tf.contrib.estimator.DNNLinearCombinedEstimator` with
+ `tf.estimator.DNNLinearCombinedEstimator`
+ * Replace all occurrences of `tf.contrib.estimator.DNNEstimator` with
+ `tf.estimator.DNNEstimator`
+ * Replace all occurrences of `tf.contrib.estimator.LinearEstimator` with
+ `tf.estimator.LinearEstimator`
+ * Users of `tf.contrib.estimator.export_all_saved_models` and related
+ should switch to
+ `tf.estimator.Estimator.experimental_export_all_saved_models`.
+ * Update `regression_head` to the new Head API for Canned Estimator V2.
+ * Switch `multi_class_head` to Head API for Canned Estimator V2.
+    *   Replace all occurrences of `tf.contrib.estimator.InMemoryEvaluatorHook`
+ and `tf.contrib.estimator.make_stop_at_checkpoint_step_hook` with
+ `tf.estimator.experimental.InMemoryEvaluatorHook` and
+ `tf.estimator.experimental.make_stop_at_checkpoint_step_hook`
+ * Migrate linear optimizer from contrib to core.
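+
+As a minimal sketch (not part of the release notes) of the LSTM default change
+described under "TensorFlow 2.0 Development" above, assuming the `tf.keras`
+API:
+
+```python
+import tensorflow as tf
+
+# New 2.0 default: recurrent_activation='sigmoid', matching the CuDNN kernel.
+layer = tf.keras.layers.LSTM(64)
+
+# To keep loading a 1.x pre-trained checkpoint, request the old default:
+legacy_layer = tf.keras.layers.LSTM(64, recurrent_activation='hard_sigmoid')
+```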
+
+## Thanks to our Contributors
+
+This release contains contributions from many people at Google, as well as:
+
+Abhinav Upadhyay, Ag Ramesh, akikaaa, Alexis Louis, Anders Huss, Andreas Madsen, Andrew Banchich, Andy Craze, Anton Dmitriev, Artem Malykh, Avijit-Nervana, Balint Cristian, Benjamin Tan Wei Hao, Bhavani Subramanian, Brendan Finan, Brian Nemsick, Bryan Cutler, By Shen, Cao Zongyan, Castiel, Chris Antaki, Christian Goll, Cibifang, Clayne Robison, Codrut Grosu, Cong Xu, Dalmo Cirne, Daniel Hunter, Dougal J. Sutherland, Edvard Fagerholm, EFanZh, Erik Smistad, Evgeniy Polyakov, Feiyang Chen, franklin5, Fred Reiss, Gautam, gehring, Geoffrey Irving, George Sterpu, Gitea, Grzegorz George Pawelczak, Guozhong Zhuang, himkt, Hoeseong Kim, Huan Li (李卓桓), HuiyangFei, hyunyoung, Isaac Burbank, jackonan, Jacky Ko, Jason Furmanek, Jason Zaman, Javier Luraschi, Jiang,Zhoulong, joaak, John Lin, Jonathan Wyatt Hoech, josephyearsley, Josh Gordon, Julian Niedermeier, Karl Lessard, Keno Fischer, lanhin, Leon Graser, leondgarse, Li, Guizi, Li, Yiqiang, lxl910915, Mahmoud Abuzaina, manhyuk, Marcela Morales Quispe, margaretmz, Matt Conley, Max Pumperla, mbhuiyan, mdfaijul, Meng, Peng, Michael, Michael Gielda, mrTsjolder, Muhammad Wildan, neargye, Nehal J Wani, NEWPLAN, Niranjan Hasabnis, Nutti, olicht, Pan Daoxin, Pedro Monreal, Peng Yu, pillarpond, Pooya Davoodi, qiezi, Rholais Lii, Richard Yu, Rin Arakaki, Roger Iyengar, sahilbadyal, Sami Kama, Sandip Giri, Scott Leishman, Serge Panev, Seunghoon Park, Shafi Dayatar, shengfuintel, Shimin Guo, Siju, silent567, Stefan Dyulgerov, steven, Tao Wei, Thor Johnsen, Tingbo Lu, tomguluson92, Tongxuan Liu, Trevor Morris, Ubuntu, Vadim Borisov, vanderliang, wangsiyu, Wen Yun, Wen-Heng (Jack) Chung, wenxizhu, William D. Irons, Xiaoming (Jason) Cui, Yan Facai (颜发才), Yanbo Liang, Yaniv Blumenfeld, Yash Gaurkar, Yicheng Fan, Yong Tang, Yongjoon Lee, Yuan (Terry) Tang, Yuxin Wu, zldrobit
+
# Release 1.12.0
## Major Features and Improvements
@@ -38,21 +247,21 @@
* Remove integer types from `tf.nn.softplus` and `tf.nn.softsign` OpDefs.
This is a bugfix; these ops were never meant to support integers.
* Allow subslicing Tensors with a single dimension.
- * Add option to calculate string length in Unicode characters
+ * Add option to calculate string length in Unicode characters.
* Add functionality to SubSlice a tensor.
* Add searchsorted (ie lower/upper_bound) op.
* Add model explainability to Boosted Trees.
- * Support negative positions for tf.substr
+ * Support negative positions for tf.substr.
* There was previously a bug in the bijector_impl where the
_reduce_jacobian_det_over_event does not handle scalar ILDJ
implementations properly.
- * In tf eager execution, allow re-entering a GradientTape context
+ * In tf eager execution, allow re-entering a GradientTape context.
* Add tf_api_version flag. If --define=tf_api_version=2 flag is passed in,
then bazel will build TensorFlow API version 2.0. Note that TensorFlow
2.0 is under active development and has no guarantees at this point.
- * Add additional compression options to TfRecordWriter
+ * Add additional compression options to TfRecordWriter.
* Performance improvements for regex full match operations.
- * Replace tf.GraphKeys.VARIABLES with `tf.GraphKeys.GLOBAL_VARIABLES`
+ * Replace tf.GraphKeys.VARIABLES with `tf.GraphKeys.GLOBAL_VARIABLES`.
* Remove unused dynamic learning rate support.
## Thanks to our Contributors
@@ -75,15 +284,22 @@ Facai (颜发才), Yanbo Liang, Yash Katariya, Yong Tang, 在原佐为
## Major Features and Improvements
-* Nvidia GPU:
- * Prebuilt binaries are now (as of TensorFlow 1.11) built against cuDNN 7.2 and TensorRT 4. See updated install guides: [Installing TensorFlow on Ubuntu](https://www.tensorflow.org/install/install_linux#tensorflow_gpu_support)
-* Google Cloud TPU:
- * Experimental tf.data integration for Keras on Google Cloud TPUs.
- * Experimental / preview support for eager execution on Google Cloud TPUs.
-* DistributionStrategy:
- * Add multi-GPU DistributionStrategy support in tf.keras. Users can now use `fit`, `evaluate` and `predict` to distribute their model on multiple GPUs.
- * Add multi-worker DistributionStrategy and standalone client support in Estimator. See [README] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute) for more details.
-* Add C, C++, and Python functions for querying kernels
+* Nvidia GPU:
+ * Prebuilt binaries are now (as of TensorFlow 1.11) built against cuDNN
+ 7.2 and TensorRT 4. See updated install guides:
+ [Installing TensorFlow on Ubuntu](https://www.tensorflow.org/install/install_linux#tensorflow_gpu_support)
+* Google Cloud TPU:
+ * Experimental tf.data integration for Keras on Google Cloud TPUs.
+ * Experimental / preview support for eager execution on Google Cloud TPUs.
+* DistributionStrategy:
+ * Add multi-GPU DistributionStrategy support in tf.keras. Users can now
+ use `fit`, `evaluate` and `predict` to distribute their model on
+ multiple GPUs.
+ * Add multi-worker DistributionStrategy and standalone client support in
+ Estimator. See
+ [README](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute)
+ for more details.
+* Add C, C++, and Python functions for querying kernels.
## Breaking Changes
@@ -134,18 +350,18 @@ Facai (颜发才), Yanbo Liang, Yash Katariya, Yong Tang, 在原佐为
* Deprecate self.test_session() in favor of self.session() or
self.cached_session().
* Directly import tensor.proto.h (the transitive import will be removed
- from tensor.h soon)
+ from tensor.h soon).
* Estimator.train() now supports tf.contrib.summary.\* summaries out of
the box; each call to .train() will now create a separate tfevents file
rather than re-using a shared one.
* Fix FTRL L2-shrinkage behavior: the gradient from the L2 shrinkage term
should not end up in the accumulator.
- * Fix toco compilation/execution on Windows
+ * Fix toco compilation/execution on Windows.
* GoogleZoneProvider class added to detect which Google Cloud Engine zone
tensorflow is running in.
* It is now safe to call any of the C API's TF_Delete\* functions on
- nullptr
- * Log some errors on Android to logcat
+ nullptr.
+ * Log some errors on Android to logcat.
* Match FakeQuant numerics in TFLite to improve accuracy of TFLite
quantized inference models.
* Optional bucket location check for the GCS Filesystem.
@@ -166,7 +382,7 @@ Facai (颜发才), Yanbo Liang, Yash Katariya, Yong Tang, 在原佐为
the existing zero_state() method.
* Update initialization of variables in Keras.
* Updates to "constrained_optimization" in tensorflow/contrib.
- * boosted trees: adding pruning mode
+ * boosted trees: adding pruning mode.
* tf.train.Checkpoint does not delete old checkpoints by default.
* tfdbg: Limit the total disk space occupied by dumped tensor data to 100
GBytes. Add environment variable `TFDBG_DISK_BYTES_LIMIT` to allow
diff --git a/WORKSPACE b/WORKSPACE
index 9f07b9fd471..868421dc31e 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -4,11 +4,11 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file"
http_archive(
name = "io_bazel_rules_closure",
- sha256 = "43c9b882fa921923bcba764453f4058d102bece35a37c9f6383c713004aacff1",
- strip_prefix = "rules_closure-9889e2348259a5aad7e805547c1a0cf311cfcd91",
+ sha256 = "e0a111000aeed2051f29fcc7a3f83be3ad8c6c93c186e64beb1ad313f0c7f9f9",
+ strip_prefix = "rules_closure-cf1e44edb908e9616030cc83d085989b8e6cd6df",
urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/rules_closure/archive/9889e2348259a5aad7e805547c1a0cf311cfcd91.tar.gz",
- "https://github.com/bazelbuild/rules_closure/archive/9889e2348259a5aad7e805547c1a0cf311cfcd91.tar.gz", # 2018-12-21
+ "http://mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz",
+ "https://github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", # 2019-04-04
],
)
@@ -43,17 +43,37 @@ remote_config_workspace()
# Apple and Swift rules.
http_archive(
name = "build_bazel_rules_apple",
- sha256 = "73b4980a318d203d3307f850e27e66ec5cc8d223147a3475a6f11597eb6438a5",
- strip_prefix = "rules_apple-0.13.0",
- urls = ["https://github.com/bazelbuild/rules_apple/archive/0.13.0.tar.gz"],
-)
+ sha256 = "23792cd999f97fc97284d1c44cb1324bfdd0bc54aa68ad513fa3705aca3b1f9e",
+ urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.15.0/rules_apple.0.15.0.tar.gz"],
+) # https://github.com/bazelbuild/rules_apple/releases
+http_archive(
+ name = "build_bazel_apple_support",
+ sha256 = "7356dbd44dea71570a929d1d4731e870622151a5f27164d966dda97305f33471",
+ urls = ["https://github.com/bazelbuild/apple_support/releases/download/0.6.0/apple_support.0.6.0.tar.gz"],
+) # https://github.com/bazelbuild/apple_support/releases
+http_archive(
+ name = "bazel_skylib",
+ sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
+ urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz"],
+) # https://github.com/bazelbuild/bazel-skylib/releases
+http_archive(
+ name = "build_bazel_rules_swift",
+ sha256 = "9efe9699e9765e6b4a5e063e4a08f6b163cccaf0443f775d935baf5c3cd6ed0e",
+ urls = ["https://github.com/bazelbuild/rules_swift/releases/download/0.9.0/rules_swift.0.9.0.tar.gz"],
+) # https://github.com/bazelbuild/rules_swift/releases
+http_archive(
+ name = "com_github_apple_swift_swift_protobuf",
+ type = "zip",
+ strip_prefix = "swift-protobuf-1.5.0/",
+ urls = ["https://github.com/apple/swift-protobuf/archive/1.5.0.zip"],
+) # https://github.com/apple/swift-protobuf/releases
http_file(
name = "xctestrunner",
executable = 1,
- urls = ["https://github.com/google/xctestrunner/releases/download/0.2.6/ios_test_runner.par"],
-)
-load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies")
-apple_rules_dependencies()
+ urls = ["https://github.com/google/xctestrunner/releases/download/0.2.7/ios_test_runner.par"],
+) # https://github.com/google/xctestrunner/releases
+# Use `swift_rules_dependencies` to fetch the toolchains. With the
+# `http_archive` rules above, the following call will skip redefining them.
load("@build_bazel_rules_swift//swift:repositories.bzl", "swift_rules_dependencies")
swift_rules_dependencies()
diff --git a/configure.py b/configure.py
index 4814143f466..2120a4b27d6 100644
--- a/configure.py
+++ b/configure.py
@@ -33,13 +33,11 @@ except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
-_DEFAULT_CUDA_VERSION = '10.0'
+_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
+_DEFAULT_TENSORRT_VERSION = '5'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
-_DEFAULT_CUDA_PATH = '/usr/local/cuda'
-_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
-_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
- 'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
+
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
@@ -50,21 +48,24 @@ _DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
+_TF_CURRENT_BAZEL_VERSION = None
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
-# List of files to be configured for using Bazel on Apple platforms.
+# List of files to configure when building with Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
+ 'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD'
]
-if platform.machine() == 'ppc64le':
- _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
-else:
- _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
+# List of files to move when building for iOS.
+IOS_FILES = [
+ 'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
+ 'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
+]
class UserInputError(Exception):
@@ -199,9 +200,10 @@ def setup_python(environ_cp):
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
- python_bin_path = get_from_env_or_user_or_default(
- environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
- default_python_bin_path)
+ python_bin_path = get_from_env_or_user_or_default(environ_cp,
+ 'PYTHON_BIN_PATH',
+ ask_python_bin_path,
+ default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
@@ -291,9 +293,9 @@ def get_var(environ_cp,
Args:
environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- query_item: string for feature related to the variable, e.g. "Hadoop File
- System".
+ var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
+ query_item: string for feature related to the variable, e.g. "CUDA for
+ Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
@@ -337,8 +339,8 @@ def get_var(environ_cp,
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
- 'Current value is %s.' % (var_name, ', '.join(true_strings),
- ', '.join(false_strings), var))
+ 'Current value is %s.' %
+ (var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
@@ -374,9 +376,9 @@ def set_build_var(environ_cp,
Args:
environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- query_item: string for feature related to the variable, e.g. "Hadoop File
- System".
+ var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
+ query_item: string for feature related to the variable, e.g. "CUDA for
+ Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
@@ -385,14 +387,14 @@ def set_build_var(environ_cp,
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
- write_to_bazelrc(
- 'build:%s --define %s=true' % (bazel_config_name, option_name))
+ write_to_bazelrc('build:%s --define %s=true' %
+ (bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
- write_to_bazelrc(
- 'build:%s --define %s=true' % (bazel_config_name, option_name))
+ write_to_bazelrc('build:%s --define %s=true' %
+ (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
@@ -409,9 +411,9 @@ def set_action_env_var(environ_cp,
Args:
environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- query_item: string for feature related to the variable, e.g. "Hadoop File
- System".
+ var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
+ query_item: string for feature related to the variable, e.g. "CUDA for
+ Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
@@ -439,6 +441,9 @@ def convert_version_to_int(version):
"""
version = version.split('-')[0]
version_segments = version.split('.')
+ # Treat "0.24" as "0.24.0"
+ if len(version_segments) == 2:
+ version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
@@ -451,8 +456,8 @@ def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
- min_version: string for minimum bazel version.
- max_version: string for maximum bazel version.
+ min_version: string for minimum bazel version (must exist!).
+ max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
@@ -565,7 +570,7 @@ def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
Args:
environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
+ var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
@@ -658,9 +663,9 @@ def prompt_loop_or_load_from_env(environ_cp,
print(error_msg % val)
environ_cp[var_name] = ''
else:
- raise UserInputError(
- 'Invalid %s setting was provided %d times in a row. '
- 'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
+ raise UserInputError('Invalid %s setting was provided %d times in a row. '
+ 'Assuming to be a scripting mistake.' %
+ (var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
@@ -669,8 +674,8 @@ def prompt_loop_or_load_from_env(environ_cp,
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
- default_ndk_path = cygpath(
- '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
+ default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
+ environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
@@ -689,8 +694,9 @@ def create_android_ndk_rule(environ_cp):
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
- write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
- check_ndk_level(android_ndk_home_path))
+ write_action_env_to_bazelrc(
+ 'ANDROID_NDK_API_LEVEL',
+ get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
@@ -757,8 +763,10 @@ def create_android_sdk_rule(environ_cp):
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
-def check_ndk_level(android_ndk_home_path):
- """Check the revision number of an Android NDK path."""
+def get_ndk_api_level(environ_cp, android_ndk_home_path):
+ """Gets the appropriate NDK API level to use for the provided Android NDK path."""
+
+ # First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
@@ -767,16 +775,40 @@ def check_ndk_level(android_ndk_home_path):
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
- ndk_api_level = revision.group(1)
+ ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
- if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
- print('WARNING: The API level of the NDK in %s is %s, which is not '
+ if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
+ print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
- 'errors.\n' % (android_ndk_home_path, ndk_api_level,
+ 'errors.\n' % (android_ndk_home_path, ndk_version,
_SUPPORTED_ANDROID_NDK_VERSIONS))
- return ndk_api_level
+
+ # Now grab the NDK API level to use. Note that this is different from the
+ # SDK API level, as the NDK API level is effectively the *min* target SDK
+ # version.
+ platforms = os.path.join(android_ndk_home_path, 'platforms')
+ api_levels = sorted(os.listdir(platforms))
+ api_levels = [
+ x.replace('android-', '') for x in api_levels if 'android-' in x
+ ]
+
+ def valid_api_level(api_level):
+ return os.path.exists(
+ os.path.join(android_ndk_home_path, 'platforms',
+ 'android-' + api_level))
+
+ android_ndk_api_level = prompt_loop_or_load_from_env(
+ environ_cp,
+ var_name='ANDROID_NDK_API_LEVEL',
+ var_default='18', # 18 is required for GPU acceleration.
+ ask_for_var=('Please specify the (min) Android NDK API level to use. '
+ '[Available levels: %s]') % api_levels,
+ check_success=valid_api_level,
+ error_msg='Android-%s is not present in the NDK path.')
+
+ return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
@@ -823,149 +855,39 @@ def reformat_version_sequence(version_str, sequence_count):
return '.'.join(v[:sequence_count])
+def set_tf_cuda_paths(environ_cp):
+ """Set TF_CUDA_PATHS."""
+ ask_cuda_paths = (
+ 'Please specify the comma-separated list of base paths to look for CUDA '
+ 'libraries and headers. [Leave empty to use the default]: ')
+ tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
+ ask_cuda_paths, '')
+ if tf_cuda_paths:
+ environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
+
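+# Example usage of the TF_CUDA_PATHS prompt above (hypothetical paths):
+#   TF_CUDA_PATHS=/usr/local/cuda-10.0,/usr ./configure
+# Pre-seeding the variable this way makes both prefixes get searched for CUDA
+# libraries and headers without prompting interactively.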
+
def set_tf_cuda_version(environ_cp):
- """Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION."""
+ """Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
-
- for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
- # Configure the Cuda SDK version to use.
- tf_cuda_version = get_from_env_or_user_or_default(
- environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
- tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)
-
- # Find out where the CUDA toolkit is installed
- default_cuda_path = _DEFAULT_CUDA_PATH
- if is_windows() or is_cygwin():
- default_cuda_path = cygpath(
- environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
- elif is_linux():
- # If the default doesn't exist, try an alternative default.
- if (not os.path.exists(default_cuda_path)
- ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
- default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
- ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
- ' installed. Refer to README.md for more details. '
- '[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
- cuda_toolkit_path = get_from_env_or_user_or_default(
- environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
- if is_windows() or is_cygwin():
- cuda_toolkit_path = cygpath(cuda_toolkit_path)
-
- if is_windows():
- cuda_rt_lib_paths = ['lib/x64/cudart.lib']
- elif is_linux():
- cuda_rt_lib_paths = [
- '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
- 'lib64',
- 'lib/powerpc64le-linux-gnu',
- 'lib/x86_64-linux-gnu',
- ]
- ]
- elif is_macos():
- cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]
-
- cuda_toolkit_paths_full = [
- os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
- ]
- if any(os.path.exists(x) for x in cuda_toolkit_paths_full):
- break
-
- # Reset and retry
- print('Invalid path to CUDA %s toolkit. %s cannot be found' %
- (tf_cuda_version, cuda_toolkit_paths_full))
- environ_cp['TF_CUDA_VERSION'] = ''
- environ_cp['CUDA_TOOLKIT_PATH'] = ''
-
- else:
- raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
-
- # Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
- environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
- write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
+ tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
+ 'TF_CUDA_VERSION',
+ ask_cuda_version,
+ _DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
- write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
- """Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION."""
+ """Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
-
- for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
- tf_cudnn_version = get_from_env_or_user_or_default(
- environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
- _DEFAULT_CUDNN_VERSION)
- tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)
-
- default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
- ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
- 'installed. Refer to README.md for more details. [Default'
- ' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
- cudnn_install_path = get_from_env_or_user_or_default(
- environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)
-
- # Result returned from "read" will be used unexpanded. That make "~"
- # unusable. Going through one more level of expansion to handle that.
- cudnn_install_path = os.path.realpath(
- os.path.expanduser(cudnn_install_path))
- if is_windows() or is_cygwin():
- cudnn_install_path = cygpath(cudnn_install_path)
-
- if is_windows():
- cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
- cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
- elif is_linux():
- cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
- cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
- elif is_macos():
- cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
- cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version
-
- cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
- cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
- cuda_dnn_lib_alt_path)
- if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
- cuda_dnn_lib_alt_path_full):
- break
-
- # Try another alternative for Linux
- if is_linux():
- ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
- cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
- cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
- cudnn_path_from_ldconfig)
- if cudnn_path_from_ldconfig:
- cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
- if os.path.exists(
- '%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
- cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
- break
-
- # Reset and Retry
- print(
- 'Invalid path to cuDNN %s toolkit. None of the following files can be '
- 'found:' % tf_cudnn_version)
- print(cuda_dnn_lib_path_full)
- print(cuda_dnn_lib_alt_path_full)
- if is_linux():
- print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))
-
- environ_cp['TF_CUDNN_VERSION'] = ''
- else:
- raise UserInputError('Invalid TF_CUDNN setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
-
- # Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
- environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
- write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
+ tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
+ 'TF_CUDNN_VERSION',
+ ask_cudnn_version,
+ _DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
- write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
@@ -997,252 +919,38 @@ def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
return cudnn_ok and cuda_ok
-def set_tf_tensorrt_install_path(environ_cp):
- """Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.
-
- Adapted from code contributed by Sami Kama (https://github.com/samikama).
-
- Args:
- environ_cp: copy of the os.environ.
-
- Raises:
- ValueError: if this method was called under non-Linux platform.
- UserInputError: if user has provided invalid input multiple times.
- """
+def set_tf_tensorrt_version(environ_cp):
+ """Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
- # Ask user whether to add TensorRT support.
- if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
- False))) != '1':
+ if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
- for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
- ask_tensorrt_path = (r'Please specify the location where TensorRT is '
- 'installed. [Default is %s]:') % (
- _DEFAULT_TENSORRT_PATH_LINUX)
- trt_install_path = get_from_env_or_user_or_default(
- environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
- _DEFAULT_TENSORRT_PATH_LINUX)
-
- # Result returned from "read" will be used unexpanded. That make "~"
- # unusable. Going through one more level of expansion to handle that.
- trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))
-
- def find_libs(search_path):
- """Search for libnvinfer.so in "search_path"."""
- fl = set()
- if os.path.exists(search_path) and os.path.isdir(search_path):
- fl.update([
- os.path.realpath(os.path.join(search_path, x))
- for x in os.listdir(search_path)
- if 'libnvinfer.so' in x
- ])
- return fl
-
- possible_files = find_libs(trt_install_path)
- possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
- possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
- cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
- cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
- nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
- highest_ver = [0, None, None]
-
- for lib_file in possible_files:
- if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
- matches = nvinfer_pattern.search(lib_file)
- if not matches.groups():
- continue
- ver_str = matches.group(1)
- ver = convert_version_to_int(ver_str) if len(ver_str) else 0
- if ver > highest_ver[0]:
- highest_ver = [ver, ver_str, lib_file]
- if highest_ver[1] is not None:
- trt_install_path = os.path.dirname(highest_ver[2])
- tf_tensorrt_version = highest_ver[1]
- break
-
- # Try another alternative from ldconfig.
- ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
- ldconfig_output = run_shell([ldconfig_bin, '-p'])
- search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
- ldconfig_output)
- if search_result:
- libnvinfer_path_from_ldconfig = search_result.group(2)
- if os.path.exists(libnvinfer_path_from_ldconfig):
- if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
- cudnn_ver):
- trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
- tf_tensorrt_version = search_result.group(1)
- break
-
- # Reset and Retry
- if possible_files:
- print('TensorRT libraries found in one the following directories',
- 'are not compatible with selected cuda and cudnn installations')
- print(trt_install_path)
- print(os.path.join(trt_install_path, 'lib'))
- print(os.path.join(trt_install_path, 'lib64'))
- if search_result:
- print(libnvinfer_path_from_ldconfig)
- else:
- print(
- 'Invalid path to TensorRT. None of the following files can be found:')
- print(trt_install_path)
- print(os.path.join(trt_install_path, 'lib'))
- print(os.path.join(trt_install_path, 'lib64'))
- if search_result:
- print(libnvinfer_path_from_ldconfig)
-
- else:
- raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
-
- # Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
- environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
- write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
+ ask_tensorrt_version = (
+ 'Please specify the TensorRT version you want to use. '
+ '[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
+ tf_tensorrt_version = get_from_env_or_user_or_default(
+ environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
+ _DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
- write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
-def set_tf_nccl_install_path(environ_cp):
- """Set NCCL_INSTALL_PATH, NCCL_HDR_PATH and TF_NCCL_VERSION.
-
- Args:
- environ_cp: copy of the os.environ.
-
- Raises:
- ValueError: if this method was called under non-Linux platform.
- UserInputError: if user has provided invalid input multiple times.
- """
+def set_tf_nccl_version(environ_cp):
+ """Set TF_NCCL_VERSION."""
if not is_linux():
- raise ValueError('Currently NCCL is only supported on Linux platforms.')
+ raise ValueError('Currently NCCL is only supported on Linux platform.')
+
+ if 'TF_NCCL_VERSION' in environ_cp:
+ return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
- '[Default is to use https://github.com/nvidia/nccl]: ')
-
- for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
- tf_nccl_version = get_from_env_or_user_or_default(
- environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, '')
-
- if not tf_nccl_version:
- break # No need to get install path, building the open source code.
-
- tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)
-
- # Look with ldconfig first if we can find the library in paths
- # like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
- # include directory. This is where the NCCL .deb packages install them.
-
- # First check to see if NCCL is in the ldconfig.
- # If its found, use that location.
- if is_linux():
- ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
- nccl2_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
- nccl2_path_from_ldconfig = re.search('.*libnccl.so .* => (.*)',
- nccl2_path_from_ldconfig)
- if nccl2_path_from_ldconfig:
- nccl2_path_from_ldconfig = nccl2_path_from_ldconfig.group(1)
- if os.path.exists('%s.%s' % (nccl2_path_from_ldconfig, tf_nccl_version)):
- nccl_install_path = os.path.dirname(nccl2_path_from_ldconfig)
- print('NCCL libraries found in ' + nccl2_path_from_ldconfig)
-
- # Check if this is the main system lib location
- if re.search('.*linux-gnu', nccl_install_path):
- trunc_nccl_install_path = '/usr'
- print('This looks like a system path.')
- else:
- trunc_nccl_install_path = nccl_install_path + '/..'
-
- # Look for header
- nccl_hdr_path = trunc_nccl_install_path + '/include'
- print('Assuming NCCL header path is ' + nccl_hdr_path)
- if os.path.exists(nccl_hdr_path + '/nccl.h'):
- # Set NCCL_INSTALL_PATH
- environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
- write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
-
- # Set NCCL_HDR_PATH
- environ_cp['NCCL_HDR_PATH'] = nccl_hdr_path
- write_action_env_to_bazelrc('NCCL_HDR_PATH', nccl_hdr_path)
- break
- else:
- print(
- 'The header for NCCL2 cannot be found. Please install the libnccl-dev package.'
- )
- else:
- print('NCCL2 is listed by ldconfig but the library is not found. '
- 'Your ldconfig is out of date. Please run sudo ldconfig.')
- else:
- # NCCL is not found in ldconfig. Ask the user for the location.
- default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
- ask_nccl_path = (
- r'Please specify the location where NCCL %s library is '
- 'installed. Refer to README.md for more details. [Default '
- 'is %s]:') % (tf_nccl_version, default_nccl_path)
- nccl_install_path = get_from_env_or_user_or_default(
- environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)
-
- # Result returned from "read" will be used unexpanded. That make "~"
- # unusable. Going through one more level of expansion to handle that.
- nccl_install_path = os.path.realpath(
- os.path.expanduser(nccl_install_path))
- if is_windows() or is_cygwin():
- nccl_install_path = cygpath(nccl_install_path)
-
- nccl_lib_path = ''
- if is_windows():
- nccl_lib_path = 'lib/x64/nccl.lib'
- elif is_linux():
- nccl_lib_filename = 'libnccl.so.%s' % tf_nccl_version
- nccl_lpath = '%s/lib/%s' % (nccl_install_path, nccl_lib_filename)
- if not os.path.exists(nccl_lpath):
- for relative_path in NCCL_LIB_PATHS:
- path = '%s/%s%s' % (nccl_install_path, relative_path,
- nccl_lib_filename)
- if os.path.exists(path):
- print('NCCL found at ' + path)
- nccl_lib_path = path
- break
- else:
- nccl_lib_path = nccl_lpath
- elif is_macos():
- nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version
-
- nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
- nccl_hdr_path = os.path.join(
- os.path.dirname(nccl_lib_path), '../include/nccl.h')
- print('Assuming NCCL header path is ' + nccl_hdr_path)
- if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
- # Set NCCL_INSTALL_PATH
- environ_cp['NCCL_INSTALL_PATH'] = os.path.dirname(nccl_lib_path)
- write_action_env_to_bazelrc('NCCL_INSTALL_PATH',
- os.path.dirname(nccl_lib_path))
-
- # Set NCCL_HDR_PATH
- environ_cp['NCCL_HDR_PATH'] = os.path.dirname(nccl_hdr_path)
- write_action_env_to_bazelrc('NCCL_HDR_PATH',
- os.path.dirname(nccl_hdr_path))
- break
-
- # Reset and Retry
- print(
- 'Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
- 'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
- nccl_hdr_path))
-
- environ_cp['TF_NCCL_VERSION'] = ''
- else:
- raise UserInputError('Invalid TF_NCCL setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
-
- # Set TF_NCCL_VERSION
+ '[Leave empty to use http://github.com/nvidia/nccl]: ')
+ tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
+ 'TF_NCCL_VERSION',
+ ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
- write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
-
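(For reference: the two version setters above share one pattern, namely take the
value from the environment if it is set, otherwise prompt, otherwise fall back
to a default, and record the result back into environ_cp. Below is a minimal
self-contained sketch of that pattern; the helper name and prompt behavior are
illustrative assumptions, not the actual configure.py helper.)

# Sketch of the env-or-prompt-or-default pattern used by the setters above.
def get_version_or_default(environ_cp, var_name, prompt, default):
  value = environ_cp.get(var_name)
  if not value:
    value = input(prompt) or default  # empty input falls back to the default
  environ_cp[var_name] = value
  return value

# Example: env = {'TF_NCCL_VERSION': '2.4'}; get_version_or_default(env,
# 'TF_NCCL_VERSION', 'NCCL version: ', '') returns '2.4' without prompting.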
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
@@ -1305,11 +1013,14 @@ def set_tf_cuda_compute_capabilities(environ_cp):
all_valid = False
else:
ver = float(m.group(0))
- if ver < 3.5:
- print('ERROR: TensorFlow only supports CUDA compute capabilities 3.5 '
+ if ver < 3.0:
+ print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
+ if ver < 3.5:
+ print('WARNING: XLA does not support CUDA compute capabilities '
+ 'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
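(For reference, the capability check above now rejects anything below 3.0 and
only warns, on XLA's behalf, about 3.0 through 3.4. A small self-contained
sketch of that check follows; it mirrors the logic shown in the hunk, not
configure.py itself.)

import re

def check_compute_capabilities(capabilities):
  """Return True when every 'major.minor' entry is supported (>= 3.0)."""
  all_valid = True
  for capability in capabilities.split(','):
    m = re.match('[0-9]+\\.[0-9]+', capability)
    if not m:
      print('Invalid compute capability: %s' % capability)
      all_valid = False
      continue
    ver = float(m.group(0))
    if ver < 3.0:
      print('ERROR: compute capability %s is below the supported 3.0.' % ver)
      all_valid = False
    elif ver < 3.5:
      print('WARNING: XLA needs compute capability 3.5 or higher.')
  return all_valid

print(check_compute_capabilities('3.0,3.5,7.0'))  # warns for 3.0, prints True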
@@ -1328,10 +1039,8 @@ def set_other_cuda_vars(environ_cp):
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
- write_to_bazelrc('test --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
- write_to_bazelrc('test --config=cuda')
def set_host_cxx_compiler(environ_cp):
@@ -1495,15 +1204,16 @@ def set_other_mpi_vars(environ_cp):
'Cannot find the MPI library file in %s/lib or %s/lib64 or %s/lib32' %
(mpi_home, mpi_home, mpi_home))
+
def system_specific_test_config(env):
- """Add default test flags required for TF tests to bazelrc."""
+ """Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
write_to_bazelrc(
'test --test_tag_filters=-benchmark-test,-no_oss,-oss_serial')
write_to_bazelrc('test --build_tag_filters=-benchmark-test,-no_oss')
if is_windows():
- if env.get('TF_NEED_CUDA', None) == 1:
+ if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc(
'test --test_tag_filters=-no_windows,-no_windows_gpu,-no_gpu')
write_to_bazelrc(
@@ -1515,7 +1225,7 @@ def system_specific_test_config(env):
write_to_bazelrc('test --test_tag_filters=-gpu,-nomac,-no_mac')
write_to_bazelrc('test --build_tag_filters=-gpu,-nomac,-no_mac')
elif is_linux():
- if env.get('TF_NEED_CUDA', None) == 1:
+ if env.get('TF_NEED_CUDA', None) == '1':
write_to_bazelrc('test --test_tag_filters=-no_gpu')
write_to_bazelrc('test --build_tag_filters=-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
@@ -1549,7 +1259,8 @@ def set_windows_build_flags(environ_cp):
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
- 'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN')
+ 'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
+ '--copt=-DNOGDI --host_copt=-DNOGDI')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
# The host and target platforms are the same in Windows build. So we don't
@@ -1575,26 +1286,90 @@ def config_info_line(name, help_text):
print('\t--config=%-12s\t# %s' % (name, help_text))
-def configure_apple_bazel_rules():
- """Configures Bazel rules for building on Apple platforms.
+def configure_ios():
+ """Configures TensorFlow for iOS builds.
- Enables analyzing and building Apple Bazel rules on Apple platforms. This
- function will only be executed if `is_macos()` is true.
+ This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
- for filepath in APPLE_BAZEL_FILES:
+ if _TF_CURRENT_BAZEL_VERSION is None or _TF_CURRENT_BAZEL_VERSION < 23000:
print(
- 'Configuring %s file to analyze and build Bazel rules on Apple platforms.'
- % filepath)
+ 'Building Bazel rules on Apple platforms requires Bazel 0.23 or later.')
+ for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
- os.rename(existing_filepath, renamed_filepath)
+ symlink_force(existing_filepath, renamed_filepath)
+ for filepath in IOS_FILES:
+ filename = os.path.basename(filepath)
+ new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
+ symlink_force(filepath, new_filepath)
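(configure_ios() above relies on a symlink_force() helper that is defined
outside this hunk. A plausible sketch of such a helper, stated as an assumption
rather than the actual configure.py implementation: create the symlink and
replace a stale one if it already exists.)

import errno
import os

def symlink_force(target, link_name):
  """Create link_name -> target, replacing an existing link if necessary."""
  try:
    os.symlink(target, link_name)
  except OSError as e:
    if e.errno == errno.EEXIST:
      os.remove(link_name)
      os.symlink(target, link_name)
    else:
      raise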
+
+
+def validate_cuda_config(environ_cp):
+ """Run find_cuda_config.py and return cuda_toolkit_path, or None."""
+
+ def maybe_encode_env(env):
+ """Encodes unicode in env to str on Windows python 2.x."""
+ if not is_windows() or sys.version_info[0] != 2:
+ return env
+ for k, v in env.items():
+ if isinstance(k, unicode):
+ k = k.encode('ascii')
+ if isinstance(v, unicode):
+ v = v.encode('ascii')
+ env[k] = v
+ return env
+
+ cuda_libraries = ['cuda', 'cudnn']
+ if is_linux():
+ if int(environ_cp.get('TF_NEED_TENSORRT', False)):
+ cuda_libraries.append('tensorrt')
+ if environ_cp.get('TF_NCCL_VERSION', None):
+ cuda_libraries.append('nccl')
+
+ proc = subprocess.Popen(
+ [environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
+ cuda_libraries,
+ stdout=subprocess.PIPE,
+ env=maybe_encode_env(environ_cp))
+
+ if proc.wait():
+ # Errors from find_cuda_config.py were sent to stderr.
+ print('Asking for detailed CUDA configuration...\n')
+ return False
+
+ config = dict(
+ tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
+
+ print('Found CUDA %s in:' % config['cuda_version'])
+ print(' %s' % config['cuda_library_dir'])
+ print(' %s' % config['cuda_include_dir'])
+
+ print('Found cuDNN %s in:' % config['cudnn_version'])
+ print(' %s' % config['cudnn_library_dir'])
+ print(' %s' % config['cudnn_include_dir'])
+
+ if 'tensorrt_version' in config:
+ print('Found TensorRT %s in:' % config['tensorrt_version'])
+ print(' %s' % config['tensorrt_library_dir'])
+ print(' %s' % config['tensorrt_include_dir'])
+
+ if config.get('nccl_version', None):
+ print('Found NCCL %s in:' % config['nccl_version'])
+ print(' %s' % config['nccl_library_dir'])
+ print(' %s' % config['nccl_include_dir'])
+
+ print('\n')
+
+ environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
+ return True
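(validate_cuda_config() above delegates path discovery to
third_party/gpus/find_cuda_config.py and only parses its stdout, which is one
"key: value" pair per line. A tiny sketch of that parsing step; the sample
output below is made up for illustration, not captured from a real run.)

sample_stdout = [
    b'cuda_version: 10.0\n',
    b'cuda_library_dir: /usr/local/cuda/lib64\n',
    b'cudnn_version: 7\n',
]
config = dict(
    tuple(line.decode('ascii').rstrip().split(': ')) for line in sample_stdout)
print(config['cuda_version'])  # prints: 10.0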
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
+ global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
@@ -1611,7 +1386,8 @@ def main():
# environment variables.
environ_cp = dict(os.environ)
- check_bazel_version('0.19.0', '0.23.0')
+ current_bazel_version = check_bazel_version('0.24.1', '0.25.2')
+ _TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
@@ -1633,7 +1409,7 @@ def main():
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
- environ_cp['TF_CONFIGURE_APPLE_BAZEL_RULES'] = '0'
+ environ_cp['TF_CONFIGURE_IOS'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
@@ -1666,11 +1442,43 @@ def main():
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
- set_tf_cuda_version(environ_cp)
- set_tf_cudnn_version(environ_cp)
- if is_linux():
- set_tf_tensorrt_install_path(environ_cp)
- set_tf_nccl_install_path(environ_cp)
+
+ set_action_env_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT', False)
+
+ environ_save = dict(environ_cp)
+ for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
+
+ if validate_cuda_config(environ_cp):
+ cuda_env_names = [
+ 'TF_CUDA_VERSION', 'TF_CUBLAS_VERSION', 'TF_CUDNN_VERSION',
+ 'TF_TENSORRT_VERSION', 'TF_NCCL_VERSION', 'TF_CUDA_PATHS',
+ # Items below are for backwards compatibility when not using
+ # TF_CUDA_PATHS.
+ 'CUDA_TOOLKIT_PATH', 'CUDNN_INSTALL_PATH', 'NCCL_INSTALL_PATH',
+ 'NCCL_HDR_PATH', 'TENSORRT_INSTALL_PATH'
+ ]
+ # Note: set_action_env_var above already writes to bazelrc.
+ for name in cuda_env_names:
+ if name in environ_cp:
+ write_action_env_to_bazelrc(name, environ_cp[name])
+ break
+
+      # The CUDA config could not be validated: restore the environment saved
+      # above before re-running the version prompts below.
+ environ_cp = dict(environ_save)
+
+ set_tf_cuda_version(environ_cp)
+ set_tf_cudnn_version(environ_cp)
+ if is_linux():
+ set_tf_tensorrt_version(environ_cp)
+ set_tf_nccl_version(environ_cp)
+
+ set_tf_cuda_paths(environ_cp)
+
+ else:
+ raise UserInputError(
+          'Invalid CUDA settings were provided %d '
+ 'times in a row. Assuming to be a scripting mistake.' %
+ _DEFAULT_PROMPT_ASK_ATTEMPTS)
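(The retry block above boils down to: keep a pristine copy of the environment,
try to validate, and on failure roll back before prompting again. Here is a
condensed sketch with hypothetical stand-ins validate() and
prompt_for_cuda_settings() for validate_cuda_config() and the set_tf_*()
prompts; the attempt count is illustrative.)

def configure_cuda(environ_cp, validate, prompt_for_cuda_settings, attempts=10):
  environ_save = dict(environ_cp)
  for _ in range(attempts):
    if validate(environ_cp):
      return environ_cp
    environ_cp.clear()
    environ_cp.update(environ_save)   # roll back partially applied settings
    prompt_for_cuda_settings(environ_cp)
  raise ValueError('Invalid CUDA settings were provided %d times in a row.' %
                   attempts)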
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
@@ -1688,7 +1496,6 @@ def main():
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
- write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
@@ -1701,7 +1508,6 @@ def main():
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
write_to_bazelrc('build --config=download_clang')
- write_to_bazelrc('test --config=download_clang')
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
@@ -1738,13 +1544,9 @@ def main():
system_specific_test_config(os.environ)
- if get_var(
- environ_cp, 'TF_CONFIGURE_APPLE_BAZEL_RULES',
- 'Configure Bazel rules for Apple platforms', False,
- ('Would you like to configure Bazel rules for building on Apple platforms?'
- ), 'Configuring Bazel rules for Apple platforms.',
- 'Not configuring Bazel rules for Apple platforms.'):
- configure_apple_bazel_rules()
+ set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
+ if environ_cp.get('TF_CONFIGURE_IOS') == '1':
+ configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index 8f05e653ecc..a04ddf9f8a1 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -15,6 +15,7 @@ exports_files([
"leakr_file_type_recipe.ftrcp",
])
+load("//tensorflow:tensorflow.bzl", "VERSION")
load("//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
load("//tensorflow:tensorflow.bzl", "tf_custom_op_library_additional_deps_impl")
load("//tensorflow:tensorflow.bzl", "tf_native_cc_binary")
@@ -163,7 +164,7 @@ config_setting(
name = "macos",
values = {
"apple_platform_type": "macos",
- "cpu": "darwin_x86_64",
+ "cpu": "darwin",
},
visibility = ["//visibility:public"],
)
@@ -183,6 +184,12 @@ config_setting(
visibility = ["//visibility:public"],
)
+config_setting(
+ name = "linux_aarch64",
+ values = {"cpu": "aarch64"},
+ visibility = ["//visibility:public"],
+)
+
config_setting(
name = "linux_x86_64",
values = {"cpu": "k8"},
@@ -325,6 +332,18 @@ config_setting(
visibility = ["//visibility:public"],
)
+config_setting(
+ name = "macos_with_framework_shared_object",
+ define_values = {
+ "framework_shared_object": "true",
+ },
+ values = {
+ "apple_platform_type": "macos",
+ "cpu": "darwin",
+ },
+ visibility = ["//visibility:public"],
+)
+
config_setting(
name = "using_cuda_clang",
define_values = {
@@ -407,9 +426,15 @@ config_setting(
values = {"cpu": "x64_windows"},
)
+# DO NOT ADD ANY NEW EXCEPTIONS TO THIS LIST!
+# Instead, please use public APIs or public build rules TF provides.
+# If you need functionality that is not exposed, we will work with you to expand our public APIs.
package_group(
name = "internal",
- packages = ["//tensorflow/..."],
+ packages = [
+ "//tensorflow/...",
+ "//tensorflow_estimator/python/estimator/...",
+ ],
)
load(
@@ -467,7 +492,7 @@ cc_library(
# projects building with Bazel and importing TensorFlow as a dependency will not
# depend on libtensorflow_framework.so unless they opt in.
tf_cc_shared_object(
- name = "libtensorflow_framework.so",
+ name = "tensorflow_framework",
framework_so = [],
linkopts = select({
"//tensorflow:macos": [],
@@ -477,8 +502,11 @@ tf_cc_shared_object(
],
}),
linkstatic = 1,
+ per_os_targets = True,
+ soversion = VERSION,
visibility = ["//visibility:public"],
deps = [
+ "//tensorflow/cc/saved_model:loader_lite_impl",
"//tensorflow/core:core_cpu_impl",
"//tensorflow/core:framework_internal_impl",
"//tensorflow/core:gpu_runtime_impl",
@@ -508,7 +536,6 @@ tf_cc_shared_object(
linkopts = select({
"//tensorflow:macos": [
"-Wl,-exported_symbols_list,$(location //tensorflow/c:exported_symbols.lds)",
- "-Wl,-install_name,@rpath/libtensorflow.so",
],
"//tensorflow:windows": [
],
@@ -518,6 +545,7 @@ tf_cc_shared_object(
],
}),
per_os_targets = True,
+ soversion = VERSION,
visibility = ["//visibility:public"],
# add win_def_file for tensorflow
win_def_file = select({
@@ -548,6 +576,7 @@ tf_cc_shared_object(
],
}),
per_os_targets = True,
+ soversion = VERSION,
visibility = ["//visibility:public"],
# add win_def_file for tensorflow_cc
win_def_file = select({
diff --git a/tensorflow/api_template.__init__.py b/tensorflow/api_template.__init__.py
index 7bd6b722398..feaf805f684 100644
--- a/tensorflow/api_template.__init__.py
+++ b/tensorflow/api_template.__init__.py
@@ -12,7 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Bring in all of the public TensorFlow interface into this module."""
+"""
+Top-level module of TensorFlow. By convention, we refer to this module as
+`tf` instead of `tensorflow`, following the common practice of importing
+TensorFlow via the command `import tensorflow as tf`.
+
+The primary function of this module is to import all of the public TensorFlow
+interfaces into a single place. The interfaces themselves are located in
+sub-modules, as described below.
+
+Note that the file `__init__.py` in the TensorFlow source code tree is actually
+only a placeholder to enable test cases to run. The TensorFlow build replaces
+this file with a file generated from [`api_template.__init__.py`](https://www.github.com/tensorflow/tensorflow/blob/master/tensorflow/api_template.__init__.py).
+"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
@@ -20,10 +32,13 @@ from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
+import logging as _logging
import os as _os
import site as _site
import sys as _sys
+from tensorflow.python.tools import module_util as _module_util
+
# API IMPORTS PLACEHOLDER
# Make sure directory containing top level submodules is in
@@ -37,25 +52,29 @@ if not hasattr(_current_module, '__path__'):
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
-# pylint: disable=g-bad-import-order
-from tensorflow.python.tools import component_api_helper as _component_api_helper
-_component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=('tensorboard.summary._tf.summary'),
- error_msg="Limited tf.summary API due to missing TensorBoard installation")
-_component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=(
- 'tensorflow_estimator.python.estimator.api._v2.estimator'))
+# Hook external TensorFlow modules.
+try:
+ from tensorboard.summary._tf import summary
+ _current_module.__path__ = (
+ [_module_util.get_parent_dir(summary)] + _current_module.__path__)
+except ImportError:
+ _logging.warning(
+ "Limited tf.summary API due to missing TensorBoard installation.")
+
+try:
+ from tensorflow_estimator.python.estimator.api._v2 import estimator
+ _current_module.__path__ = (
+ [_module_util.get_parent_dir(estimator)] + _current_module.__path__)
+except ImportError:
+ pass
+
+try:
+ from tensorflow.python.keras.api._v2 import keras
+ _current_module.__path__ = (
+ [_module_util.get_parent_dir(keras)] + _current_module.__path__)
+except ImportError:
+ pass
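(The three try/except hooks above share one pattern: import an optional package
and, if it is present, splice its parent directory into this module's __path__
so submodule lookups can find it. The condensed sketch below re-implements the
assumed behavior of _module_util.get_parent_dir(), i.e. the directory that
contains the imported package; the wrapper name is illustrative.)

import importlib
import logging
import os

def _get_parent_dir(module):
  return os.path.abspath(os.path.join(os.path.dirname(module.__file__), '..'))

def _hook_optional_module(current_module, module_name, warning=None):
  try:
    module = importlib.import_module(module_name)
    current_module.__path__ = (
        [_get_parent_dir(module)] + list(current_module.__path__))
  except ImportError:
    if warning:
      logging.warning(warning)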
-if not hasattr(_current_module, 'estimator'):
- _component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=(
- 'tensorflow_estimator.python.estimator.api.estimator'))
-_component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=('tensorflow.python.keras.api._v2.keras'))
# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
diff --git a/tensorflow/api_template_v1.__init__.py b/tensorflow/api_template_v1.__init__.py
index 5eb25a81b7f..a83ff3a16c2 100644
--- a/tensorflow/api_template_v1.__init__.py
+++ b/tensorflow/api_template_v1.__init__.py
@@ -26,30 +26,44 @@ import sys as _sys
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
+from tensorflow.python.tools import module_util as _module_util
# API IMPORTS PLACEHOLDER
-from tensorflow.python.tools import component_api_helper as _component_api_helper
-_component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=(
- 'tensorflow_estimator.python.estimator.api._v1.estimator'))
-
+# Make sure directory containing top level submodules is in
+# the __path__ so that "from tensorflow.foo import bar" works.
+# The 'bitwise' module is used here only to locate the generated API
+# directory; any generated API submodule would work equally well.
+_API_MODULE = bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
-if not hasattr(_current_module, 'estimator'):
- _component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=(
- 'tensorflow_estimator.python.estimator.api.estimator'))
-_component_api_helper.package_hook(
- parent_package_str=__name__,
- child_package_str=('tensorflow.python.keras.api._v1.keras'))
+_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
+if not hasattr(_current_module, '__path__'):
+ __path__ = [_tf_api_dir]
+elif _tf_api_dir not in __path__:
+ __path__.append(_tf_api_dir)
+
+# Hook external TensorFlow modules.
+try:
+ from tensorflow_estimator.python.estimator.api._v1 import estimator
+ _current_module.__path__ = (
+ [_module_util.get_parent_dir(estimator)] + _current_module.__path__)
+except ImportError:
+ pass
+
+try:
+ from tensorflow.python.keras.api._v1 import keras
+ _current_module.__path__ = (
+ [_module_util.get_parent_dir(keras)] + _current_module.__path__)
+except ImportError:
+ pass
+
+
from tensorflow.python.util.lazy_loader import LazyLoader # pylint: disable=g-import-not-at-top
_CONTRIB_WARNING = """
-WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.
+The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
+ * https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
"""
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib',
@@ -65,17 +79,6 @@ from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-
# The 'app' module will be imported as part of the placeholder section above.
app.flags = flags # pylint: disable=undefined-variable
-# Also use 'app' module (choice is arbitrary) to derive the API directory below.
-_API_MODULE = app # pylint: disable=undefined-variable
-
-# Make sure directory containing top level submodules is in
-# the __path__ so that "from tensorflow.foo import bar" works.
-_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
-if not hasattr(_current_module, '__path__'):
- __path__ = [_tf_api_dir]
-elif _tf_api_dir not in __path__:
- __path__.append(_tf_api_dir)
-
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
@@ -117,7 +120,11 @@ if _running_from_pip_package():
# pylint: disable=undefined-variable
try:
del python
+ if '__all__' in vars():
+ vars()['__all__'].remove('python')
del core
+ if '__all__' in vars():
+ vars()['__all__'].remove('core')
except NameError:
# Don't fail if these modules are not available.
# For e.g. this file will be originally placed under tensorflow/_api/v1 which
@@ -128,6 +135,8 @@ except NameError:
# others don't exist.
try:
del compiler
+ if '__all__' in vars():
+ vars()['__all__'].remove('compiler')
except NameError:
pass
# pylint: enable=undefined-variable
diff --git a/tensorflow/c/BUILD b/tensorflow/c/BUILD
index 3c43467b510..f2ca79f57fc 100644
--- a/tensorflow/c/BUILD
+++ b/tensorflow/c/BUILD
@@ -21,6 +21,7 @@ filegroup(
srcs = [
"c_api.h",
"c_api_experimental.h",
+ "tf_attrtype.h",
],
visibility = ["//tensorflow:__subpackages__"],
)
@@ -39,14 +40,19 @@ filegroup(
"python_api.h",
"*test*",
],
- ),
+ ) + [
+ "//tensorflow/cc:srcs",
+ "//tensorflow/core/distributed_runtime:server_lib.h",
+ ],
visibility = ["//visibility:public"],
)
tf_cuda_library(
name = "c_api_internal",
- srcs = ["c_api.h"],
- hdrs = ["c_api_internal.h"],
+ hdrs = [
+ "c_api.h",
+ "c_api_internal.h",
+ ],
visibility = [
"//tensorflow:internal",
"//tensorflow/c:__subpackages__",
@@ -56,6 +62,7 @@ tf_cuda_library(
"//tensorflow/core:android_tensorflow_lib_lite",
],
"//conditions:default": [
+ ":tf_attrtype",
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
@@ -66,14 +73,24 @@ tf_cuda_library(
}),
)
+cc_library(
+ name = "tf_attrtype",
+ hdrs = ["tf_attrtype.h"],
+ visibility = ["//visibility:public"],
+)
+
tf_cuda_library(
name = "c_api",
- hdrs = ["c_api.h"],
+ hdrs = [
+ "c_api.h",
+ "tf_attrtype.h",
+ ],
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
":c_api_no_xla",
":c_api_internal",
+ ":tf_attrtype",
] + select({
"//tensorflow:with_xla_support": [
"//tensorflow/compiler/tf2xla:xla_compiler",
@@ -89,16 +106,18 @@ tf_cuda_library(
"c_api.cc",
"c_api_function.cc",
],
- hdrs = [
- "c_api.h",
- ],
+ hdrs = ["c_api.h"],
copts = tf_copts(),
visibility = ["//tensorflow/c:__subpackages__"],
- deps = [":c_api_internal"] + select({
+ deps = [
+ ":c_api_internal",
+ ":tf_attrtype",
+ ] + select({
"//tensorflow:android": [
"//tensorflow/core:android_tensorflow_lib_lite",
],
"//conditions:default": [
+ "@com_google_absl//absl/strings",
"//tensorflow/cc/saved_model:loader_lite",
"//tensorflow/cc:gradients",
"//tensorflow/cc:ops",
@@ -140,19 +159,11 @@ tf_cuda_library(
"//tensorflow/core:lib_platform",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/common_runtime/eager:attr_builder",
+ "//tensorflow/core/distributed_runtime/rpc:grpc_server_lib",
"@com_google_absl//absl/strings",
],
)
-cc_library(
- name = "c_api_headers",
- hdrs = [
- "c_api.h",
- ],
- copts = tf_copts(),
- visibility = ["//tensorflow:__subpackages__"],
-)
-
exports_files(
[
"version_script.lds",
@@ -238,6 +249,28 @@ tf_cuda_library(
}),
)
+tf_cuda_library(
+ name = "ops",
+ srcs = [
+ "ops.cc",
+ ],
+ hdrs = [
+ "ops.h",
+ ],
+ copts = tf_copts(),
+ visibility = ["//visibility:public"],
+ deps = [
+ ":tf_status_helper",
+ ] + select({
+ "//tensorflow:android": [
+ "//tensorflow/core:android_tensorflow_lib_lite",
+ ],
+ "//conditions:default": [
+ "//tensorflow/core:framework",
+ ],
+ }) + [":c_api_internal"],
+)
+
# -----------------------------------------------------------------------------
# Tests
@@ -286,7 +319,6 @@ tf_cuda_cc_test(
"//conditions:default": [],
}),
tags = [
- "no_oss", # http://b/119522529
"noasan",
],
# We must ensure that the dependencies can be dynamically linked since
@@ -440,6 +472,27 @@ tf_cuda_cc_test(
],
)
+tf_cc_test(
+ name = "ops_test",
+ size = "small",
+ srcs = ["ops_test.cc"],
+ linkopts = select({
+ "//conditions:default": [],
+ }),
+ tags = ["noasan"],
+ # We must ensure that the dependencies can be dynamically linked since
+ # the shared library must be able to use core:framework.
+ # linkstatic = tf_kernel_tests_linkstatic(),
+ deps = [
+ ":c_api",
+ ":ops",
+ "//tensorflow/core:protos_all_cc",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
+ ],
+)
+
# -----------------------------------------------------------------------------
# Python API target
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index af93d91b94c..21d72ac96b5 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -30,8 +30,8 @@ limitations under the License.
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
+#include "tensorflow/core/framework/logging.h"
#include "tensorflow/core/framework/op_gen_lib.h"
-#include "tensorflow/core/kernels/logging_ops.h"
#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
@@ -368,7 +368,7 @@ static Status TF_StringDecode_Impl(const char* src, size_t src_len,
size_t TF_StringDecode(const char* src, size_t src_len, const char** dst,
size_t* dst_len, TF_Status* status) {
status->status = TF_StringDecode_Impl(src, src_len, dst, dst_len);
- if (!status->status.ok()) return 0;
+ if (TF_GetCode(status) != TF_OK) return 0;
  return static_cast<size_t>(*dst - src) + *dst_len;
}
@@ -423,7 +423,7 @@ TF_DeprecatedSession* TF_NewDeprecatedSession(const TF_SessionOptions* opt,
TF_Status* status) {
Session* session;
status->status = NewSession(opt->options, &session);
- if (status->status.ok()) {
+ if (TF_GetCode(status) == TF_OK) {
return new TF_DeprecatedSession({session});
} else {
DCHECK_EQ(nullptr, session);
@@ -615,7 +615,7 @@ TF_Tensor* TF_TensorFromTensor(const tensorflow::Tensor& src,
offsets++;
const string& s = srcarray(i);
size_t consumed = TF_StringEncode(s.data(), s.size(), dst, dst_len, status);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
status->status = InvalidArgument(
"invalid string tensor encoding (string #", i, " of ",
srcarray.size(), "): ", status->status.error_message());
@@ -775,7 +775,7 @@ bool ExtendSessionGraphHelper(TF_Session* session, TF_Status* status) {
// TODO(nolivia): check this on a subset of the graph instead of all of
// it.
status->status = graph::ValidateGraphHasNoCycle(session->graph->graph);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
session->graph->mu.unlock();
return false;
}
@@ -795,7 +795,7 @@ bool ExtendSessionGraphHelper(TF_Session* session, TF_Status* status) {
*graph_def.mutable_library() = graph.flib_def().ToProto();
session->graph->mu.unlock();
status->status = session->session->Extend(graph_def);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
// Contract is we always delete input_values[i].
return false;
}
@@ -825,7 +825,7 @@ static bool TF_Run_Inputs(TF_Tensor* const* c_inputs,
const int ninputs = input_pairs->size();
for (int i = 0; i < ninputs; ++i) {
status->status = TF_TensorToTensor(c_inputs[i], &(*input_pairs)[i].second);
- if (!status->status.ok()) return false;
+ if (TF_GetCode(status) != TF_OK) return false;
}
return true;
}
@@ -863,7 +863,7 @@ static void TF_Run_Helper(
// Serialize back to upstream client, who now owns the new buffer
if (run_metadata != nullptr) {
status->status = MessageToBuffer(run_metadata_proto, run_metadata);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
}
} else {
// NOTE(zongheng): PRun does not support RunOptions yet.
@@ -883,7 +883,7 @@ static void TF_Run_Helper(
continue;
}
c_outputs[i] = TF_TensorFromTensor(src, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
}
}
@@ -940,7 +940,7 @@ void TF_PRunSetup(TF_DeprecatedSession* s,
string new_handle;
status->status = s->session->PRunSetup(input_names, output_names,
target_oper_names, &new_handle);
- if (status->status.ok()) {
+ if (TF_GetCode(status) == TF_OK) {
char* buf = new char[new_handle.size() + 1];
memcpy(buf, new_handle.c_str(), new_handle.size() + 1);
*handle = buf;
@@ -979,7 +979,7 @@ TF_Library* TF_LoadLibrary(const char* library_filename, TF_Status* status) {
status->status = tensorflow::LoadLibrary(
library_filename, &lib_handle->lib_handle, &lib_handle->op_list.data,
&lib_handle->op_list.length);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
delete lib_handle;
return nullptr;
}
@@ -1009,7 +1009,7 @@ TF_Buffer* TF_GetAllOpList() {
// --------------------------------------------------------------------------
// ListDevices & SessionListDevices API
-void TF_DeleteDeviceList(TF_DeviceList* s) { delete s; }
+void TF_DeleteDeviceList(TF_DeviceList* list) { delete list; }
TF_DeviceList* TF_SessionListDevices(TF_Session* session, TF_Status* status) {
TF_DeviceList* response = new TF_DeviceList;
@@ -1407,7 +1407,7 @@ void TF_SetAttrTensor(TF_OperationDescription* desc, const char* attr_name,
TF_Tensor* value, TF_Status* status) {
Tensor t;
status->status = TF_TensorToTensor(value, &t);
- if (status->status.ok()) desc->node_builder.Attr(attr_name, t);
+ if (TF_GetCode(status) == TF_OK) desc->node_builder.Attr(attr_name, t);
}
void TF_SetAttrTensorList(TF_OperationDescription* desc, const char* attr_name,
@@ -1417,13 +1417,13 @@ void TF_SetAttrTensorList(TF_OperationDescription* desc, const char* attr_name,
  std::vector<Tensor> t;
t.reserve(num_values);
- for (int i = 0; i < num_values && status->status.ok(); ++i) {
+ for (int i = 0; i < num_values && TF_GetCode(status) == TF_OK; ++i) {
Tensor v;
status->status = TF_TensorToTensor(values[i], &v);
t.emplace_back(v);
}
- if (status->status.ok()) desc->node_builder.Attr(attr_name, t);
+ if (TF_GetCode(status) == TF_OK) desc->node_builder.Attr(attr_name, t);
}
void TF_SetAttrValueProto(TF_OperationDescription* desc, const char* attr_name,
@@ -1471,11 +1471,11 @@ static TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc,
}
status->status = desc->node_builder.Finalize(&desc->graph->graph, &ret);
- if (status->status.ok()) {
+ if (TF_GetCode(status) == TF_OK) {
// Run shape inference function for newly added node.
status->status = desc->graph->refiner.AddNode(ret);
}
- if (status->status.ok()) {
+ if (TF_GetCode(status) == TF_OK) {
// Add the node to the name-to-node mapping.
desc->graph->name_map[ret->name()] = ret;
} else if (ret != nullptr) {
@@ -1524,10 +1524,10 @@ int TF_OperationOutputListLength(TF_Operation* oper, const char* arg_name,
NameRangeMap name_ranges;
status->status =
NameRangesForNode(oper->node, oper->node.op_def(), nullptr, &name_ranges);
- if (!status->status.ok()) return -1;
+ if (TF_GetCode(status) != TF_OK) return -1;
auto iter = name_ranges.find(arg_name);
if (iter == name_ranges.end()) {
- status->status = InvalidArgument("Input arg '", arg_name, "' not found");
+ status->status = InvalidArgument("Output arg '", arg_name, "' not found");
return -1;
}
return iter->second.second - iter->second.first;
@@ -1546,7 +1546,7 @@ int TF_OperationInputListLength(TF_Operation* oper, const char* arg_name,
NameRangeMap name_ranges;
status->status =
NameRangesForNode(oper->node, oper->node.op_def(), &name_ranges, nullptr);
- if (!status->status.ok()) return -1;
+ if (TF_GetCode(status) != TF_OK) return -1;
auto iter = name_ranges.find(arg_name);
if (iter == name_ranges.end()) {
status->status = InvalidArgument("Input arg '", arg_name, "' not found");
@@ -1644,7 +1644,7 @@ TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation* oper,
TF_Status* status) {
TF_AttrMetadata metadata;
const auto* attr = GetAttrValue(oper, attr_name, status);
- if (!status->status.ok()) return metadata;
+ if (TF_GetCode(status) != TF_OK) return metadata;
switch (attr->value_case()) {
#define SINGLE_CASE(kK, attr_type, size_expr) \
case tensorflow::AttrValue::kK: \
@@ -1751,7 +1751,7 @@ void TF_OperationGetAttrString(TF_Operation* oper, const char* attr_name,
void* value, size_t max_length,
TF_Status* status) {
const auto* attr = GetAttrValue(oper, attr_name, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
if (attr->value_case() != tensorflow::AttrValue::kS) {
status->status =
InvalidArgument("Attribute '", attr_name, "' is not a string");
@@ -1769,7 +1769,7 @@ void TF_OperationGetAttrStringList(TF_Operation* oper, const char* attr_name,
int max_values, void* storage,
size_t storage_size, TF_Status* status) {
const auto* attr = GetAttrValue(oper, attr_name, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
if (attr->value_case() != tensorflow::AttrValue::kList) {
status->status =
InvalidArgument("Value for '", attr_name, "' is not a list");
@@ -1802,7 +1802,7 @@ void TF_OperationGetAttrStringList(TF_Operation* oper, const char* attr_name,
void func##List(TF_Operation* oper, const char* attr_name, c_type* values, \
int max_values, TF_Status* status) { \
const auto* attr = GetAttrValue(oper, attr_name, status); \
- if (!status->status.ok()) return; \
+ if (TF_GetCode(status) != TF_OK) return; \
if (attr->value_case() != tensorflow::AttrValue::kList) { \
status->status = \
InvalidArgument("Value for '", attr_name, "' is not a list."); \
@@ -1824,7 +1824,7 @@ void TF_OperationGetAttrShape(TF_Operation* oper, const char* attr_name,
PartialTensorShape shape;
status->status =
tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &shape);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
auto len = std::min(shape.dims(), num_dims);
for (int i = 0; i < len; ++i) {
value[i] = shape.dim_size(i);
@@ -1832,21 +1832,21 @@ void TF_OperationGetAttrShape(TF_Operation* oper, const char* attr_name,
}
void TF_OperationGetAttrShapeList(TF_Operation* oper, const char* attr_name,
- int64_t** values, int* num_dims,
- int max_values, int64_t* storage,
- int storage_size, TF_Status* status) {
+ int64_t** dims, int* num_dims, int num_shapes,
+ int64_t* storage, int storage_size,
+ TF_Status* status) {
  std::vector<PartialTensorShape> shapes;
status->status =
tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &shapes);
- if (!status->status.ok()) return;
-  auto len = std::min(static_cast<int>(shapes.size()), max_values);
+ if (TF_GetCode(status) != TF_OK) return;
+  auto len = std::min(static_cast<int>(shapes.size()), num_shapes);
int64_t* p = storage;
int storage_left = storage_size;
for (int i = 0; i < len; ++i) {
// shapes[i].dims() == -1 for shapes with an unknown rank.
int64_t n = shapes[i].dims();
num_dims[i] = n;
- values[i] = p;
+ dims[i] = p;
if (n < 0) {
continue;
}
@@ -1866,7 +1866,7 @@ void TF_OperationGetAttrTensorShapeProto(TF_Operation* oper,
const char* attr_name,
TF_Buffer* value, TF_Status* status) {
const auto* attr = GetAttrValue(oper, attr_name, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
if (attr->value_case() != tensorflow::AttrValue::kShape) {
status->status =
InvalidArgument("Value for '", attr_name, "' is not a shape.");
@@ -1880,7 +1880,7 @@ void TF_OperationGetAttrTensorShapeProtoList(TF_Operation* oper,
TF_Buffer** values, int max_values,
TF_Status* status) {
const auto* attr = GetAttrValue(oper, attr_name, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
if (attr->value_case() != tensorflow::AttrValue::kList) {
status->status =
InvalidArgument("Value for '", attr_name, "' is not a list");
@@ -1890,7 +1890,7 @@ void TF_OperationGetAttrTensorShapeProtoList(TF_Operation* oper,
for (int i = 0; i < len; ++i) {
values[i] = TF_NewBuffer();
status->status = MessageToBuffer(attr->list().shape(i), values[i]);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
      // Delete everything allocated so far, the operation has failed.
for (int j = 0; j <= i; ++j) {
TF_DeleteBuffer(values[j]);
@@ -1905,7 +1905,7 @@ void TF_OperationGetAttrTensor(TF_Operation* oper, const char* attr_name,
*value = nullptr;
Tensor t;
status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &t);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
*value = TF_TensorFromTensor(t, status);
}
@@ -1914,7 +1914,7 @@ void TF_OperationGetAttrTensorList(TF_Operation* oper, const char* attr_name,
TF_Status* status) {
  std::vector<Tensor> ts;
status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &ts);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
  const auto len = std::min(max_values, static_cast<int>(ts.size()));
for (int i = 0; i < len; ++i) {
values[i] = TF_TensorFromTensor(ts[i], status);
@@ -1925,7 +1925,7 @@ void TF_OperationGetAttrValueProto(TF_Operation* oper, const char* attr_name,
TF_Buffer* output_attr_value,
TF_Status* status) {
const auto* attr = GetAttrValue(oper, attr_name, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
status->status = MessageToBuffer(*attr, output_attr_value);
}
@@ -1941,7 +1941,10 @@ TF_Graph::TF_Graph()
refiner(graph.versions().producer(), graph.op_registry()),
delete_requested(false),
parent(nullptr),
- parent_inputs(nullptr) {}
+ parent_inputs(nullptr) {
+ // Tell the shape refiner to also run shape inference on functions.
+ refiner.set_function_library_for_shape_inference(&graph.flib_def());
+}
TF_Graph* TF_NewGraph() { return new TF_Graph; }
@@ -2003,7 +2006,7 @@ void TF_GraphGetOpDef(TF_Graph* graph, const char* op_name,
{
mutex_lock l(graph->mu);
status->status = graph->graph.op_registry()->LookUpOpDef(op_name, &op_def);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
}
status->status = MessageToBuffer(*op_def, output_op_def);
}
@@ -2121,7 +2124,7 @@ static void GraphImportGraphDefLocked(TF_Graph* graph, const GraphDef& def,
tensorflow::ImportGraphDefResults results;
status->status = tensorflow::ImportGraphDef(opts->opts, def, &graph->graph,
&graph->refiner, &results);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
// Add new nodes to name_map
for (int i = last_node_id; i < graph->graph.num_node_ids(); ++i) {
@@ -2175,7 +2178,7 @@ TF_ImportGraphDefResults* TF_GraphImportGraphDefWithResults(
auto results = new TF_ImportGraphDefResults();
mutex_lock l(graph->mu);
GraphImportGraphDefLocked(graph, def, options, results, status);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
delete results;
return nullptr;
}
@@ -2233,7 +2236,7 @@ bool CreateInput(const TF_Output& parent_input, TF_Graph* g, const char* name,
TF_SetAttrType(desc, "dtype", TF_OperationOutputType(parent_input));
// TODO(skyewm): set placeholder shape
TF_Operation* oper = TF_FinishOperation(desc, status);
- if (!status->status.ok()) return false;
+ if (TF_GetCode(status) != TF_OK) return false;
*input = {oper, 0};
return true;
}
@@ -2378,7 +2381,7 @@ TF_WhileParams TF_NewWhile(TF_Graph* g, TF_Output* inputs, int ninputs,
TF_WhileParams params = {ninputs, cond_graph, cond_inputs, cond_output,
body_graph, body_inputs, body_outputs, name};
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
    FreeWhileResources(&params);
return EmptyWhileParams();
}
@@ -2582,7 +2585,7 @@ TF_Session* TF_NewSession(TF_Graph* graph, const TF_SessionOptions* opt,
TF_Status* status) {
Session* session;
status->status = NewSession(opt->options, &session);
- if (status->status.ok()) {
+ if (TF_GetCode(status) == TF_OK) {
TF_Session* new_session = new TF_Session(session, graph);
if (graph != nullptr) {
mutex_lock l(graph->mu);
@@ -2630,7 +2633,7 @@ TF_Session* TF_LoadSessionFromSavedModel(
status->status =
tensorflow::LoadSavedModel(session_options->options, run_options_proto,
export_dir, tag_set, &bundle);
- if (!status->status.ok()) return nullptr;
+ if (TF_GetCode(status) != TF_OK) return nullptr;
// Create a TF_Graph from the MetaGraphDef. This is safe as long as Session
// extends using GraphDefs. The Graph instance is different, but equivalent
@@ -2647,7 +2650,7 @@ TF_Session* TF_LoadSessionFromSavedModel(
if (meta_graph_def != nullptr) {
status->status = MessageToBuffer(bundle.meta_graph_def, meta_graph_def);
- if (!status->status.ok()) return nullptr;
+ if (TF_GetCode(status) != TF_OK) return nullptr;
}
TF_Session* session = new TF_Session(bundle.session.release(), graph);
@@ -2747,7 +2750,7 @@ void TF_SessionPRunSetup(TF_Session* session, const TF_Output* inputs,
string new_handle;
status->status = session->session->PRunSetup(input_names, output_names,
target_names, &new_handle);
- if (status->status.ok()) {
+ if (TF_GetCode(status) == TF_OK) {
char* buf = new char[new_handle.size() + 1];
memcpy(buf, new_handle.c_str(), new_handle.size() + 1);
*handle = buf;
@@ -2809,9 +2812,9 @@ unsigned char TF_TryEvaluateConstant(TF_Graph* graph, TF_Output output,
tensor, graph->refiner, *graph->graph.op_registry(),
graph->graph.versions().producer(), &evaluated, &result_tensor);
if (evaluated) {
- DCHECK(status->status.ok());
+ DCHECK(TF_GetCode(status) == TF_OK);
*result = TF_TensorFromTensor(result_tensor, status);
- if (!status->status.ok()) evaluated = false;
+ if (TF_GetCode(status) != TF_OK) evaluated = false;
}
return evaluated;
}
@@ -2866,7 +2869,7 @@ TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map, const char* name,
TF_Buffer* ret = TF_NewBuffer();
status->status = MessageToBuffer(*api_def, ret);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
TF_DeleteBuffer(ret);
return nullptr;
}
@@ -2878,7 +2881,7 @@ TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status) {
tensorflow::KernelList kernel_list = tensorflow::GetAllRegisteredKernels();
TF_Buffer* ret = TF_NewBuffer();
status->status = MessageToBuffer(kernel_list, ret);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
TF_DeleteBuffer(ret);
return nullptr;
}
@@ -2890,7 +2893,7 @@ TF_Buffer* TF_GetRegisteredKernelsForOp(const char* name, TF_Status* status) {
tensorflow::GetRegisteredKernelsForOp(name);
TF_Buffer* ret = TF_NewBuffer();
status->status = MessageToBuffer(kernel_list, ret);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
TF_DeleteBuffer(ret);
return nullptr;
}
@@ -2920,7 +2923,7 @@ TF_Server* TF_NewServer(const void* proto, size_t proto_len,
  std::unique_ptr<tensorflow::ServerInterface> out_server;
status->status = tensorflow::NewServer(server_def, &out_server);
- if (!status->status.ok()) return nullptr;
+ if (TF_GetCode(status) != TF_OK) return nullptr;
return new TF_Server(std::move(out_server));
#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD)
diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h
index 051de3a7dc0..c074e5d3629 100644
--- a/tensorflow/c/c_api.h
+++ b/tensorflow/c/c_api.h
@@ -19,6 +19,8 @@ limitations under the License.
#include <stddef.h>
#include <stdint.h>
+#include "tensorflow/c/tf_attrtype.h"
+
// --------------------------------------------------------------------------
// C API for TensorFlow.
//
@@ -686,19 +688,6 @@ TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs(
TF_Operation* oper, TF_Operation** control_outputs,
int max_control_outputs);
-// TF_AttrType describes the type of the value of an attribute on an operation.
-typedef enum TF_AttrType {
- TF_ATTR_STRING = 0,
- TF_ATTR_INT = 1,
- TF_ATTR_FLOAT = 2,
- TF_ATTR_BOOL = 3,
- TF_ATTR_TYPE = 4,
- TF_ATTR_SHAPE = 5,
- TF_ATTR_TENSOR = 6,
- TF_ATTR_PLACEHOLDER = 7,
- TF_ATTR_FUNC = 8,
-} TF_AttrType;
-
// TF_AttrMetadata describes the value of an attribute on an operation.
typedef struct TF_AttrMetadata {
// A boolean: 1 if the attribute value is a list, 0 otherwise.
diff --git a/tensorflow/c/c_api_experimental.cc b/tensorflow/c/c_api_experimental.cc
index 7ff4084decc..726ce2784ae 100644
--- a/tensorflow/c/c_api_experimental.cc
+++ b/tensorflow/c/c_api_experimental.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
+#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
@@ -66,6 +67,24 @@ void TF_EnableXLACompilation(TF_SessionOptions* options, unsigned char enable) {
}
}
+unsigned char TF_SetXlaEnableLazyCompilation(unsigned char enable) {
+ tensorflow::BuildXlaOpsPassFlags* flags =
+ tensorflow::GetBuildXlaOpsPassFlags();
+ bool original = flags->tf_xla_enable_lazy_compilation;
+ flags->tf_xla_enable_lazy_compilation = enable;
+ return original;
+}
+
+void TF_SetXLaAutoJitMode(const char* mode) {
+ tensorflow::SetXlaAutoJitFlagFromFlagString(mode);
+}
+
+void TF_SetXlaMinClusterSize(int size) {
+ tensorflow::MarkForCompilationPassFlags* flags =
+ tensorflow::GetMarkForCompilationPassFlags();
+ flags->tf_xla_min_cluster_size = size;
+}
+
TF_Buffer* TF_CreateConfig(unsigned char enable_xla_compilation,
unsigned char gpu_memory_allow_growth,
unsigned int num_cpu_devices) {
@@ -177,8269 +196,6 @@ static std::vector<UniqueFuncPtr> CreateFunctionsFromTextProto(
return ret;
}
-// On success, returns a newly created TF_Function instance encoding a dataset
-// node stack that returns a sequence of 3 floats, and sets `dataset_name` to
-// the created dataset name. The returned function must be deleted by calling
-// TF_DeleteFunction.
-static UniqueFuncPtr CreateFakeDatasetFunction(std::string* dataset_name,
- TF_Status* status) {
- const char* func_def = R"PREFIX(
-library {
- function {
- signature {
- name: "_make_dataset_d8de2712"
- output_arg {
- name: "TensorSliceDataset"
- type: DT_VARIANT
- }
- is_stateful: true
- }
- node_def {
- name: "TensorSliceDataset/tensors/component_0"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- dim {
- size: 3
- }
- }
- tensor_content: "\000\000(B\000\000,B\000\0000B"
- }
- }
- }
- }
- node_def {
- name: "TensorSliceDataset"
- op: "TensorSliceDataset"
- input: "TensorSliceDataset/tensors/component_0:output:0"
- attr {
- key: "Toutput_types"
- value {
- list {
- type: DT_FLOAT
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- }
- ret {
- key: "TensorSliceDataset"
- value: "TensorSliceDataset:handle:0"
- }
- }
-}
-)PREFIX";
-
- *dataset_name = "_make_dataset_d8de2712";
- auto functions = CreateFunctionsFromTextProto(
- func_def, /*mutate_proto_func*/ nullptr, status);
- DCHECK_EQ(functions.size(), 1);
- return std::move(functions[0]);
-}
-
-#if not defined(PLATFORM_WINDOWS)
-// On success, returns a set of TF_Function instances encoding a dataset
-// node stack that reads a Imagenet TFRecordFile dataset from `file_path`, and
-// sets `dataset_name` to the created dataset name. The returned functions must
-// be deleted by calling TF_DeleteFunction.
-static std::vector<UniqueFuncPtr> CreateImagenetDatasetFunctions(
- const char* file_path, std::string* dataset_name, TF_Status* status) {
-#if defined(PLATFORM_WINDOWS)
- status->status = tensorflow::errors::Unimplemented(
- "TF_MakeFileBasedIteratorGetNextWithDatasets in the experimental C API "
- "is not implemented for Windows");
-  return std::vector<UniqueFuncPtr>();
-#else
- const char* func_def = R"PREFIX(
-library {
- function {
- signature {
- name: "tf_map_func_91295dea"
- input_arg {
- name: "arg0"
- type: DT_STRING
- }
- output_arg {
- name: "FlatMapDataset"
- type: DT_VARIANT
- }
- description: "A wrapper for Defun that facilitates shape inference."
- is_stateful: true
- }
- node_def {
- name: "flat_filenames/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: -1
- }
- }
- }
- }
- node_def {
- name: "flat_filenames"
- op: "Reshape"
- input: "arg0"
- input: "flat_filenames/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "TensorSliceDataset"
- op: "TensorSliceDataset"
- input: "flat_filenames:output:0"
- attr {
- key: "Toutput_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- }
- node_def {
- name: "FlatMapDataset"
- op: "FlatMapDataset"
- input: "TensorSliceDataset:handle:0"
- attr {
- key: "Targuments"
- value {
- list {
- }
- }
- }
- attr {
- key: "f"
- value {
- func {
- name: "tf_map_func_0cc8c35b"
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- }
- ret {
- key: "FlatMapDataset"
- value: "FlatMapDataset:handle:0"
- }
- }
- function {
- signature {
- name: "tf_map_func_0cc8c35b"
- input_arg {
- name: "arg0"
- type: DT_STRING
- }
- output_arg {
- name: "TFRecordDataset"
- type: DT_VARIANT
- }
- description: "A wrapper for Defun that facilitates shape inference."
- is_stateful: true
- }
- node_def {
- name: "compression_type"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: ""
- }
- }
- }
- }
- node_def {
- name: "buffer_size"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 8388608
- }
- }
- }
- }
- node_def {
- name: "TFRecordDataset"
- op: "TFRecordDataset"
- input: "arg0"
- input: "compression_type:output:0"
- input: "buffer_size:output:0"
- }
- ret {
- key: "TFRecordDataset"
- value: "TFRecordDataset:handle:0"
- }
- }
- function {
- signature {
- name: "tf_map_func_74b6b15c"
- input_arg {
- name: "arg0"
- type: DT_STRING
- }
- output_arg {
- name: "Reshape_1"
- type: DT_FLOAT
- }
- output_arg {
- name: "sub_1"
- type: DT_INT32
- }
- description: "A wrapper for Defun that facilitates shape inference."
- is_stateful: true
- }
- node_def {
- name: "ParseSingleExample/key_image/class/label"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: -1
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape"
- op: "Reshape"
- input: "ParseSingleExample/key_image/class/label:output:0"
- input: "ParseSingleExample/Reshape/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "ParseSingleExample/key_image/class/text"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: ""
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape_1/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape_1"
- op: "Reshape"
- input: "ParseSingleExample/key_image/class/text:output:0"
- input: "ParseSingleExample/Reshape_1/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "ParseSingleExample/key_image/encoded"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: ""
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape_2/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape_2"
- op: "Reshape"
- input: "ParseSingleExample/key_image/encoded:output:0"
- input: "ParseSingleExample/Reshape_2/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "ParseSingleExample/key_image/format"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "jpeg"
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape_3/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "ParseSingleExample/Reshape_3"
- op: "Reshape"
- input: "ParseSingleExample/key_image/format:output:0"
- input: "ParseSingleExample/Reshape_3/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "ParseSingleExample/ParseSingleExample"
- op: "ParseSingleExample"
- input: "arg0"
- input: "ParseSingleExample/Reshape:output:0"
- input: "ParseSingleExample/Reshape_1:output:0"
- input: "ParseSingleExample/Reshape_2:output:0"
- input: "ParseSingleExample/Reshape_3:output:0"
- attr {
- key: "Tdense"
- value {
- list {
- type: DT_INT64
- type: DT_STRING
- type: DT_STRING
- type: DT_STRING
- }
- }
- }
- attr {
- key: "dense_keys"
- value {
- list {
- s: "image/class/label"
- s: "image/class/text"
- s: "image/encoded"
- s: "image/format"
- }
- }
- }
- attr {
- key: "dense_shapes"
- value {
- list {
- shape {
- }
- shape {
- }
- shape {
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "num_sparse"
- value {
- i: 5
- }
- }
- attr {
- key: "sparse_keys"
- value {
- list {
- s: "image/object/bbox/xmax"
- s: "image/object/bbox/xmin"
- s: "image/object/bbox/ymax"
- s: "image/object/bbox/ymin"
- s: "image/object/class/label"
- }
- }
- }
- attr {
- key: "sparse_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_FLOAT
- type: DT_FLOAT
- type: DT_FLOAT
- type: DT_INT64
- }
- }
- }
- }
- node_def {
- name: "Reshape/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "Reshape"
- op: "Reshape"
- input: "ParseSingleExample/ParseSingleExample:dense_values:2"
- input: "Reshape/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/Substr/pos"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "decode_image/Substr/len"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "decode_image/Substr"
- op: "Substr"
- input: "Reshape:output:0"
- input: "decode_image/Substr/pos:output:0"
- input: "decode_image/Substr/len:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/is_jpeg/Substr/pos"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "decode_image/is_jpeg/Substr/len"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "decode_image/is_jpeg/Substr"
- op: "Substr"
- input: "Reshape:output:0"
- input: "decode_image/is_jpeg/Substr/pos:output:0"
- input: "decode_image/is_jpeg/Substr/len:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/is_jpeg/Equal/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "\377\330\377"
- }
- }
- }
- }
- node_def {
- name: "decode_image/is_jpeg/Equal"
- op: "Equal"
- input: "decode_image/is_jpeg/Substr:output:0"
- input: "decode_image/is_jpeg/Equal/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/Switch"
- op: "Switch"
- input: "decode_image/is_jpeg/Equal:z:0"
- input: "decode_image/is_jpeg/Equal:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/switch_t"
- op: "Identity"
- input: "decode_image/cond_jpeg/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/switch_f"
- op: "Identity"
- input: "decode_image/cond_jpeg/Switch:output_false:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/pred_id"
- op: "Identity"
- input: "decode_image/is_jpeg/Equal:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/check_jpeg_channels/x"
- op: "Const"
- input: "^decode_image/cond_jpeg/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/check_jpeg_channels/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 4
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/check_jpeg_channels"
- op: "NotEqual"
- input: "decode_image/cond_jpeg/check_jpeg_channels/x:output:0"
- input: "decode_image/cond_jpeg/check_jpeg_channels/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/Assert/Const"
- op: "Const"
- input: "^decode_image/cond_jpeg/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Channels must be in (None, 0, 1, 3) when decoding JPEG images"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/Assert/Assert/data_0"
- op: "Const"
- input: "^decode_image/cond_jpeg/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Channels must be in (None, 0, 1, 3) when decoding JPEG images"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/Assert/Assert"
- op: "Assert"
- input: "decode_image/cond_jpeg/check_jpeg_channels:z:0"
- input: "decode_image/cond_jpeg/Assert/Assert/data_0:output:0"
- attr {
- key: "T"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "summarize"
- value {
- i: 3
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/DecodeJpeg"
- op: "DecodeJpeg"
- input: "decode_image/cond_jpeg/DecodeJpeg/Switch:output_true:0"
- input: "^decode_image/cond_jpeg/Assert/Assert"
- attr {
- key: "acceptable_fraction"
- value {
- f: 1.0
- }
- }
- attr {
- key: "channels"
- value {
- i: 3
- }
- }
- attr {
- key: "dct_method"
- value {
- s: ""
- }
- }
- attr {
- key: "fancy_upscaling"
- value {
- b: true
- }
- }
- attr {
- key: "ratio"
- value {
- i: 1
- }
- }
- attr {
- key: "try_recover_truncated"
- value {
- b: false
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/DecodeJpeg/Switch"
- op: "Switch"
- input: "Reshape:output:0"
- input: "decode_image/cond_jpeg/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@Reshape"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/is_png/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "\211PN"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/is_png"
- op: "Equal"
- input: "decode_image/cond_jpeg/is_png/Switch:output_false:0"
- input: "decode_image/cond_jpeg/is_png/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/is_png/Switch"
- op: "Switch"
- input: "decode_image/Substr:output:0"
- input: "decode_image/cond_jpeg/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@decode_image/Substr"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/Switch"
- op: "Switch"
- input: "decode_image/cond_jpeg/is_png:z:0"
- input: "decode_image/cond_jpeg/is_png:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/switch_t"
- op: "Identity"
- input: "decode_image/cond_jpeg/cond_png/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/switch_f"
- op: "Identity"
- input: "decode_image/cond_jpeg/cond_png/Switch:output_false:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/pred_id"
- op: "Identity"
- input: "decode_image/cond_jpeg/is_png:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/DecodePng"
- op: "DecodePng"
- input: "decode_image/cond_jpeg/cond_png/DecodePng/Switch_1:output_true:0"
- attr {
- key: "channels"
- value {
- i: 3
- }
- }
- attr {
- key: "dtype"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/DecodePng/Switch"
- op: "Switch"
- input: "Reshape:output:0"
- input: "decode_image/cond_jpeg/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@Reshape"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/DecodePng/Switch_1"
- op: "Switch"
- input: "decode_image/cond_jpeg/cond_png/DecodePng/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@Reshape"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/is_gif/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "GIF"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/is_gif"
- op: "Equal"
- input: "decode_image/cond_jpeg/cond_png/is_gif/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/is_gif/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/is_gif/Switch"
- op: "Switch"
- input: "decode_image/cond_jpeg/is_png/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@decode_image/Substr"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Switch"
- op: "Switch"
- input: "decode_image/cond_jpeg/cond_png/is_gif:z:0"
- input: "decode_image/cond_jpeg/cond_png/is_gif:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- op: "Identity"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- op: "Identity"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Switch:output_false:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/pred_id"
- op: "Identity"
- input: "decode_image/cond_jpeg/cond_png/is_gif:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels/x"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels"
- op: "NotEqual"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels/x:output:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels_1/x"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels_1/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 4
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels_1"
- op: "NotEqual"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels_1/x:output:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels_1/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/LogicalAnd"
- op: "LogicalAnd"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels:z:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_gif_channels_1:z:0"
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert/Const"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Channels must be in (None, 0, 3) when decoding GIF images"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert/Assert/data_0"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Channels must be in (None, 0, 3) when decoding GIF images"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert/Assert"
- op: "Assert"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/LogicalAnd:z:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Assert/Assert/data_0:output:0"
- attr {
- key: "T"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "summarize"
- value {
- i: 3
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif"
- op: "DecodeGif"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif/Switch_1:output_true:0"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/Assert/Assert"
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif/Switch"
- op: "Switch"
- input: "decode_image/cond_jpeg/cond_png/DecodePng/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@Reshape"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif/Switch_1"
- op: "Switch"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@Reshape"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/pos"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/len"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Substr"
- op: "Substr"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/pos:output:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/len:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/Switch"
- op: "Switch"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif/Switch:output_false:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@Reshape"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/is_bmp/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "BM"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/is_bmp"
- op: "Equal"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Substr:output:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/is_bmp/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_1/Const"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Unable to decode bytes as JPEG, PNG, GIF, or BMP"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_1/Assert/data_0"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Unable to decode bytes as JPEG, PNG, GIF, or BMP"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_1/Assert"
- op: "Assert"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/is_bmp:z:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_1/Assert/data_0:output:0"
- attr {
- key: "T"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "summarize"
- value {
- i: 3
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_channels/x"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_channels/y"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/check_channels"
- op: "NotEqual"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_channels/x:output:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_channels/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_2/Const"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Channels must be in (None, 0, 3) when decoding BMP images"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_2/Assert/data_0"
- op: "Const"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Channels must be in (None, 0, 3) when decoding BMP images"
- }
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_2/Assert"
- op: "Assert"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/check_channels:z:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Assert_2/Assert/data_0:output:0"
- attr {
- key: "T"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "summarize"
- value {
- i: 3
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeBmp"
- op: "DecodeBmp"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Substr/Switch:output_false:0"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/Assert_1/Assert"
- input: "^decode_image/cond_jpeg/cond_png/cond_gif/Assert_2/Assert"
- attr {
- key: "channels"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/cond_gif/Merge"
- op: "Merge"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeBmp:image:0"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/DecodeGif:image:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/cond_png/Merge"
- op: "Merge"
- input: "decode_image/cond_jpeg/cond_png/cond_gif/Merge:output:0"
- input: "decode_image/cond_jpeg/cond_png/DecodePng:image:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "decode_image/cond_jpeg/Merge"
- op: "Merge"
- input: "decode_image/cond_jpeg/cond_png/Merge:output:0"
- input: "decode_image/cond_jpeg/DecodeJpeg:image:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "convert_image/Cast"
- op: "Cast"
- input: "decode_image/cond_jpeg/Merge:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "convert_image/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 0.00392156885937
- }
- }
- }
- }
- node_def {
- name: "convert_image"
- op: "Mul"
- input: "convert_image/Cast:y:0"
- input: "convert_image/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "Const"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- dim {
- size: 1
- }
- dim {
- size: 1
- }
- dim {
- size: 4
- }
- }
- tensor_content: "\000\000\000\000\000\000\000\000\000\000\200?\000\000\200?"
- }
- }
- }
- }
- node_def {
- name: "distorted_bounding_box_crop/Shape"
- op: "Shape"
- input: "convert_image:z:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "distorted_bounding_box_crop/sample_distorted_bounding_box/SampleDistortedBoundingBoxV2/min_object_covered"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 0.10000000149
- }
- }
- }
- }
- node_def {
- name: "distorted_bounding_box_crop/sample_distorted_bounding_box/SampleDistortedBoundingBoxV2"
- op: "SampleDistortedBoundingBoxV2"
- input: "distorted_bounding_box_crop/Shape:output:0"
- input: "Const:output:0"
- input: "distorted_bounding_box_crop/sample_distorted_bounding_box/SampleDistortedBoundingBoxV2/min_object_covered:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "area_range"
- value {
- list {
- f: 0.0799999982119
- f: 1.0
- }
- }
- }
- attr {
- key: "aspect_ratio_range"
- value {
- list {
- f: 0.75
- f: 1.33333337307
- }
- }
- }
- attr {
- key: "max_attempts"
- value {
- i: 1
- }
- }
- attr {
- key: "seed"
- value {
- i: 0
- }
- }
- attr {
- key: "seed2"
- value {
- i: 0
- }
- }
- attr {
- key: "use_image_if_no_bounding_boxes"
- value {
- b: true
- }
- }
- }
- node_def {
- name: "distorted_bounding_box_crop/Slice"
- op: "Slice"
- input: "convert_image:z:0"
- input: "distorted_bounding_box_crop/sample_distorted_bounding_box/SampleDistortedBoundingBoxV2:begin:0"
- input: "distorted_bounding_box_crop/sample_distorted_bounding_box/SampleDistortedBoundingBoxV2:size:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "Shape"
- op: "Shape"
- input: "convert_image:z:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "Shape_1"
- op: "Shape"
- input: "distorted_bounding_box_crop/Slice:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "Equal"
- op: "Equal"
- input: "Shape:output:0"
- input: "Shape_1:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "Cast"
- op: "Cast"
- input: "Equal:z:0"
- attr {
- key: "DstT"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "Const_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "Sum"
- op: "Sum"
- input: "Cast:y:0"
- input: "Const_1:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "Tidx"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "keep_dims"
- value {
- b: false
- }
- }
- }
- node_def {
- name: "GreaterEqual/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "GreaterEqual"
- op: "GreaterEqual"
- input: "Sum:output:0"
- input: "GreaterEqual/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/Switch"
- op: "Switch"
- input: "GreaterEqual:z:0"
- input: "GreaterEqual:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/switch_t"
- op: "Identity"
- input: "cond/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/switch_f"
- op: "Identity"
- input: "cond/Switch:output_false:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/pred_id"
- op: "Identity"
- input: "GreaterEqual:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/Shape"
- op: "Shape"
- input: "cond/Shape/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/Shape/Switch"
- op: "Switch"
- input: "convert_image:z:0"
- input: "cond/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@convert_image"
- }
- }
- }
- }
- node_def {
- name: "cond/Cast"
- op: "Cast"
- input: "cond/Shape:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/strided_slice/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice"
- op: "StridedSlice"
- input: "cond/Cast:y:0"
- input: "cond/strided_slice/stack:output:0"
- input: "cond/strided_slice/stack_1:output:0"
- input: "cond/strided_slice/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/strided_slice_1/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_1/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_1/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_1"
- op: "StridedSlice"
- input: "cond/Cast:y:0"
- input: "cond/strided_slice_1/stack:output:0"
- input: "cond/strided_slice_1/stack_1:output:0"
- input: "cond/strided_slice_1/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/Greater"
- op: "Greater"
- input: "cond/strided_slice:output:0"
- input: "cond/strided_slice_1:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/Switch"
- op: "Switch"
- input: "cond/Greater:z:0"
- input: "cond/Greater:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/cond/switch_t"
- op: "Identity"
- input: "cond/cond/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/cond/switch_f"
- op: "Identity"
- input: "cond/cond/Switch:output_false:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/cond/pred_id"
- op: "Identity"
- input: "cond/Greater:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice/stack"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice/stack_1"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice/stack_2"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice"
- op: "StridedSlice"
- input: "cond/cond/strided_slice/Switch:output_true:0"
- input: "cond/cond/strided_slice/stack:output:0"
- input: "cond/cond/strided_slice/stack_1:output:0"
- input: "cond/cond/strided_slice/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice/Switch"
- op: "Switch"
- input: "cond/Cast:y:0"
- input: "cond/cond/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@cond/Cast"
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_1/stack"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_1/stack_1"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_1/stack_2"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_1"
- op: "StridedSlice"
- input: "cond/cond/strided_slice/Switch:output_true:0"
- input: "cond/cond/strided_slice_1/stack:output:0"
- input: "cond/cond/strided_slice_1/stack_1:output:0"
- input: "cond/cond/strided_slice_1/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/cond/truediv"
- op: "RealDiv"
- input: "cond/cond/strided_slice:output:0"
- input: "cond/cond/strided_slice_1:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/mul/y"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 224.0
- }
- }
- }
- }
- node_def {
- name: "cond/cond/mul"
- op: "Mul"
- input: "cond/cond/truediv:z:0"
- input: "cond/cond/mul/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/Cast/x/1"
- op: "Const"
- input: "^cond/cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 224.0
- }
- }
- }
- }
- node_def {
- name: "cond/cond/Cast/x"
- op: "Pack"
- input: "cond/cond/mul:z:0"
- input: "cond/cond/Cast/x/1:output:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "axis"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "cond/cond/Cast"
- op: "Cast"
- input: "cond/cond/Cast/x:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_2/stack"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_2/stack_1"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_2/stack_2"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_2"
- op: "StridedSlice"
- input: "cond/cond/strided_slice_2/Switch:output_false:0"
- input: "cond/cond/strided_slice_2/stack:output:0"
- input: "cond/cond/strided_slice_2/stack_1:output:0"
- input: "cond/cond/strided_slice_2/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_2/Switch"
- op: "Switch"
- input: "cond/Cast:y:0"
- input: "cond/cond/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@cond/Cast"
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_3/stack"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_3/stack_1"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_3/stack_2"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/cond/strided_slice_3"
- op: "StridedSlice"
- input: "cond/cond/strided_slice_2/Switch:output_false:0"
- input: "cond/cond/strided_slice_3/stack:output:0"
- input: "cond/cond/strided_slice_3/stack_1:output:0"
- input: "cond/cond/strided_slice_3/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/cond/truediv_1"
- op: "RealDiv"
- input: "cond/cond/strided_slice_2:output:0"
- input: "cond/cond/strided_slice_3:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/mul_1/y"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 224.0
- }
- }
- }
- }
- node_def {
- name: "cond/cond/mul_1"
- op: "Mul"
- input: "cond/cond/truediv_1:z:0"
- input: "cond/cond/mul_1/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/Cast_1/x/0"
- op: "Const"
- input: "^cond/cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 224.0
- }
- }
- }
- }
- node_def {
- name: "cond/cond/Cast_1/x"
- op: "Pack"
- input: "cond/cond/Cast_1/x/0:output:0"
- input: "cond/cond/mul_1:z:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "axis"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "cond/cond/Cast_1"
- op: "Cast"
- input: "cond/cond/Cast_1/x:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/cond/Merge"
- op: "Merge"
- input: "cond/cond/Cast_1:y:0"
- input: "cond/cond/Cast:y:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/ResizeBicubic/images"
- op: "Pack"
- input: "cond/Shape/Switch:output_true:0"
- attr {
- key: "N"
- value {
- i: 1
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "axis"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "cond/ResizeBicubic"
- op: "ResizeBicubic"
- input: "cond/ResizeBicubic/images:output:0"
- input: "cond/cond/Merge:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "align_corners"
- value {
- b: false
- }
- }
- }
- node_def {
- name: "cond/strided_slice_2/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_2/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_2/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_2"
- op: "StridedSlice"
- input: "cond/ResizeBicubic:resized_images:0"
- input: "cond/strided_slice_2/stack:output:0"
- input: "cond/strided_slice_2/stack_1:output:0"
- input: "cond/strided_slice_2/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/Shape_1"
- op: "Shape"
- input: "cond/strided_slice_2:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/strided_slice_3/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_3/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_3/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_3"
- op: "StridedSlice"
- input: "cond/Shape_1:output:0"
- input: "cond/strided_slice_3/stack:output:0"
- input: "cond/strided_slice_3/stack_1:output:0"
- input: "cond/strided_slice_3/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/Shape_2"
- op: "Shape"
- input: "cond/strided_slice_2:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/strided_slice_4/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_4/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_4/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_4"
- op: "StridedSlice"
- input: "cond/Shape_2:output:0"
- input: "cond/strided_slice_4/stack:output:0"
- input: "cond/strided_slice_4/stack_1:output:0"
- input: "cond/strided_slice_4/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/sub/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 224
- }
- }
- }
- }
- node_def {
- name: "cond/sub"
- op: "Sub"
- input: "cond/strided_slice_3:output:0"
- input: "cond/sub/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/add/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/add"
- op: "Add"
- input: "cond/sub:z:0"
- input: "cond/add/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/truediv/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/truediv/Cast"
- op: "Cast"
- input: "cond/add:z:0"
- attr {
- key: "DstT"
- value {
- type: DT_DOUBLE
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/truediv/Cast_1"
- op: "Cast"
- input: "cond/truediv/y:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_DOUBLE
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/truediv"
- op: "RealDiv"
- input: "cond/truediv/Cast:y:0"
- input: "cond/truediv/Cast_1:y:0"
- attr {
- key: "T"
- value {
- type: DT_DOUBLE
- }
- }
- }
- node_def {
- name: "cond/sub_1/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 224
- }
- }
- }
- }
- node_def {
- name: "cond/sub_1"
- op: "Sub"
- input: "cond/strided_slice_4:output:0"
- input: "cond/sub_1/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/add_1/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/add_1"
- op: "Add"
- input: "cond/sub_1:z:0"
- input: "cond/add_1/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/truediv_1/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/truediv_1/Cast"
- op: "Cast"
- input: "cond/add_1:z:0"
- attr {
- key: "DstT"
- value {
- type: DT_DOUBLE
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/truediv_1/Cast_1"
- op: "Cast"
- input: "cond/truediv_1/y:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_DOUBLE
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/truediv_1"
- op: "RealDiv"
- input: "cond/truediv_1/Cast:y:0"
- input: "cond/truediv_1/Cast_1:y:0"
- attr {
- key: "T"
- value {
- type: DT_DOUBLE
- }
- }
- }
- node_def {
- name: "cond/Shape_3"
- op: "Shape"
- input: "cond/strided_slice_2:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/Rank"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "cond/Equal/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "cond/Equal"
- op: "Equal"
- input: "cond/Rank:output:0"
- input: "cond/Equal/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/Assert/Const"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Rank of image must be equal to 3."
- }
- }
- }
- }
- node_def {
- name: "cond/Assert/Assert/data_0"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Rank of image must be equal to 3."
- }
- }
- }
- }
- node_def {
- name: "cond/Assert/Assert"
- op: "Assert"
- input: "cond/Equal:z:0"
- input: "cond/Assert/Assert/data_0:output:0"
- attr {
- key: "T"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "summarize"
- value {
- i: 3
- }
- }
- }
- node_def {
- name: "cond/strided_slice_5/stack"
- op: "Const"
- input: "^cond/Assert/Assert"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_5/stack_1"
- op: "Const"
- input: "^cond/Assert/Assert"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 3
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_5/stack_2"
- op: "Const"
- input: "^cond/Assert/Assert"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_5"
- op: "StridedSlice"
- input: "cond/Shape_3:output:0"
- input: "cond/strided_slice_5/stack:output:0"
- input: "cond/strided_slice_5/stack_1:output:0"
- input: "cond/strided_slice_5/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/stack/0"
- op: "Const"
- input: "^cond/Assert/Assert"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 224
- }
- }
- }
- }
- node_def {
- name: "cond/stack/1"
- op: "Const"
- input: "^cond/Assert/Assert"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 224
- }
- }
- }
- }
- node_def {
- name: "cond/stack"
- op: "Pack"
- input: "cond/stack/0:output:0"
- input: "cond/stack/1:output:0"
- input: "cond/strided_slice_5:output:0"
- attr {
- key: "N"
- value {
- i: 3
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "axis"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "cond/strided_slice_6/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_6/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_6/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_6"
- op: "StridedSlice"
- input: "cond/Shape_3:output:0"
- input: "cond/strided_slice_6/stack:output:0"
- input: "cond/strided_slice_6/stack_1:output:0"
- input: "cond/strided_slice_6/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/GreaterEqual/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 224
- }
- }
- }
- }
- node_def {
- name: "cond/GreaterEqual"
- op: "GreaterEqual"
- input: "cond/strided_slice_6:output:0"
- input: "cond/GreaterEqual/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/strided_slice_7/stack"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_7/stack_1"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 2
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_7/stack_2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_7"
- op: "StridedSlice"
- input: "cond/Shape_3:output:0"
- input: "cond/strided_slice_7/stack:output:0"
- input: "cond/strided_slice_7/stack_1:output:0"
- input: "cond/strided_slice_7/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/GreaterEqual_1/y"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 224
- }
- }
- }
- }
- node_def {
- name: "cond/GreaterEqual_1"
- op: "GreaterEqual"
- input: "cond/strided_slice_7:output:0"
- input: "cond/GreaterEqual_1/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/LogicalAnd"
- op: "LogicalAnd"
- input: "cond/GreaterEqual:z:0"
- input: "cond/GreaterEqual_1:z:0"
- }
- node_def {
- name: "cond/Assert_1/Const"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Crop size greater than the image size."
- }
- }
- }
- }
- node_def {
- name: "cond/Assert_1/Assert/data_0"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "Crop size greater than the image size."
- }
- }
- }
- }
- node_def {
- name: "cond/Assert_1/Assert"
- op: "Assert"
- input: "cond/LogicalAnd:z:0"
- input: "cond/Assert_1/Assert/data_0:output:0"
- attr {
- key: "T"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "summarize"
- value {
- i: 3
- }
- }
- }
- node_def {
- name: "cond/stack_1/2"
- op: "Const"
- input: "^cond/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_DOUBLE
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_DOUBLE
- tensor_shape {
- }
- double_val: 0.0
- }
- }
- }
- }
- node_def {
- name: "cond/stack_1"
- op: "Pack"
- input: "cond/truediv:z:0"
- input: "cond/truediv_1:z:0"
- input: "cond/stack_1/2:output:0"
- attr {
- key: "N"
- value {
- i: 3
- }
- }
- attr {
- key: "T"
- value {
- type: DT_DOUBLE
- }
- }
- attr {
- key: "axis"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "cond/ToInt32"
- op: "Cast"
- input: "cond/stack_1:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_DOUBLE
- }
- }
- }
- node_def {
- name: "cond/Slice"
- op: "Slice"
- input: "cond/strided_slice_2:output:0"
- input: "cond/ToInt32:y:0"
- input: "cond/stack:output:0"
- input: "^cond/Assert_1/Assert"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "cond/Reshape"
- op: "Reshape"
- input: "cond/Slice:output:0"
- input: "cond/stack:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "cond/ResizeBicubic_1/images"
- op: "Pack"
- input: "cond/ResizeBicubic_1/images/Switch:output_false:0"
- attr {
- key: "N"
- value {
- i: 1
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "axis"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "cond/ResizeBicubic_1/images/Switch"
- op: "Switch"
- input: "distorted_bounding_box_crop/Slice:output:0"
- input: "cond/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@distorted_bounding_box_crop/Slice"
- }
- }
- }
- }
- node_def {
- name: "cond/ResizeBicubic_1/size"
- op: "Const"
- input: "^cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 2
- }
- }
- tensor_content: "\340\000\000\000\340\000\000\000"
- }
- }
- }
- }
- node_def {
- name: "cond/ResizeBicubic_1"
- op: "ResizeBicubic"
- input: "cond/ResizeBicubic_1/images:output:0"
- input: "cond/ResizeBicubic_1/size:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "align_corners"
- value {
- b: false
- }
- }
- }
- node_def {
- name: "cond/strided_slice_8/stack"
- op: "Const"
- input: "^cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_8/stack_1"
- op: "Const"
- input: "^cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_8/stack_2"
- op: "Const"
- input: "^cond/switch_f"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "cond/strided_slice_8"
- op: "StridedSlice"
- input: "cond/ResizeBicubic_1:resized_images:0"
- input: "cond/strided_slice_8/stack:output:0"
- input: "cond/strided_slice_8/stack_1:output:0"
- input: "cond/strided_slice_8/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "cond/Merge"
- op: "Merge"
- input: "cond/strided_slice_8:output:0"
- input: "cond/Reshape:output:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "Const_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- dim {
- size: 1
- }
- dim {
- size: 1
- }
- dim {
- size: 3
- }
- }
- tensor_content: "\354Q\370>\325x\351>;\337\317>"
- }
- }
- }
- }
- node_def {
- name: "sub"
- op: "Sub"
- input: "cond/Merge:output:0"
- input: "Const_2:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "Const_3"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- dim {
- size: 1
- }
- dim {
- size: 1
- }
- dim {
- size: 3
- }
- }
- tensor_content: "\372~j>B`e>fff>"
- }
- }
- }
- }
- node_def {
- name: "truediv"
- op: "RealDiv"
- input: "sub:z:0"
- input: "Const_3:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "random_flip_left_right/control_dependency"
- op: "Identity"
- input: "truediv:z:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@truediv"
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform/min"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 0.0
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform/max"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 1.0
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform/RandomUniform"
- op: "RandomUniform"
- input: "random_flip_left_right/random_uniform/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "seed"
- value {
- i: 0
- }
- }
- attr {
- key: "seed2"
- value {
- i: 0
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform/sub"
- op: "Sub"
- input: "random_flip_left_right/random_uniform/max:output:0"
- input: "random_flip_left_right/random_uniform/min:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform/mul"
- op: "Mul"
- input: "random_flip_left_right/random_uniform/RandomUniform:output:0"
- input: "random_flip_left_right/random_uniform/sub:z:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "random_flip_left_right/random_uniform"
- op: "Add"
- input: "random_flip_left_right/random_uniform/mul:z:0"
- input: "random_flip_left_right/random_uniform/min:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "random_flip_left_right/Less/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 0.5
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/Less"
- op: "Less"
- input: "random_flip_left_right/random_uniform:z:0"
- input: "random_flip_left_right/Less/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "random_flip_left_right/Switch"
- op: "Switch"
- input: "random_flip_left_right/Less:z:0"
- input: "random_flip_left_right/Less:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "random_flip_left_right/switch_t"
- op: "Identity"
- input: "random_flip_left_right/Switch:output_true:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "random_flip_left_right/switch_f"
- op: "Identity"
- input: "random_flip_left_right/Switch:output_false:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "random_flip_left_right/pred_id"
- op: "Identity"
- input: "random_flip_left_right/Less:z:0"
- attr {
- key: "T"
- value {
- type: DT_BOOL
- }
- }
- }
- node_def {
- name: "random_flip_left_right/ReverseV2/axis"
- op: "Const"
- input: "^random_flip_left_right/switch_t"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/ReverseV2"
- op: "ReverseV2"
- input: "random_flip_left_right/ReverseV2/Switch:output_true:0"
- input: "random_flip_left_right/ReverseV2/axis:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "Tidx"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "random_flip_left_right/ReverseV2/Switch"
- op: "Switch"
- input: "random_flip_left_right/control_dependency:output:0"
- input: "random_flip_left_right/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@truediv"
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/Switch_1"
- op: "Switch"
- input: "random_flip_left_right/control_dependency:output:0"
- input: "random_flip_left_right/pred_id:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "_class"
- value {
- list {
- s: "loc:@truediv"
- }
- }
- }
- }
- node_def {
- name: "random_flip_left_right/Merge"
- op: "Merge"
- input: "random_flip_left_right/Switch_1:output_false:0"
- input: "random_flip_left_right/ReverseV2:output:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- node_def {
- name: "Reshape_1/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 3
- }
- }
- tensor_content: "\340\000\000\000\340\000\000\000\003\000\000\000"
- }
- }
- }
- }
- node_def {
- name: "Reshape_1"
- op: "Reshape"
- input: "random_flip_left_right/Merge:output:0"
- input: "Reshape_1/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "Reshape_2/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "Reshape_2"
- op: "Reshape"
- input: "ParseSingleExample/ParseSingleExample:dense_values:0"
- input: "Reshape_2/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "Cast_1"
- op: "Cast"
- input: "Reshape_2:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_INT64
- }
- }
- }
- node_def {
- name: "sub_1/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "sub_1"
- op: "Sub"
- input: "Cast_1:y:0"
- input: "sub_1/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT32
- }
- }
- }
- ret {
- key: "Reshape_1"
- value: "Reshape_1:output:0"
- }
- ret {
- key: "sub_1"
- value: "sub_1:z:0"
- }
- }
- function {
- signature {
- name: "tf_predicate_7089b845"
- input_arg {
- name: "arg0"
- type: DT_FLOAT
- }
- input_arg {
- name: "arg1"
- type: DT_INT32
- }
- input_arg {
- name: "Equal/Placeholder"
- type: DT_INT64
- }
- output_arg {
- name: "Equal"
- type: DT_BOOL
- }
- description: "A wrapper for Defun that facilitates shape inference."
- }
- node_def {
- name: "Shape"
- op: "Shape"
- input: "arg0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT64
- }
- }
- }
- node_def {
- name: "strided_slice/stack"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "strided_slice/stack_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "strided_slice/stack_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "strided_slice"
- op: "StridedSlice"
- input: "Shape:output:0"
- input: "strided_slice/stack:output:0"
- input: "strided_slice/stack_1:output:0"
- input: "strided_slice/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "Equal"
- op: "Equal"
- input: "strided_slice:output:0"
- input: "Equal/Placeholder"
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- }
- ret {
- key: "Equal"
- value: "Equal:z:0"
- }
- }
- function {
- signature {
- name: "_make_dataset_5fa5e1f4"
- output_arg {
- name: "PrefetchDataset_1"
- type: DT_VARIANT
- }
- is_stateful: true
- }
- node_def {
- name: "TensorSliceDataset/MatchingFiles/pattern"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "$(DATA_DIR)"
- }
- }
- }
- }
- node_def {
- name: "TensorSliceDataset/MatchingFiles"
- op: "MatchingFiles"
- input: "TensorSliceDataset/MatchingFiles/pattern:output:0"
- }
- node_def {
- name: "TensorSliceDataset"
- op: "TensorSliceDataset"
- input: "TensorSliceDataset/MatchingFiles:filenames:0"
- attr {
- key: "Toutput_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/MatchingFiles/pattern"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "$(DATA_DIR)"
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/MatchingFiles"
- op: "MatchingFiles"
- input: "ShuffleDataset/MatchingFiles/pattern:output:0"
- }
- node_def {
- name: "ShuffleDataset/Shape"
- op: "Shape"
- input: "ShuffleDataset/MatchingFiles:filenames:0"
- attr {
- key: "T"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT64
- }
- }
- }
- node_def {
- name: "ShuffleDataset/strided_slice/stack"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/strided_slice/stack_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/strided_slice/stack_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/strided_slice"
- op: "StridedSlice"
- input: "ShuffleDataset/Shape:output:0"
- input: "ShuffleDataset/strided_slice/stack:output:0"
- input: "ShuffleDataset/strided_slice/stack_1:output:0"
- input: "ShuffleDataset/strided_slice/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "ShuffleDataset/Maximum/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 1
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/Maximum"
- op: "Maximum"
- input: "ShuffleDataset/strided_slice:output:0"
- input: "ShuffleDataset/Maximum/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- }
- node_def {
- name: "ShuffleDataset/seed"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/seed2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset"
- op: "ShuffleDataset"
- input: "TensorSliceDataset:handle:0"
- input: "ShuffleDataset/Maximum:z:0"
- input: "ShuffleDataset/seed:output:0"
- input: "ShuffleDataset/seed2:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "reshuffle_each_iteration"
- value {
- b: true
- }
- }
- }
- node_def {
- name: "ShuffleDataset_1/buffer_size"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 1024
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_1/seed_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_1/seed2_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_1"
- op: "ShuffleDataset"
- input: "ShuffleDataset:handle:0"
- input: "ShuffleDataset_1/buffer_size:output:0"
- input: "ShuffleDataset_1/seed_1:output:0"
- input: "ShuffleDataset_1/seed2_1:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "reshuffle_each_iteration"
- value {
- b: true
- }
- }
- }
- node_def {
- name: "RepeatDataset/count"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: -1
- }
- }
- }
- }
- node_def {
- name: "RepeatDataset"
- op: "RepeatDataset"
- input: "ShuffleDataset_1:handle:0"
- input: "RepeatDataset/count:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- }
- node_def {
- name: "ExperimentalParallelInterleaveDataset/cycle_length"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 8
- }
- }
- }
- }
- node_def {
- name: "ExperimentalParallelInterleaveDataset/block_length"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 1
- }
- }
- }
- }
- node_def {
- name: "ExperimentalParallelInterleaveDataset/sloppy"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_BOOL
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_BOOL
- tensor_shape {
- }
- bool_val: true
- }
- }
- }
- }
- node_def {
- name: "ExperimentalParallelInterleaveDataset/buffer_output_elements"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 2
- }
- }
- }
- }
- node_def {
- name: "ExperimentalParallelInterleaveDataset/prefetch_input_elements"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 16
- }
- }
- }
- }
- node_def {
- name: "ExperimentalParallelInterleaveDataset"
- op: "ExperimentalParallelInterleaveDataset"
- input: "RepeatDataset:handle:0"
- input: "ExperimentalParallelInterleaveDataset/cycle_length:output:0"
- input: "ExperimentalParallelInterleaveDataset/block_length:output:0"
- input: "ExperimentalParallelInterleaveDataset/sloppy:output:0"
- input: "ExperimentalParallelInterleaveDataset/buffer_output_elements:output:0"
- input: "ExperimentalParallelInterleaveDataset/prefetch_input_elements:output:0"
- attr {
- key: "Targuments"
- value {
- list {
- }
- }
- }
- attr {
- key: "f"
- value {
- func {
- name: "tf_map_func_91295dea"
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_2/buffer_size_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 1024
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_2/seed_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_2/seed2_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset_2"
- op: "ShuffleDataset"
- input: "ExperimentalParallelInterleaveDataset:handle:0"
- input: "ShuffleDataset_2/buffer_size_1:output:0"
- input: "ShuffleDataset_2/seed_2:output:0"
- input: "ShuffleDataset_2/seed2_2:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_STRING
- }
- }
- }
- attr {
- key: "reshuffle_each_iteration"
- value {
- b: true
- }
- }
- }
- node_def {
- name: "ParallelMapDataset/num_parallel_calls"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- }
- int_val: 64
- }
- }
- }
- }
- node_def {
- name: "ParallelMapDataset"
- op: "ParallelMapDataset"
- input: "ShuffleDataset_2:handle:0"
- input: "ParallelMapDataset/num_parallel_calls:output:0"
- attr {
- key: "Targuments"
- value {
- list {
- }
- }
- }
- attr {
- key: "f"
- value {
- func {
- name: "tf_map_func_74b6b15c"
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 224
- }
- dim {
- size: 224
- }
- dim {
- size: 3
- }
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "PrefetchDataset/buffer_size_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 64
- }
- }
- }
- }
- node_def {
- name: "PrefetchDataset"
- op: "PrefetchDataset"
- input: "ParallelMapDataset:handle:0"
- input: "PrefetchDataset/buffer_size_2:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 224
- }
- dim {
- size: 224
- }
- dim {
- size: 3
- }
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "BatchDataset/batch_size"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 64
- }
- }
- }
- }
- node_def {
- name: "BatchDataset"
- op: "BatchDataset"
- input: "PrefetchDataset:handle:0"
- input: "BatchDataset/batch_size:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: -1
- }
- dim {
- size: 224
- }
- dim {
- size: 224
- }
- dim {
- size: 3
- }
- }
- shape {
- dim {
- size: -1
- }
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "FilterDataset/batch_size_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 64
- }
- }
- }
- }
- node_def {
- name: "FilterDataset"
- op: "FilterDataset"
- input: "BatchDataset:handle:0"
- input: "FilterDataset/batch_size_1:output:0"
- attr {
- key: "Targuments"
- value {
- list {
- type: DT_INT64
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: -1
- }
- dim {
- size: 224
- }
- dim {
- size: 224
- }
- dim {
- size: 3
- }
- }
- shape {
- dim {
- size: -1
- }
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- attr {
- key: "predicate"
- value {
- func {
- name: "tf_predicate_7089b845"
- }
- }
- }
- }
- node_def {
- name: "PrefetchDataset_1/buffer_size_3"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 2
- }
- }
- }
- }
- node_def {
- name: "PrefetchDataset_1"
- op: "PrefetchDataset"
- input: "FilterDataset:handle:0"
- input: "PrefetchDataset_1/buffer_size_3:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 64
- }
- dim {
- size: 224
- }
- dim {
- size: 224
- }
- dim {
- size: 3
- }
- }
- shape {
- dim {
- size: 64
- }
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- ret {
- key: "PrefetchDataset_1"
- value: "PrefetchDataset_1:handle:0"
- }
- }
-}
-)PREFIX";
-
- *dataset_name = "_make_dataset_5fa5e1f4";
-  std::function<void(FunctionDef*)> mutate_proto_func =
- [dataset_name, file_path](FunctionDef* fdef) {
- VLOG(1) << "Processsing function " << fdef->DebugString();
- if (std::string(fdef->signature().name()) != *dataset_name) return;
- // Change the input file pattern to `file_path`.
- bool found = false;
- for (auto& node_def : *fdef->mutable_node_def()) {
- if (node_def.name() != "TensorSliceDataset/MatchingFiles/pattern" &&
- node_def.name() != "ShuffleDataset/MatchingFiles/pattern")
- continue;
- DCHECK_EQ(node_def.op(), "Const");
- DCHECK_GT(node_def.attr().count("value"), 0);
- found = true;
- DCHECK_EQ(node_def.attr().at("value").tensor().string_val(0),
- "$(DATA_DIR)");
- VLOG(1) << "Setting the value of node_def "
- "TensorSliceDataset/MatchingFiles/pattern to "
- << file_path;
- auto* tensor = (*node_def.mutable_attr())["value"].mutable_tensor();
- tensor->clear_string_val();
- tensor->add_string_val(file_path);
- }
- VLOG(1) << "Rewrote function to " << fdef->DebugString();
- DCHECK(found);
- };
- return CreateFunctionsFromTextProto(func_def, &mutate_proto_func, status);
-#endif
-}
-#endif
-
-#if not defined(PLATFORM_WINDOWS)
-// On success, returns a set of TF_Function instances encoding a dataset
-// node stack that reads an MNIST file dataset from `file_path`, and
-// sets `dataset_name` to the created dataset name. The returned functions must
-// be deleted by calling TF_DeleteFunction.
-static std::vector<UniqueFuncPtr> CreateMNISTDatasetFunctions(
- const char* file_path, int batch_size, std::string* dataset_name,
- TF_Status* status) {
-#if defined(PLATFORM_WINDOWS)
- status->status = tensorflow::errors::Unimplemented(
- "TF_MakeFileBasedIteratorGetNextWithDatasets in the experimental C API "
- "is not implemented for Windows");
- return nullptr;
-#else
- const char* func_def = R"PREFIX(
-library {
- function {
- signature {
- name: "tf_map_func_521bfd08"
- input_arg {
- name: "arg0"
- type: DT_STRING
- }
- output_arg {
- name: "truediv"
- type: DT_FLOAT
- }
- description: "A wrapper for Defun that facilitates shape inference."
- }
- node_def {
- name: "DecodeRaw"
- op: "DecodeRaw"
- input: "arg0"
- attr {
- key: "little_endian"
- value {
- b: true
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "Cast"
- op: "Cast"
- input: "DecodeRaw:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "Reshape/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 784
- }
- }
- }
- }
- node_def {
- name: "Reshape"
- op: "Reshape"
- input: "Cast:y:0"
- input: "Reshape/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "truediv/y"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_FLOAT
- tensor_shape {
- }
- float_val: 255.0
- }
- }
- }
- }
- node_def {
- name: "truediv"
- op: "RealDiv"
- input: "Reshape:output:0"
- input: "truediv/y:output:0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- }
- ret {
- key: "truediv"
- value: "truediv:z:0"
- }
- }
- function {
- signature {
- name: "tf_map_func_9a08860d"
- input_arg {
- name: "arg0"
- type: DT_STRING
- }
- output_arg {
- name: "ToInt32"
- type: DT_INT32
- }
- description: "A wrapper for Defun that facilitates shape inference."
- }
- node_def {
- name: "DecodeRaw"
- op: "DecodeRaw"
- input: "arg0"
- attr {
- key: "little_endian"
- value {
- b: true
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_UINT8
- }
- }
- }
- node_def {
- name: "Reshape/shape"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- }
- }
- }
- }
- }
- }
- node_def {
- name: "Reshape"
- op: "Reshape"
- input: "DecodeRaw:output:0"
- input: "Reshape/shape:output:0"
- attr {
- key: "T"
- value {
- type: DT_UINT8
- }
- }
- attr {
- key: "Tshape"
- value {
- type: DT_INT32
- }
- }
- }
- node_def {
- name: "ToInt32"
- op: "Cast"
- input: "Reshape:output:0"
- attr {
- key: "DstT"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "SrcT"
- value {
- type: DT_UINT8
- }
- }
- }
- ret {
- key: "ToInt32"
- value: "ToInt32:y:0"
- }
- }
- function {
- signature {
- name: "tf_predicate_7089b845"
- input_arg {
- name: "arg0"
- type: DT_FLOAT
- }
- input_arg {
- name: "arg1"
- type: DT_INT32
- }
- input_arg {
- name: "Equal/Placeholder"
- type: DT_INT64
- }
- output_arg {
- name: "Equal"
- type: DT_BOOL
- }
- description: "A wrapper for Defun that facilitates shape inference."
- }
- node_def {
- name: "Shape"
- op: "Shape"
- input: "arg0"
- attr {
- key: "T"
- value {
- type: DT_FLOAT
- }
- }
- attr {
- key: "out_type"
- value {
- type: DT_INT64
- }
- }
- }
- node_def {
- name: "strided_slice/stack"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 0
- }
- }
- }
- }
- node_def {
- name: "strided_slice/stack_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "strided_slice/stack_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT32
- tensor_shape {
- dim {
- size: 1
- }
- }
- int_val: 1
- }
- }
- }
- }
- node_def {
- name: "strided_slice"
- op: "StridedSlice"
- input: "Shape:output:0"
- input: "strided_slice/stack:output:0"
- input: "strided_slice/stack_1:output:0"
- input: "strided_slice/stack_2:output:0"
- attr {
- key: "Index"
- value {
- type: DT_INT32
- }
- }
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "begin_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "ellipsis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "end_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "new_axis_mask"
- value {
- i: 0
- }
- }
- attr {
- key: "shrink_axis_mask"
- value {
- i: 1
- }
- }
- }
- node_def {
- name: "Equal"
- op: "Equal"
- input: "strided_slice:output:0"
- input: "Equal/Placeholder"
- attr {
- key: "T"
- value {
- type: DT_INT64
- }
- }
- }
- ret {
- key: "Equal"
- value: "Equal:z:0"
- }
- }
- function {
- signature {
- name: "_make_dataset_2451e43a"
- output_arg {
- name: "FilterDataset"
- type: DT_VARIANT
- }
- is_stateful: true
- }
- node_def {
- name: "FixedLengthRecordDataset/filenames"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "$(DATA_DIR)/train-images-idx3-ubyte"
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset/header_bytes"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 16
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset/record_bytes"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 784
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset/footer_bytes"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset/buffer_size"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 262144
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset"
- op: "FixedLengthRecordDataset"
- input: "FixedLengthRecordDataset/filenames:output:0"
- input: "FixedLengthRecordDataset/header_bytes:output:0"
- input: "FixedLengthRecordDataset/record_bytes:output:0"
- input: "FixedLengthRecordDataset/footer_bytes:output:0"
- input: "FixedLengthRecordDataset/buffer_size:output:0"
- }
- node_def {
- name: "MapDataset"
- op: "MapDataset"
- input: "FixedLengthRecordDataset:handle:0"
- attr {
- key: "Targuments"
- value {
- list {
- }
- }
- }
- attr {
- key: "f"
- value {
- func {
- name: "tf_map_func_521bfd08"
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 784
- }
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset_1/filenames_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: "$(DATA_DIR)/train-labels-idx1-ubyte"
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset_1/header_bytes_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 8
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset_1/record_bytes_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 1
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset_1/footer_bytes_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset_1/buffer_size_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 262144
- }
- }
- }
- }
- node_def {
- name: "FixedLengthRecordDataset_1"
- op: "FixedLengthRecordDataset"
- input: "FixedLengthRecordDataset_1/filenames_1:output:0"
- input: "FixedLengthRecordDataset_1/header_bytes_1:output:0"
- input: "FixedLengthRecordDataset_1/record_bytes_1:output:0"
- input: "FixedLengthRecordDataset_1/footer_bytes_1:output:0"
- input: "FixedLengthRecordDataset_1/buffer_size_1:output:0"
- }
- node_def {
- name: "MapDataset_1"
- op: "MapDataset"
- input: "FixedLengthRecordDataset_1:handle:0"
- attr {
- key: "Targuments"
- value {
- list {
- }
- }
- }
- attr {
- key: "f"
- value {
- func {
- name: "tf_map_func_9a08860d"
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "ZipDataset"
- op: "ZipDataset"
- input: "MapDataset:handle:0"
- input: "MapDataset_1:handle:0"
- attr {
- key: "N"
- value {
- i: 2
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 784
- }
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "CacheDataset/filename"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_STRING
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_STRING
- tensor_shape {
- }
- string_val: ""
- }
- }
- }
- }
- node_def {
- name: "CacheDataset"
- op: "CacheDataset"
- input: "ZipDataset:handle:0"
- input: "CacheDataset/filename:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 784
- }
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "RepeatDataset/count"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: -1
- }
- }
- }
- }
- node_def {
- name: "RepeatDataset"
- op: "RepeatDataset"
- input: "CacheDataset:handle:0"
- input: "RepeatDataset/count:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 784
- }
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/buffer_size_2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 50000
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/seed"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset/seed2"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: 0
- }
- }
- }
- }
- node_def {
- name: "ShuffleDataset"
- op: "ShuffleDataset"
- input: "RepeatDataset:handle:0"
- input: "ShuffleDataset/buffer_size_2:output:0"
- input: "ShuffleDataset/seed:output:0"
- input: "ShuffleDataset/seed2:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: 784
- }
- }
- shape {
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- attr {
- key: "reshuffle_each_iteration"
- value {
- b: true
- }
- }
- }
- node_def {
- name: "BatchDataset/batch_size"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: -123
- }
- }
- }
- }
- node_def {
- name: "BatchDataset"
- op: "BatchDataset"
- input: "ShuffleDataset:handle:0"
- input: "BatchDataset/batch_size:output:0"
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: -1
- }
- dim {
- size: 784
- }
- }
- shape {
- dim {
- size: -1
- }
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- }
- node_def {
- name: "FilterDataset/batch_size_1"
- op: "Const"
- attr {
- key: "dtype"
- value {
- type: DT_INT64
- }
- }
- attr {
- key: "value"
- value {
- tensor {
- dtype: DT_INT64
- tensor_shape {
- }
- int64_val: -123
- }
- }
- }
- }
- node_def {
- name: "FilterDataset"
- op: "FilterDataset"
- input: "BatchDataset:handle:0"
- input: "FilterDataset/batch_size_1:output:0"
- attr {
- key: "Targuments"
- value {
- list {
- type: DT_INT64
- }
- }
- }
- attr {
- key: "output_shapes"
- value {
- list {
- shape {
- dim {
- size: -1
- }
- dim {
- size: 784
- }
- }
- shape {
- dim {
- size: -1
- }
- }
- }
- }
- }
- attr {
- key: "output_types"
- value {
- list {
- type: DT_FLOAT
- type: DT_INT32
- }
- }
- }
- attr {
- key: "predicate"
- value {
- func {
- name: "tf_predicate_7089b845"
- }
- }
- }
- }
- ret {
- key: "FilterDataset"
- value: "FilterDataset:handle:0"
- }
- }
-}
-)PREFIX";
-
- *dataset_name = "_make_dataset_2451e43a";
-  std::function<void(FunctionDef*)> mutate_proto_func =
- [dataset_name, file_path, batch_size](FunctionDef* fdef) {
- VLOG(1) << "Processsing function " << fdef->DebugString();
- if (std::string(fdef->signature().name()) != *dataset_name) return;
- // Change the input file pattern to `file_path`.
- bool found_file_path = false, found_batch_size = false;
- // `node_def` may be mutated.
- for (auto& node_def : *fdef->mutable_node_def()) {
- if (node_def.name() == "FixedLengthRecordDataset/filenames" ||
- node_def.name() == "FixedLengthRecordDataset_1/filenames_1") {
- DCHECK_EQ(node_def.op(), "Const");
- DCHECK_GT(node_def.attr().count("value"), 0);
- found_file_path = true;
- // Replace $(DATA_DIR)/foo with /foo
- // TODO(hongm): Use StringPiece manipulation for better efficiency.
- const std::string cur_value =
- node_def.attr().at("value").tensor().string_val(0);
- const std::string pattern = "$(DATA_DIR)";
- DCHECK_EQ(cur_value.compare(0, pattern.length(), pattern), 0);
- const std::string new_value =
- file_path + cur_value.substr(pattern.length());
- VLOG(1) << "Setting the value of node_def " << node_def.name()
- << " to " << new_value;
- auto* tensor = (*node_def.mutable_attr())["value"].mutable_tensor();
- tensor->clear_string_val();
- tensor->add_string_val(new_value);
- } else if (node_def.name() == "BatchDataset/batch_size" ||
- node_def.name() == "FilterDataset/batch_size_1") {
- DCHECK_EQ(node_def.op(), "Const");
- DCHECK_GT(node_def.attr().count("value"), 0);
- found_batch_size = true;
- // Replace $(BATCH_SIZE) with `batch_size`
- DCHECK_EQ(node_def.attr().at("value").tensor().int64_val(0), -123);
- VLOG(1) << "Setting the batch size attr value of node_def "
- << node_def.name() << " to " << batch_size;
- auto* tensor = (*node_def.mutable_attr())["value"].mutable_tensor();
- tensor->clear_int64_val();
- tensor->add_int64_val(batch_size);
- }
- }
- VLOG(1) << "Rewrote function to " << fdef->DebugString();
- DCHECK(found_file_path);
- DCHECK(found_batch_size);
- };
- return CreateFunctionsFromTextProto(func_def, &mutate_proto_func, status);
-#endif
-}
-#endif
-
-// Adds the input functions to `graph`. On success, returns the created
-// IteratorGetNext node.
-static TF_Operation* AddDatasetFunctionAndIteratorNodesToGraph(
-    const std::vector<UniqueFuncPtr>& funcs, const std::string& dataset_name,
-    const std::vector<tensorflow::DataType>& output_types,
-    const std::vector<tensorflow::TensorShapeProto>& output_shapes,
- TF_Graph* graph, TF_Status* status) {
- DCHECK(!dataset_name.empty());
- for (auto& func : funcs) {
- TF_GraphCopyFunction(graph, func.get(), /*gradient*/ nullptr, status);
- if (!status->status.ok()) {
- return nullptr;
- }
- }
-
- tensorflow::mutex_lock c(graph->mu);
-
- tensorflow::NameAttrList func;
- func.set_name(dataset_name);
- // Run the iterator node on CPU.
- Node* oneshot_iterator_node;
- tensorflow::Status s = NodeBuilder("OneShotIterator", "OneShotIterator")
- .Device("/device:CPU:0")
- .Attr("container", "")
- .Attr("dataset_factory", func)
- .Attr("output_types", output_types)
- .Attr("output_shapes", output_shapes)
- .Attr("shared_name", "")
- .Finalize(&graph->graph, &oneshot_iterator_node);
- if (!s.ok()) {
- status->status = s;
- return nullptr;
- }
- // Run shape inference function for each newly added node, so that more
- // subsequent nodes can be added to the graph via C API (TF_NewOperation()).
- s = graph->refiner.AddNode(oneshot_iterator_node);
- if (!s.ok()) {
- status->status = s;
- return nullptr;
- }
-
- // Run the iterator node on CPU.
- Node* getnext_node;
- s = NodeBuilder("IteratorGetNext", "IteratorGetNext")
- .Input(oneshot_iterator_node)
- .Device("/device:CPU:0")
- .Attr("output_types", output_types)
- .Attr("output_shapes", output_shapes)
- .Finalize(&graph->graph, &getnext_node);
- if (!s.ok()) {
- status->status = s;
- return nullptr;
- }
- // Run shape inference function for each newly added node, so that more
- // subsequent nodes can be added to the graph via C API (TF_NewOperation()).
- s = graph->refiner.AddNode(getnext_node);
- if (!s.ok()) {
- status->status = s;
- return nullptr;
- }
-
- VLOG(1) << "Output graph: " << graph->graph.ToGraphDefDebug().DebugString();
- return ToTF_Operation(getnext_node);
-}
-
-TF_Operation* TF_MakeFakeIteratorGetNextWithDatasets(TF_Graph* graph,
- TF_Status* status) {
- tensorflow::Status s;
-
- std::string dataset_name;
- UniqueFuncPtr result_func = CreateFakeDatasetFunction(&dataset_name, status);
- if (!status->status.ok()) {
- return nullptr;
- }
-
-  std::vector<UniqueFuncPtr> funcs;
- funcs.push_back(std::move(result_func));
-  std::vector<tensorflow::TensorShapeProto> output_shape_list;
- output_shape_list.push_back(tensorflow::TensorShapeProto());
- auto* getnext_node = AddDatasetFunctionAndIteratorNodesToGraph(
- funcs, dataset_name, {tensorflow::DT_FLOAT}, output_shape_list, graph,
- status);
- if (!status->status.ok()) {
- return nullptr;
- }
-
- return getnext_node;
-}
-
-TF_Operation* TF_MakeFileBasedIteratorGetNextWithDatasets(
- TF_Graph* graph, const char* file_path, int batch_size,
- unsigned char is_mnist, TF_Status* status) {
-#if defined(PLATFORM_WINDOWS)
- // TODO(ashankar): get these functions working on Windows.
- status->status = tensorflow::errors::Unimplemented(
- "TF_MakeFileBasedIteratorGetNextWithDatasets in the experimental C API "
- "is not implemented for Windows");
- return nullptr;
-#else
- tensorflow::Status s;
-
- std::string dataset_name;
- const auto& funcs =
- is_mnist
- ? CreateMNISTDatasetFunctions(file_path, batch_size, &dataset_name,
- status)
- : CreateImagenetDatasetFunctions(file_path, &dataset_name, status);
- if (!status->status.ok()) {
- return nullptr;
- }
-
-  std::vector<tensorflow::TensorShapeProto> output_shape_list;
- // batch_size X 224 X 224 X 3
- auto image_shape = tensorflow::TensorShapeProto();
- image_shape.add_dim()->set_size(batch_size);
- if (is_mnist) {
- image_shape.add_dim()->set_size(784);
- } else {
- image_shape.add_dim()->set_size(224);
- image_shape.add_dim()->set_size(224);
- image_shape.add_dim()->set_size(3);
- }
- output_shape_list.push_back(image_shape);
-
- // batch_size
- auto label_shape = tensorflow::TensorShapeProto();
- label_shape.add_dim()->set_size(batch_size);
- output_shape_list.push_back(label_shape);
- auto* getnext_node = AddDatasetFunctionAndIteratorNodesToGraph(
- funcs, dataset_name, {tensorflow::DT_FLOAT, tensorflow::DT_INT32},
- output_shape_list, graph, status);
- if (!status->status.ok()) {
- return nullptr;
- }
-
- tensorflow::mutex_lock c(graph->mu);
- VLOG(1) << "The extended graph: "
- << graph->graph.ToGraphDefDebug().DebugString();
-
- return getnext_node;
-#endif
-}
-
TF_Tensor* TF_DequeueNamedTensor(TF_Session* session, int tensor_id,
TF_Status* status) {
assert(session);
@@ -8939,7 +695,7 @@ tensorflow::Status EnableCollectiveOps(const tensorflow::ServerDef& server_def,
LOG_AND_RETURN_IF_ERROR(grpc_server->Start());
- LOG_AND_RETURN_IF_ERROR(ctx->context.StoreCollectiveOpsServer(
+ LOG_AND_RETURN_IF_ERROR(ctx->context->StoreCollectiveOpsServer(
std::move(server), grpc_server->worker_env()->device_mgr,
grpc_server->worker_env()->collective_executor_mgr));
@@ -9062,8 +818,8 @@ TF_Operation* TFE_AddEagerOpToGraph(TFE_Op* op, TFE_TraceContext* trace_ctx,
const auto& op_type = op->operation.Name();
auto op_name =
tensorflow::strings::StrCat(op_type, "_", trace_ctx->node_counter++);
- auto* desc =
- TF_NewOperation(trace_ctx->graph, op_type.c_str(), op_name.c_str());
+  std::unique_ptr<TF_OperationDescription> desc(
+ TF_NewOperation(trace_ctx->graph, op_type.c_str(), op_name.c_str()));
VLOG(1) << "Adding attrs.";
tensorflow::AttrValueMap attrs;
@@ -9077,30 +833,42 @@ TF_Operation* TFE_AddEagerOpToGraph(TFE_Op* op, TFE_TraceContext* trace_ctx,
size_t inputIndex = 0;
const tensorflow::OpDef& op_def = desc->node_builder.op_def();
for (const tensorflow::OpDef::ArgDef& input_arg : op_def.input_arg()) {
- // TODO(bgogul): Add support for number attributes.
- DCHECK(input_arg.number_attr().empty())
- << "Number attributes is not implemented yet.";
- if (input_arg.type_list_attr().empty()) {
+ if (input_arg.type_list_attr().empty() && input_arg.number_attr().empty()) {
auto symbolic_input =
getOrCreateSymbolicTensor(trace_ctx, inputs[inputIndex++], status);
if (!status->status.ok()) return nullptr;
- TF_AddInput(desc, symbolic_input);
+ TF_AddInput(desc.get(), symbolic_input);
continue;
}
- const std::string& type_list_attr = input_arg.type_list_attr();
- const auto& attr_value = attrs[type_list_attr];
- DCHECK(attr_value.value_case() == tensorflow::AttrValue::kList)
- << "Type list attribute should be a list!";
-    std::vector<TF_Output> list_inputs(attr_value.list().type_size());
+ size_t list_size = 0;
+ if (!input_arg.type_list_attr().empty()) {
+ const std::string& type_list_attr = input_arg.type_list_attr();
+ const auto& attr_value = attrs[type_list_attr];
+ CHECK(attr_value.value_case() == tensorflow::AttrValue::kList)
+ << "Type list attribute should be a list!";
+ list_size = attr_value.list().type_size();
+ } else {
+ CHECK(!input_arg.number_attr().empty());
+ const auto& attr_value = attrs[input_arg.number_attr()];
+ CHECK(attr_value.value_case() == tensorflow::AttrValue::kI)
+ << "Number attribute should be int!";
+ if (attr_value.i() < 0) {
+ status->status = tensorflow::errors::Internal(
+ "Number attribute for length should be >=0!");
+ return nullptr;
+ }
+ list_size = attr_value.i();
+ }
+    std::vector<TF_Output> list_inputs(list_size);
for (TF_Output& list_input : list_inputs) {
list_input =
getOrCreateSymbolicTensor(trace_ctx, inputs[inputIndex++], status);
if (!status->status.ok()) return nullptr;
}
- TF_AddInputList(desc, list_inputs.data(), list_inputs.size());
+ TF_AddInputList(desc.get(), list_inputs.data(), list_inputs.size());
}
- auto* graph_op = TF_FinishOperation(desc, status);
+ auto* graph_op = TF_FinishOperation(desc.release(), status);
if (!status->status.ok()) return nullptr;
VLOG(1) << "Op finalized; setting return tensors.";
diff --git a/tensorflow/c/c_api_experimental.h b/tensorflow/c/c_api_experimental.h
index 8d1a8b82fba..795768a1415 100644
--- a/tensorflow/c/c_api_experimental.h
+++ b/tensorflow/c/c_api_experimental.h
@@ -62,6 +62,20 @@ extern "C" {
TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
unsigned char enable);
+// Set XLA's internal BuildXlaOpsPassFlags.tf_xla_enable_lazy_compilation to the
+// value of 'enable'. Also returns the original value of that flag.
+//
+// Use in tests to allow XLA to fall back to TF classic. This has global effect.
+TF_CAPI_EXPORT unsigned char TF_SetXlaEnableLazyCompilation(
+ unsigned char enable);
+
+// Sets XLA's auto jit mode according to the specified string, which is parsed
+// as if passed in XLA_FLAGS. This has global effect.
+TF_CAPI_EXPORT void TF_SetXLaAutoJitMode(const char* mode);
+
+// Sets XLA's minimum cluster size. This has global effect.
+TF_CAPI_EXPORT void TF_SetXlaMinClusterSize(int size);
+
// Create a serialized tensorflow.ConfigProto proto, where:
//
// a) ConfigProto.optimizer_options.global_jit_level is set to to ON_1 if
@@ -93,26 +107,6 @@ TF_CAPI_EXPORT extern const char* TF_GraphDebugString(TF_Graph* graph,
TF_CAPI_EXPORT extern char* TF_FunctionDebugString(TF_Function* func,
size_t* len);
-// Creates a stack of data set + iterator nodes, currently hard-coded to return
-// a sequence of 3 float values <42.0, 43.0, 44.0> over 3 calls. On success,
-// returns the IteratorGetNext node, which caller can run or feed into an node.
-//
-// TODO(hongm): Extend the API to allow customization of the nodes created.
-TF_CAPI_EXPORT extern TF_Operation* TF_MakeFakeIteratorGetNextWithDatasets(
- TF_Graph* graph, TF_Status* status);
-
-// Similar to the above API, except that the returned iterator reads the
-// file based dataset from `file_path`.
-// If `is_mnist` is 0, the dataset corresponds to ImageNet.
-// The iterators outputs 2 tensors:
-// - A float tensor of shape `batch_size` X 784 when `is_mnist` is non-zero, or
-// `batch_size` X 224 X 224 X 3 otherwise.
-// - An int32 tensor of shape `batch_size`
-// TODO(hongm): Extend the API to allow customization of the nodes created.
-TF_CAPI_EXPORT extern TF_Operation* TF_MakeFileBasedIteratorGetNextWithDatasets(
- TF_Graph* graph, const char* file_path, int batch_size,
- unsigned char is_mnist, TF_Status* status);
-
// On success, dequeues a tensor from a TF-managed FifoQueue given by
// `tensor_id`, associated with `session`. There must be a graph node named
// "fifo_queue_dequeue_", to be executed by this API call.
diff --git a/tensorflow/c/c_api_experimental_test.cc b/tensorflow/c/c_api_experimental_test.cc
index 2c92e38f03a..6eb289107c5 100644
--- a/tensorflow/c/c_api_experimental_test.cc
+++ b/tensorflow/c/c_api_experimental_test.cc
@@ -27,100 +27,6 @@ limitations under the License.
namespace tensorflow {
namespace {
-void TestFakeIteratorStack() {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
- TF_Operation* get_next = TF_MakeFakeIteratorGetNextWithDatasets(graph, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-
- CSession csession(graph, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-
- // Run the graph.
- const float base_value = 42.0;
- for (int i = 0; i < 3; ++i) {
- csession.SetOutputs({get_next});
- csession.Run(s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Tensor* out = csession.output_tensor(0);
- ASSERT_TRUE(out != nullptr);
- ASSERT_EQ(TF_FLOAT, TF_TensorType(out));
- ASSERT_EQ(0, TF_NumDims(out)); // scalar
- ASSERT_EQ(sizeof(float), TF_TensorByteSize(out));
- float* output_contents = static_cast<float*>(TF_TensorData(out));
- ASSERT_EQ(base_value + i, *output_contents);
- }
-
- // This should error out since we've exhausted the iterator.
- csession.Run(s);
- ASSERT_EQ(TF_OUT_OF_RANGE, TF_GetCode(s)) << TF_Message(s);
-
- // Clean up
- csession.CloseAndDelete(s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
-}
-
-TEST(CAPI_EXPERIMENTAL, FakeIteratorGetNext) { TestFakeIteratorStack(); }
-
-TEST(CAPI_EXPERIMENTAL, ImagenetIteratorGetNext) {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
- const string file_path = tensorflow::io::JoinPath(
- tensorflow::testing::TensorFlowSrcRoot(), "c/testdata/tf_record");
- VLOG(1) << "data file path is " << file_path;
- const int batch_size = 64;
- TF_Operation* get_next = TF_MakeFileBasedIteratorGetNextWithDatasets(
- graph, file_path.c_str(), batch_size, /*is_mnist*/ false, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-
- CSession csession(graph, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-
- // Run the graph.
- // The two output tensors should look like:
- // Tensor("IteratorGetNext:0", shape=(batch_size, 224, 224, 3), dtype=float32)
- // Tensor("IteratorGetNext:1", shape=(batch_size, ), dtype=int32)
- for (int i = 0; i < 3; ++i) {
- LOG(INFO) << "Running iter " << i;
- csession.SetOutputs({{get_next, 0}, {get_next, 1}});
- csession.Run(s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-
- {
- TF_Tensor* image = csession.output_tensor(0);
- ASSERT_TRUE(image != nullptr);
- ASSERT_EQ(TF_FLOAT, TF_TensorType(image));
- // Confirm shape is 224 X 224 X 3
- ASSERT_EQ(4, TF_NumDims(image));
- ASSERT_EQ(batch_size, TF_Dim(image, 0));
- ASSERT_EQ(224, TF_Dim(image, 1));
- ASSERT_EQ(224, TF_Dim(image, 2));
- ASSERT_EQ(3, TF_Dim(image, 3));
- ASSERT_EQ(sizeof(float) * batch_size * 224 * 224 * 3,
- TF_TensorByteSize(image));
- }
-
- {
- TF_Tensor* label = csession.output_tensor(1);
- ASSERT_TRUE(label != nullptr);
- ASSERT_EQ(TF_INT32, TF_TensorType(label));
- ASSERT_EQ(1, TF_NumDims(label));
- ASSERT_EQ(batch_size, TF_Dim(label, 0));
- ASSERT_EQ(sizeof(int32) * batch_size, TF_TensorByteSize(label));
- }
- }
-
- // Clean up
- csession.CloseAndDelete(s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
-}
-
TEST(CAPI_EXPERIMENTAL, GetServerDefTest) {
const string expected_text_proto(R"(cluster {
job {
@@ -470,5 +376,60 @@ TEST_F(AddEagerOpToGraphTest, ListInputsAreAddedCorrectly) {
TFE_DeleteOp(identityn);
}
+TEST_F(AddEagerOpToGraphTest, NumberAttributesAreHandledCorrectly) {
+ TFE_TensorHandle* matrix = TestMatrixTensorHandle();
+ TFE_TensorHandle* axis = TestAxisTensorHandle();
+ TFE_Op* concatv2 = TFE_NewOp(eager_ctx_, "ConcatV2", status_);
+ CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
+ TFE_OpSetAttrType(concatv2, "T", TF_FLOAT);
+ TFE_OpSetAttrInt(concatv2, "N", 2);
+ TFE_OpSetAttrType(concatv2, "Tidx", TF_INT32);
+ constexpr size_t kNumInputs = 2;
+ for (size_t i = 0; i < kNumInputs; ++i) {
+ TFE_OpAddInput(concatv2, matrix, status_);
+ CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
+ }
+ TFE_OpAddInput(concatv2, axis, status_);
+ CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
+ AddEagerOpToGraphAndCheck(
+ concatv2, [this, kNumInputs](TF_Operation* graph_op) {
+ EXPECT_EQ(TF_OperationNumInputs(graph_op), kNumInputs + 1);
+ int64_t attrN;
+ TF_OperationGetAttrInt(graph_op, "N", &attrN, status_);
+ CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
+ EXPECT_EQ(attrN, kNumInputs);
+ EXPECT_EQ(TF_OperationInputListLength(graph_op, "values", status_),
+ kNumInputs);
+ CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
+ });
+ TFE_DeleteTensorHandle(axis);
+ TFE_DeleteTensorHandle(matrix);
+ TFE_DeleteOp(concatv2);
+}
+
+TEST_F(AddEagerOpToGraphTest,
+ GeneratesInternalErrorsForInvalidNumberAttributes) {
+ TFE_TensorHandle* matrix = TestMatrixTensorHandle();
+ TFE_TensorHandle* axis = TestAxisTensorHandle();
+ int num_retvals = 5;
+ TFE_TensorHandle* retvals[5];
+
+ TFE_Op* concatv2 = TFE_NewOp(eager_ctx_, "ConcatV2", status_);
+ CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
+ TFE_OpSetAttrType(concatv2, "T", TF_FLOAT);
+ TFE_OpSetAttrInt(concatv2, "N", -1);
+ TFE_OpSetAttrType(concatv2, "Tidx", TF_INT32);
+
+ TF_Operation* graph_op = TFE_AddEagerOpToGraph(concatv2, trace_ctx_, retvals,
+ &num_retvals, status_);
+ EXPECT_EQ(graph_op, nullptr);
+ EXPECT_EQ(status_->status.error_message(),
+ "Number attribute for length should be >=0!");
+
+ TFE_DeleteOp(concatv2);
+ TFE_DeleteTensorHandle(axis);
+ TFE_DeleteTensorHandle(matrix);
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/c/c_api_function.cc b/tensorflow/c/c_api_function.cc
index 03d65ecefd4..5a82cb0c48f 100644
--- a/tensorflow/c/c_api_function.cc
+++ b/tensorflow/c/c_api_function.cc
@@ -13,12 +13,12 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/c/c_api_internal.h"
-
#include
#include
#include
+#include "absl/strings/match.h"
+#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
@@ -295,7 +295,8 @@ Status FillFunctionBody(
}
// Graph to FunctionDef conversion. This code is closely modeled on the Python
-// code in tensorflow/python/framework/function.py.
+// function graph_to_function_def(), which is located in
+// tensorflow/python/framework/graph_to_function_def.py.
Status GraphToFunctionDef(const Graph& fn_body, const string& fn_name,
bool append_hash_to_fn_name,
const std::vector& body_nodes,
@@ -352,6 +353,16 @@ Status GraphToFunctionDef(const Graph& fn_body, const string& fn_name,
argdef->set_type(node->output_type(idx));
const string& input_name = node_names.GetInputName(node->name());
argdef->set_name(input_name);
+ auto& arg_attrs = (*fdef->mutable_arg_attr())[i];
+ for (const auto& attr : node->attrs()) {
+ // Only copy internal attributes. These attributes will be applied to
+ // _Arg/Placeholder nodes when this FunctionDef is converted back to a
+ // graph, and normal node attributes cannot be applied to those
+ // _Arg/Placeholder nodes.
+ if (absl::StartsWith(attr.first, "_")) {
+ arg_attrs.mutable_attr()->insert(attr);
+ }
+ }
tensor_renaming[strings::StrCat(node->name(), ":", idx)] = input_name;
}
@@ -442,12 +453,21 @@ Status GraphToFunctionDef(const Graph& fn_body, const string& fn_name,
} else {
signature_name = control_outputs[i]->name();
}
+ if (signature_name.empty()) {
+ return errors::InvalidArgument("Control output name must be not empty");
+ }
if (!control_output_names_set.insert(signature_name).second) {
return errors::InvalidArgument("Repeated control output name: ",
signature_name);
}
+ const string control_output_node =
+ node_names.Lookup(control_outputs[i]->name());
+ if (control_output_node.empty()) {
+ return errors::InvalidArgument(
+ "Control output node name must not be empty");
+ }
fdef->mutable_signature()->add_control_output(signature_name);
- (*fdef->mutable_control_ret())[signature_name] = control_outputs[i]->name();
+ (*fdef->mutable_control_ret())[signature_name] = control_output_node;
}
return Status::OK();
@@ -572,13 +592,13 @@ TF_Function* TF_GraphToFunctionWithControlOutputs(
std::unordered_map> input_nodes;
status->status = tensorflow::ProcessInputs(fn_body, fn_name, ninputs, inputs,
&input_tensors, &input_nodes);
- if (!status->status.ok()) return nullptr;
+ if (TF_GetCode(status) != TF_OK) return nullptr;
// Process outputs.
std::vector output_tensors;
status->status = tensorflow::ProcessOutputs(fn_body, fn_name, noutputs,
outputs, &output_tensors);
- if (!status->status.ok()) return nullptr;
+ if (TF_GetCode(status) != TF_OK) return nullptr;
// Process output names.
std::vector output_names_vec;
@@ -602,7 +622,7 @@ TF_Function* TF_GraphToFunctionWithControlOutputs(
std::vector body_nodes;
status->status = tensorflow::ComputeBodyNodes(
fn_body, fn_name, num_opers, opers, input_nodes, &body_nodes);
- if (!status->status.ok()) return nullptr;
+ if (TF_GetCode(status) != TF_OK) return nullptr;
// Compute body nodes.
std::vector control_output_nodes;
@@ -617,7 +637,7 @@ TF_Function* TF_GraphToFunctionWithControlOutputs(
fn_body->graph, fn_name, append_hash_to_fn_name != 0, body_nodes,
input_tensors, output_tensors, output_names_vec, control_output_nodes,
control_output_names_vec, description, &tf_function->fdef);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
TF_DeleteFunction(tf_function);
return nullptr;
}
diff --git a/tensorflow/c/c_api_function_test.cc b/tensorflow/c/c_api_function_test.cc
index 946f8c4a2c3..760f14cac5b 100644
--- a/tensorflow/c/c_api_function_test.cc
+++ b/tensorflow/c/c_api_function_test.cc
@@ -1278,6 +1278,46 @@ TEST_F(CApiFunctionTest, GraphToFunctionDefWithPlaceholderAttr) {
EXPECT_EQ(func_->fdef.signature().attr(1).type(), "int");
}
+void NodeWithAttrHelper(TF_Graph* graph, TF_Status* s, const char* name,
+ const char* attr_name, const char* attr_value,
+ TF_Operation** op) {
+ TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", name);
+ TF_SetAttrType(desc, "dtype", TF_INT32);
+ TF_SetAttrString(desc, attr_name, attr_value, strlen(attr_value));
+ *op = TF_FinishOperation(desc, s);
+ ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+ ASSERT_NE(*op, nullptr);
+}
+
+TEST_F(CApiFunctionTest, GraphToFunctionDefWithArgAttr) {
+ std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> func_graph(
+ TF_NewGraph(), TF_DeleteGraph);
+ std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> s(TF_NewStatus(),
+ TF_DeleteStatus);
+
+ TF_Operation* node;
+ NodeWithAttrHelper(func_graph.get(), s.get(), "node", "_test_attr", "value",
+ &node);
+
+ TF_Output inputs[] = {{node, 0}};
+ TF_Output outputs[] = {};
+ func_ = TF_GraphToFunction(
+ func_graph.get(), "func", /*append_hash_to_fn_name=*/false, -1,
+ /*opers=*/nullptr, 1, inputs, 0, outputs,
+ /*output_names=*/nullptr,
+ /*opts=*/nullptr, /*description=*/nullptr, s.get());
+ ASSERT_EQ(TF_OK, TF_GetCode(s.get())) << TF_Message(s.get());
+ ASSERT_NE(func_, nullptr);
+
+ // Verify that FunctionDef ArgDef has attributes.
+ ASSERT_EQ(func_->fdef.arg_attr_size(), 1);
+ auto arg_attrs = func_->fdef.arg_attr().find(0);
+ ASSERT_NE(arg_attrs, func_->fdef.arg_attr().end());
+ auto iter = arg_attrs->second.attr().find("_test_attr");
+ ASSERT_NE(iter, arg_attrs->second.attr().end());
+ EXPECT_EQ(iter->second.s(), "value");
+}
+
TEST_F(CApiFunctionTest, SetGradientAndRun) {
// Define the function and its grad
DefineFunction(func_name_, &func_);
diff --git a/tensorflow/c/c_api_internal.h b/tensorflow/c/c_api_internal.h
index 9a69c58718b..f02160044c5 100644
--- a/tensorflow/c/c_api_internal.h
+++ b/tensorflow/c/c_api_internal.h
@@ -24,8 +24,10 @@ limitations under the License.
#include
#include
+// clang-format off
// Required for IS_MOBILE_PLATFORM
-#include "tensorflow/core/platform/platform.h" // NO_LINT
+#include "tensorflow/core/platform/platform.h"
+// clang-format on
#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
#include "tensorflow/core/framework/op_gen_lib.h"
diff --git a/tensorflow/c/checkpoint_reader.cc b/tensorflow/c/checkpoint_reader.cc
index d3311f0cd06..1a92efb89bc 100644
--- a/tensorflow/c/checkpoint_reader.cc
+++ b/tensorflow/c/checkpoint_reader.cc
@@ -29,8 +29,7 @@ namespace checkpoint {
class TensorSliceReader;
-CheckpointReader::CheckpointReader(const string& filename,
- TF_Status* out_status)
+CheckpointReader::CheckpointReader(const string& filename, TF_Status* status)
: reader_(nullptr),
v2_reader_(nullptr),
var_to_shape_map_(nullptr),
@@ -43,7 +42,7 @@ CheckpointReader::CheckpointReader(const string& filename,
v2_reader_.reset(
new BundleReader(Env::Default(), filename /* prefix to a V2 ckpt */));
if (!v2_reader_->status().ok()) {
- Set_TF_Status_from_Status(out_status, v2_reader_->status());
+ Set_TF_Status_from_Status(status, v2_reader_->status());
return;
}
auto result = BuildV2VarMaps();
@@ -52,7 +51,7 @@ CheckpointReader::CheckpointReader(const string& filename,
} else {
reader_.reset(new TensorSliceReader(filename));
if (!reader_->status().ok()) {
- Set_TF_Status_from_Status(out_status, reader_->status());
+ Set_TF_Status_from_Status(status, reader_->status());
return;
}
var_to_shape_map_.reset(
diff --git a/tensorflow/c/checkpoint_reader.h b/tensorflow/c/checkpoint_reader.h
index 91654c8d4fb..0e613db7719 100644
--- a/tensorflow/c/checkpoint_reader.h
+++ b/tensorflow/c/checkpoint_reader.h
@@ -39,7 +39,7 @@ class TensorSliceReader;
// variables.
class CheckpointReader {
public:
- CheckpointReader(const string& filepattern, TF_Status* out_status);
+ CheckpointReader(const string& filename, TF_Status* status);
bool HasTensor(const string& name) const;
const string DebugString() const;
diff --git a/tensorflow/c/eager/BUILD b/tensorflow/c/eager/BUILD
index 445b2cd2581..8c2be2af3e0 100644
--- a/tensorflow/c/eager/BUILD
+++ b/tensorflow/c/eager/BUILD
@@ -1,4 +1,5 @@
# Experimental extensions to the C API for eager execution of kernels.
+
licenses(["notice"]) # Apache 2.0
load(
@@ -70,6 +71,7 @@ tf_cuda_library(
"//tensorflow/core/distributed_runtime:remote_device",
"//tensorflow/core/distributed_runtime:server_lib",
"//tensorflow/core/distributed_runtime:worker_env",
+ "//tensorflow/core/profiler/lib:profiler_eager_lib",
"//tensorflow/core/profiler/lib:profiler_session",
"//tensorflow/core:gpu_runtime",
],
@@ -110,6 +112,7 @@ tf_cuda_library(
"//tensorflow/core/distributed_runtime/rpc:grpc_worker_service",
"//tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
+ "//tensorflow/core/profiler/lib:profiler_eager_lib",
"//tensorflow/core/profiler/lib:profiler_session",
],
)
@@ -200,6 +203,7 @@ tf_cuda_library(
"//conditions:default": [],
}) + [
"@com_google_absl//absl/memory",
+ "//tensorflow/c:tf_status_helper",
"//tensorflow/core/common_runtime/eager:eager_operation",
"//tensorflow/core/distributed_runtime/eager:eager_client",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
@@ -236,7 +240,6 @@ tf_cuda_cc_test(
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
- "//tensorflow/core/profiler:protos_all_cc",
"@com_google_absl//absl/strings",
],
)
@@ -256,3 +259,22 @@ filegroup(
srcs = ["c_api.h"],
visibility = ["//tensorflow:__subpackages__"],
)
+
+# TODO(karllessard): only used by //tensorflow/core:mobile_srcs_only_runtime
+# right now, remove this public rule when no longer needed (it should be
+# replaced by TF Lite)
+filegroup(
+ name = "srcs",
+ srcs = glob(
+ [
+ "*.cc",
+ "*.h",
+ ],
+ exclude = [
+ "c_api_experimental.cc",
+ "c_api_experimental.h",
+ "*test*",
+ ],
+ ),
+ visibility = ["//visibility:public"],
+)
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
old mode 100755
new mode 100644
index 9509135e239..9c2d1dd38fd
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -21,11 +21,18 @@ limitations under the License.
#include
#include
+// clang-format off
+// Required for IS_MOBILE_PLATFORM
+#include "tensorflow/core/platform/platform.h"
+// clang-format on
+
#include "absl/memory/memory.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/eager/c_api_internal.h"
+#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/host_info.h"
+#include "tensorflow/core/platform/platform.h" // NOLINT
#ifdef TENSORFLOW_EAGER_USE_XLA
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#endif // TENSORFLOW_EAGER_USE_XLA
@@ -38,11 +45,15 @@ limitations under the License.
#include "tensorflow/core/common_runtime/eager/execute.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
+#if !defined(IS_MOBILE_PLATFORM)
+#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
+#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
+#endif // !IS_MOBILE_PLATFORM
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
@@ -63,6 +74,17 @@ using tensorflow::int64;
using tensorflow::string;
namespace {
+
+const tensorflow::OpDef* GetOpDef(TFE_Op* op, TF_Status* status) {
+ if (op->inference_ctx) {
+ return op->inference_ctx->op_def;
+ }
+ const tensorflow::OpDef* op_def;
+ status->status =
+ tensorflow::OpDefForOp(op->operation.Name().c_str(), &op_def);
+ return op_def;
+}
+
bool IsCPU(const tensorflow::Device* d) {
return d == nullptr || d->tensorflow_gpu_device_info() == nullptr;
}
@@ -77,6 +99,7 @@ string DeviceName(const tensorflow::Device* d) {
return (d == nullptr) ? "cpu:0" : d->name();
}
+#if !defined(IS_MOBILE_PLATFORM)
tensorflow::Status GetAllRemoteDevices(
const std::vector& remote_workers,
tensorflow::WorkerCacheInterface* worker_cache,
@@ -114,11 +137,12 @@ tensorflow::Status CreateRemoteContexts(
const std::vector& remote_workers, int64 rendezvous_id,
int keep_alive_secs, const tensorflow::ServerDef& server_def,
tensorflow::eager::EagerClientCache* remote_eager_workers, bool async,
+ const tensorflow::eager::CreateContextRequest& base_request,
tensorflow::gtl::FlatMap* remote_contexts) {
for (int i = 0; i < remote_workers.size(); i++) {
const string& remote_worker = remote_workers[i];
- tensorflow::eager::CreateContextRequest request;
+ tensorflow::eager::CreateContextRequest request(base_request);
tensorflow::eager::CreateContextResponse response;
request.set_rendezvous_id(rendezvous_id);
tensorflow::DeviceNameUtils::ParsedName parsed_name;
@@ -132,7 +156,9 @@ tensorflow::Status CreateRemoteContexts(
request.mutable_server_def()->set_task_index(parsed_name.task);
request.set_async(async);
request.set_keep_alive_secs(keep_alive_secs);
- auto* eager_client = remote_eager_workers->GetClient(remote_worker);
+ tensorflow::eager::EagerClient* eager_client;
+ TF_RETURN_IF_ERROR(
+ remote_eager_workers->GetClient(remote_worker, &eager_client));
if (eager_client == nullptr) {
return tensorflow::errors::Internal(
"Cannot find a client for the given target:", remote_worker);
@@ -198,6 +224,23 @@ tensorflow::Status UpdateTFE_ContextWithServerDef(
remote_workers, grpc_server->master_env()->worker_cache,
&remote_device_mgr));
+ std::vector<tensorflow::DeviceAttributes> cluster_device_attributes;
+ remote_device_mgr->ListDeviceAttributes(&cluster_device_attributes);
+
+ std::vector<tensorflow::DeviceAttributes> local_device_attributes;
+ grpc_server->worker_env()->device_mgr->ListDeviceAttributes(
+ &local_device_attributes);
+
+ // This request makes sure that we can create Rendezvous properly between
+ // local and remote contexts.
+ tensorflow::eager::CreateContextRequest base_request;
+ for (const auto& da : cluster_device_attributes) {
+ *base_request.add_cluster_device_attributes() = da;
+ }
+ for (const auto& da : local_device_attributes) {
+ *base_request.add_cluster_device_attributes() = da;
+ }
+
std::shared_ptr channel_cache =
grpc_server->channel_cache();
std::unique_ptr remote_eager_workers(
@@ -207,14 +250,16 @@ tensorflow::Status UpdateTFE_ContextWithServerDef(
tensorflow::gtl::FlatMap remote_contexts;
LOG_AND_RETURN_IF_ERROR(CreateRemoteContexts(
remote_workers, rendezvous_id, keep_alive_secs, server_def,
- remote_eager_workers.get(), ctx->context.Async(), &remote_contexts));
+ remote_eager_workers.get(), ctx->context->Async(), base_request,
+ &remote_contexts));
tensorflow::RemoteRendezvous* r =
grpc_server->worker_env()->rendezvous_mgr->Find(rendezvous_id);
auto session_name = tensorflow::strings::StrCat("eager_", rendezvous_id);
TF_RETURN_IF_ERROR(grpc_server->worker_env()->session_mgr->CreateSession(
- session_name, server_def, true));
+ session_name, server_def, base_request.cluster_device_attributes(),
+ true));
std::shared_ptr worker_session;
TF_RETURN_IF_ERROR(
@@ -226,14 +271,14 @@ tensorflow::Status UpdateTFE_ContextWithServerDef(
auto* device_mgr = grpc_server->worker_env()->device_mgr;
- ctx->context.InitializeRemote(std::move(server),
- std::move(remote_eager_workers),
- std::move(remote_device_mgr), remote_contexts,
- r, device_mgr, keep_alive_secs);
-
- return tensorflow::Status::OK();
+ return ctx->context->InitializeRemote(
+ std::move(server), grpc_server->worker_env(), worker_session,
+ std::move(remote_eager_workers), std::move(remote_device_mgr),
+ remote_contexts, r, device_mgr, keep_alive_secs,
+ worker_session->cluster_flr.get());
#undef LOG_AND_RETURN_IF_ERROR
}
+#endif // !IS_MOBILE_PLATFORM
tensorflow::Status OpInferSingleInputAttrs(TFE_Op* op,
TFE_TensorHandle* input) {
@@ -330,7 +375,7 @@ void TFE_ContextOptionsSetDevicePlacementPolicy(
TF_CAPI_EXPORT extern void TFE_ContextSetAsyncForThread(TFE_Context* ctx,
unsigned char enable,
TF_Status* status) {
- status->status = ctx->context.SetAsyncForThread(enable);
+ status->status = ctx->context->SetAsyncForThread(enable);
}
void TFE_DeleteContextOptions(TFE_ContextOptions* options) { delete options; }
@@ -349,7 +394,8 @@ TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
return new TFE_Context(opts->session_options.options, opts->policy,
opts->async, device_mgr.release(),
- /*device_mgr_owned*/ true, r);
+ /*device_mgr_owned*/ true, r,
+ tensorflow::GetDefaultCustomKernelCreator());
}
TFE_Context* TFE_NewContextFromSession(const TFE_ContextOptions* opts,
@@ -359,23 +405,24 @@ TFE_Context* TFE_NewContextFromSession(const TFE_ContextOptions* opts,
if (!status->status.ok()) return nullptr;
tensorflow::Rendezvous* r =
new tensorflow::IntraProcessRendezvous(device_mgr);
+
return new TFE_Context(opts->session_options.options, opts->policy,
- opts->async, device_mgr, /*device_mgr_owned*/ false,
- r);
+ opts->async, device_mgr, /*device_mgr_owned*/ false, r,
+ tensorflow::GetDefaultCustomKernelCreator());
}
void TFE_DeleteContext(TFE_Context* ctx) { delete ctx; }
TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) {
TF_DeviceList* list = new TF_DeviceList;
- ctx->context.local_device_mgr()->ListDeviceAttributes(&list->response);
- if (ctx->context.remote_device_mgr()) {
- ctx->context.remote_device_mgr()->ListDeviceAttributes(&list->response);
+ ctx->context->local_device_mgr()->ListDeviceAttributes(&list->response);
+ if (ctx->context->remote_device_mgr()) {
+ ctx->context->remote_device_mgr()->ListDeviceAttributes(&list->response);
}
return list;
}
-void TFE_ContextClearCaches(TFE_Context* ctx) { ctx->context.ClearCaches(); }
+void TFE_ContextClearCaches(TFE_Context* ctx) { ctx->context->ClearCaches(); }
// Set server_def on the context, possibly updating it.
TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
@@ -383,6 +430,10 @@ TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
const void* proto,
size_t proto_len,
TF_Status* status) {
+#if defined(IS_MOBILE_PLATFORM)
+ status->status = tensorflow::errors::Unimplemented(
+ "TFE_ContextSetServerDef not supported on mobile");
+#else // !defined(IS_MOBILE_PLATFORM)
tensorflow::ServerDef server_def;
if (!server_def.ParseFromArray(proto, proto_len)) {
status->status = tensorflow::errors::InvalidArgument(
@@ -391,11 +442,12 @@ TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
}
status->status =
UpdateTFE_ContextWithServerDef(keep_alive_secs, server_def, ctx);
+#endif // !IS_MOBILE_PLATFORM
}
void TFE_ContextSetThreadLocalDevicePlacementPolicy(
TFE_Context* ctx, TFE_ContextDevicePlacementPolicy policy) {
- ctx->context.SetThreadLocalDevicePlacementPolicy(
+ ctx->context->SetThreadLocalDevicePlacementPolicy(
static_cast<tensorflow::ContextDevicePlacementPolicy>(policy));
}
@@ -405,19 +457,19 @@ void TFE_ContextSetThreadLocalDevicePlacementPolicy(
extern TFE_ContextDevicePlacementPolicy TFE_ContextGetDevicePlacementPolicy(
TFE_Context* ctx) {
return static_cast<TFE_ContextDevicePlacementPolicy>(
- ctx->context.GetDevicePlacementPolicy());
+ ctx->context->GetDevicePlacementPolicy());
}
void TFE_ContextAsyncWait(TFE_Context* ctx, TF_Status* status) {
- status->status = ctx->context.AsyncWait();
+ status->status = ctx->context->AsyncWait();
}
void TFE_ContextGetStatus(TFE_Context* ctx, TF_Status* status) {
- status->status = ctx->context.GetStatus();
+ status->status = ctx->context->GetStatus();
}
void TFE_ContextAsyncClearError(TFE_Context* ctx) {
- ctx->context.ClearAsyncError();
+ ctx->context->ClearAsyncError();
}
TFE_TensorHandle* TFE_NewTensorHandle(TF_Tensor* t, TF_Status* status) {
@@ -577,7 +629,7 @@ TFE_Op* TFE_NewOp(TFE_Context* ctx, const char* op_or_function_name,
return new TFE_Op(ctx, name, false, types,
new TFE_OpInferenceContext(op_def));
}
- if (!ctx->context.FindFunctionByName(name)) {
+ if (!ctx->context->FindFunctionByName(name)) {
status->status = tensorflow::errors::NotFound(
"'", name,
"' is neither a type of a primitive operation nor a name "
@@ -807,6 +859,54 @@ void TFE_OpSetAttrFunctionList(TFE_Op* op, const char* attr_name,
funcs.get(), num_values));
}
+TF_CAPI_EXPORT extern int TFE_OpGetInputLength(TFE_Op* op,
+ const char* input_name,
+ TF_Status* status) {
+ const tensorflow::OpDef* op_def = GetOpDef(op, status);
+ if (!status->status.ok()) {
+ return -1;
+ }
+ tensorflow::AttrValueMap attrs;
+ op->operation.Attrs().FillAttrValueMap(&attrs);
+ tensorflow::NameRangeMap name_ranges;
+ status->status = tensorflow::NameRangesForNode(
+ tensorflow::AttrSlice(&attrs), *op_def, &name_ranges, nullptr);
+ if (!status->status.ok()) {
+ return -1;
+ }
+ auto iter = name_ranges.find(input_name);
+ if (iter == name_ranges.end()) {
+ status->status = tensorflow::errors::InvalidArgument("Input '", input_name,
+ "' not found");
+ return -1;
+ }
+ return iter->second.second - iter->second.first;
+}
+
+TF_CAPI_EXPORT extern int TFE_OpGetOutputLength(TFE_Op* op,
+ const char* output_name,
+ TF_Status* status) {
+ const tensorflow::OpDef* op_def = GetOpDef(op, status);
+ if (!status->status.ok()) {
+ return -1;
+ }
+ tensorflow::AttrValueMap attrs;
+ op->operation.Attrs().FillAttrValueMap(&attrs);
+ tensorflow::NameRangeMap name_ranges;
+ status->status = tensorflow::NameRangesForNode(
+ tensorflow::AttrSlice(&attrs), *op_def, nullptr, &name_ranges);
+ if (!status->status.ok()) {
+ return -1;
+ }
+ auto iter = name_ranges.find(output_name);
+ if (iter == name_ranges.end()) {
+ status->status = tensorflow::errors::InvalidArgument(
+ "Output '", output_name, "' not found");
+ return -1;
+ }
+ return iter->second.second - iter->second.first;
+}
+
void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals, int* num_retvals,
TF_Status* status) {
VLOG(1) << "Calling TFE_Execute() on op " << op;
@@ -827,7 +927,7 @@ TFE_TensorHandle* TFE_TensorHandleCopyToDevice(TFE_TensorHandle* h,
const char* device_name,
TF_Status* status) {
tensorflow::TensorHandle* handle;
- status->status = tensorflow::EagerCopyToDevice(h->handle, &ctx->context,
+ status->status = tensorflow::EagerCopyToDevice(h->handle, ctx->context,
device_name, &handle);
if (status->status.ok()) {
return new TFE_TensorHandle(handle);
@@ -844,26 +944,31 @@ void TFE_ContextAddFunctionDef(TFE_Context* ctx,
tensorflow::errors::InvalidArgument("Invalid FunctionDef proto");
return;
}
- status->status = ctx->context.AddFunctionDef(function_def);
+ status->status = ctx->context->AddFunctionDef(function_def);
}
void TFE_ContextAddFunction(TFE_Context* ctx, TF_Function* function,
TF_Status* status) {
- status->status = ctx->context.AddFunctionDef(function->fdef);
+ status->status = ctx->context->AddFunctionDef(function->fdef);
+}
+
+void TFE_ContextRemoveFunction(TFE_Context* ctx, const char* name,
+ TF_Status* status) {
+ status->status = ctx->context->RemoveFunction(name);
}
unsigned char TFE_ContextHasFunction(TFE_Context* ctx, const char* name) {
- return ctx->context.FindFunctionDef(name) != nullptr;
+ return ctx->context->FindFunctionDef(name) != nullptr;
}
void TFE_ContextEnableRunMetadata(TFE_Context* ctx) {
- ctx->context.SetShouldStoreGraphs(true);
- ctx->context.SetShouldStoreStepStats(true);
+ ctx->context->SetShouldStoreGraphs(true);
+ ctx->context->SetShouldStoreStepStats(true);
}
void TFE_ContextDisableRunMetadata(TFE_Context* ctx) {
- ctx->context.SetShouldStoreGraphs(false);
- ctx->context.SetShouldStoreStepStats(false);
+ ctx->context->SetShouldStoreGraphs(false);
+ ctx->context->SetShouldStoreStepStats(false);
}
} // extern "C"
@@ -892,9 +997,9 @@ void TFE_ContextExportRunMetadata(TFE_Context* ctx, TF_Buffer* buf,
TF_Status* status) {
TFE_ContextAsyncWait(ctx, status);
if (!status->status.ok()) return;
- tensorflow::mutex_lock ml(*ctx->context.MetadataMu());
- status->status = MessageToBuffer(*ctx->context.RunMetadataProto(), buf);
- ctx->context.ClearRunMetadata();
+ tensorflow::mutex_lock ml(*ctx->context->MetadataMu());
+ status->status = MessageToBuffer(*ctx->context->RunMetadataProto(), buf);
+ ctx->context->ClearRunMetadata();
}
namespace {
@@ -910,9 +1015,9 @@ TFE_Op* GetFunc(TFE_Context* ctx, const tensorflow::NameAttrList& func,
}
} // namespace
-void TFE_ContextStartStep(TFE_Context* ctx) { ctx->context.StartStep(); }
+void TFE_ContextStartStep(TFE_Context* ctx) { ctx->context->StartStep(); }
-void TFE_ContextEndStep(TFE_Context* ctx) { ctx->context.EndStep(); }
+void TFE_ContextEndStep(TFE_Context* ctx) { ctx->context->EndStep(); }
namespace tensorflow {
void SetOpAttrValueScalar(TFE_Context* ctx, TFE_Op* op,
diff --git a/tensorflow/c/eager/c_api.h b/tensorflow/c/eager/c_api.h
index ce3da7f9189..d5223e63f13 100755
--- a/tensorflow/c/eager/c_api.h
+++ b/tensorflow/c/eager/c_api.h
@@ -366,6 +366,18 @@ TF_CAPI_EXPORT extern void TFE_OpSetAttrFunctionList(TFE_Op* op,
const TFE_Op** value,
int num_values);
+// Returns the length (number of tensors) of the input argument `input_name`
+// found in the provided `op`.
+TF_CAPI_EXPORT extern int TFE_OpGetInputLength(TFE_Op* op,
+ const char* input_name,
+ TF_Status* status);
+
+// Returns the length (number of tensors) of the output argument `output_name`
+// found in the provided `op`.
+TF_CAPI_EXPORT extern int TFE_OpGetOutputLength(TFE_Op* op,
+ const char* output_name,
+ TF_Status* status);
+
// Execute the operation defined by 'op' and return handles to computed
// tensors in `retvals`.
//
@@ -398,6 +410,13 @@ TF_CAPI_EXPORT extern void TFE_ContextAddFunction(TFE_Context* ctx,
TF_Function* function,
TF_Status* status);
+// Removes a function from the context. Once removed, you can no longer
+// TFE_Execute it, nor TFE_Execute any TFE_Op that has it as an attribute, nor
+// any other function that calls it as an attribute.
+TF_CAPI_EXPORT extern void TFE_ContextRemoveFunction(TFE_Context* ctx,
+ const char* name,
+ TF_Status* status);
+
// Checks whether a function is registered under `name`.
TF_CAPI_EXPORT unsigned char TFE_ContextHasFunction(TFE_Context* ctx,
const char* name);
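
The input/output length queries and TFE_ContextRemoveFunction declared above are easiest to read next to a concrete op. A minimal sketch, assuming an eager context `ctx` already exists and reusing the ConcatV2 setup from the experimental tests in this change; the "output" argument name and the removed function name are assumptions:

    TF_Status* status = TF_NewStatus();
    TFE_Op* concat = TFE_NewOp(ctx, "ConcatV2", status);
    TFE_OpSetAttrType(concat, "T", TF_FLOAT);
    TFE_OpSetAttrInt(concat, "N", 2);           // two tensors in the "values" list
    TFE_OpSetAttrType(concat, "Tidx", TF_INT32);

    // With N=2 this should report 2 for the variadic "values" argument and 1
    // for the (assumed) "output" argument.
    int num_values = TFE_OpGetInputLength(concat, "values", status);
    int num_outputs = TFE_OpGetOutputLength(concat, "output", status);

    // A function previously registered via TFE_ContextAddFunction can be
    // dropped again; "my_fn" is a placeholder name.
    TFE_ContextRemoveFunction(ctx, "my_fn", status);

    TFE_DeleteOp(concat);
    TF_DeleteStatus(status);
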
diff --git a/tensorflow/c/eager/c_api_debug.cc b/tensorflow/c/eager/c_api_debug.cc
index ffcd5ace0b9..b4192716c4f 100644
--- a/tensorflow/c/eager/c_api_debug.cc
+++ b/tensorflow/c/eager/c_api_debug.cc
@@ -32,13 +32,13 @@ std::vector TensorShapeAsVector(TFE_TensorHandle* handle,
TF_Status* status) {
std::vector shape;
int rank = TFE_TensorHandleNumDims(handle, status);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
return shape;
}
shape.reserve(rank);
for (int i = 0; i < rank; ++i) {
shape.push_back(TFE_TensorHandleDim(handle, i, status));
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
return shape;
}
}
@@ -53,7 +53,7 @@ TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
TFE_TensorHandle* handle, TF_Status* status) {
const tensorflow::Tensor* tensor;
status->status = handle->handle->Tensor(&tensor);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
return nullptr;
}
@@ -139,7 +139,7 @@ TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
// If the tensor is not an XLA tensor, the device shape is
// the same as regular tensor shape.
std::vector dev_dims = TensorShapeAsVector(handle, status);
- if (!status->status.ok()) {
+ if (TF_GetCode(status) != TF_OK) {
return nullptr;
}
return new TFE_TensorDebugInfo(dev_dims);
diff --git a/tensorflow/c/eager/c_api_experimental.cc b/tensorflow/c/eager/c_api_experimental.cc
index c6a12247ef1..0c170ead40a 100644
--- a/tensorflow/c/eager/c_api_experimental.cc
+++ b/tensorflow/c/eager/c_api_experimental.cc
@@ -17,6 +17,12 @@ limitations under the License.
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
+#include "tensorflow/c/tf_status_helper.h"
+#include "tensorflow/core/lib/monitoring/counter.h"
+#include "tensorflow/core/lib/monitoring/gauge.h"
+#include "tensorflow/core/lib/monitoring/sampler.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/profiler/rpc/client/capture_profile.h"
#include "tensorflow/core/profiler/rpc/profiler_server.h"
@@ -39,7 +45,7 @@ void TFE_DeleteProfiler(TFE_Profiler* profiler) { delete profiler; }
void TFE_ProfilerSerializeToString(TFE_Context* ctx, TFE_Profiler* profiler,
TF_Buffer* buf, TF_Status* status) {
TFE_ContextAsyncWait(ctx, status);
- if (!status->status.ok()) return;
+ if (TF_GetCode(status) != TF_OK) return;
string content;
status->status = profiler->profiler->SerializeToString(&content);
void* data = tensorflow::port::Malloc(content.length());
@@ -57,7 +63,7 @@ TFE_ProfilerContext* TFE_NewProfilerContext() {
void TFE_ProfilerContextSetEagerContext(TFE_ProfilerContext* profiler_context,
TFE_Context* eager_context) {
- profiler_context->profiler_context.eager_context = &eager_context->context;
+ profiler_context->profiler_context.eager_context = eager_context->context;
}
void TFE_DeleteProfilerContext(TFE_ProfilerContext* profiler_context) {
@@ -71,11 +77,11 @@ void TFE_StartProfilerServer(TFE_ProfilerContext* context, int port) {
}
void TFE_ContextEnableGraphCollection(TFE_Context* ctx) {
- ctx->context.SetShouldStoreGraphs(true);
+ ctx->context->SetShouldStoreGraphs(true);
}
void TFE_ContextDisableGraphCollection(TFE_Context* ctx) {
- ctx->context.SetShouldStoreGraphs(false);
+ ctx->context->SetShouldStoreGraphs(false);
}
bool TFE_ProfilerClientStartTracing(const char* service_addr,
@@ -92,3 +98,423 @@ bool TFE_ProfilerClientStartTracing(const char* service_addr,
num_tracing_attempts);
return s.ok();
}
+
+void TFE_MonitoringCounterCellIncrementBy(TFE_MonitoringCounterCell* cell,
+ int64_t value) {
+ cell->cell.IncrementBy(value);
+}
+
+int64_t TFE_MonitoringCounterCellValue(TFE_MonitoringCounterCell* cell) {
+ return cell->cell.value();
+}
+
+TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(const char* name,
+ TF_Status* status,
+ const char* description) {
+ auto* result = new TFE_MonitoringCounter0({name, description});
+ Set_TF_Status_from_Status(status, result->counter->GetStatus());
+ if (!result->counter->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteCounter0(TFE_MonitoringCounter0* counter) {
+ delete counter;
+}
+
+TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter0(
+ TFE_MonitoringCounter0* counter) {
+ return static_cast<TFE_MonitoringCounterCell*>(
+ static_cast<void*>(counter->counter->GetCell()));
+}
+
+TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(const char* name,
+ TF_Status* status,
+ const char* description,
+ const char* label1) {
+ auto* result = new TFE_MonitoringCounter1({name, description, label1});
+ Set_TF_Status_from_Status(status, result->counter->GetStatus());
+ if (!result->counter->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteCounter1(TFE_MonitoringCounter1* counter) {
+ delete counter;
+}
+
+TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter1(
+ TFE_MonitoringCounter1* counter, const char* label1) {
+ return static_cast<TFE_MonitoringCounterCell*>(
+ static_cast<void*>(counter->counter->GetCell(label1)));
+}
+
+TFE_MonitoringCounter2* TFE_MonitoringNewCounter2(const char* name,
+ TF_Status* status,
+ const char* description,
+ const char* label1,
+ const char* label2) {
+ auto* result =
+ new TFE_MonitoringCounter2({name, description, label1, label2});
+ Set_TF_Status_from_Status(status, result->counter->GetStatus());
+ if (!result->counter->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteCounter2(TFE_MonitoringCounter2* counter) {
+ delete counter;
+}
+
+TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter2(
+ TFE_MonitoringCounter2* counter, const char* label1, const char* label2) {
+ return static_cast<TFE_MonitoringCounterCell*>(
+ static_cast<void*>(counter->counter->GetCell(label1, label2)));
+}
+
+void TFE_MonitoringIntGaugeCellSet(TFE_MonitoringIntGaugeCell* cell,
+ int64_t value) {
+ cell->cell.Set(value);
+}
+
+int64_t TFE_MonitoringIntGaugeCellValue(TFE_MonitoringIntGaugeCell* cell) {
+ return cell->cell.value();
+}
+
+TFE_MonitoringIntGauge0* TFE_MonitoringNewIntGauge0(const char* name,
+ TF_Status* status,
+ const char* description) {
+ auto* result = new TFE_MonitoringIntGauge0({name, description});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteIntGauge0(TFE_MonitoringIntGauge0* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringIntGaugeCell* TFE_MonitoringGetCellIntGauge0(
+ TFE_MonitoringIntGauge0* gauge) {
+ return static_cast<TFE_MonitoringIntGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell()));
+}
+
+TFE_MonitoringIntGauge1* TFE_MonitoringNewIntGauge1(const char* name,
+ TF_Status* status,
+ const char* description,
+ const char* label1) {
+ auto* result = new TFE_MonitoringIntGauge1({name, description, label1});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteIntGauge1(TFE_MonitoringIntGauge1* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringIntGaugeCell* TFE_MonitoringGetCellIntGauge1(
+ TFE_MonitoringIntGauge1* gauge, const char* label1) {
+ return static_cast<TFE_MonitoringIntGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell(label1)));
+}
+
+TFE_MonitoringIntGauge2* TFE_MonitoringNewIntGauge2(const char* name,
+ TF_Status* status,
+ const char* description,
+ const char* label1,
+ const char* label2) {
+ auto* result =
+ new TFE_MonitoringIntGauge2({name, description, label1, label2});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteIntGauge2(TFE_MonitoringIntGauge2* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringIntGaugeCell* TFE_MonitoringGetCellIntGauge2(
+ TFE_MonitoringIntGauge2* gauge, const char* label1, const char* label2) {
+ return static_cast<TFE_MonitoringIntGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell(label1, label2)));
+}
+
+void TFE_MonitoringStringGaugeCellSet(TFE_MonitoringStringGaugeCell* cell,
+ const char* value) {
+ cell->cell.Set({value});
+}
+
+const void TFE_MonitoringStringGaugeCellValue(
+ TFE_MonitoringStringGaugeCell* cell, TF_Buffer* buf) {
+ tensorflow::string value = cell->cell.value();
+ void* data = tensorflow::port::Malloc(value.length());
+ value.copy(static_cast<char*>(data), value.length(), 0);
+ buf->data = data;
+ buf->length = value.length();
+ buf->data_deallocator = [](void* data, size_t length) {
+ tensorflow::port::Free(data);
+ };
+}
+
+TFE_MonitoringStringGauge0* TFE_MonitoringNewStringGauge0(
+ const char* name, TF_Status* status, const char* description) {
+ auto* result = new TFE_MonitoringStringGauge0({name, description});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteStringGauge0(TFE_MonitoringStringGauge0* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge0(
+ TFE_MonitoringStringGauge0* gauge) {
+ return static_cast<TFE_MonitoringStringGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell()));
+}
+
+TFE_MonitoringStringGauge1* TFE_MonitoringNewStringGauge1(
+ const char* name, TF_Status* status, const char* description,
+ const char* label1) {
+ auto* result = new TFE_MonitoringStringGauge1({name, description, label1});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteStringGauge1(TFE_MonitoringStringGauge1* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge1(
+ TFE_MonitoringStringGauge1* gauge, const char* label1) {
+ return static_cast<TFE_MonitoringStringGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell(label1)));
+}
+
+TFE_MonitoringStringGauge2* TFE_MonitoringNewStringGauge2(
+ const char* name, TF_Status* status, const char* description,
+ const char* label1, const char* label2) {
+ auto* result =
+ new TFE_MonitoringStringGauge2({name, description, label1, label2});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteStringGauge2(TFE_MonitoringStringGauge2* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge2(
+ TFE_MonitoringStringGauge2* gauge, const char* label1, const char* label2) {
+ return static_cast<TFE_MonitoringStringGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell(label1, label2)));
+}
+
+void TFE_MonitoringBoolGaugeCellSet(TFE_MonitoringBoolGaugeCell* cell,
+ bool value) {
+ cell->cell.Set(value);
+}
+
+bool TFE_MonitoringBoolGaugeCellValue(TFE_MonitoringBoolGaugeCell* cell) {
+ return cell->cell.value();
+}
+
+TFE_MonitoringBoolGauge0* TFE_MonitoringNewBoolGauge0(const char* name,
+ TF_Status* status,
+ const char* description) {
+ auto* result = new TFE_MonitoringBoolGauge0({name, description});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteBoolGauge0(TFE_MonitoringBoolGauge0* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringBoolGaugeCell* TFE_MonitoringGetCellBoolGauge0(
+ TFE_MonitoringBoolGauge0* gauge) {
+ return static_cast<TFE_MonitoringBoolGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell()));
+}
+
+TFE_MonitoringBoolGauge1* TFE_MonitoringNewBoolGauge1(const char* name,
+ TF_Status* status,
+ const char* description,
+ const char* label1) {
+ auto* result = new TFE_MonitoringBoolGauge1({name, description, label1});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteBoolGauge1(TFE_MonitoringBoolGauge1* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringBoolGaugeCell* TFE_MonitoringGetCellBoolGauge1(
+ TFE_MonitoringBoolGauge1* gauge, const char* label1) {
+ return static_cast<TFE_MonitoringBoolGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell(label1)));
+}
+
+TFE_MonitoringBoolGauge2* TFE_MonitoringNewBoolGauge2(const char* name,
+ TF_Status* status,
+ const char* description,
+ const char* label1,
+ const char* label2) {
+ auto* result =
+ new TFE_MonitoringBoolGauge2({name, description, label1, label2});
+ Set_TF_Status_from_Status(status, result->gauge->GetStatus());
+ if (!result->gauge->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteBoolGauge2(TFE_MonitoringBoolGauge2* gauge) {
+ delete gauge;
+}
+
+TFE_MonitoringBoolGaugeCell* TFE_MonitoringGetCellBoolGauge2(
+ TFE_MonitoringBoolGauge2* gauge, const char* label1, const char* label2) {
+ return static_cast<TFE_MonitoringBoolGaugeCell*>(
+ static_cast<void*>(gauge->gauge->GetCell(label1, label2)));
+}
+
+void TFE_MonitoringSamplerCellAdd(TFE_MonitoringSamplerCell* cell,
+ double value) {
+ cell->cell.Add(value);
+}
+
+void TFE_MonitoringSamplerCellValue(TFE_MonitoringSamplerCell* cell,
+ TF_Buffer* buf) {
+ string content;
+ cell->cell.value().SerializeToString(&content);
+ void* data = tensorflow::port::Malloc(content.length());
+ content.copy(static_cast<char*>(data), content.length(), 0);
+ buf->data = data;
+ buf->length = content.length();
+ buf->data_deallocator = [](void* data, size_t length) {
+ tensorflow::port::Free(data);
+ };
+}
+
+TFE_MonitoringBuckets* TFE_MonitoringNewExponentialBuckets(double scale,
+ double growth_factor,
+ int bucket_count) {
+ return new TFE_MonitoringBuckets([scale, growth_factor, bucket_count]() {
+ return tensorflow::monitoring::Buckets::Exponential(scale, growth_factor,
+ bucket_count);
+ });
+}
+
+void TFE_MonitoringDeleteBuckets(TFE_MonitoringBuckets* buckets) {
+ delete buckets;
+}
+
+TFE_MonitoringSampler0* TFE_MonitoringNewSampler0(
+ const char* name, TFE_MonitoringBuckets* buckets, TF_Status* status,
+ const char* description) {
+ auto* result = new TFE_MonitoringSampler0(
+ {name, buckets->create_buckets(), description});
+ Set_TF_Status_from_Status(status, result->sampler->GetStatus());
+ if (!result->sampler->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteSampler0(TFE_MonitoringSampler0* sampler) {
+ delete sampler;
+}
+
+TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler0(
+ TFE_MonitoringSampler0* sampler) {
+ return static_cast<TFE_MonitoringSamplerCell*>(
+ static_cast<void*>(sampler->sampler->GetCell()));
+}
+
+TFE_MonitoringSampler1* TFE_MonitoringNewSampler1(
+ const char* name, TFE_MonitoringBuckets* buckets, TF_Status* status,
+ const char* description, const char* label1) {
+ auto* result = new TFE_MonitoringSampler1(
+ {name, buckets->create_buckets(), description, label1});
+ Set_TF_Status_from_Status(status, result->sampler->GetStatus());
+ if (!result->sampler->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteSampler1(TFE_MonitoringSampler1* sampler) {
+ delete sampler;
+}
+
+TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler1(
+ TFE_MonitoringSampler1* sampler, const char* label1) {
+ return static_cast<TFE_MonitoringSamplerCell*>(
+ static_cast<void*>(sampler->sampler->GetCell(label1)));
+}
+
+TFE_MonitoringSampler2* TFE_MonitoringNewSampler2(
+ const char* name, TFE_MonitoringBuckets* buckets, TF_Status* status,
+ const char* description, const char* label1, const char* label2) {
+ auto* result = new TFE_MonitoringSampler2(
+ {name, buckets->create_buckets(), description, label1, label2});
+ Set_TF_Status_from_Status(status, result->sampler->GetStatus());
+ if (!result->sampler->GetStatus().ok()) {
+ delete result;
+ return nullptr;
+ }
+ return result;
+}
+
+void TFE_MonitoringDeleteSampler2(TFE_MonitoringSampler2* sampler) {
+ delete sampler;
+}
+
+TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler2(
+ TFE_MonitoringSampler2* sampler, const char* label1, const char* label2) {
+ return static_cast<TFE_MonitoringSamplerCell*>(
+ static_cast<void*>(sampler->sampler->GetCell(label1, label2)));
+}
diff --git a/tensorflow/c/eager/c_api_experimental.h b/tensorflow/c/eager/c_api_experimental.h
index 219b9f40720..4dc57e1eec5 100644
--- a/tensorflow/c/eager/c_api_experimental.h
+++ b/tensorflow/c/eager/c_api_experimental.h
@@ -87,6 +87,229 @@ TF_CAPI_EXPORT extern bool TFE_ProfilerClientStartTracing(
const char* service_addr, const char* logdir, const char* worker_list,
bool include_dataset_ops, int duration_ms, int num_tracing_attempts);
+// TODO(fishx): Move these monitoring APIs into a separate file.
+// -----------------------------------------------------------------------------
+// Monitoring Counter APIs.
+// These APIs de-template the monitoring Counter class for SWIG.
+
+typedef struct TFE_MonitoringCounterCell TFE_MonitoringCounterCell;
+
+// Atomically increments the value of the cell. The value must be non-negative.
+TF_CAPI_EXPORT extern void TFE_MonitoringCounterCellIncrementBy(
+ TFE_MonitoringCounterCell* cell, int64_t value);
+
+// Retrieves the current value of the cell.
+TF_CAPI_EXPORT extern int64_t TFE_MonitoringCounterCellValue(
+ TFE_MonitoringCounterCell* cell);
+
+// APIs for Counter without label.
+typedef struct TFE_MonitoringCounter0 TFE_MonitoringCounter0;
+// Returns a new Counter metric object. The caller should manage the lifetime
+// of the object. Using a duplicate metric name will crash the program with a
+// fatal error.
+TF_CAPI_EXPORT extern TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(
+ const char* name, TF_Status* status, const char* description);
+// Deletes the Counter object.
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter0(
+ TFE_MonitoringCounter0* counter);
+// Retrieves the cell from the Counter object. The Counter object will manage
+// the lifetime of the cell.
+TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter0(
+ TFE_MonitoringCounter0* counter);
+
+// APIs for Counter with 1 label.
+typedef struct TFE_MonitoringCounter1 TFE_MonitoringCounter1;
+TF_CAPI_EXPORT extern TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(
+ const char* name, TF_Status* status, const char* description,
+ const char* label1);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter1(
+ TFE_MonitoringCounter1* counter);
+TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter1(
+ TFE_MonitoringCounter1* counter, const char* label1);
+
+// APIs for Counter with 2 labels.
+typedef struct TFE_MonitoringCounter2 TFE_MonitoringCounter2;
+TF_CAPI_EXPORT extern TFE_MonitoringCounter2* TFE_MonitoringNewCounter2(
+ const char* name, TF_Status* status, const char* description,
+ const char* label1, const char* label2);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter2(
+ TFE_MonitoringCounter2* counter);
+TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter2(
+ TFE_MonitoringCounter2* counter, const char* label1, const char* label2);
+
+// -----------------------------------------------------------------------------
+// Monitoring Gauge APIs.
+// These APIs de-template the monitoring Gauge classes for SWIG.
+
+typedef struct TFE_MonitoringIntGaugeCell TFE_MonitoringIntGaugeCell;
+
+// Atomically sets the value of the cell.
+TF_CAPI_EXPORT extern void TFE_MonitoringIntGaugeCellSet(
+ TFE_MonitoringIntGaugeCell* cell, int64_t value);
+
+// Retrieves the current value of the cell.
+TF_CAPI_EXPORT extern int64_t TFE_MonitoringIntGaugeCellValue(
+ TFE_MonitoringIntGaugeCell* cell);
+
+// APIs for Int Gauge without label.
+typedef struct TFE_MonitoringIntGauge0 TFE_MonitoringIntGauge0;
+TF_CAPI_EXPORT extern TFE_MonitoringIntGauge0* TFE_MonitoringNewIntGauge0(
+ const char* name, TF_Status* out_status, const char* description);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge0(
+ TFE_MonitoringIntGauge0* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
+TFE_MonitoringGetCellIntGauge0(TFE_MonitoringIntGauge0* gauge);
+
+// APIs for Int Gauge with 1 label.
+typedef struct TFE_MonitoringIntGauge1 TFE_MonitoringIntGauge1;
+TF_CAPI_EXPORT extern TFE_MonitoringIntGauge1* TFE_MonitoringNewIntGauge1(
+ const char* name, TF_Status* out_status, const char* description,
+ const char* label1);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge1(
+ TFE_MonitoringIntGauge1* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
+TFE_MonitoringGetCellIntGauge1(TFE_MonitoringIntGauge1* gauge,
+ const char* label1);
+
+// APIs for Int Gauge with 2 labels.
+typedef struct TFE_MonitoringIntGauge2 TFE_MonitoringIntGauge2;
+TF_CAPI_EXPORT extern TFE_MonitoringIntGauge2* TFE_MonitoringNewIntGauge2(
+ const char* name, TF_Status* out_status, const char* description,
+ const char* label1, const char* label2);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge2(
+ TFE_MonitoringIntGauge2* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
+TFE_MonitoringGetCellIntGauge2(TFE_MonitoringIntGauge2* gauge,
+ const char* label1, const char* label2);
+
+typedef struct TFE_MonitoringStringGaugeCell TFE_MonitoringStringGaugeCell;
+TF_CAPI_EXPORT extern void TFE_MonitoringStringGaugeCellSet(
+ TFE_MonitoringStringGaugeCell* cell, const char* value);
+// Retrieves the string value and saves it in the buffer.
+TF_CAPI_EXPORT extern const void TFE_MonitoringStringGaugeCellValue(
+ TFE_MonitoringStringGaugeCell* cell, TF_Buffer* buf);
+
+// APIs for String Gauge without label.
+typedef struct TFE_MonitoringStringGauge0 TFE_MonitoringStringGauge0;
+TF_CAPI_EXPORT extern TFE_MonitoringStringGauge0* TFE_MonitoringNewStringGauge0(
+ const char* name, TF_Status* out_status, const char* description);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge0(
+ TFE_MonitoringStringGauge0* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
+TFE_MonitoringGetCellStringGauge0(TFE_MonitoringStringGauge0* gauge);
+
+// APIs for String Gauge with 1 label.
+typedef struct TFE_MonitoringStringGauge1 TFE_MonitoringStringGauge1;
+TF_CAPI_EXPORT extern TFE_MonitoringStringGauge1* TFE_MonitoringNewStringGauge1(
+ const char* name, TF_Status* out_status, const char* description,
+ const char* label1);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge1(
+ TFE_MonitoringStringGauge1* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
+TFE_MonitoringGetCellStringGauge1(TFE_MonitoringStringGauge1* gauge,
+ const char* label1);
+
+// APIs for String Gauge with 2 labels.
+typedef struct TFE_MonitoringStringGauge2 TFE_MonitoringStringGauge2;
+TF_CAPI_EXPORT extern TFE_MonitoringStringGauge2* TFE_MonitoringNewStringGauge2(
+ const char* name, TF_Status* out_status, const char* description,
+ const char* label1, const char* label2);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge2(
+ TFE_MonitoringStringGauge2* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
+TFE_MonitoringGetCellStringGauge2(TFE_MonitoringStringGauge2* gauge,
+ const char* label1, const char* label2);
+
+typedef struct TFE_MonitoringBoolGaugeCell TFE_MonitoringBoolGaugeCell;
+TF_CAPI_EXPORT extern void TFE_MonitoringBoolGaugeCellSet(
+ TFE_MonitoringBoolGaugeCell* cell, bool value);
+TF_CAPI_EXPORT extern bool TFE_MonitoringBoolGaugeCellValue(
+ TFE_MonitoringBoolGaugeCell* cell);
+
+// APIs for Bool Gauge without label.
+typedef struct TFE_MonitoringBoolGauge0 TFE_MonitoringBoolGauge0;
+TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge0* TFE_MonitoringNewBoolGauge0(
+ const char* name, TF_Status* out_status, const char* description);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge0(
+ TFE_MonitoringBoolGauge0* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
+TFE_MonitoringGetCellBoolGauge0(TFE_MonitoringBoolGauge0* gauge);
+
+// APIs for Bool Gauge with 1 label.
+typedef struct TFE_MonitoringBoolGauge1 TFE_MonitoringBoolGauge1;
+TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge1* TFE_MonitoringNewBoolGauge1(
+ const char* name, TF_Status* out_status, const char* description,
+ const char* label1);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge1(
+ TFE_MonitoringBoolGauge1* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
+TFE_MonitoringGetCellBoolGauge1(TFE_MonitoringBoolGauge1* gauge,
+ const char* label1);
+
+// APIs for Bool Gauge with 2 labels.
+typedef struct TFE_MonitoringBoolGauge2 TFE_MonitoringBoolGauge2;
+TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge2* TFE_MonitoringNewBoolGauge2(
+ const char* name, TF_Status* out_status, const char* description,
+ const char* label1, const char* label2);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge2(
+ TFE_MonitoringBoolGauge2* gauge);
+TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
+TFE_MonitoringGetCellBoolGauge2(TFE_MonitoringBoolGauge2* gauge,
+ const char* label1, const char* label2);
+
+// -----------------------------------------------------------------------------
+// Monitoring Sampler APIs.
+// These APIs de-template the monitoring Sampler classes for SWIG.
+
+typedef struct TFE_MonitoringSamplerCell TFE_MonitoringSamplerCell;
+
+// Atomically adds the given value to the cell.
+TF_CAPI_EXPORT extern void TFE_MonitoringSamplerCellAdd(
+ TFE_MonitoringSamplerCell* cell, double value);
+
+// Retrieves the current value of the cell. The value is returned as a
+// serialized HistogramProto written to buf.
+TF_CAPI_EXPORT extern void TFE_MonitoringSamplerCellValue(
+ TFE_MonitoringSamplerCell* cell, TF_Buffer* buf);
+
+// APIs for Sampler buckets.
+typedef struct TFE_MonitoringBuckets TFE_MonitoringBuckets;
+TF_CAPI_EXPORT extern TFE_MonitoringBuckets*
+TFE_MonitoringNewExponentialBuckets(double scale, double growth_factor,
+ int bucket_count);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBuckets(
+ TFE_MonitoringBuckets* buckets);
+
+// APIs for Sampler without label.
+typedef struct TFE_MonitoringSampler0 TFE_MonitoringSampler0;
+TF_CAPI_EXPORT extern TFE_MonitoringSampler0* TFE_MonitoringNewSampler0(
+ const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
+ const char* description);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler0(
+ TFE_MonitoringSampler0* sampler);
+TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler0(
+ TFE_MonitoringSampler0* sampler);
+
+// APIs for Sampler with 1 label.
+typedef struct TFE_MonitoringSampler1 TFE_MonitoringSampler1;
+TF_CAPI_EXPORT extern TFE_MonitoringSampler1* TFE_MonitoringNewSampler1(
+ const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
+ const char* description, const char* label1);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler1(
+ TFE_MonitoringSampler1* sampler);
+TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler1(
+ TFE_MonitoringSampler1* sampler, const char* label1);
+
+// APIs for Sampler with 2 labels.
+typedef struct TFE_MonitoringSampler2 TFE_MonitoringSampler2;
+TF_CAPI_EXPORT extern TFE_MonitoringSampler2* TFE_MonitoringNewSampler2(
+ const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
+ const char* description, const char* label1, const char* label2);
+TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler2(
+ TFE_MonitoringSampler2* sampler);
+TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler2(
+ TFE_MonitoringSampler2* sampler, const char* label1, const char* label2);
+
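+// Example usage (the metric name, bucket parameters, and label value below are
+// illustrative placeholders, not a real metric): create exponential buckets,
+// register a sampler, record samples into a cell, and read the accumulated
+// histogram back as a serialized HistogramProto.
+//
+//   TF_Status* status = TF_NewStatus();
+//   TFE_MonitoringBuckets* buckets =
+//       TFE_MonitoringNewExponentialBuckets(/*scale=*/1.0,
+//                                           /*growth_factor=*/2.0,
+//                                           /*bucket_count=*/10);
+//   TFE_MonitoringSampler1* sampler = TFE_MonitoringNewSampler1(
+//       "test/op_latency", buckets, status, "Op latency in ms", "op_name");
+//   if (TF_GetCode(status) == TF_OK) {
+//     TFE_MonitoringSamplerCell* cell =
+//         TFE_MonitoringGetCellSampler1(sampler, "MatMul");
+//     TFE_MonitoringSamplerCellAdd(cell, 1.5);
+//     TF_Buffer* buf = TF_NewBuffer();
+//     TFE_MonitoringSamplerCellValue(cell, buf);  // buf holds the proto.
+//     TF_DeleteBuffer(buf);
+//   }
+//   TFE_MonitoringDeleteBuckets(buckets);
+//   TF_DeleteStatus(status);
+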
#ifdef __cplusplus
} /* end extern "C" */
#endif
diff --git a/tensorflow/c/eager/c_api_experimental_test.cc b/tensorflow/c/eager/c_api_experimental_test.cc
index d85048caa7c..4e48a7591a9 100644
--- a/tensorflow/c/eager/c_api_experimental_test.cc
+++ b/tensorflow/c/eager/c_api_experimental_test.cc
@@ -16,14 +16,16 @@ limitations under the License.
#include "tensorflow/c/eager/c_api_experimental.h"
#include
+
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/cc/profiler/profiler.h"
+#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
-#include "tensorflow/core/profiler/trace_events.pb.h"
+#include "tensorflow/core/protobuf/trace_events.pb.h"
using tensorflow::string;
@@ -79,11 +81,15 @@ void ExecuteWithProfiling(bool async) {
profiler_result->length}));
string profile_proto_str = profile_proto.DebugString();
if (!gpu_device_name.empty()) {
- EXPECT_TRUE(HasSubstr(profile_proto_str, "GPU:0"));
+ EXPECT_TRUE(HasSubstr(profile_proto_str, "/device:GPU:0"));
// device name with "stream:all" is collected by Device Tracer.
EXPECT_TRUE(HasSubstr(profile_proto_str, "stream:all"));
+  // TODO(fishx): Move the following check out of this if statement.
+  // This is collected by TraceMe.
+ EXPECT_TRUE(HasSubstr(profile_proto_str, "/host:CPU"));
}
- EXPECT_TRUE(HasSubstr(profile_proto_str, "CPU:0"));
+ EXPECT_TRUE(HasSubstr(profile_proto_str, "/device:CPU:0"));
+ EXPECT_TRUE(HasSubstr(profile_proto_str, "MatMul"));
TF_DeleteBuffer(profiler_result);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
@@ -125,5 +131,165 @@ TEST(CAPI, MultipleProfilerSession) {
TFE_DeleteProfilerContext(profiler_context);
}
+TEST(CAPI, MonitoringCounter0) {
+ TF_Status* status = TF_NewStatus();
+ auto* counter =
+ TFE_MonitoringNewCounter0("test/counter", status, "description");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ TF_DeleteStatus(status);
+ auto* cell = TFE_MonitoringGetCellCounter0(counter);
+ TFE_MonitoringCounterCellIncrementBy(cell, 1);
+ EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 1);
+ auto* collection_registry = monitoring::CollectionRegistry::Default();
+ monitoring::CollectionRegistry::CollectMetricsOptions options;
+  std::unique_ptr<monitoring::CollectedMetrics> metrics =
+ collection_registry->CollectMetrics(options);
+
+ EXPECT_EQ("test/counter",
+ metrics->point_set_map.at("test/counter")->metric_name);
+ EXPECT_EQ(
+ 1, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
+
+ TFE_MonitoringCounterCellIncrementBy(cell, 5);
+ EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 6);
+ metrics = collection_registry->CollectMetrics(options);
+ EXPECT_EQ(
+ 6, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
+
+ TFE_MonitoringDeleteCounter0(counter);
+ metrics = collection_registry->CollectMetrics(options);
+ EXPECT_EQ(metrics->point_set_map.end(),
+ metrics->point_set_map.find("test/counter"));
+}
+
+TEST(CAPI, MonitoringCounterMultiple) {
+ TF_Status* status = TF_NewStatus();
+ auto* counter1 = TFE_MonitoringNewCounter1("test/counter1", status,
+ "description", "label1");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell1 = TFE_MonitoringGetCellCounter1(counter1, "test");
+ TFE_MonitoringCounterCellIncrementBy(cell1, 1);
+ EXPECT_EQ(TFE_MonitoringCounterCellValue(cell1), 1);
+
+ auto* counter2 = TFE_MonitoringNewCounter2("test/counter2", status,
+ "description", "label1", "label2");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ TF_DeleteStatus(status);
+ auto* cell2 = TFE_MonitoringGetCellCounter2(counter2, "foo", "bar");
+ TFE_MonitoringCounterCellIncrementBy(cell2, 2);
+ EXPECT_EQ(TFE_MonitoringCounterCellValue(cell2), 2);
+
+ TFE_MonitoringDeleteCounter1(counter1);
+ TFE_MonitoringDeleteCounter2(counter2);
+}
+
+TEST(CAPI, MonitoringGauge0) {
+ TF_Status* status = TF_NewStatus();
+ auto* gauge = TFE_MonitoringNewIntGauge0("test/gauge", status, "test");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell = TFE_MonitoringGetCellIntGauge0(gauge);
+ TFE_MonitoringIntGaugeCellSet(cell, 1);
+ EXPECT_EQ(TFE_MonitoringIntGaugeCellValue(cell), 1);
+ auto* collection_registry = monitoring::CollectionRegistry::Default();
+ monitoring::CollectionRegistry::CollectMetricsOptions options;
+  std::unique_ptr<monitoring::CollectedMetrics> metrics =
+ collection_registry->CollectMetrics(options);
+
+ EXPECT_EQ("test/gauge", metrics->point_set_map.at("test/gauge")->metric_name);
+ EXPECT_EQ(1,
+ metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
+
+ TFE_MonitoringIntGaugeCellSet(cell, 5);
+ metrics = collection_registry->CollectMetrics(options);
+ EXPECT_EQ(5,
+ metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
+ TF_DeleteStatus(status);
+}
+
+TEST(CAPI, MonitoringMultipleGauge) {
+ TF_Status* status = TF_NewStatus();
+ auto* gauge1 =
+ TFE_MonitoringNewBoolGauge1("test/gauge1", status, "test", "label1");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell1 = TFE_MonitoringGetCellBoolGauge1(gauge1, "foo");
+ TFE_MonitoringBoolGaugeCellSet(cell1, true);
+ EXPECT_TRUE(TFE_MonitoringBoolGaugeCellValue(cell1));
+
+ auto* gauge2 = TFE_MonitoringNewStringGauge2("test/gauge2", status, "test",
+ "label1", "label2");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell2 = TFE_MonitoringGetCellStringGauge2(gauge2, "foo", "bar");
+ TFE_MonitoringStringGaugeCellSet(cell2, "str");
+  TF_Buffer* buf = TF_NewBuffer();
+ TFE_MonitoringStringGaugeCellValue(cell2, buf);
+  string data(static_cast<const char*>(buf->data), buf->length);
+  TF_DeleteBuffer(buf);
+ EXPECT_EQ(data, "str");
+ TF_DeleteStatus(status);
+}
+
+TEST(CAPI, MonitoringSampler0) {
+ TF_Status* status = TF_NewStatus();
+ auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
+ auto* sampler =
+ TFE_MonitoringNewSampler0("test/sampler", buckets, status, "test");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell = TFE_MonitoringGetCellSampler0(sampler);
+ TFE_MonitoringSamplerCellAdd(cell, 1.0);
+ auto* collection_registry = monitoring::CollectionRegistry::Default();
+ monitoring::CollectionRegistry::CollectMetricsOptions options;
+  std::unique_ptr<monitoring::CollectedMetrics> metrics =
+ collection_registry->CollectMetrics(options);
+
+ EXPECT_EQ("test/sampler",
+ metrics->point_set_map.at("test/sampler")->metric_name);
+ EXPECT_EQ(1.0, metrics->point_set_map.at("test/sampler")
+ ->points.at(0)
+ ->histogram_value.sum());
+
+ TFE_MonitoringSamplerCellAdd(cell, 5.0);
+ metrics = collection_registry->CollectMetrics(options);
+ EXPECT_EQ(6.0, metrics->point_set_map.at("test/sampler")
+ ->points.at(0)
+ ->histogram_value.sum());
+ TFE_MonitoringDeleteBuckets(buckets);
+ TF_DeleteStatus(status);
+}
+
+TEST(CAPI, MonitoringMultipleSampler) {
+ TF_Status* status = TF_NewStatus();
+ auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
+ auto* sampler1 = TFE_MonitoringNewSampler1("test/sampler1", buckets, status,
+ "test", "label1");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell1 = TFE_MonitoringGetCellSampler1(sampler1, "foo");
+ TFE_MonitoringSamplerCellAdd(cell1, 1.0);
+ TFE_MonitoringSamplerCellAdd(cell1, 2.0);
+ TF_Buffer* result1 = TF_NewBuffer();
+ TFE_MonitoringSamplerCellValue(cell1, result1);
+  tensorflow::HistogramProto histogram1;
+  EXPECT_TRUE(histogram1.ParseFromString(
+      {reinterpret_cast<const char*>(result1->data), result1->length}));
+  EXPECT_EQ(histogram1.sum(), 3.0);
+  TF_DeleteBuffer(result1);
+
+ auto* sampler2 = TFE_MonitoringNewSampler2("test/sampler2", buckets, status,
+ "test", "label1", "label2");
+ CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ auto* cell2 = TFE_MonitoringGetCellSampler2(sampler2, "foo", "bar");
+ TFE_MonitoringSamplerCellAdd(cell2, 2.0);
+ TFE_MonitoringSamplerCellAdd(cell2, 3.0);
+ TF_Buffer* result2 = TF_NewBuffer();
+ TFE_MonitoringSamplerCellValue(cell2, result2);
+  tensorflow::HistogramProto histogram2;
+  EXPECT_TRUE(histogram2.ParseFromString(
+      {reinterpret_cast<const char*>(result2->data), result2->length}));
+  EXPECT_EQ(histogram2.sum(), 5.0);
+  TF_DeleteBuffer(result2);
+
+ TFE_MonitoringDeleteBuckets(buckets);
+ TF_DeleteStatus(status);
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/c/eager/c_api_internal.h b/tensorflow/c/eager/c_api_internal.h
index 35dafb9a7f1..061b0e5adcd 100644
--- a/tensorflow/c/eager/c_api_internal.h
+++ b/tensorflow/c/eager/c_api_internal.h
@@ -15,8 +15,6 @@ limitations under the License.
#ifndef TENSORFLOW_C_EAGER_C_API_INTERNAL_H_
#define TENSORFLOW_C_EAGER_C_API_INTERNAL_H_
-#include "tensorflow/c/eager/c_api.h"
-
#include
#include
#include