Compare commits


No commits in common. "rei/fork-r2.3" and "v2.3.0-rc0" have entirely different histories.

99 changed files with 499 additions and 2802 deletions

View File

@@ -94,9 +94,6 @@ build:libc++ --linkopt -fuse-ld=lld
# https://docs.bazel.build/versions/master/user-manual.html#flag--fat_apk_cpu
build:android --crosstool_top=//external:android/crosstool
build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:android --copt=-D_GLIBCXX_USE_C99
build:android --cxxopt=-std=c++14
build:android --action_env ANDROID_NDK_API_LEVEL=21
build:android_arm --config=android
build:android_arm --cpu=armeabi-v7a
build:android_arm --fat_apk_cpu=armeabi-v7a
@@ -205,29 +202,6 @@ build:sycl_asan --copt -fno-omit-frame-pointer --copt -fsanitize-coverage=3 --co
build:sycl_nodouble --config=sycl
build:sycl_trisycl --define=using_trisycl=true
build --copt=-DTFLITE_WITH_RUY_GEMV
build:rpi3 --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:rpi3 --crosstool_top=//third_party/toolchains/embedded/linaro-gcc72-armeabi:toolchain
build:rpi3 --cpu=armv7a --define=target_system=rpi3
build:rpi3 --copt=-march=armv7-a --copt=-mtune=cortex-a53 --copt=-mfloat-abi=hard --copt=-mfpu=neon-fp-armv8 --copt=-DRASPBERRY_PI --copt=-D_GLIBCXX_USE_CXX11_ABI=0 --copt=-std=gnu99 --copt=-mno-unaligned-access
build:rpi3 --define=tensorflow_mkldnn_contraction_kernel=0
build:rpi3_opt -c opt --config=rpi3 --copt=-funsafe-math-optimizations --copt=-ftree-vectorize --copt=-pipe
build:rpi3-armv8 --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:rpi3-armv8 --crosstool_top=//third_party/toolchains/embedded/linaro-gcc72-aarch64:toolchain
build:rpi3-armv8 --cpu=aarch64 --define=target_system=rpi3-armv8
build:rpi3-armv8 --copt=-march=armv8-a --copt=-mtune=cortex-a53 --copt=-DRASPBERRY_PI --copt=-D_GLIBCXX_USE_CXX11_ABI=0 --copt=-std=gnu99
build:rpi3-armv8 --define=tensorflow_mkldnn_contraction_kernel=0
build:rpi3-armv8_opt -c opt --config=rpi3-armv8 --copt=-funsafe-math-optimizations --copt=-ftree-vectorize --copt=-pipe
build:rpi4ub-armv8 --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:rpi4ub-armv8 --crosstool_top=//third_party/toolchains/embedded/linaro-gcc72-aarch64:toolchain
build:rpi4ub-armv8 --cpu=aarch64 --define=target_system=rpi4ub-armv8
build:rpi4ub-armv8 --copt=-march=armv8-a --copt=-mtune=cortex-a72 --copt=-DRASPBERRY_PI --copt=-D_GLIBCXX_USE_CXX11_ABI=0 --copt=-std=gnu99
build:rpi4ub-armv8 --define=tensorflow_mkldnn_contraction_kernel=0
build:rpi4ub-armv8_opt -c opt --config=rpi4ub-armv8 --copt=-funsafe-math-optimizations --copt=-ftree-vectorize --copt=-pipe
# Options extracted from configure script
build:ngraph --define=with_ngraph_support=true
build:numa --define=with_numa_support=true

View File

@@ -1,15 +0,0 @@
# Pull request guidelines
Welcome to the 🐸tensorflow project! We are excited to see your interest, and appreciate your support!
This repository is governed by the Contributor Covenant Code of Conduct. For more details, see the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file.
In order to make a good pull request, please see our [CONTRIBUTING.md](CONTRIBUTING.md) file.
Before accepting your pull request, you will be asked to sign a [Contributor License Agreement](https://cla-assistant.io/coqui-ai/tensorflow).
This [Contributor License Agreement](https://cla-assistant.io/coqui-ai/tensorflow):
- Protects you, Coqui, and the users of the code.
- Does not change your rights to use your contributions for any purpose.
- Does not change the license of the 🐸tensorflow project. It just makes the terms of your contribution clearer and lets us know you are OK to contribute.

View File

@@ -1,206 +1,15 @@
# Release 2.3.0
## Major Features and Improvements
* `tf.data` adds two new mechanisms to solve input pipeline bottlenecks and save resources:
* [snapshot](https://www.tensorflow.org/api_docs/python/tf/data/experimental/snapshot)
* [tf.data service](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service).
In addition, check out the detailed [guide](https://www.tensorflow.org/guide/data_performance_analysis) for analyzing input pipeline performance with TF Profiler. A short usage sketch of both mechanisms follows this list.
* [`tf.distribute.TPUStrategy`](https://www.tensorflow.org/api_docs/python/tf/distribute/TPUStrategy) is now a stable API and no longer considered experimental (formerly `tf.distribute.experimental.TPUStrategy`).
* [TF Profiler](https://www.tensorflow.org/guide/profiler) introduces two new tools: a memory profiler to visualize your model's memory usage over time and a [Python tracer](https://www.tensorflow.org/guide/profiler#events), which allows you to trace Python function calls in your model. Usability improvements include better diagnostic messages and [profile options](https://tensorflow.org/guide/profiler#collect_performance_data) to customize the host and device trace verbosity level.
* Introduces experimental support for Keras Preprocessing Layers API ([`tf.keras.layers.experimental.preprocessing.*`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing?version=nightly)) to handle data preprocessing operations, with support for composite tensor inputs. Please see below for additional details on these layers.
* TFLite now properly supports dynamic shapes during conversion and inference. We've also added opt-in support on Android and iOS for [XNNPACK](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/delegates/xnnpack), a highly optimized set of CPU kernels, as well as opt-in support for [executing quantized models on the GPU](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/gpu_advanced.md#running-quantized-models-experimental).
* Libtensorflow packages are available in GCS starting this release. We have also started to [release a nightly version of these packages](https://github.com/tensorflow/tensorflow#official-builds).
* The experimental Python API [`tf.debugging.experimental.enable_dump_debug_info()`](https://www.tensorflow.org/api_docs/python/tf/debugging/experimental/enable_dump_debug_info) now allows you to instrument a TensorFlow program and dump debugging information to a directory on the file system. The directory can be read and visualized by a new interactive dashboard in TensorBoard 2.3 called [Debugger V2](https://www.tensorflow.org/tensorboard/debugger_v2), which reveals the details of the TensorFlow program including graph structures, history of op executions at the Python (eager) and intra-graph levels, the runtime dtype, shape, and numerical composition of tensors, as well as their code locations.
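A minimal sketch of the two new `tf.data` mechanisms mentioned above; the snapshot path and the dispatcher address are placeholders:
```python
import tensorflow as tf

preprocessed = tf.data.Dataset.range(1000).map(lambda x: x * 2)

# snapshot: persist pre-processed elements to disk so later epochs
# (or later jobs) can skip the expensive map() work.
cached = preprocessed.apply(
    tf.data.experimental.snapshot("/tmp/tfdata_snapshot"))

# tf.data service: offload input processing to a cluster of workers;
# "grpc://dispatcher:5000" is a placeholder dispatcher address.
distributed = preprocessed.apply(
    tf.data.experimental.service.distribute(
        processing_mode="parallel_epochs",
        service="grpc://dispatcher:5000"))
```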
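And a minimal sketch of instrumenting a program for Debugger V2 (the dump directory is a placeholder):
```python
import tensorflow as tf

# Record op executions, graph structure, and tensor "health" summaries
# under /tmp/tfdbg2; inspect them with the TensorBoard Debugger V2 plugin:
#   tensorboard --logdir /tmp/tfdbg2
tf.debugging.experimental.enable_dump_debug_info(
    "/tmp/tfdbg2", tensor_debug_mode="FULL_HEALTH", circular_buffer_size=-1)
```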
## Breaking Changes
* Increases the **minimum bazel version** required to build TF to **3.1.0**.
* `tf.data`
* Makes the following (breaking) changes to `tf.data`:
* C++ API: `IteratorBase::RestoreInternal`, `IteratorBase::SaveInternal`, and `DatasetBase::CheckExternalState` become pure virtual, and subclasses are now expected to provide an implementation.
* The deprecated `DatasetBase::IsStateful` method is removed in favor of `DatasetBase::CheckExternalState`.
* Deprecated overrides of `DatasetBase::MakeIterator` and `MakeIteratorFromInputElement` are removed.
* The signature of `tensorflow::data::IteratorBase::SaveInternal` and `tensorflow::data::IteratorBase::SaveInput` has been extended with a `SerializationContext` argument to enable overriding the default policy for handling external state during iterator checkpointing. This is not a backwards-compatible change, and all subclasses of `IteratorBase` *need to be updated* accordingly.
* `tf.keras`
* Add a new `BackupAndRestore` callback for handling distributed training failures & restarts. Please take a look at this [tutorial](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras) for details on how to use the callback; a minimal sketch follows this list.
* `tf.image.extract_glimpse` has been updated to correctly process the case
where `centered=False` and `normalized=False`. This is a breaking change as
the output is different from (incorrect) previous versions. Note this
breaking change only impacts `tf.image.extract_glimpse` and
`tf.compat.v2.image.extract_glimpse` API endpoints. The behavior of
`tf.compat.v1.image.extract_glimpse` does not change. The behavior of
existing C++ kernel `ExtractGlimpse` does not change either, so saved
models using `tf.raw_ops.ExtractGlimpse` will not be impacted.
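A minimal sketch of the `BackupAndRestore` callback mentioned in this list; the backup directory and training data are placeholders:
```python
import numpy as np
import tensorflow as tf

x = np.random.rand(32, 4).astype("float32")
y = np.random.rand(32, 1).astype("float32")

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="sgd", loss="mse")

# Training state is checkpointed under backup_dir; if a worker is
# restarted mid-training, fit() resumes from the last saved state.
backup = tf.keras.callbacks.experimental.BackupAndRestore(
    backup_dir="/tmp/train_backup")
model.fit(x, y, epochs=5, callbacks=[backup])
```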
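And a sketch of the corrected `tf.image.extract_glimpse` behavior described above; this assumes that with `centered=False` and `normalized=False` the offsets are pixel coordinates relative to the upper-left corner:
```python
import tensorflow as tf

image = tf.reshape(tf.range(25.0), [1, 5, 5, 1])  # one 5x5 single-channel image
glimpse = tf.image.extract_glimpse(
    image, size=[3, 3], offsets=[[1.0, 1.0]],
    centered=False, normalized=False)
# glimpse has shape [1, 3, 3, 1]; earlier releases positioned the
# window incorrectly for this argument combination.
```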
## Known Caveats
* `tf.lite`
* Keras-based LSTM models must be converted with an explicit batch size in the input layer.
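A sketch of the workaround: give the input layer an explicit batch size before converting.
```python
import tensorflow as tf

inputs = tf.keras.Input(shape=(28, 28), batch_size=1)  # explicit batch size
outputs = tf.keras.layers.LSTM(16)(inputs)
model = tf.keras.Model(inputs, outputs)

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
```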
## Bug Fixes and Other Changes
### TF Core:
* Set `tf2_behavior` to 1 to enable V2 for early loading cases.
* Add an `execute_fn_for_device` function to dynamically choose the implementation based on underlying device placement.
* Eager:
* Add a `reduce_logsumexp` benchmark with experimental compile.
* Give `EagerTensor`s a meaningful `__array__` implementation.
* Add another version of defun matmul for performance analysis.
* `tf.function`/AutoGraph:
* `AutoGraph` now includes into TensorFlow loops any variables that are closed over by local functions. Previously, such variables were sometimes incorrectly ignored.
* Functions returned by the `get_concrete_function` method of `tf.function` objects can now be called with arguments consistent with the original arguments or type specs passed to `get_concrete_function`. This calling convention is now the preferred way to use concrete functions with nested values and composite tensors. Please check the [guide](https://www.tensorflow.org/guide/concrete_function) for more details on `concrete_function`.
* Update `tf.function`'s `experimental_relax_shapes` to handle composite tensors appropriately.
* Optimize `tf.function` invocation by removing a redundant list converter.
* `tf.function` will retrace when called with a different variable instead of simply using the `dtype` & `shape`.
* [Improve support](https://github.com/tensorflow/tensorflow/issues/33862) for dynamically-sized TensorArray inside `tf.function`.
* `tf.math`:
* Narrow down the `argmin`/`argmax` contract to always return the smallest index for ties (see the sketch after this list).
* `tf.math.reduce_variance` and `tf.math.reduce_std` return correct computation for complex types and no longer support integer types.
* Add Bessel functions of orders 0 and 1 to `tf.math.special`.
* `tf.divide` now always returns a tensor to be consistent with documentation and other APIs.
* `tf.image`:
* Replaced [`tf.image.non_max_suppression_padded`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/image/non_max_suppression_padded?hl=en) with a new implementation that supports batched inputs, which is considerably faster on TPUs and GPUs. Boxes with area=0 will be ignored. Existing usage with single inputs should still work as before.
* `tf.linalg`
* Add `tf.linalg.banded_triangular_solve`.
* `tf.random`:
* Add `tf.random.stateless_parameterized_truncated_normal`.
* `tf.ragged`:
* Add `tf.ragged.cross` and `tf.ragged.cross_hashed` operations.
* `tf.RaggedTensor`:
* `RaggedTensor.to_tensor()` now preserves static shape.
* Add `tf.strings.format()` and `tf.print()` to support RaggedTensors.
* `tf.saved_model`:
* `@tf.function` from SavedModel no longer ignores args after a `RaggedTensor` when selecting the concrete function to run.
* Fix a model-saving issue for ops with a list of functions.
* Add `tf.saved_model.LoadOptions` with [`experimental_io_device`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/saved_model/LoadOptions?hl=en) as arg with default value `None` to choose the I/O device for loading models and weights.
* Update `tf.saved_model.SaveOptions` with [`experimental_io_device`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/saved_model/SaveOptions?hl=en) as arg with default value `None` to choose the I/O device for saving models and weights.
* Mutable tables now restore checkpointed values when loaded from SavedModel.
* GPU
* TF 2.3 includes PTX kernels only for [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0 to reduce the TF pip binary size. Earlier releases included PTX for a variety of older compute capabilities.
* Others
* Retain parent namescope for ops added inside `tf.while_loop`/`tf.cond`/`tf.switch_case`.
* Update `tf.vectorized_map` to support vectorizing `tf.while_loop` and TensorList operations.
* `tf.custom_gradient` can now be applied to functions that accept nested structures of tensors as inputs (instead of just a list of tensors). Note that Python structures such as tuples and lists will no longer be treated as tensors, so if you still want them to be treated that way, you need to wrap them with `tf.convert_to_tensor`.
* No lowering on gradient case op when input is `DeviceIndex` op.
* Extend the ragged version of `tf.gather` to support `batch_dims` and `axis` args.
* Update `tf.map_fn` to support RaggedTensors and SparseTensors.
* Deprecate `tf.group`. It is not useful in eager mode.
* Add CPU and GPU implementations of a modified variation of [`FTRL`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/raw_ops/ApplyFtrl)/[`FTRLV2`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/raw_ops/ApplyFtrlV2) that can be triggered by `multiply_linear_by_lr`, allowing a learning rate of zero.
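For example, under the narrowed `argmin`/`argmax` contract, tie-breaking is now deterministic:
```python
import tensorflow as tf

x = tf.constant([1, 3, 3, 2])
print(tf.math.argmax(x).numpy())                       # 1: first of the tied maxima
print(tf.math.argmin(tf.constant([0, 2, 0])).numpy())  # 0: first of the tied minima
```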
### `tf.data`:
* `tf.data.experimental.dense_to_ragged_batch` works correctly with tuples.
* Extend `tf.data.experimental.dense_to_ragged_batch` to output variable ragged rank.
* `tf.data.experimental.cardinality` is now a method on `tf.data.Dataset`.
* `tf.data.Dataset` now supports `len(Dataset)` when the cardinality is finite.
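A quick sketch of the new `tf.data.Dataset` conveniences:
```python
import tensorflow as tf

ds = tf.data.Dataset.range(10).batch(4)
print(ds.cardinality().numpy())  # 3 -- cardinality() is now a Dataset method
print(len(ds))                   # 3 -- len() works because the cardinality is finite
```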
### `tf.distribute`:
* Expose experimental [`tf.distribute.DistributedDataset`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/distribute/DistributedDataset?hl=en) and [`tf.distribute.DistributedIterator`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/distribute/DistributedIterator) to distribute input data when using `tf.distribute` to scale training on multiple devices.
* Added a [`get_next_as_optional`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/distribute/DistributedIterator?hl=en#get_next_as_optional) method for the [`tf.distribute.DistributedIterator`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/distribute/DistributedIterator?hl=en) class to return a `tf.experimental.Optional` instance that contains the next value for all replicas, or none, instead of raising an out-of-range error. Also see the *new* [guide on input distribution](https://www.tensorflow.org/tutorials/distribute/input) and the sketch after this list.
* Allow `var.assign` on `MirroredVariable`s with `aggregation=NONE` in replica context. Previously this would raise an error. We now allow this because many users and library writers find using `.assign` in replica context more convenient than `Strategy.extended.update`, which was the previous way of updating variables in this situation.
* `tf.distribute.experimental.MultiWorkerMirroredStrategy` adds support for partial batches. Workers running out of data now continue to participate in the training with empty inputs, instead of raising an error. Learn more about [partial batches here](https://www.tensorflow.org/tutorials/distribute/input#partial_batches).
* Improve the performance of reading metrics eagerly under `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
* Fix the issue that `strategy.reduce()` inside `tf.function` may raise exceptions when the values to reduce are from loops or if-clauses.
* Fix the issue that `tf.distribute.MirroredStrategy` cannot be used together with `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
* Add a `tf.distribute.cluster_resolver.TPUClusterResolver.connect` API to simplify TPU initialization.
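A minimal sketch of the `get_next_as_optional` flow referenced in this list, assuming a single-host `MirroredStrategy`:
```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.range(8).batch(4)
dist_dataset = strategy.experimental_distribute_dataset(dataset)

iterator = iter(dist_dataset)
optional = iterator.get_next_as_optional()
while optional.has_value():
    batch = optional.get_value()  # per-replica values for this step
    optional = iterator.get_next_as_optional()
```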
### `tf.keras`:
* Introduces experimental preprocessing layers API (`tf.keras.layers.experimental.preprocessing`) to handle data preprocessing operations such as categorical feature encoding, text vectorization, data normalization, and data discretization (binning). The newly added layers provide a replacement for the legacy feature column API and support composite tensor inputs; see the sketch after this list.
* Added **categorical data** processing layers:
* `IntegerLookup` & `StringLookup`: build an index of categorical feature values
* `CategoryEncoding`: turn integer-encoded categories into one-hot, multi-hot, or tf-idf encoded representations
* `CategoryCrossing`: create new categorical features representing co-occurrences of previous categorical feature values
* `Hashing`: the hashing trick, for large-vocabulary categorical features
* `Discretization`: turn continuous numerical features into categorical features by binning their values
* Improved **image preprocessing** layers: `CenterCrop`, `Rescaling`
* Improved **image augmentation** layers: `RandomCrop`, `RandomFlip`, `RandomTranslation`, `RandomRotation`, `RandomHeight`, `RandomWidth`, `RandomZoom`, `RandomContrast`
* Improved **`TextVectorization`** layer, which handles string tokenization, n-gram generation, and token encoding
* The `TextVectorization` layer now accounts for the `mask_token` as part of the vocabulary size when `output_mode='int'`. This means that, if you have a `max_tokens` value of 5000, your output will have 5000 unique values (not 5001 as before).
* Change the return value of `TextVectorization.get_vocabulary()` from `byte` to `string`. Users who previously were calling `decode` on the output of this method should no longer need to do so.
* Introduce new Keras dataset generation utilities:
* **[`image_dataset_from_directory`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory)** is a utility based on `tf.data.Dataset`, meant to replace the legacy `ImageDataGenerator`. It takes you from a structured directory of images to a labeled dataset, in one function call. Note that it doesn't perform image data augmentation (which is meant to be done using preprocessing layers).
* **[`text_dataset_from_directory`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text_dataset_from_directory)** takes you from a structured directory of text files to a labeled dataset, in one function call.
* **[`timeseries_dataset_from_array`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/timeseries_dataset_from_array)** is a `tf.data.Dataset`-based replacement of the legacy `TimeseriesGenerator`. It takes you from an array of timeseries data to a dataset of shifting windows with their targets.
* Added [`experimental_steps_per_execution`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/keras/Model?hl=en#compile)
arg to `model.compile` to indicate the number of batches to run per `tf.function` call. This can speed up Keras models on TPUs by up to 3x.
* Extends `tf.keras.layers.Lambda` layers to support multi-argument lambdas, and keyword arguments when calling the layer.
* Functional models now get constructed if *any* tensor in a layer call's arguments/keyword arguments comes from a Keras input. Previously the functional API would only work if all of the elements in the first argument to the layer came from a Keras input.
* Clean up the `BatchNormalization` layer's `trainable` property to act like standard Python state when it's used inside `tf.function`s (frozen at tracing time), instead of acting like a pseudo-variable whose updates *kind of sometimes* get reflected in already-traced `tf.function` traces.
* Add the `Conv1DTranspose` layer.
* Refine the semantics of `SensitivitySpecificityBase` derived metrics. See the updated API docstrings for [`tf.keras.metrics.SensitivityAtSpecificity`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/keras/metrics/SensitivityAtSpecificity) and [`tf.keras.metrics.SpecificityAtSensitivity`](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/keras/metrics/SpecificityAtSensitivity).
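A small sketch of the preprocessing-layer workflow introduced at the top of this section; the data is synthetic, and `experimental_steps_per_execution` is the `compile` arg mentioned above:
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

data = np.random.rand(100, 4).astype("float32")
labels = np.random.rand(100, 1).astype("float32")

normalizer = preprocessing.Normalization()
normalizer.adapt(data)  # learn mean and variance from the data

model = tf.keras.Sequential([
    normalizer,                # preprocessing now travels with the model
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse",
              experimental_steps_per_execution=8)
model.fit(data, labels, epochs=2)
```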
### `tf.lite`:
* Converter
* Restored the `inference_input_type` and `inference_output_type` flags in the TF 2.x TFLiteConverter (backward compatible with TF 1.x) to support integer (`tf.int8`, `tf.uint8`) input and output types in post-training full-integer quantized models; see the converter sketch after this list.
* Added support for converting and resizing models with dynamic (placeholder) dimensions. Previously, there was only limited support for dynamic batch size, and even that did not guarantee that the model could be properly resized at runtime.
* Enabled experimental support for a new quantization mode with 16-bit activations and 8-bit weights. See `lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8`.
* CPU
* Fix an issue with dynamic weights and `Conv2D` on x86.
* Add a runtime Android flag for enabling `XNNPACK` for optimized CPU performance.
* Add a runtime iOS flag for enabling `XNNPACK` for optimized CPU performance.
* Add a compiler flag to enable building a TFLite library that applies the `XNNPACK` delegate automatically when the model has an `fp32` operation.
* GPU
* Allow GPU acceleration starting with internal graph nodes
* Experimental support for quantized models with the Android GPU delegate
* Add GPU delegate whitelist.
* Rename the GPU whitelist to compatibility list.
* Improve GPU compatibility list entries from crash reports.
* NNAPI
* Set default value for `StatefulNnApiDelegate::Options::max_number_delegated_partitions` to 3.
* Add capability to disable `NNAPI` CPU and check `NNAPI` Errno.
* Fix crashes when using `NNAPI` with a target accelerator specified and a model containing `Conv2d`, `FullyConnected`, or `LSTM` nodes with quantized weights.
* Fix `ANEURALNETWORKS_BAD_DATA` execution failures with `sum`/`max`/`min`/`reduce` operations with `scalar` inputs.
* Hexagon
* The TFLite Hexagon delegate is out of experimental.
* Experimental `int8` support for most Hexagon ops.
* Experimental per-channel quant support for `conv` in Hexagon delegate.
* Support dynamic batch size in C++ API.
* CoreML
* Open-source the CoreML delegate
* Misc
* Enable building Android TFLite targets on Windows
* Add support for `BatchMatMul`.
* Add support for `half_pixel_centers` with `ResizeNearestNeighbor`.
* Add 3D support for `BatchToSpaceND`.
* Add 5D support for `BroadcastSub`, `Maximum`, `Minimum`, `Transpose` and `BroadcastDiv`.
* Rename `kTfLiteActRelu1` to `kTfLiteActReluN1To1`.
* Enable the flex delegate in the `tensorflow.lite.Interpreter` Python package.
* Add `Buckettize`, `SparseCross` and `BoostedTreesBucketize` to the flex whitelist.
* Add support for selective registration of flex ops.
* Add missing kernels for flex delegate whitelisted ops.
* Fix issue when using direct `ByteBuffer` inputs with graphs that have dynamic shapes.
* Fix error checking supported operations in a model containing `HardSwish`.
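A sketch of a post-training full-integer quantization flow using the restored converter flags; the model path and representative dataset are placeholders:
```python
import tensorflow as tf

def representative_data_gen():
    for _ in range(100):
        yield [tf.random.normal([1, 224, 224, 3])]

converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/my_saved_model")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8    # restored in this release
converter.inference_output_type = tf.int8   # restored in this release
tflite_model = converter.convert()
```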
### Packaging Support
* Added `tf.sysconfig.get_build_info()`. Returns a dict that describes the build environment of the currently installed TensorFlow package, e.g. the NVIDIA CUDA and NVIDIA cuDNN versions used when TensorFlow was built.
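For example (the exact keys of the returned dict are an assumption):
```python
import tensorflow as tf

info = tf.sysconfig.get_build_info()
print(info.get("cuda_version"), info.get("cudnn_version"))
```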
### Profiler
* Fix a subtle use-after-free issue in `XStatVisitor::RefValue()`.
### TPU Enhancements
* Adds 3D mesh support in TPU configuration ops.
* Added TPU code for `FTRL` with `multiply_linear_by_lr`.
* Silently adds a new file system registry at `gstpu`.
* Support `restartType` in the Cloud TPU client.
* Depend on a specific version of google-api-python-client.
* Fixes the `apiclient` import.
### Tracing and Debugging
* Add a `TFE_Py_Execute` traceme.
### XLA Support
* Implement stable `argmin` and `argmax`.
## Thanks to our Contributors
This release contains contributions from many people at Google, as well as:
902449@58880@bigcat_chen@ASIC, Abdul Baseer Khan, Abhineet Choudhary, Abolfazl Shahbazi, Adam Hillier, ag.ramesh, Agoniii, Ajay P, Alex Hoffman, Alexander Bayandin, Alexander Grund, Alexandre Abadie, Alexey Rogachevskiy, amoitra, Andrew Stevens, Angus-Luo, Anshuman Tripathy, Anush Elangovan, Artem Mavrin, Ashutosh Hathidara, autoih, Ayushman Kumar, ayushmankumar7, Bairen Yi, Bas Aarts, Bastian Eichenberger, Ben Barsdell, bhack, Bharat Raghunathan, Biagio Montaruli, Bigcat-Himax, blueyi, Bryan Cutler, Byambaa, Carlos Hernandez-Vaquero, Chen Lei, Chris Knorowski, Christian Clauss, chuanqiw, CuiYifeng, Daniel Situnayake, Daria Zhuravleva, Dayananda-V, Deven Desai, Devi Sandeep Endluri, Dmitry Zakharov, Dominic Jack, Duncan Riach, Edgar Liberis, Ehsan Toosi, ekuznetsov139, Elena Zhelezina, Eugene Kuznetsov, Eugene Mikhantiev, Evgenii Zheltonozhskii, Fabio Di Domenico, Fausto Morales, Fei Sun, feihugis, Felix E. Klee, flyingcat, Frederic Bastien, Fredrik Knutsson, frreiss, fsx950223, ganler, Gaurav Singh, Georgios Pinitas, Gian Marco Iodice, Giorgio Arena, Giuseppe Rossini, Gregory Keith, Guozhong Zhuang, gurushantj, Hahn Anselm, Harald Husum, Harjyot Bagga, Hristo Vrigazov, Ilya Persky, Ir1d, Itamar Turner-Trauring, jacco, Jake Tae, Janosh Riebesell, Jason Zaman, jayanth, Jeff Daily, Jens Elofsson, Jinzhe Zeng, JLZ, Jonas Skog, Jonathan Dekhtiar, Josh Meyer, Joshua Chia, Judd, justkw, Kaixi Hou, Kam D Kasravi, Kamil Rakoczy, Karol Gugala, Kayou, Kazuaki Ishizaki, Keith Smiley, Khaled Besrour, Kilaru Yasaswi Sri Chandra Gandhi, Kim, Young Soo, Kristian Hartikainen, Kwabena W. Agyeman, Leslie-Fang, Leslie-Fang-Intel, Li, Guizi, Lukas Geiger, Lutz Roeder, Måns Nilsson, Mahmoud Abuzaina, Manish, Marcel Koester, Marcin Sielski, marload, Martin Jul, Matt Conley, mdfaijul, Meng, Peng, Meteorix, Michael Käufl, Michael137, Milan Straka, Mitchell Vitez, Ml-0, Mokke Meguru, Mshr-H, nammbash, Nathan Luehr, naumkin, Neeraj Bhadani, ngc92, Nick Morgan, nihui, Niranjan Hasabnis, Niranjan Yadla, Nishidha Panpaliya, Oceania2018, oclyke, Ouyang Jin, OverLordGoldDragon, Owen Lyke, Patrick Hemmer, Paul Andrey, Peng Sun, periannath, Phil Pearl, Prashant Dandriyal, Prashant Kumar, Rahul Huilgol, Rajan Singh, Rajeshwar Reddy T, rangjiaheng, Rishit Dagli, Rohan Reddy, rpalakkal, rposts, Ruan Kunliang, Rushabh Vasani, Ryohei Ikegami, Semun Lee, Seo-Inyoung, Sergey Mironov, Sharada Shiddibhavi, ShengYang1, Shraiysh Vaishay, Shunya Ueta, shwetaoj, Siyavash Najafzade, Srinivasan Narayanamoorthy, Stephan Uphoff, storypku, sunchenggen, sunway513, Sven-Hendrik Haase, Swapnil Parekh, Tamas Bela Feher, Teng Lu, tigertang, tomas, Tomohiro Ubukata, tongxuan.ltx, Tony Tonev, Tzu-Wei Huang, Téo Bouvard, Uday Bondhugula, Vaibhav Jade, Vijay Tadikamalla, Vikram Dattu, Vincent Abriou, Vishnuvardhan Janapati, Vo Van Nghia, VoVAllen, Will Battel, William D. Irons, wyzhao, Xiaoming (Jason) Cui, Xiaoquan Kong, Xinan Jiang, xutianming, Yair Ehrenwald, Yasir Modak, Yasuhiro Matsumoto, Yixing Fu, Yong Tang, Yuan Tang, zhaozheng09, Zilin Zhu, zilinzhu, 张志豪
* `tf.image.extract_glimpse` has been updated to correctly process the case
where `centered=False` and `normalized=False`. This is a breaking change as
the output is different from (incorrect) previous versions. Note this
breaking change only impacts `tf.image.extract_glimpse` and
`tf.compat.v2.image.extract_glimpse` API endpoints. The behavior of
`tf.compat.v1.image.extract_glimpse` does not change. The behavior of
existing C++ kernel `ExtractGlimpse` does not change either, so saved
models will not be impacted.
# Release 2.1.1

View File

@@ -18,18 +18,6 @@ load("//tensorflow:workspace.bzl", "tf_repositories")
# Please add all new TensorFlow dependencies in workspace.bzl.
tf_repositories()
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
git_repository(
name = "com_github_nelhage_rules_boost",
commit = "1e3a69bf2d5cd10c34b74f066054cd335d033d71",
remote = "https://github.com/nelhage/rules_boost",
shallow_since = "1591047380 -0700",
)
load("@com_github_nelhage_rules_boost//:boost/boost.bzl", "boost_deps")
boost_deps()
register_toolchains("@local_config_python//:py_toolchain")
load("@io_bazel_rules_closure//closure:defs.bzl", "closure_repositories")

View File

@@ -1 +0,0 @@
../native_client

View File

@@ -221,7 +221,8 @@ Status TfDataTypeFormDlDataType(const DLDataType& dtype,
// Wraps the deleter function of DLManagedTensor to match the function signature
// TFE_NewTensorHandleFromDeviceMemory.
void DeallocatorWrapperFunc(void* data, size_t len, void* dlmt_vptr) {
TFE_CallDLManagedTensorDeleter(dlmt_vptr);
DLManagedTensor* dlmt = static_cast<DLManagedTensor*>(dlmt_vptr);
dlmt->deleter(const_cast<DLManagedTensor*>(dlmt));
}
// Checks whether the stride array matches the layout of compact, row-majored
@@ -323,7 +324,7 @@ TFE_TensorHandle* TFE_HandleFromDLPack(void* dlm, TF_Status* status,
TFE_TensorHandle* handle = TFE_NewTensorHandleFromDeviceMemory(
ctx, device_name.value().c_str(), dtype, dims, num_dims, data,
total_bytes, &DeallocatorWrapperFunc, dlmt, status);
total_bytes, &DeallocatorWrapperFunc, &dlmt, status);
return handle;
}

View File

@@ -476,36 +476,10 @@ Status XlaComputationLaunchContext::PopulateOutputs(
stream->ThenRecordEvent(definition_event.get());
}
std::vector<TensorShape> output_tensor_shapes;
output_tensor_shapes.reserve(ctx->num_outputs());
if (output.on_host_shape().is_dynamic()) {
TF_ASSIGN_OR_RETURN(
auto transfer_manager,
xla::TransferManager::GetForPlatform(stream->parent()->platform()));
xla::Shape output_host_shape = output.on_host_shape();
xla::Shape output_device_shape = output.on_device_shape();
TF_RETURN_IF_ERROR(transfer_manager->ReadDynamicShapes(
stream, &output, &output_host_shape, &output_device_shape));
output.set_shapes(output_host_shape, output_device_shape);
for (int i = 0; i < ctx->num_outputs(); ++i) {
const xla::Shape& subshape =
xla::ShapeUtil::GetSubshape(output_host_shape, {i});
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(subshape, &shape));
output_tensor_shapes.push_back(shape);
}
} else {
for (int i = 0; i < ctx->num_outputs(); ++i) {
output_tensor_shapes.push_back(compilation_result->outputs[i].shape);
}
}
// Copy XLA results to the OpOutputList.
int output_num = 0;
for (int i = 0; i < ctx->num_outputs(); ++i) {
const TensorShape& shape = output_tensor_shapes[i];
const TensorShape& shape = compilation_result->outputs[i].shape;
const DataType& type = compilation_result->outputs[i].type;
VLOG(2) << "Retval " << i << " shape " << shape.DebugString() << " type "
<< DataTypeString(type);

View File

@@ -30,7 +30,6 @@ from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
@@ -775,7 +774,6 @@ class ResizeBilinearNonAlignCornersTest(xla_test.XLATestCase):
class NonMaxSuppressionTest(xla_test.XLATestCase):
@test_util.disable_mlir_bridge("%1")
def testNMS128From1024(self):
num_boxes = 1024
boxes_np = np.random.normal(50, 10, (num_boxes, 4)).astype("f4")
@@ -810,7 +808,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
self.assertEqual(indices_tf.size, max_output_size)
@test_util.disable_mlir_bridge("%1")
def testNMS3From6Boxes(self):
# Three boxes are selected based on IOU.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
@@ -852,7 +849,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [3, 0, 5])
@test_util.disable_mlir_bridge("%1")
def testNMS3Then2WithScoreThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
@@ -895,7 +891,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
self.assertEqual(num_valid, 2)
self.assertAllClose(indices_tf[:num_valid], [3, 0])
@test_util.disable_mlir_bridge("%1")
def testNMS3Then1WithScoreMaxThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
@@ -939,7 +934,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
self.assertEqual(num_valid, 1)
self.assertAllClose(indices_tf[:num_valid], [3])
@test_util.disable_mlir_bridge("%1")
def testSelectFromContinuousOverLap(self):
# Tests that a suppressed box does not itself suppress other boxes.
@@ -984,7 +978,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSFrom6(self):
boxes_data = [[[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]],
@@ -1022,7 +1015,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
indices_output)
self.assertAllEqual([5, 4], num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSFrom6Max3(self):
boxes_data = [[[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]],
@@ -1056,7 +1048,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
self.assertAllEqual([[0, 1, 2], [0, 1, 3]], indices_output)
self.assertAllEqual([3, 3], num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSSingleFrom6Max3(self):
boxes_data = [[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
@@ -1087,7 +1078,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
self.assertAllEqual([0, 1, 2], indices_output)
self.assertAllEqual(3, num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSSingleFrom6NoPad(self):
boxes_data = [[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
@@ -1117,7 +1107,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
self.assertAllEqual([0, 1, 2, 4, 5], indices_output)
self.assertAllEqual(5, num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSBatchDimsFrom6Max3(self):
boxes_data = [[[[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]],
@@ -1151,7 +1140,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
self.assertAllEqual([[[0, 1, 2], [0, 1, 3]]], indices_output)
self.assertAllEqual([[3, 3]], num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSScoreThresholdFrom6Max3(self):
boxes_data = [[[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]],
@@ -1187,7 +1175,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
self.assertAllEqual([3, 2], num_valid_output)
self.assertAllEqual([[0, 1, 2], [0, 1, invalid_index]], indices_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSUnsortedInputFrom6(self):
boxes_data = [[[0, 2, 1, 2], [3, 3, 4, 4], [0, 0, 1, 1],
[0, 0.4, 1, 1.4], [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8]],
@@ -1224,7 +1211,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
indices_output)
self.assertAllEqual([5, 4], num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSNoncanonicalizedInputFrom6(self):
boxes_data = [[[1, 0, 0, 1], [4, 3, 3, 4], [1, 0.4, 0, 1.4],
[1, 0.6, 0, 1.6], [1, 0.8, 0, 1.8], [1, 2, 0, 2]],
@@ -1262,7 +1248,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
indices_output)
self.assertAllEqual([5, 4], num_valid_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSScoreThresholdCanInputsFrom6Max3(self):
boxes_data = [[[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]],
@@ -1298,7 +1283,6 @@ class BatchedNonMaxSuppressionCorrectnessTest(xla_test.XLATestCase):
self.assertAllEqual([3, 2], num_valid_output)
self.assertAllEqual([[0, 1, 2], [0, 1, invalid_index]], indices_output)
@test_util.disable_mlir_bridge("%1")
def testBatchedNMSFrom6DynamicInput(self):
boxes_data = [[[0, 0, 1, 1], [3, 3, 4, 4], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]],

View File

@@ -1202,9 +1202,6 @@ cc_library(
srcs = ["transfer_manager.cc"],
hdrs = ["transfer_manager.h"],
deps = [
":compiler",
":executable",
":maybe_owning_device_memory",
":shaped_buffer",
"//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
@@ -1213,6 +1210,8 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto_cc",
"//tensorflow/compiler/xla/service:executable",
"//tensorflow/compiler/xla/service:maybe_owning_device_memory",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"//tensorflow/stream_executor:device_memory",

View File

@@ -20,7 +20,6 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/xla/service/compiler.h"
#include "tensorflow/compiler/xla/service/maybe_owning_device_memory.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -34,7 +33,6 @@ limitations under the License.
using absl::StrCat;
namespace xla {
/* static */ tensorflow::mutex
TransferManager::platform_transfer_manager_mutex_(
tensorflow::LINKER_INITIALIZED);
@@ -202,67 +200,6 @@ void TransferManager::TransferArrayFromDevice(
std::move(done), transfer_metadata);
}
Status TransferManager::ReadDynamicShapes(se::Stream* stream,
ShapedBuffer* device_buffer,
Shape* host_shape,
Shape* device_shape) {
DCHECK(device_shape->is_dynamic());
Shape original_device_shape = *device_shape;
Shape original_host_shape = *host_shape;
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
TF_ASSIGN_OR_RETURN(auto compiler,
Compiler::GetForPlatform(stream->parent()->platform()));
TF_RETURN_IF_ERROR(device_buffer->buffers().ForEachMutableElementWithStatus(
[&](const ShapeIndex& index, se::DeviceMemoryBase* buffer) {
const Shape& buffer_shape =
ShapeUtil::GetSubshape(*device_shape, index);
if (buffer_shape.IsTuple()) {
return Status::OK();
}
Shape& host_sub_shape =
*ShapeUtil::GetMutableSubshape(host_shape, index);
Shape& device_sub_shape =
*ShapeUtil::GetMutableSubshape(device_shape, index);
if (device_sub_shape.is_static()) {
return Status::OK();
}
// Read the dynamic shape metadata from the device stream.
auto shape_size_fn = compiler->ShapeSizeBytesFunction();
Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);
const int64 offset = shape_size_fn(buffer_shape_static);
int64 metadata_size = shape_size_fn(buffer_shape) - offset;
if (metadata_size == 0) {
return InvalidArgument("Dynamic shape metadata size should not be 0");
}
auto buffer_8 = se::DeviceMemory<uint8>(*buffer);
auto metadata_buffer =
stream->parent()->GetSubBuffer(&buffer_8, offset, metadata_size);
TF_ASSIGN_OR_RETURN(
auto metadata,
TransferArrayFromDevice(
stream,
ShapeUtil::MakeShape(S32, {buffer_shape.dimensions_size()}),
metadata_buffer));
// Update shape size from metadata.
for (int64 i = 0; i < metadata.element_count(); ++i) {
host_sub_shape.mutable_dimensions()[i] = metadata.Get<int32>({i});
device_sub_shape.mutable_dimensions()[i] = metadata.Get<int32>({i});
}
return Status::OK();
}));
host_shape->clear_dynamic_dimensions();
device_shape->clear_dynamic_dimensions();
TF_RET_CHECK(ShapeUtil::DynamicShapeIsCompatible(*device_shape,
original_device_shape));
TF_RET_CHECK(
ShapeUtil::DynamicShapeIsCompatible(*host_shape, original_host_shape));
return Status::OK();
}
/* static */ void TransferManager::RegisterTransferManager(
se::Platform::Id platform_id,
TransferManagerCreationFunction creation_function) {

View File

@@ -184,15 +184,6 @@ class TransferManager {
const se::DeviceMemoryBase& source,
const TransferMetadata* transfer_metadata = nullptr);
// Read from a device buffer and update the dynamic dimension sizes of
// `host_shape` and `device_shape`. The function takes in bounded dynamic
// shapes, and returns static shapes with dynamic shapes updated.
// The shape of the buffer also have to be compatible with the host shape and
// device shape.
virtual Status ReadDynamicShapes(se::Stream* stream,
ShapedBuffer* device_buffer,
Shape* host_shape, Shape* device_shape);
// Transfers the given literal into the Infeed interface of the device,
// using the given executor.
virtual Status TransferLiteralToInfeed(se::StreamExecutor* executor,

View File

@@ -264,28 +264,86 @@ Status UpdateDynamicInputs(
return Status::OK();
}
xla::StatusOr<xla::Literal> ReadMetadataLiteral(
se::Stream* stream, se::DeviceMemoryBase buffer,
const xla::Shape& buffer_shape, xla::TransferManager* transfer_manager) {
TF_ASSIGN_OR_RETURN(auto compiler, xla::Compiler::GetForPlatform(
stream->parent()->platform()));
auto shape_size_fn = compiler->ShapeSizeBytesFunction();
xla::Shape buffer_shape_static =
xla::ShapeUtil::MakeStaticShape(buffer_shape);
const int64 offset = shape_size_fn(buffer_shape_static);
int64 metadata_size = shape_size_fn(buffer_shape) - offset;
TF_RET_CHECK(metadata_size != 0);
auto buffer_8 = se::DeviceMemory<uint8>(buffer);
auto metadata_buffer =
stream->parent()->GetSubBuffer(&buffer_8, offset, metadata_size);
return transfer_manager->TransferArrayFromDevice(
stream,
xla::ShapeUtil::MakeShape(xla::S32, {buffer_shape.dimensions_size()}),
metadata_buffer);
}
// For each subshape in the result buffer that's dynamic, read the dynamic
// dimension sizes from the metadata, and update output shapes. The result shape
// is a static and concrete shape.
xla::Status UpdateDynamicOutputs(se::Stream* stream,
const xla::ShapedBuffer& shaped_buffer,
xla::Shape* output_host_shape,
xla::Shape* output_device_shape) {
DCHECK(output_device_shape->is_dynamic());
TF_ASSIGN_OR_RETURN(
auto transfer_manager,
xla::TransferManager::GetForPlatform(stream->parent()->platform()));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
TF_RETURN_IF_ERROR(shaped_buffer.buffers().ForEachElementWithStatus(
[&](const xla::ShapeIndex& index, const se::DeviceMemoryBase& buffer) {
const xla::Shape& buffer_shape =
xla::ShapeUtil::GetSubshape(*output_device_shape, index);
if (buffer_shape.IsTuple()) {
return Status::OK();
}
xla::Shape& host_shape =
*xla::ShapeUtil::GetMutableSubshape(output_host_shape, index);
xla::Shape& device_shape =
*xla::ShapeUtil::GetMutableSubshape(output_device_shape, index);
if (device_shape.is_static()) {
return Status::OK();
}
TF_ASSIGN_OR_RETURN(auto metadata,
ReadMetadataLiteral(stream, buffer, buffer_shape,
transfer_manager));
// Update shape size from metadata.
for (int64 i = 0; i < metadata.element_count(); ++i) {
host_shape.mutable_dimensions()[i] = metadata.Get<int32>({i});
device_shape.mutable_dimensions()[i] = metadata.Get<int32>({i});
}
return Status::OK();
}));
output_host_shape->clear_dynamic_dimensions();
output_device_shape->clear_dynamic_dimensions();
return Status::OK();
}
xla::StatusOr<RefPtr<XRTTupleAllocation>> CreateOutputTuple(
se::Stream* stream, xla::ExecutionOutput run_result, xla::Backend* backend,
int device_ordinal) {
XRTTupleAllocation* output_tuple;
xla::ScopedShapedBuffer* shaped_buffer = run_result.MutableResult();
if (shaped_buffer->on_device_shape().is_dynamic()) {
const xla::ScopedShapedBuffer& shaped_buffer = run_result.Result();
if (shaped_buffer.on_device_shape().is_dynamic()) {
// Update dynamic shapes from output buffer, and create a XRT tensor with
// dimension sizes read from metadata.
xla::Shape output_host_shape = shaped_buffer->on_host_shape();
xla::Shape output_device_shape = shaped_buffer->on_device_shape();
TF_ASSIGN_OR_RETURN(
auto transfer_manager,
xla::TransferManager::GetForPlatform(stream->parent()->platform()));
TF_RETURN_IF_ERROR(transfer_manager->ReadDynamicShapes(
xla::Shape output_host_shape = shaped_buffer.on_host_shape();
xla::Shape output_device_shape = shaped_buffer.on_device_shape();
TF_RETURN_IF_ERROR(UpdateDynamicOutputs(
stream, shaped_buffer, &output_host_shape, &output_device_shape));
TF_RETURN_IF_ERROR(XRTTupleAllocation::CreateFromBuffer(
*shaped_buffer, output_host_shape, output_device_shape, backend,
shaped_buffer, output_host_shape, output_device_shape, backend,
device_ordinal, &output_tuple));
} else {
// Fast-path: Don't copy shapes of output buffer.
TF_RETURN_IF_ERROR(XRTTupleAllocation::CreateFromBuffer(
*shaped_buffer, backend, device_ordinal, &output_tuple));
shaped_buffer, backend, device_ordinal, &output_tuple));
}
// After the output tuple is created, we can release the output result
// buffers, to make sure they won't be cleared by its destructor.

View File

@@ -28,8 +28,8 @@ tf_proto_library(
)
tf_proto_library(
name = "dispatcher_proto",
srcs = ["dispatcher.proto"],
name = "master_proto",
srcs = ["master.proto"],
has_services = 1,
cc_api_version = 2,
protodeps = tf_additional_all_protos() + [
@@ -49,17 +49,17 @@
)
cc_library(
name = "dispatcher_impl",
srcs = ["dispatcher_impl.cc"],
name = "master_impl",
srcs = ["master_impl.cc"],
hdrs = [
"dispatcher_impl.h",
"master_impl.h",
],
deps = [
":common_proto_cc",
":credentials_factory",
":data_service",
":dispatcher_proto_cc",
":grpc_util",
":master_proto_cc",
":worker_cc_grpc_proto",
":worker_proto_cc",
"//tensorflow/c:c_api_internal",
@@ -86,9 +86,9 @@ cc_library(
deps = [
":common_proto_cc",
":credentials_factory",
":dispatcher_cc_grpc_proto",
":dispatcher_proto_cc",
":grpc_util",
":master_cc_grpc_proto",
":master_proto_cc",
":worker_proto_cc",
"//tensorflow/c:c_api_internal",
"//tensorflow/c:tf_status_helper",
@@ -207,12 +207,12 @@ tf_cc_test(
)
cc_library(
name = "grpc_dispatcher_impl",
srcs = ["grpc_dispatcher_impl.cc"],
hdrs = ["grpc_dispatcher_impl.h"],
name = "grpc_master_impl",
srcs = ["grpc_master_impl.cc"],
hdrs = ["grpc_master_impl.h"],
deps = [
":dispatcher_cc_grpc_proto",
":dispatcher_impl",
":master_cc_grpc_proto",
":master_impl",
"//tensorflow/core/distributed_runtime/rpc:grpc_util",
tf_grpc_cc_dependency(),
],
@@ -250,7 +250,7 @@ cc_library(
],
deps = [
":credentials_factory",
":grpc_dispatcher_impl",
":grpc_master_impl",
":grpc_util",
":grpc_worker_impl",
"//tensorflow/core:lib",
@@ -268,9 +268,9 @@ cc_library(
],
deps = [
":credentials_factory",
":dispatcher_cc_grpc_proto",
":dispatcher_proto_cc",
":grpc_util",
":master_cc_grpc_proto",
":master_proto_cc",
":worker_cc_grpc_proto",
":worker_proto_cc",
"//tensorflow/core:framework",
@@ -287,12 +287,12 @@ tf_cc_test(
tags = ["no_windows"],
deps = [
":data_service",
":dispatcher_cc_grpc_proto",
":dispatcher_proto_cc",
":grpc_dispatcher_impl",
":grpc_master_impl",
":grpc_util",
":grpc_worker_impl",
":local_credentials_factory",
":master_cc_grpc_proto",
":master_proto_cc",
":server_lib",
":test_cluster",
":test_util",
@@ -309,11 +309,11 @@ tf_cc_test(
)
cc_grpc_library(
name = "dispatcher_cc_grpc_proto",
srcs = [":dispatcher_proto"],
name = "master_cc_grpc_proto",
srcs = [":master_proto"],
generate_mocks = True,
grpc_only = True,
deps = [":dispatcher_proto_cc"],
deps = [":master_proto_cc"],
)
cc_grpc_library(

View File

@@ -18,8 +18,8 @@ limitations under the License.
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/master.grpc.pb.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/framework/dataset.h"
@@ -54,8 +54,8 @@ std::string ProcessingModeToString(ProcessingMode mode) {
}
}
Status DataServiceDispatcherClient::RegisterDataset(GraphDef dataset,
int64* dataset_id) {
Status DataServiceMasterClient::RegisterDataset(GraphDef dataset,
int64* dataset_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrRegisterDatasetRequest req;
*req.mutable_dataset()->mutable_graph() = dataset;
@@ -69,9 +69,9 @@ Status DataServiceDispatcherClient::RegisterDataset(GraphDef dataset,
return Status::OK();
}
Status DataServiceDispatcherClient::CreateJob(int64 dataset_id,
ProcessingMode processing_mode,
int64* job_id) {
Status DataServiceMasterClient::CreateJob(int64 dataset_id,
ProcessingMode processing_mode,
int64* job_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
CreateJobRequest req;
req.set_dataset_id(dataset_id);
@@ -88,9 +88,11 @@ Status DataServiceDispatcherClient::CreateJob(int64 dataset_id,
return Status::OK();
}
Status DataServiceDispatcherClient::GetOrCreateJob(
int64 dataset_id, ProcessingMode processing_mode,
const std::string& job_name, int job_name_index, int64* job_id) {
Status DataServiceMasterClient::GetOrCreateJob(int64 dataset_id,
ProcessingMode processing_mode,
const std::string& job_name,
int job_name_index,
int64* job_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateJobRequest req;
req.set_dataset_id(dataset_id);
@@ -110,9 +112,9 @@ Status DataServiceDispatcherClient::GetOrCreateJob(
return Status::OK();
}
Status DataServiceDispatcherClient::GetTasks(int64 job_id,
std::vector<TaskInfo>* tasks,
bool* job_finished) {
Status DataServiceMasterClient::GetTasks(int64 job_id,
std::vector<TaskInfo>* tasks,
bool* job_finished) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetTasksRequest req;
req.set_job_id(job_id);
@@ -130,8 +132,7 @@ Status DataServiceDispatcherClient::GetTasks(int64 job_id,
return Status::OK();
}
Status DataServiceDispatcherClient::GetWorkers(
std::vector<WorkerInfo>* workers) {
Status DataServiceMasterClient::GetWorkers(std::vector<WorkerInfo>* workers) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetWorkersRequest req;
GetWorkersResponse resp;
@@ -147,12 +148,12 @@ Status DataServiceDispatcherClient::GetWorkers(
return Status::OK();
}
Status DataServiceDispatcherClient::EnsureInitialized() {
Status DataServiceMasterClient::EnsureInitialized() {
std::shared_ptr<grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(protocol_, &credentials));
auto channel = grpc::CreateChannel(address_, credentials);
stub_ = DispatcherService::NewStub(channel);
stub_ = MasterService::NewStub(channel);
return Status::OK();
}
@@ -186,11 +187,10 @@ Status DataServiceWorkerClient::EnsureInitialized() {
return Status::OK();
}
Status CreateDataServiceDispatcherClient(
Status CreateDataServiceMasterClient(
const std::string& address, const std::string& protocol,
std::unique_ptr<DataServiceDispatcherClient>* out) {
auto client =
absl::make_unique<DataServiceDispatcherClient>(address, protocol);
std::unique_ptr<DataServiceMasterClient>* out) {
auto client = absl::make_unique<DataServiceMasterClient>(address, protocol);
TF_RETURN_IF_ERROR(client->Initialize());
*out = std::move(client);
return Status::OK();

View File

@@ -16,7 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_DATA_SERVICE_DATA_SERVICE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_DATA_SERVICE_H_
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/master.grpc.pb.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -67,11 +67,11 @@ class DataServiceClientBase {
const std::string protocol_;
};
// Client for communicating with the tf.data service dispatcher.
class DataServiceDispatcherClient : public DataServiceClientBase {
// Client for communicating with the tf.data service master.
class DataServiceMasterClient : public DataServiceClientBase {
public:
DataServiceDispatcherClient(const std::string& address,
const std::string& protocol)
DataServiceMasterClient(const std::string& address,
const std::string& protocol)
: DataServiceClientBase(address, protocol) {}
// Registers a dataset with the tf.data service, and stores the generated
@@ -90,13 +90,13 @@ class DataServiceDispatcherClient : public DataServiceClientBase {
const std::string& job_name, int job_name_index,
int64* job_id);
// Queries the dispatcher for the tasks associated with the specified job.
// Queries the master for the tasks associated with the specified job.
// The tasks will be stored in *tasks, and whether the job is finished will
// be stored in `*job_finished`.
Status GetTasks(int64 job_id, std::vector<TaskInfo>* tasks,
bool* job_finished);
// Queries the dispatcher for its registered workers. The worker info will be
// Queries the master for its registered workers. The worker info will be
// stored in `*workers`.
Status GetWorkers(std::vector<WorkerInfo>* workers);
@@ -104,7 +104,7 @@ class DataServiceDispatcherClient : public DataServiceClientBase {
Status EnsureInitialized() override;
private:
std::unique_ptr<DispatcherService::Stub> stub_;
std::unique_ptr<MasterService::Stub> stub_;
};
// Client for communicating with the tf.data service worker.
@@ -127,10 +127,10 @@ class DataServiceWorkerClient : public DataServiceClientBase {
std::unique_ptr<WorkerService::Stub> stub_;
};
// Creates and initializes a new tf.data service dispatcher client.
Status CreateDataServiceDispatcherClient(
// Creates and initializes a new tf.data service master client.
Status CreateDataServiceMasterClient(
const std::string& address, const std::string& protocol,
std::unique_ptr<DataServiceDispatcherClient>* out);
std::unique_ptr<DataServiceMasterClient>* out);
// Creates and initializes a new tf.data service worker client.
Status CreateDataServiceWorkerClient(

View File

@@ -19,9 +19,9 @@ limitations under the License.
#include "grpcpp/security/credentials.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/data/compression_utils.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/master.grpc.pb.h"
#include "tensorflow/core/data/service/master.pb.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
@@ -66,10 +66,9 @@ TEST(DataService, ProcessingModeToString) {
TEST(DataService, GetWorkers) {
TestCluster cluster(1);
TF_ASSERT_OK(cluster.Initialize());
DataServiceDispatcherClient dispatcher(cluster.DispatcherAddress(),
kProtocol);
DataServiceMasterClient master(cluster.MasterAddress(), kProtocol);
std::vector<WorkerInfo> workers;
TF_EXPECT_OK(dispatcher.GetWorkers(&workers));
TF_EXPECT_OK(master.GetWorkers(&workers));
EXPECT_EQ(1, workers.size());
}

View File

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include "tensorflow/core/data/service/grpc_master_impl.h"
#include "grpcpp/server_context.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
@@ -25,18 +25,18 @@ using ::grpc::ServerBuilder;
using ::grpc::ServerContext;
using ::grpc::Status;
GrpcDispatcherImpl::GrpcDispatcherImpl(ServerBuilder* server_builder,
const std::string& protocol)
GrpcMasterImpl::GrpcMasterImpl(ServerBuilder* server_builder,
const std::string& protocol)
: impl_(protocol) {
server_builder->RegisterService(this);
VLOG(1) << "Registered data service dispatcher";
VLOG(1) << "Registered data service master";
}
#define HANDLER(method) \
Status GrpcDispatcherImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_.method(request, response)); \
#define HANDLER(method) \
Status GrpcMasterImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_.method(request, response)); \
}
HANDLER(RegisterWorker);
HANDLER(WorkerUpdate);

View File

@@ -13,12 +13,12 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_DATA_SERVICE_GRPC_DISPATCHER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_GRPC_DISPATCHER_IMPL_H_
#ifndef TENSORFLOW_CORE_DATA_SERVICE_GRPC_MASTER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_GRPC_MASTER_IMPL_H_
#include "grpcpp/server_builder.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher_impl.h"
#include "tensorflow/core/data/service/master.grpc.pb.h"
#include "tensorflow/core/data/service/master_impl.h"
namespace tensorflow {
namespace data {
@@ -29,14 +29,14 @@ namespace data {
//
// ::grpc::ServerBuilder builder;
// // configure builder
// GrpcDispatcherImpl data_service(&builder);
// GrpcMasterImpl data_service(&builder);
// builder.BuildAndStart()
//
class GrpcDispatcherImpl : public DispatcherService::Service {
class GrpcMasterImpl : public MasterService::Service {
public:
explicit GrpcDispatcherImpl(grpc::ServerBuilder* server_builder,
const std::string& protocol);
~GrpcDispatcherImpl() override {}
explicit GrpcMasterImpl(grpc::ServerBuilder* server_builder,
const std::string& protocol);
~GrpcMasterImpl() override {}
#define HANDLER(method) \
grpc::Status method(grpc::ServerContext* context, \
@@ -52,12 +52,12 @@ class GrpcDispatcherImpl : public DispatcherService::Service {
#undef HANDLER
private:
DataServiceDispatcherImpl impl_;
DataServiceMasterImpl impl_;
TF_DISALLOW_COPY_AND_ASSIGN(GrpcDispatcherImpl);
TF_DISALLOW_COPY_AND_ASSIGN(GrpcMasterImpl);
};
} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_DATA_SERVICE_GRPC_DISPATCHER_IMPL_H_
#endif // TENSORFLOW_CORE_DATA_SERVICE_GRPC_MASTER_IMPL_H_

View File

@ -26,9 +26,9 @@ using ::grpc::ServerContext;
using ::grpc::Status;
GrpcWorkerImpl::GrpcWorkerImpl(ServerBuilder* server_builder,
const std::string& dispatcher_address,
const std::string& master_address,
const std::string& protocol)
: impl_(dispatcher_address, protocol) {
: impl_(master_address, protocol) {
server_builder->RegisterService(this);
VLOG(1) << "Registered data service worker";
}

View File

@ -35,7 +35,7 @@ namespace data {
class GrpcWorkerImpl : public WorkerService::Service {
public:
explicit GrpcWorkerImpl(grpc::ServerBuilder* server_builder,
const std::string& dispatcher_address,
const std::string& master_address,
const std::string& protocol);
~GrpcWorkerImpl() override {}

View File

@ -110,11 +110,11 @@ message GetWorkersResponse {
repeated WorkerInfo workers = 1;
}
service DispatcherService {
// Registers a worker with the dispatcher.
service MasterService {
// Registers a worker with the master.
rpc RegisterWorker(RegisterWorkerRequest) returns (RegisterWorkerResponse);
// Updates the dispatcher with information about the worker's state.
// Updates the master with information about the worker's state.
rpc WorkerUpdate(WorkerUpdateRequest) returns (WorkerUpdateResponse);
// Registers a dataset with the server, or returns its id if it is already
@ -134,6 +134,6 @@ service DispatcherService {
// Reports a list of all tasks for a job.
rpc GetTasks(GetTasksRequest) returns (GetTasksResponse);
// Reports a list of all workers registered with the dispatcher.
// Reports a list of all workers registered with the master.
rpc GetWorkers(GetWorkersRequest) returns (GetWorkersResponse);
}
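A minimal client-side sketch of calling one of these RPCs, assuming a plaintext channel to the master (the address and credentials are illustrative; the real code obtains credentials via CredentialsFactory):
auto channel = ::grpc::CreateChannel("localhost:5050",
                                     ::grpc::InsecureChannelCredentials());
std::unique_ptr<MasterService::Stub> stub = MasterService::NewStub(channel);
GetWorkersRequest req;
GetWorkersResponse resp;
::grpc::ClientContext ctx;
::grpc::Status s = stub->GetWorkers(&ctx, req, &resp);
if (s.ok()) {
  // One WorkerInfo per worker currently registered with the master.
  LOG(INFO) << "Found " << resp.workers_size() << " workers";
}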

View File

@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/data/service/dispatcher_impl.h"
#include "tensorflow/core/data/service/master_impl.h"
#include <memory>
#include <tuple>
@ -26,8 +26,8 @@ limitations under the License.
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/data_service.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/master.pb.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/kernels/data/dataset_utils.h"
@ -53,10 +53,10 @@ Status CreateWorkerStub(const std::string& address,
}
} // namespace
DataServiceDispatcherImpl::DataServiceDispatcherImpl(const std::string protocol)
DataServiceMasterImpl::DataServiceMasterImpl(const std::string protocol)
: protocol_(protocol) {}
Status DataServiceDispatcherImpl::RegisterWorker(
Status DataServiceMasterImpl::RegisterWorker(
const RegisterWorkerRequest* request, RegisterWorkerResponse* response) {
VLOG(3) << "Received register worker request";
mutex_lock l(mu_);
@ -86,8 +86,8 @@ Status DataServiceDispatcherImpl::RegisterWorker(
return Status::OK();
}
Status DataServiceDispatcherImpl::WorkerUpdate(
const WorkerUpdateRequest* request, WorkerUpdateResponse* response) {
Status DataServiceMasterImpl::WorkerUpdate(const WorkerUpdateRequest* request,
WorkerUpdateResponse* response) {
mutex_lock l(mu_);
int64 worker_id = request->worker_id();
for (auto& update : request->updates()) {
@ -106,7 +106,7 @@ Status DataServiceDispatcherImpl::WorkerUpdate(
return Status::OK();
}
Status DataServiceDispatcherImpl::GetOrRegisterDataset(
Status DataServiceMasterImpl::GetOrRegisterDataset(
const GetOrRegisterDatasetRequest* request,
GetOrRegisterDatasetResponse* response) {
uint64 fingerprint;
@ -128,8 +128,8 @@ Status DataServiceDispatcherImpl::GetOrRegisterDataset(
return Status::OK();
}
int64 DataServiceDispatcherImpl::RegisterDataset(uint64 fingerprint,
const DatasetDef& dataset)
int64 DataServiceMasterImpl::RegisterDataset(uint64 fingerprint,
const DatasetDef& dataset)
EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64 dataset_id = next_dataset_id_++;
auto new_dataset =
@ -142,8 +142,8 @@ int64 DataServiceDispatcherImpl::RegisterDataset(uint64 fingerprint,
return dataset_id;
}
Status DataServiceDispatcherImpl::CreateJob(const CreateJobRequest* request,
CreateJobResponse* response) {
Status DataServiceMasterImpl::CreateJob(const CreateJobRequest* request,
CreateJobResponse* response) {
VLOG(3) << "Received create job request for dataset id "
<< request->dataset_id();
ProcessingMode processing_mode = ProcessingMode(request->processing_mode());
@ -157,7 +157,7 @@ Status DataServiceDispatcherImpl::CreateJob(const CreateJobRequest* request,
return Status::OK();
}
Status DataServiceDispatcherImpl::GetOrCreateJob(
Status DataServiceMasterImpl::GetOrCreateJob(
const GetOrCreateJobRequest* request, GetOrCreateJobResponse* response) {
VLOG(3) << "Received get or create job request for dataset id "
<< request->dataset_id() << " with name " << request->job_name()
@ -193,7 +193,7 @@ Status DataServiceDispatcherImpl::GetOrCreateJob(
}
// Validates that the job matches the given processing_mode and dataset_id.
Status DataServiceDispatcherImpl::ValidateMatchingJob(
Status DataServiceMasterImpl::ValidateMatchingJob(
const Job& job, ProcessingMode processing_mode, int64 dataset_id) {
DCHECK(job.name().has_value());
std::string job_name = job.name().value();
@ -214,10 +214,10 @@ Status DataServiceDispatcherImpl::ValidateMatchingJob(
return Status::OK();
}
Status DataServiceDispatcherImpl::CreateJob(
int64 dataset_id, ProcessingMode processing_mode,
absl::optional<std::string> job_name, int64* out_job_id)
LOCKS_EXCLUDED(mu_) {
Status DataServiceMasterImpl::CreateJob(int64 dataset_id,
ProcessingMode processing_mode,
absl::optional<std::string> job_name,
int64* out_job_id) LOCKS_EXCLUDED(mu_) {
switch (processing_mode) {
case ProcessingMode::PARALLEL_EPOCHS:
break;
@ -274,16 +274,14 @@ Status DataServiceDispatcherImpl::CreateJob(
return Status::OK();
}
const DataServiceDispatcherImpl::Task& DataServiceDispatcherImpl::CreateTask(
const DataServiceMasterImpl::Task& DataServiceMasterImpl::CreateTask(
Job* job, const std::string& worker_address) LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return CreateTaskLocked(job, worker_address);
}
const DataServiceDispatcherImpl::Task&
DataServiceDispatcherImpl::CreateTaskLocked(Job* job,
const std::string& worker_address)
EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const DataServiceMasterImpl::Task& DataServiceMasterImpl::CreateTaskLocked(
Job* job, const std::string& worker_address) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64 task_id = next_task_id_++;
DCHECK(!tasks_.contains(task_id));
tasks_.insert({task_id, Task(task_id, job->job_id(), job->dataset_id(),
@ -292,7 +290,7 @@ DataServiceDispatcherImpl::CreateTaskLocked(Job* job,
return tasks_.at(task_id);
}
Status DataServiceDispatcherImpl::EnsureWorkerStubInitialized(Worker* worker) {
Status DataServiceMasterImpl::EnsureWorkerStubInitialized(Worker* worker) {
if (!worker->stub()) {
std::unique_ptr<WorkerService::Stub> stub;
TF_RETURN_IF_ERROR(CreateWorkerStub(worker->address(), protocol_, &stub));
@ -301,8 +299,8 @@ Status DataServiceDispatcherImpl::EnsureWorkerStubInitialized(Worker* worker) {
return Status::OK();
}
Status DataServiceDispatcherImpl::AllocateTaskToWorker(const Task& task,
Worker* worker)
Status DataServiceMasterImpl::AllocateTaskToWorker(const Task& task,
Worker* worker)
LOCKS_EXCLUDED(mu_) {
TF_RETURN_IF_ERROR(EnsureWorkerStubInitialized(worker));
grpc::ClientContext client_ctx;
@ -324,8 +322,8 @@ Status DataServiceDispatcherImpl::AllocateTaskToWorker(const Task& task,
return Status::OK();
}
Status DataServiceDispatcherImpl::GetTasks(const GetTasksRequest* request,
GetTasksResponse* response) {
Status DataServiceMasterImpl::GetTasks(const GetTasksRequest* request,
GetTasksResponse* response) {
mutex_lock l(mu_);
VLOG(3) << "Looking up tasks for job id " << request->job_id();
auto it = jobs_.find(request->job_id());
@ -348,8 +346,8 @@ Status DataServiceDispatcherImpl::GetTasks(const GetTasksRequest* request,
return Status::OK();
}
Status DataServiceDispatcherImpl::GetWorkers(const GetWorkersRequest* request,
GetWorkersResponse* response) {
Status DataServiceMasterImpl::GetWorkers(const GetWorkersRequest* request,
GetWorkersResponse* response) {
mutex_lock l(mu_);
VLOG(3) << "Enter GetWorkers";
for (auto& worker : workers_) {

View File

@ -13,13 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_IMPL_H_
#ifndef TENSORFLOW_CORE_DATA_SERVICE_MASTER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_MASTER_IMPL_H_
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_service.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/master.pb.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
@ -40,11 +40,11 @@ namespace data {
// ProcessingModeDef which determines what data it produces.
// * Task: A job is broken into multiple tasks, which each represent
// iterating over all or part of the dataset. Workers process tasks.
class DataServiceDispatcherImpl {
class DataServiceMasterImpl {
public:
explicit DataServiceDispatcherImpl(const std::string protocol);
explicit DataServiceMasterImpl(const std::string protocol);
// See dispatcher.proto for API documentation.
// See master.proto for API documentation.
/// Worker-facing API.
Status RegisterWorker(const RegisterWorkerRequest* request,
@ -191,7 +191,7 @@ class DataServiceDispatcherImpl {
// Creates a new task for a job, returning a reference to the task.
const Task& CreateTask(Job* job, const std::string& worker_address)
LOCKS_EXCLUDED(mu_);
// Same as `CreateTask`, but expects that the dispatcher lock is already held.
// Same as `CreateTask`, but expects that the master lock is already held.
const Task& CreateTaskLocked(Job* job, const std::string& worker_address)
EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Validates that an existing job matches the given processing_mode and
@ -225,10 +225,10 @@ class DataServiceDispatcherImpl {
absl::flat_hash_map<NamedJobKey, std::shared_ptr<Job>> named_jobs_
TF_GUARDED_BY(mu_);
TF_DISALLOW_COPY_AND_ASSIGN(DataServiceDispatcherImpl);
TF_DISALLOW_COPY_AND_ASSIGN(DataServiceMasterImpl);
};
} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_IMPL_H_
#endif // TENSORFLOW_CORE_DATA_SERVICE_MASTER_IMPL_H_

View File

@ -16,7 +16,7 @@ limitations under the License.
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include "tensorflow/core/data/service/grpc_master_impl.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/grpc_worker_impl.h"
#include "tensorflow/core/platform/errors.h"
@ -72,18 +72,18 @@ void GrpcDataServerBase::Join() { server_->Wait(); }
int GrpcDataServerBase::BoundPort() { return bound_port(); }
DispatchGrpcDataServer::DispatchGrpcDataServer(int port,
const std::string& protocol)
MasterGrpcDataServer::MasterGrpcDataServer(int port,
const std::string& protocol)
: GrpcDataServerBase(port, protocol) {}
DispatchGrpcDataServer::~DispatchGrpcDataServer() { delete service_; }
MasterGrpcDataServer::~MasterGrpcDataServer() { delete service_; }
void DispatchGrpcDataServer::AddServiceToBuilder(grpc::ServerBuilder* builder) {
auto service = absl::make_unique<GrpcDispatcherImpl>(builder, protocol_);
void MasterGrpcDataServer::AddServiceToBuilder(grpc::ServerBuilder* builder) {
auto service = absl::make_unique<GrpcMasterImpl>(builder, protocol_);
service_ = service.release();
}
Status DispatchGrpcDataServer::NumWorkers(int* num_workers) {
Status MasterGrpcDataServer::NumWorkers(int* num_workers) {
GetWorkersRequest req;
GetWorkersResponse resp;
grpc::ServerContext ctx;
@ -95,18 +95,19 @@ Status DispatchGrpcDataServer::NumWorkers(int* num_workers) {
return Status::OK();
}
WorkerGrpcDataServer::WorkerGrpcDataServer(
int port, const std::string& protocol,
const std::string& dispatcher_address, const std::string& worker_address)
WorkerGrpcDataServer::WorkerGrpcDataServer(int port,
const std::string& protocol,
const std::string& master_address,
const std::string& worker_address)
: GrpcDataServerBase(port, protocol),
dispatcher_address_(dispatcher_address),
master_address_(master_address),
worker_address_(worker_address) {}
WorkerGrpcDataServer::~WorkerGrpcDataServer() { delete service_; }
void WorkerGrpcDataServer::AddServiceToBuilder(grpc::ServerBuilder* builder) {
auto service = absl::make_unique<GrpcWorkerImpl>(builder, dispatcher_address_,
protocol_);
auto service =
absl::make_unique<GrpcWorkerImpl>(builder, master_address_, protocol_);
service_ = service.release();
}
@ -122,25 +123,25 @@ Status WorkerGrpcDataServer::StartServiceInternal() {
return Status::OK();
}
Status NewDispatchServer(int port, const std::string& protocol,
std::unique_ptr<DispatchGrpcDataServer>* out_server) {
*out_server = absl::make_unique<DispatchGrpcDataServer>(port, protocol);
Status NewMasterServer(int port, const std::string& protocol,
std::unique_ptr<MasterGrpcDataServer>* out_server) {
*out_server = absl::make_unique<MasterGrpcDataServer>(port, protocol);
return Status::OK();
}
Status NewWorkerServer(int port, const std::string& protocol,
const std::string& dispatcher_address,
const std::string& master_address,
std::unique_ptr<WorkerGrpcDataServer>* out_server) {
return NewWorkerServer(port, protocol, dispatcher_address,
/*worker_address=*/"", out_server);
return NewWorkerServer(port, protocol, master_address, /*worker_address=*/"",
out_server);
}
Status NewWorkerServer(int port, const std::string& protocol,
const std::string& dispatcher_address,
const std::string& master_address,
const std::string& worker_address,
std::unique_ptr<WorkerGrpcDataServer>* out_server) {
*out_server = absl::make_unique<WorkerGrpcDataServer>(
port, protocol, dispatcher_address, worker_address);
port, protocol, master_address, worker_address);
return Status::OK();
}
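Putting the factories together, a minimal sketch (assuming the functions above, with error handling folded into a Status-returning helper) of bringing up an in-process master and one worker:
Status StartLocalCluster() {
  std::unique_ptr<MasterGrpcDataServer> master;
  TF_RETURN_IF_ERROR(NewMasterServer(/*port=*/0, "grpc", &master));
  TF_RETURN_IF_ERROR(master->Start());
  // Port 0 means "pick any free port"; recover the actual port here.
  std::string master_address = absl::StrCat("localhost:", master->BoundPort());
  std::unique_ptr<WorkerGrpcDataServer> worker;
  TF_RETURN_IF_ERROR(
      NewWorkerServer(/*port=*/0, "grpc", master_address, &worker));
  TF_RETURN_IF_ERROR(worker->Start());
  return Status::OK();
}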

View File

@ -25,7 +25,7 @@ namespace data {
// Forward declared because transitively depending on .grpc.pb.h files causes
// issues in the pywrap build.
class GrpcDispatcherImpl;
class GrpcMasterImpl;
class GrpcWorkerImpl;
// A grpc server for the tf.data service.
@ -35,7 +35,7 @@ class GrpcDataServerBase {
// server will find an available port in `Start()`. The chosen port can be
// found in the output of `Target()`.
//
// dispatcher_address is only needed for worker data servers.
// master_address is only needed for worker data servers.
GrpcDataServerBase(int requested_port, const std::string& protocol);
virtual ~GrpcDataServerBase() {}
@ -70,12 +70,12 @@ class GrpcDataServerBase {
std::unique_ptr<grpc::Server> server_;
};
class DispatchGrpcDataServer : public GrpcDataServerBase {
class MasterGrpcDataServer : public GrpcDataServerBase {
public:
DispatchGrpcDataServer(int requested_port, const std::string& protocol);
~DispatchGrpcDataServer() override;
MasterGrpcDataServer(int requested_port, const std::string& protocol);
~MasterGrpcDataServer() override;
// Returns the number of workers registered with the dispatcher.
// Returns the number of workers registered with the master.
Status NumWorkers(int* num_workers);
protected:
@ -83,14 +83,14 @@ class DispatchGrpcDataServer : public GrpcDataServerBase {
Status StartServiceInternal() override { return Status::OK(); }
private:
// Owned. We use a raw pointer because GrpcDispatcherImpl is forward-declared.
GrpcDispatcherImpl* service_;
// Owned. We use a raw pointer because GrpcMasterImpl is forward-declared.
GrpcMasterImpl* service_;
};
class WorkerGrpcDataServer : public GrpcDataServerBase {
public:
WorkerGrpcDataServer(int requested_port, const std::string& protocol,
const std::string& dispatcher_address,
const std::string& master_address,
const std::string& worker_address);
~WorkerGrpcDataServer() override;
@ -99,15 +99,15 @@ class WorkerGrpcDataServer : public GrpcDataServerBase {
Status StartServiceInternal() override;
private:
const std::string dispatcher_address_;
const std::string master_address_;
const std::string worker_address_;
// Owned. We use a raw pointer because GrpcWorkerImpl is forward-declared.
GrpcWorkerImpl* service_;
};
// Creates a dispatch tf.data server and stores it in `*out_server`.
Status NewDispatchServer(int port, const std::string& protocol,
std::unique_ptr<DispatchGrpcDataServer>* out_server);
// Creates a master tf.data server and stores it in `*out_server`.
Status NewMasterServer(int port, const std::string& protocol,
std::unique_ptr<MasterGrpcDataServer>* out_server);
// Creates a worker tf.data server and stores it in `*out_server`.
//
@ -115,18 +115,18 @@ Status NewDispatchServer(int port, const std::string& protocol,
// will be chosen in Start(). This value can be queried with BoundPort().
//
// The worker_address argument is optional. If left empty, it will default to
// "localhost:%port%". When the worker registers with the dispatcher, the worker
// will report the worker address, so that the dispatcher can tell clients where
// to read from. The address may contain the placeholder "%port%", which will be
// "localhost:%port%". When the worker registers with the master, the worker
// will report the worker address, so that the master can tell clients where to
// read from. The address may contain the placeholder "%port%", which will be
// replaced with the value of BoundPort().
Status NewWorkerServer(int port, const std::string& protocol,
const std::string& dispatcher_address,
const std::string& master_address,
const std::string& worker_address,
std::unique_ptr<WorkerGrpcDataServer>* out_server);
// Creates a worker using the default worker_address.
Status NewWorkerServer(int port, const std::string& protocol,
const std::string& dispatcher_address,
const std::string& master_address,
std::unique_ptr<WorkerGrpcDataServer>* out_server);
} // namespace data
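As a concrete example of the "%port%" placeholder described above, a worker that must advertise a routable hostname could be created as in the sketch below (the hostname is illustrative):
std::unique_ptr<WorkerGrpcDataServer> worker;
// "%port%" is replaced with the port actually bound during Start().
TF_RETURN_IF_ERROR(NewWorkerServer(
    /*port=*/0, "grpc", master_address,
    /*worker_address=*/"worker1.example.com:%port%", &worker));
TF_RETURN_IF_ERROR(worker->Start());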

View File

@ -45,9 +45,9 @@ Status TestCluster::Initialize() {
"Test cluster has already been initialized.");
}
initialized_ = true;
TF_RETURN_IF_ERROR(NewDispatchServer(/*port=*/0, kProtocol, &dispatcher_));
TF_RETURN_IF_ERROR(dispatcher_->Start());
dispatcher_address_ = absl::StrCat("localhost:", dispatcher_->BoundPort());
TF_RETURN_IF_ERROR(NewMasterServer(/*port=*/0, kProtocol, &master_));
TF_RETURN_IF_ERROR(master_->Start());
master_address_ = absl::StrCat("localhost:", master_->BoundPort());
workers_.reserve(num_workers_);
worker_addresses_.reserve(num_workers_);
for (int i = 0; i < num_workers_; ++i) {
@ -59,14 +59,14 @@ Status TestCluster::Initialize() {
Status TestCluster::AddWorker() {
std::unique_ptr<WorkerGrpcDataServer> worker;
TF_RETURN_IF_ERROR(
NewWorkerServer(/*port=*/0, kProtocol, dispatcher_address_, &worker));
NewWorkerServer(/*port=*/0, kProtocol, master_address_, &worker));
TF_RETURN_IF_ERROR(worker->Start());
worker_addresses_.push_back(absl::StrCat("localhost:", worker->BoundPort()));
workers_.push_back(std::move(worker));
return Status::OK();
}
std::string TestCluster::DispatcherAddress() { return dispatcher_address_; }
std::string TestCluster::MasterAddress() { return master_address_; }
std::string TestCluster::WorkerAddress(int index) {
DCHECK_GE(index, 0);

View File

@ -24,7 +24,7 @@ namespace data {
// Helper class for unit testing a tf.data service cluster.
class TestCluster {
public:
// Creates a new test cluster with a dispatcher and `num_workers` workers.
// Creates a new test cluster with a master and `num_workers` workers.
explicit TestCluster(int num_workers);
// Initializes the test cluster. This must be called before interacting with
@ -32,8 +32,8 @@ class TestCluster {
Status Initialize();
// Adds a new worker to the cluster.
Status AddWorker();
// Returns the dispatcher address in the form "hostname:port".
std::string DispatcherAddress();
// Returns the master address in the form "hostname:port".
std::string MasterAddress();
// Returns the address of the worker at the specified index, in the form
// "hostname:port". The index must be non-negative and less than the number of
// workers in the cluster.
@ -42,8 +42,8 @@ class TestCluster {
private:
bool initialized_ = false;
int num_workers_;
std::unique_ptr<DispatchGrpcDataServer> dispatcher_;
std::string dispatcher_address_;
std::unique_ptr<MasterGrpcDataServer> master_;
std::string master_address_;
std::vector<std::unique_ptr<WorkerGrpcDataServer>> workers_;
std::vector<std::string> worker_addresses_;
};
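A sketch of the intended usage, mirroring the updated GetWorkers test at the top of this change:
TestCluster cluster(/*num_workers=*/2);
TF_ASSERT_OK(cluster.Initialize());
DataServiceMasterClient master(cluster.MasterAddress(), kProtocol);
std::vector<WorkerInfo> workers;
TF_EXPECT_OK(master.GetWorkers(&workers));
EXPECT_EQ(2, workers.size());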

View File

@ -21,9 +21,9 @@ limitations under the License.
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/data/dataset.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/master.grpc.pb.h"
#include "tensorflow/core/data/service/master.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
@ -45,9 +45,9 @@ auto* tf_data_service_created =
"has been created.");
} // namespace
DataServiceWorkerImpl::DataServiceWorkerImpl(
const std::string& dispatcher_address, const std::string& protocol)
: dispatcher_address_(dispatcher_address), protocol_(protocol) {
DataServiceWorkerImpl::DataServiceWorkerImpl(const std::string& master_address,
const std::string& protocol)
: master_address_(master_address), protocol_(protocol) {
tf_data_service_created->GetCell()->Set(true);
}
@ -67,13 +67,14 @@ void DataServiceWorkerImpl::Start(const std::string& worker_address) {
heartbeat_thread_.reset(thread);
Status s = Register();
while (!s.ok()) {
LOG(WARNING) << "Failed to register with dispatcher at "
<< dispatcher_address_ << ": " << s;
LOG(WARNING) << "Failed to register with master at " << master_address_
<< ": " << s;
Env::Default()->SleepForMicroseconds(kHeartbeatIntervalMicros);
s = Register();
}
}
Status DataServiceWorkerImpl::ProcessTask(const ProcessTaskRequest* request,
ProcessTaskResponse* response) {
mutex_lock l(mu_);
@ -168,29 +169,29 @@ Status DataServiceWorkerImpl::GetElement(const GetElementRequest* request,
return Status::OK();
}
Status DataServiceWorkerImpl::EnsureDispatcherStubInitialized()
Status DataServiceWorkerImpl::EnsureMasterStubInitialized()
EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!dispatcher_stub_) {
if (!master_stub_) {
::grpc::ChannelArguments args;
std::shared_ptr<::grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(protocol_, &credentials));
auto channel =
::grpc::CreateCustomChannel(dispatcher_address_, credentials, args);
dispatcher_stub_ = DispatcherService::NewStub(channel);
::grpc::CreateCustomChannel(master_address_, credentials, args);
master_stub_ = MasterService::NewStub(channel);
}
return Status::OK();
}
Status DataServiceWorkerImpl::Register() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(3) << "Registering with dispatcher at " << dispatcher_address_;
TF_RETURN_IF_ERROR(EnsureDispatcherStubInitialized());
VLOG(3) << "Registering with master at " << master_address_;
TF_RETURN_IF_ERROR(EnsureMasterStubInitialized());
RegisterWorkerRequest req;
req.set_worker_address(worker_address_);
RegisterWorkerResponse resp;
grpc::ClientContext ctx;
grpc::Status s = dispatcher_stub_->RegisterWorker(&ctx, req, &resp);
grpc::Status s = master_stub_->RegisterWorker(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to register worker", s);
}
@ -204,8 +205,8 @@ Status DataServiceWorkerImpl::Register() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
Status DataServiceWorkerImpl::SendTaskUpdate() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(3) << "Sending " << pending_completed_tasks_.size()
<< " task updates to dispatcher";
TF_RETURN_IF_ERROR(EnsureDispatcherStubInitialized());
<< " task updates to master";
TF_RETURN_IF_ERROR(EnsureMasterStubInitialized());
WorkerUpdateRequest req;
req.set_worker_id(worker_id_);
for (int task_id : pending_completed_tasks_) {
@ -216,7 +217,7 @@ Status DataServiceWorkerImpl::SendTaskUpdate() EXCLUSIVE_LOCKS_REQUIRED(mu_) {
WorkerUpdateResponse resp;
grpc::ClientContext ctx;
grpc::Status s = dispatcher_stub_->WorkerUpdate(&ctx, req, &resp);
grpc::Status s = master_stub_->WorkerUpdate(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to send task updates", s);
}
@ -237,7 +238,7 @@ void DataServiceWorkerImpl::HeartbeatThread() {
}
Status s = SendTaskUpdate();
if (!s.ok()) {
LOG(WARNING) << "Failed to send task updates to dispatcher: " << s;
LOG(WARNING) << "Failed to send task updates to master: " << s;
}
}
}

View File

@ -17,7 +17,7 @@ limitations under the License.
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/master.grpc.pb.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/lib/core/status.h"
@ -29,17 +29,17 @@ namespace data {
// A TensorFlow DataService serves dataset elements over RPC.
class DataServiceWorkerImpl {
public:
explicit DataServiceWorkerImpl(const std::string& dispatcher_address,
explicit DataServiceWorkerImpl(const std::string& master_address,
const std::string& protocol);
~DataServiceWorkerImpl();
// Starts the worker. The worker needs to know its own address so that it can
// register with the dispatcher.
// register with the master.
void Start(const std::string& worker_address);
// See worker.proto for API documentation.
/// Dispatcher-facing API.
/// Master-facing API.
Status ProcessTask(const ProcessTaskRequest* request,
ProcessTaskResponse* response);
@ -48,15 +48,15 @@ class DataServiceWorkerImpl {
GetElementResponse* response);
private:
// Sets dispatcher_stub_ if it isn't already set.
Status EnsureDispatcherStubInitialized();
// Registers the worker with the dispatcher.
// Sets master_stub_ if it isn't already set.
Status EnsureMasterStubInitialized();
// Registers the worker with the master.
Status Register();
// Sends task status to the dispatcher.
// Sends task status to the master.
Status SendTaskUpdate();
// Creates an iterator to process a task.
Status ProcessTaskInternal(const TaskDef& task);
// A thread for updating the dispatcher with worker status.
// A thread for updating the master with worker status.
void HeartbeatThread();
typedef struct Task {
@ -67,19 +67,18 @@ class DataServiceWorkerImpl {
std::unique_ptr<standalone::Iterator> iterator;
} Task;
const std::string dispatcher_address_;
// Protocol for communicating with the dispatcher.
const std::string master_address_;
// Protocol for communicating with the master.
const std::string protocol_;
// The worker's own address.
std::string worker_address_;
mutex mu_;
int64 worker_id_ TF_GUARDED_BY(mu_);
std::unique_ptr<DispatcherService::Stub> dispatcher_stub_ TF_GUARDED_BY(mu_);
std::unique_ptr<MasterService::Stub> master_stub_ TF_GUARDED_BY(mu_);
// Information about tasks, keyed by task ids.
absl::flat_hash_map<int64, Task> tasks_ TF_GUARDED_BY(mu_);
// List of completed tasks which haven't yet been communicated to the
// dispatcher.
// List of completed tasks which haven't yet been communicated to the master.
std::vector<int64> pending_completed_tasks_ TF_GUARDED_BY(mu_);
bool cancelled_ TF_GUARDED_BY(mu_) = false;
// Condition variable for notifying the heartbeat thread.
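A minimal sketch of driving this class directly (in practice it is owned by WorkerGrpcDataServer; the addresses are illustrative):
DataServiceWorkerImpl worker(/*master_address=*/"localhost:5050",
                             /*protocol=*/"grpc");
// Start() registers with the master, retrying until registration
// succeeds, then spawns the heartbeat thread for task updates.
worker.Start(/*worker_address=*/"localhost:5051");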

View File

@ -5864,15 +5864,15 @@ cc_library(
":string_format_op",
":string_join_op",
":string_length_op",
# ":string_lower_op",
":string_lower_op",
":string_ngrams_op",
":string_split_op",
":string_strip_op",
":string_to_hash_bucket_op",
# ":string_upper_op",
":string_upper_op",
":substr_op",
# ":unicode_ops",
# ":unicode_script_op",
":unicode_ops",
":unicode_script_op",
":unsorted_segment_join_op",
],
)
@ -5885,7 +5885,7 @@ cc_library(
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
# "@icu//:common",
"@icu//:common",
],
)
@ -6041,7 +6041,7 @@ tf_kernel_library(
prefix = "string_lower_op",
deps = STRING_DEPS + [
"@com_google_absl//absl/strings",
# "@icu//:common",
"@icu//:common",
],
)
@ -6050,7 +6050,7 @@ tf_kernel_library(
prefix = "string_upper_op",
deps = STRING_DEPS + [
"@com_google_absl//absl/strings",
# "@icu//:common",
"@icu//:common",
],
)
@ -6096,7 +6096,7 @@ tf_kernel_library(
"//tensorflow/core:lib_internal",
"//third_party/eigen3",
"//third_party/icu/data:conversion_data",
# "@icu//:common",
"@icu//:common",
],
)
@ -7125,10 +7125,10 @@ filegroup(
"mutex_ops.*",
"batch_kernels.*",
"regex_replace_op.cc",
# "string_lower_op.cc", # Requires ICU for unicode.
# "string_upper_op.cc", # Requires ICU for unicode.
"string_lower_op.cc", # Requires ICU for unicode.
"string_upper_op.cc", # Requires ICU for unicode.
"unicode_ops.cc",
# "unicode_script_op.cc",
"unicode_script_op.cc",
# Ops that are inherently incompatible with Android (e.g. tied to x86 platform).
"mkl_*",
"xsmm_*",
@ -8620,7 +8620,7 @@ tf_kernel_library(
srcs = ["unicode_script_op.cc"],
deps = [
"//tensorflow/core:framework",
# "@icu//:common",
"@icu//:common",
],
)
@ -8652,39 +8652,6 @@ cc_library(
],
)
tf_kernel_library(
name = "deepspeech_cwise_ops",
srcs = [
"cwise_op_add_1.cc",
"cwise_op_add_2.cc",
"cwise_op_less.cc",
"cwise_op_minimum.cc",
"cwise_op_mul_1.cc",
"cwise_op_rsqrt.cc",
"cwise_op_squared_difference.cc",
"cwise_op_sub.cc",
"cwise_op_sigmoid.cc",
"cwise_op_tanh.cc",
],
gpu_srcs = [
"cwise_op_gpu_add.cu.cc",
"cwise_op_gpu_less.cu.cc",
"cwise_op_gpu_minimum.cu.cc",
"cwise_op_gpu_mul.cu.cc",
"cwise_op_gpu_rsqrt.cu.cc",
"cwise_op_gpu_squared_difference.cu.cc",
"cwise_op_gpu_sub.cu.cc",
"cwise_op_gpu_sigmoid.cu.cc",
"cwise_op_gpu_tanh.cu.cc",
],
deps = [
":cwise_lib",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//third_party/eigen3",
],
)
# Header-only version of cwise_lib for clients that want to use the cwise_ops
# functionality in their own custom ops.
cc_header_only_library(

View File

@ -116,7 +116,6 @@ REGISTER_KERNEL(GPU, int16);
REGISTER_KERNEL(GPU, qint16);
REGISTER_KERNEL(GPU, quint16);
REGISTER_KERNEL(GPU, uint32);
REGISTER_KERNEL(GPU, int32);
REGISTER_KERNEL(GPU, qint32);
REGISTER_KERNEL(GPU, int64);
REGISTER_KERNEL(GPU, uint64);

View File

@ -69,7 +69,7 @@ const int64 kDefaultTaskRefreshIntervalMs = 1000; // 1 second.
// Dataset for reading data from the tf.data service non-deterministically.
//
// This dataset interleaves dataset elements produced by multiple tf.data
// workers. We periodically query the dispatcher to determine which workers
// workers. We periodically query the tf.data master to determine which workers
// to read from (in case workers are added or removed).
class DataServiceDatasetOp::Dataset : public DatasetBase {
public:
@ -199,13 +199,12 @@ class DataServiceDatasetOp::Dataset : public DatasetBase {
Status Initialize(IteratorContext* ctx) override {
VLOG(3) << "Connecting to " << dataset()->address_
<< " in data service dataset op";
DataServiceDispatcherClient dispatcher(dataset()->address_,
dataset()->protocol_);
DataServiceMasterClient master(dataset()->address_, dataset()->protocol_);
if (dataset()->job_name_.empty()) {
TF_RETURN_IF_ERROR(dispatcher.CreateJob(
TF_RETURN_IF_ERROR(master.CreateJob(
dataset()->dataset_id_, dataset()->processing_mode_, &job_id_));
} else {
TF_RETURN_IF_ERROR(dispatcher.GetOrCreateJob(
TF_RETURN_IF_ERROR(master.GetOrCreateJob(
dataset()->dataset_id_, dataset()->processing_mode_,
dataset()->job_name_, iterator_index_, &job_id_));
}
@ -284,12 +283,11 @@ class DataServiceDatasetOp::Dataset : public DatasetBase {
// Periodically refresh the task list.
// Maintain one thread fetching elements for each task.
// TODO(aaudibert): Instead of polling, have dispatcher send updates when
// TODO(aaudibert): Instead of polling, have master send updates when
// the list of tasks changes.
void TaskThreadManager(std::unique_ptr<IteratorContext> ctx) {
VLOG(3) << "Starting task thread manager";
DataServiceDispatcherClient dispatcher(dataset()->address_,
dataset()->protocol_);
DataServiceMasterClient master(dataset()->address_, dataset()->protocol_);
uint64 next_check = Env::Default()->NowMicros();
while (true) {
{
@ -307,19 +305,18 @@ class DataServiceDatasetOp::Dataset : public DatasetBase {
return;
}
}
UpdateTasks(&dispatcher);
UpdateTasks(&master);
UpdateWorkerThreads(ctx.get());
next_check = Env::Default()->NowMicros() +
dataset()->task_refresh_interval_ms_ * 1000;
}
}
void UpdateTasks(DataServiceDispatcherClient* dispatcher)
LOCKS_EXCLUDED(mu_) {
void UpdateTasks(DataServiceMasterClient* master) LOCKS_EXCLUDED(mu_) {
VLOG(3) << "Updating tasks";
std::vector<TaskInfo> tasks;
bool job_finished;
Status s = dispatcher->GetTasks(job_id_, &tasks, &job_finished);
Status s = master->GetTasks(job_id_, &tasks, &job_finished);
if (!s.ok()) {
LOG(WARNING) << "Failed to get task info for job id " << job_id_ << ": "
<< s;

View File

@ -53,7 +53,7 @@ void RegisterDatasetOp::Compute(OpKernelContext* ctx) {
OP_REQUIRES_OK(
ctx, AsGraphDef(ctx, dataset, std::move(serialization_ctx), &graph_def));
DataServiceDispatcherClient client(address, protocol);
DataServiceMasterClient client(address, protocol);
int64 dataset_id;
OP_REQUIRES_OK(ctx, client.RegisterDataset(graph_def, &dataset_id));

View File

@ -25,7 +25,7 @@ namespace data {
// Registers a dataset with the tf.data service.
//
// The address and protocol inputs are used to connect to the dispatcher.
// The address and protocol inputs are used to connect to the tf.data master.
// The external state policy attribute determines whether to ignore, warn, or
// error out when the dataset contains external state.
// The op produces a dataset id for identifying the registered dataset.
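Client-side, the work done by this op reduces to the calls below (mirroring the kernel change above; graph_def is assumed to already hold the serialized dataset):
DataServiceMasterClient client(address, protocol);
int64 dataset_id;
TF_RETURN_IF_ERROR(client.RegisterDataset(graph_def, &dataset_id));
// dataset_id can now be passed to CreateJob / GetOrCreateJob.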

View File

@ -61,8 +61,6 @@ message SavedObject {
SavedConstant constant = 9;
SavedResource resource = 10;
}
map<string, SaveableObject> saveable_objects = 11;
}
// A SavedUserObject is an object (in the object-oriented language of the
@ -164,9 +162,3 @@ message SavedResource {
// device.
string device = 1;
}
message SaveableObject {
// Node ids of concrete functions for saving and loading from a checkpoint.
int32 save_function = 2;
int32 restore_function = 3;
}

View File

@ -26,7 +26,7 @@ limitations under the License.
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
#define TF_VERSION_SUFFIX ""
#define TF_VERSION_SUFFIX "-rc0"
#define TF_STR_HELPER(x) #x
#define TF_STR(x) TF_STR_HELPER(x)

View File

@ -57,6 +57,7 @@ cc_library(
"//conditions:default": [],
}) + select({
"//tensorflow:fuchsia": [],
"//tensorflow:windows": [],
"//conditions:default": [
"//tensorflow/lite/delegates/xnnpack:xnnpack_delegate",
],

View File

@ -77,7 +77,7 @@ class _DataServiceDatasetV2(dataset_ops.DatasetSource):
amount of memory used, since `distribute` won't use more than
`element_size` * `max_outstanding_requests` of memory.
task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
the dispatcher for task changes.
the master for task changes.
"""
if job_name is None:
@ -173,7 +173,7 @@ def _distribute(processing_mode,
of memory used, since `distribute` won't use more than `element_size` *
`max_outstanding_requests` of memory.
task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
dispatcher for task changes.
master for task changes.
Returns:
Dataset: A `Dataset` of the elements produced by the data service.

View File

@ -19,5 +19,5 @@ from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops.data_service_ops import distribute
from tensorflow.python.data.experimental.service.server_lib import DispatchServer
from tensorflow.python.data.experimental.service.server_lib import MasterServer
from tensorflow.python.data.experimental.service.server_lib import WorkerServer

View File

@ -24,35 +24,35 @@ from tensorflow.python.data.experimental.service import _pywrap_server_lib
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.service.DispatchServer", v1=[])
class DispatchServer(object):
"""An in-process tf.data service dispatch server.
@tf_export("data.experimental.service.MasterServer", v1=[])
class MasterServer(object):
"""An in-process tf.data service master server.
A `tf.data.experimental.service.DispatchServer` coordinates a cluster of
A `tf.data.experimental.service.MasterServer` coordinates a cluster of
`tf.data.experimental.service.WorkerServer`s. When the workers start, they
register themselves with the dispatcher.
register themselves with the master.
>>> dispatcher = tf.data.experimental.service.DispatchServer(port=0)
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> master = tf.data.experimental.service.MasterServer(port=0)
>>> master_address = master.target.split("://")[1]
>>> worker = tf.data.experimental.service.WorkerServer(
... port=0, dispatcher_address=dispatcher_address)
... port=0, master_address=master_address)
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="parallel_epochs", service=dispatcher.target))
... processing_mode="parallel_epochs", service=master.target))
>>> print(list(dataset.as_numpy_iterator()))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
When starting a dedicated tf.data dispatch process, use join() to block
When starting a dedicated tf.data master process, use join() to block
indefinitely after starting up the server.
```
dispatcher = tf.data.experimental.service.DispatchServer(port=5050)
dispatcher.join()
master = tf.data.experimental.service.MasterServer(port=5050)
master.join()
```
"""
def __init__(self, port, protocol=None, start=True):
"""Creates a new dispatch server.
"""Creates a new master server.
Args:
port: Specifies the port to bind to.
@ -68,16 +68,15 @@ class DispatchServer(object):
if protocol is None:
protocol = "grpc"
self._protocol = protocol
self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(port, protocol)
self._server = _pywrap_server_lib.TF_DATA_NewMasterServer(port, protocol)
if start:
self._server.start()
def start(self):
"""Starts this server.
>>> dispatcher = tf.data.experimental.service.DispatchServer(port=0,
... start=False)
>>> dispatcher.start()
>>> master = tf.data.experimental.service.MasterServer(port=0, start=False)
>>> master.start()
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
@ -88,11 +87,11 @@ class DispatchServer(object):
def join(self):
"""Blocks until the server has shut down.
This is useful when starting a dedicated dispatch process.
This is useful when starting a dedicated master process.
```
dispatcher = tf.data.experimental.service.DispatchServer(port=5050)
dispatcher.join()
master = tf.data.experimental.service.MasterServer(port=5050)
master.join()
```
Raises:
@ -105,10 +104,10 @@ class DispatchServer(object):
def target(self):
"""Returns a target that can be used to connect to the server.
>>> dispatcher = tf.data.experimental.service.DispatchServer(port=0)
>>> master = tf.data.experimental.service.MasterServer(port=0)
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="parallel_epochs", service=dispatcher.target))
... processing_mode="parallel_epochs", service=master.target))
The returned string will be in the form protocol://address, e.g.
"grpc://localhost:5050".
@ -137,7 +136,7 @@ class DispatchServer(object):
return "localhost:{0}".format(self._server.bound_port())
def _num_workers(self):
"""Returns the number of workers registered with the dispatcher."""
"""Returns the number of workers registered with the master."""
return self._server.num_workers()
@ -148,15 +147,15 @@ class WorkerServer(object):
A `tf.data.experimental.service.WorkerServer` performs `tf.data.Dataset`
processing for user-defined datasets, and provides the resulting elements over
RPC. A worker is associated with a single
`tf.data.experimental.service.DispatchServer`.
`tf.data.experimental.service.MasterServer`.
>>> dispatcher = tf.data.experimental.service.DispatchServer(port=0)
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> master = tf.data.experimental.service.MasterServer(port=0)
>>> master_address = master.target.split("://")[1]
>>> worker = tf.data.experimental.service.WorkerServer(
... port=0, dispatcher_address=dispatcher_address)
... port=0, master_address=master_address)
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="parallel_epochs", service=dispatcher.target))
... processing_mode="parallel_epochs", service=master.target))
>>> print(list(dataset.as_numpy_iterator()))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
@ -165,14 +164,14 @@ class WorkerServer(object):
```
worker = tf.data.experimental.service.WorkerServer(
port=5051, dispatcher_address="grpc://localhost:5050")
port=5051, master_address="grpc://localhost:5050")
worker.join()
```
"""
def __init__(self,
port,
dispatcher_address,
master_address,
worker_address=None,
protocol=None,
start=True):
@ -181,12 +180,11 @@ class WorkerServer(object):
Args:
port: Specifies the port to bind to. A value of 0 indicates that the
worker can bind to any available port.
dispatcher_address: Specifies the address of the dispatcher.
master_address: Specifies the address of the master server.
worker_address: (Optional.) Specifies the address of the worker server.
This address is passed to the dispatcher so that the dispatcher can
tell clients how to connect to this worker. Defaults to
`"localhost:%port%"`, where `%port%` will be replaced with the port used
by the worker.
This address is passed to the master server so that the master can tell
clients how to connect to this worker. Defaults to `"localhost:%port%"`,
where `%port%` will be replaced with the port used by the worker.
protocol: (Optional.) Specifies the protocol to be used by the server.
Acceptable values include `"grpc", "grpc+local"`. Defaults to `"grpc"`.
start: (Optional.) Boolean, indicating whether to start the server after
@ -203,7 +201,7 @@ class WorkerServer(object):
self._protocol = protocol
self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(
port, protocol, dispatcher_address, worker_address)
port, protocol, master_address, worker_address)
if start:
self._server.start()
@ -223,7 +221,7 @@ class WorkerServer(object):
```
worker_server = tf.data.experimental.service.WorkerServer(
port=5051, dispatcher_address="grpc://localhost:5050")
port=5051, master_address="grpc://localhost:5050")
worker_server.join()
```

View File

@ -25,68 +25,68 @@ from tensorflow.python.platform import test
class ServerLibTest(test.TestCase):
def testStartDispatcher(self):
dispatcher = server_lib.DispatchServer(0, start=False)
dispatcher.start()
def testStartMaster(self):
master = server_lib.MasterServer(0, start=False)
master.start()
def testMultipleStartDispatcher(self):
dispatcher = server_lib.DispatchServer(0, start=True)
dispatcher.start()
def testMultipleStartMaster(self):
master = server_lib.MasterServer(0, start=True)
master.start()
def testStartWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address, start=False)
master = server_lib.MasterServer(0)
worker = server_lib.WorkerServer(0, master._address, start=False)
worker.start()
def testMultipleStartWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address, start=True)
master = server_lib.MasterServer(0)
worker = server_lib.WorkerServer(0, master._address, start=True)
worker.start()
def testStopDispatcher(self):
dispatcher = server_lib.DispatchServer(0)
dispatcher._stop()
dispatcher._stop()
def testStopMaster(self):
master = server_lib.MasterServer(0)
master._stop()
master._stop()
def testStopWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address)
master = server_lib.MasterServer(0)
worker = server_lib.WorkerServer(0, master._address)
worker._stop()
worker._stop()
def testStopStartDispatcher(self):
dispatcher = server_lib.DispatchServer(0)
dispatcher._stop()
def testStopStartMaster(self):
master = server_lib.MasterServer(0)
master._stop()
with self.assertRaisesRegex(
RuntimeError, "Server cannot be started after it has been stopped"):
dispatcher.start()
master.start()
def testStopStartWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address)
master = server_lib.MasterServer(0)
worker = server_lib.WorkerServer(0, master._address)
worker._stop()
with self.assertRaisesRegex(
RuntimeError, "Server cannot be started after it has been stopped"):
worker.start()
def testJoinDispatcher(self):
dispatcher = server_lib.DispatchServer(0)
dispatcher._stop()
dispatcher.join()
def testJoinMaster(self):
master = server_lib.MasterServer(0)
master._stop()
master.join()
def testJoinWorker(self):
dispatcher = server_lib.DispatchServer(0)
worker = server_lib.WorkerServer(0, dispatcher._address)
master = server_lib.MasterServer(0)
worker = server_lib.WorkerServer(0, master._address)
worker._stop()
worker.join()
def testDispatcherNumWorkers(self):
dispatcher = server_lib.DispatchServer(0)
self.assertEqual(0, dispatcher._num_workers())
worker1 = server_lib.WorkerServer(0, dispatcher._address) # pylint: disable=unused-variable
self.assertEqual(1, dispatcher._num_workers())
worker2 = server_lib.WorkerServer(0, dispatcher._address) # pylint: disable=unused-variable
self.assertEqual(2, dispatcher._num_workers())
def testMasterNumWorkers(self):
master = server_lib.MasterServer(0)
self.assertEqual(0, master._num_workers())
worker1 = server_lib.WorkerServer(0, master._address) # pylint: disable=unused-variable
self.assertEqual(1, master._num_workers())
worker2 = server_lib.WorkerServer(0, master._address) # pylint: disable=unused-variable
self.assertEqual(2, master._num_workers())
if __name__ == "__main__":

View File

@ -28,14 +28,13 @@ limitations under the License.
namespace py = pybind11;
PYBIND11_MODULE(_pywrap_server_lib, m) {
py::class_<tensorflow::data::DispatchGrpcDataServer>(m,
"DispatchGrpcDataServer")
.def("start", &tensorflow::data::DispatchGrpcDataServer::Start)
.def("stop", &tensorflow::data::DispatchGrpcDataServer::Stop)
.def("join", &tensorflow::data::DispatchGrpcDataServer::Join)
.def("bound_port", &tensorflow::data::DispatchGrpcDataServer::BoundPort)
py::class_<tensorflow::data::MasterGrpcDataServer>(m, "MasterGrpcDataServer")
.def("start", &tensorflow::data::MasterGrpcDataServer::Start)
.def("stop", &tensorflow::data::MasterGrpcDataServer::Stop)
.def("join", &tensorflow::data::MasterGrpcDataServer::Join)
.def("bound_port", &tensorflow::data::MasterGrpcDataServer::BoundPort)
.def("num_workers",
[](tensorflow::data::DispatchGrpcDataServer* server) -> int {
[](tensorflow::data::MasterGrpcDataServer* server) -> int {
int num_workers;
tensorflow::Status status = server->NumWorkers(&num_workers);
tensorflow::MaybeRaiseFromStatus(status);
@ -49,12 +48,12 @@ PYBIND11_MODULE(_pywrap_server_lib, m) {
.def("bound_port", &tensorflow::data::WorkerGrpcDataServer::BoundPort);
m.def(
"TF_DATA_NewDispatchServer",
"TF_DATA_NewMasterServer",
[](int port, std::string protocol)
-> std::unique_ptr<tensorflow::data::DispatchGrpcDataServer> {
std::unique_ptr<tensorflow::data::DispatchGrpcDataServer> server;
-> std::unique_ptr<tensorflow::data::MasterGrpcDataServer> {
std::unique_ptr<tensorflow::data::MasterGrpcDataServer> server;
tensorflow::Status status =
tensorflow::data::NewDispatchServer(port, protocol, &server);
tensorflow::data::NewMasterServer(port, protocol, &server);
tensorflow::MaybeRaiseFromStatus(status);
return server;
},
@ -62,12 +61,12 @@ PYBIND11_MODULE(_pywrap_server_lib, m) {
m.def(
"TF_DATA_NewWorkerServer",
[](int port, std::string protocol, std::string dispatcher_address,
[](int port, std::string protocol, std::string master_address,
std::string worker_address)
-> std::unique_ptr<tensorflow::data::WorkerGrpcDataServer> {
std::unique_ptr<tensorflow::data::WorkerGrpcDataServer> server;
tensorflow::Status status = tensorflow::data::NewWorkerServer(
port, protocol, dispatcher_address, worker_address, &server);
port, protocol, master_address, worker_address, &server);
tensorflow::MaybeRaiseFromStatus(status);
return server;
},

View File

@ -59,25 +59,23 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
num_workers: The number of workers in the cluster.
Returns:
The address of the dispatcher.
The address of the master.
"""
self._dispatcher = server_lib.DispatchServer(port=0, protocol=PROTOCOL)
self._master = server_lib.MasterServer(port=0, protocol=PROTOCOL)
self._servers = []
for _ in range(num_workers):
self._servers.append(
server_lib.WorkerServer(
port=0,
dispatcher_address=self._dispatcher._address,
protocol=PROTOCOL))
port=0, master_address=self._master._address, protocol=PROTOCOL))
return self._dispatcher._address
return self._master._address
@combinations.generate(test_base.eager_only_combinations())
def testDistributeBasic(self):
num_elements = 10
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
results = [elem.numpy() for elem in ds]
self.assertEqual(list(range(num_elements)), results)
@ -85,10 +83,10 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testDifferentShuffleOrders(self):
random_seed.set_random_seed(None)
num_elements = 100
dispatcher_address = self.create_cluster(2)
master_address = self.create_cluster(2)
ds = dataset_ops.Dataset.range(num_elements)
ds = ds.shuffle(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
output = [elem.numpy() for elem in ds]
# The output will be two sequences of range(num_elements)
@ -106,9 +104,9 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testMultipleEpochs(self):
num_elements = 3
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
for _ in range(10):
self.assertEqual(list(range(num_elements)), [elem.numpy() for elem in ds])
@ -116,9 +114,9 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testRepeatedDataset(self):
num_elements = 10
num_repetitions = 5
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
ds = ds.repeat(num_repetitions)
self.assertDatasetProduces(
ds, expected_output=num_repetitions * list(range(num_elements)))
@ -127,12 +125,12 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testConcurrentEpoch(self):
num_elements = 10
num_datasets = 3
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
iterators = []
results = []
for _ in range(num_datasets):
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
iterators.append(iter(ds))
results.append([])
@ -148,9 +146,9 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
self.skipTest("Not yet implemented")
num_elements = 10
num_iterators = 3
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
result = []
iterators = []
for _ in range(num_iterators):
@ -172,20 +170,20 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testMultiWorker(self):
num_workers = 3
num_elements = 10
dispatcher_address = self.create_cluster(num_workers)
master_address = self.create_cluster(num_workers)
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
results = [elem.numpy() for elem in ds]
self.assertCountEqual(num_workers * list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testAddWorkerMidJob(self):
self._dispatcher = server_lib.DispatchServer(port=0, protocol=PROTOCOL)
self._master = server_lib.MasterServer(port=0, protocol=PROTOCOL)
self._worker = server_lib.WorkerServer(
port=0, dispatcher_address=self._dispatcher._address, protocol=PROTOCOL)
port=0, master_address=self._master._address, protocol=PROTOCOL)
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, self._dispatcher._address)
ds = _make_distributed_dataset(ds, self._master._address)
iterator = iter(ds)
results = []
# Read halfway through the dataset.
@ -193,10 +191,10 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
results.append(next(iterator).numpy())
self._new_worker = server_lib.WorkerServer(
port=0, dispatcher_address=self._dispatcher._address, protocol=PROTOCOL)
port=0, master_address=self._master._address, protocol=PROTOCOL)
# Wait for the new worker to register with the dispatcher.
while self._dispatcher._num_workers() < 2:
# Wait for the new worker to register with the master.
while self._master._num_workers() < 2:
time.sleep(10 / 1000) # 10ms
for elem in iterator:
@ -208,12 +206,12 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
combinations.times(test_base.eager_only_combinations(),
combinations.combine(use_same_port=[True, False])))
def testRestartWorker(self, use_same_port):
self._dispatcher = server_lib.DispatchServer(port=0, protocol=PROTOCOL)
self._master = server_lib.MasterServer(port=0, protocol=PROTOCOL)
self._worker = server_lib.WorkerServer(
port=0, dispatcher_address=self._dispatcher._address, protocol=PROTOCOL)
port=0, master_address=self._master._address, protocol=PROTOCOL)
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, self._dispatcher._address)
ds = _make_distributed_dataset(ds, self._master._address)
iterator = iter(ds)
# Read halfway through the dataset.
midpoint = num_elements // 2
@ -226,9 +224,7 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
port = int(self._worker._address.split(":")[1])
self._worker._stop()
self._new_worker = server_lib.WorkerServer(
port=port,
dispatcher_address=self._dispatcher._address,
protocol=PROTOCOL)
port=port, master_address=self._master._address, protocol=PROTOCOL)
# There may have been some elements prefetched from the first worker
# before it was stopped.
@ -263,12 +259,12 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testInsideFunction(self):
num_workers = 3
num_elements = 10
dispatcher_address = self.create_cluster(num_workers)
master_address = self.create_cluster(num_workers)
@def_function.function
def f():
ds = dataset_ops.Dataset.range(num_elements)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
result = tensor_array_ops.TensorArray(
dtypes.int64, size=num_workers * num_elements, dynamic_size=True)
i = 0
@ -283,10 +279,10 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobName(self):
num_elements = 100
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(ds, dispatcher_address, job_name="job_name")
ds2 = _make_distributed_dataset(ds, dispatcher_address, job_name="job_name")
ds1 = _make_distributed_dataset(ds, master_address, job_name="job_name")
ds2 = _make_distributed_dataset(ds, master_address, job_name="job_name")
iter1 = iter(ds1)
iter2 = iter(ds2)
results = []
@ -302,22 +298,20 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testDifferentJobNames(self):
num_elements = 10
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(
ds, dispatcher_address, job_name="job_name1")
ds2 = _make_distributed_dataset(
ds, dispatcher_address, job_name="job_name2")
ds1 = _make_distributed_dataset(ds, master_address, job_name="job_name1")
ds2 = _make_distributed_dataset(ds, master_address, job_name="job_name2")
self.assertDatasetProduces(ds1, list(range(num_elements)))
self.assertDatasetProduces(ds2, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobNameMultiIteration(self):
num_elements = 10
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(ds, dispatcher_address, job_name="job_name")
ds2 = _make_distributed_dataset(ds, dispatcher_address, job_name="job_name")
ds1 = _make_distributed_dataset(ds, master_address, job_name="job_name")
ds2 = _make_distributed_dataset(ds, master_address, job_name="job_name")
# iteration 1
self.assertDatasetProduces(ds1, list(range(num_elements)))
self.assertDatasetProduces(ds2, [])
@ -329,11 +323,11 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testSharedJobNameRepeat(self):
num_elements = 100
num_repetitions = 3
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(num_elements)
ds1 = _make_distributed_dataset(ds, dispatcher_address, job_name="job_name")
ds1 = _make_distributed_dataset(ds, master_address, job_name="job_name")
ds1 = ds1.repeat(num_repetitions)
ds2 = _make_distributed_dataset(ds, dispatcher_address, job_name="job_name")
ds2 = _make_distributed_dataset(ds, master_address, job_name="job_name")
ds2 = ds2.repeat(num_repetitions)
results = []
iter1 = iter(ds1)
@ -351,7 +345,7 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testApplyDeterminismOption(self):
elements = list(range(10))
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
def dataset_fn(delay_ms):
@ -368,7 +362,7 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
opts = dataset_ops.Options()
opts.experimental_deterministic = False
ds = ds.with_options(opts)
ds = _make_distributed_dataset(ds, dispatcher_address)
ds = _make_distributed_dataset(ds, master_address)
return ds
self.checkDeterminism(
@ -385,8 +379,8 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
options.experimental_external_state_policy = external_state_policy
ds = ds.with_options(options)
dispatcher_address = self.create_cluster(3)
ds = _make_distributed_dataset(ds, dispatcher_address)
master_address = self.create_cluster(3)
ds = _make_distributed_dataset(ds, master_address)
next(iter(ds))
@combinations.generate(
@ -406,12 +400,12 @@ class DataServiceOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testDistributeFromInterleave(self):
dispatcher_address = self.create_cluster(1)
master_address = self.create_cluster(1)
ds = dataset_ops.Dataset.range(2)
def interleave_fn(_):
ds = dataset_ops.Dataset.range(2)
_make_distributed_dataset(ds, dispatcher_address)
_make_distributed_dataset(ds, master_address)
return ds
with self.assertRaisesRegex(
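The tests above drive the master/worker naming that this branch keeps. A minimal sketch of the same wiring outside the test harness, assuming the `_make_distributed_dataset` helper simply applies `data_service_ops.distribute` with a `protocol://address` service string, as the calls above suggest:

from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.ops import dataset_ops

PROTOCOL = "grpc"

# Start an in-process master and one worker bound to it (port=0 picks a free
# port). The tests above read the private `_address` attribute, so we do too.
master = server_lib.MasterServer(port=0, protocol=PROTOCOL)
worker = server_lib.WorkerServer(  # keep a reference so the worker stays alive
    port=0, master_address=master._address, protocol=PROTOCOL)

# Route a simple range dataset through the service and read it back eagerly.
ds = dataset_ops.Dataset.range(10)
ds = ds.apply(
    data_service_ops.distribute(
        processing_mode="parallel_epochs",
        service="{}://{}".format(PROTOCOL, master._address)))
print(sorted(elem.numpy() for elem in ds))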


@ -123,15 +123,6 @@ class TPUTest(test.TestCase):
result = bar() + 1
self.assertAllEqual(result, 2)
def test_on_demand_op_with_dynamic_output(self):
with ops.device("/device:TPU:0"):
where_output = array_ops.where([True, False, True])
self.assertAllEqual(where_output, [[0], [2]])
with ops.device("/device:TPU:0"):
repeat_output = array_ops.repeat(math_ops.range(2), [1, 4])
self.assertAllEqual(repeat_output, [0, 1, 1, 1, 1])
@parameterized.named_parameters([("PackedVar", True), ("", False)])
class TPUStrategyTest(test.TestCase, parameterized.TestCase):


@ -4690,7 +4690,7 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1):
labels=target, logits=output, axis=axis)
if (not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
output.op.type == 'Softmax'):
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing zero when training.
@ -4735,7 +4735,7 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
if (not from_logits and
not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
output.op.type == 'Softmax'):
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing zero when training.
@ -4814,7 +4814,7 @@ def binary_crossentropy(target, output, from_logits=False):
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
if (not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
output.op.type == 'Sigmoid') and not hasattr(output, '_keras_history'):
output.op.type == 'Sigmoid'):
# When sigmoid activation function is used for output operation, we
# use logits from the sigmoid function directly to compute loss in order
# to prevent collapsing zero when training.
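All three hunks gate the same trick: when the output tensor is produced directly by a Softmax (or Sigmoid) op, Keras walks back to that op's input and computes the loss from logits for numerical stability. A simplified standalone illustration of the pattern, not the Keras source itself (graph tensors only, since eager tensors carry no `.op`):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # graph mode, so tensors expose .op

def stable_categorical_crossentropy(target, output):
  # If `output` came straight from a Softmax op, recover its input and use
  # the numerically stable logits-based loss instead of log(softmax(...)).
  if output.op.type == 'Softmax':
    logits = output.op.inputs[0]
    return tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=target, logits=logits)
  eps = 1e-7
  output = tf.clip_by_value(output, eps, 1.0 - eps)
  return -tf.reduce_sum(target * tf.math.log(output), axis=-1)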


@ -921,7 +921,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
# >> inputs = tf.keras.Input(10)
# >> outputs = MyLayer()(inputs) # Functional construction mode.
# >> model = tf.keras.Model(inputs, outputs)
if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
if _in_functional_construction_mode(inputs, args, kwargs, input_list):
return self._functional_construction_call(inputs, args, kwargs,
input_list)
@ -3205,7 +3205,7 @@ class AddMetric(Layer):
return config
def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list): # pylint: disable=unused-argument
def _in_functional_construction_mode(inputs, args, kwargs, input_list): # pylint: disable=unused-argument
"""Check the arguments to see if we are constructing a functional model."""
if keras_tensor.keras_tensors_enabled():
# We are constructing a functional model if any of the inputs
@ -3215,20 +3215,7 @@ def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):
for tensor in nest.flatten([inputs, args, kwargs]))
else:
if context.executing_eagerly():
all_inputs_symbolic = all(
tf_utils.is_symbolic_tensor(t) for t in input_list)
if (base_layer_utils.is_subclassed(layer) and
any(tf_utils.is_symbolic_tensor(t) for t in nest.flatten(
[inputs, args, kwargs])) and not all_inputs_symbolic):
raise ValueError('It appears you are trying to construct a '
'functional model, but not all of the inputs in '
'the first positional argument of your layer call '
'are symbolic tensors. '
'(Input objects, or the output of another layer) '
'Functional models cannot correctly track custom '
'layers unless all values in the first call argument '
'are symbolic.')
return all_inputs_symbolic
return all(tf_utils.is_symbolic_tensor(t) for t in input_list)
else:
return (base_layer_utils.is_in_keras_graph() or
all(hasattr(t, '_keras_history') for t in input_list))
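With the subclassed-layer error removed, the eager branch reduces to a single rule: functional construction iff every tensor in the first positional argument is symbolic. A hedged distillation of that rule (the real function also short-circuits on `base_layer_utils.is_in_keras_graph()` in the graph branch):

from tensorflow.python.eager import context
from tensorflow.python.keras.utils import tf_utils

def in_functional_construction_mode(input_list):
  if context.executing_eagerly():
    # Eager: every input must be symbolic, e.g. produced by tf.keras.Input.
    return all(tf_utils.is_symbolic_tensor(t) for t in input_list)
  # Graph: fall back to the Keras-history bookkeeping on the tensors.
  return all(hasattr(t, '_keras_history') for t in input_list)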


@ -252,9 +252,6 @@ class Layer(base_layer.Layer):
# might want to turn it off, like Sequential model.
self._auto_track_sub_layers = True
# Mark this layer as having been originally built as a tf1 layer/model
self._originally_built_as_v1 = True
@trackable.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
@ -654,8 +651,6 @@ class Layer(base_layer.Layer):
ValueError: if the layer's `call` method returns None (an invalid value).
RuntimeError: if `super().__init__()` was not called in the constructor.
"""
self._assert_built_as_v1()
if not hasattr(self, '_thread_local'):
raise RuntimeError(
'You must call `super().__init__()` in the layer constructor.')
@ -823,20 +818,6 @@ class Layer(base_layer.Layer):
return outputs
def _assert_built_as_v1(self):
if not hasattr(self, '_originally_built_as_v1'):
raise ValueError(
'Your Layer or Model is in an invalid state. This can happen if you '
'are interleaving estimator/non-estimator models or '
'interleaving models/layers made in tf.compat.v1.Graph.as_default() '
'with models/layers created outside of it. '
'Converting a model to an estimator (via model_to_estimator) '
'invalidates all models/layers made before the conversion (even '
'if they were not the model converted to an estimator). '
'Similarly, making a layer or a model inside a '
'a tf.compat.v1.Graph invalidates all layers/models you previously '
'made outside of the graph.')
@property
def dtype(self):
return self._dtype_policy.variable_dtype


@ -34,7 +34,6 @@ from tensorflow.python.keras import combinations
from tensorflow.python.keras import initializers
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
@ -932,72 +931,6 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.times(
combinations.keras_mode_combinations(mode='eager'),
combinations.combine(use_keras_tensors=False)))
def test_only_some_in_first_arg_derived_from_keras_layer(self):
class MyAddAll(layers.Layer):
def call(self, inputs):
x = inputs[0]
for inp in inputs[1:]:
if inp is not None:
x = x + inp
return x
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
layer = MyAddAll()
with self.assertRaisesRegexp(ValueError, 'construct a functional'):
layer([0.0, input1, None, input2, None])
@combinations.generate(combinations.times(
combinations.keras_mode_combinations(mode='eager'),
combinations.combine(use_keras_tensors=True)))
def test_only_some_in_first_arg_derived_from_keras_layer_keras_tensors(self):
# This functionality is unsupported in v1 graphs
class MyAddAll(layers.Layer):
def call(self, inputs):
x = inputs[0]
for inp in inputs[1:]:
if inp is not None:
x = x + inp
return x
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
layer = MyAddAll()
outputs = layer([0.0, input1, None, input2, None])
model = training_lib.Model([input1, input2], outputs)
self.assertIn(layer, model.layers)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
# Check serialization.
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'MyAddAll': MyAddAll})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.keras_mode_combinations())
def test_call_kwarg_derived_from_keras_layer(self):
@ -1136,8 +1069,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
input2 = input_layer_lib.Input(10)
input3 = input_layer_lib.Input(10)
layer = AddAll()
outputs = layer(
outputs = AddAll()(
[input1, 4 * array_ops.ones((1, 10))],
x3={
'a': input2,
@ -1145,7 +1077,6 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
'c': 5 * array_ops.ones((1, 10))
})
model = training_lib.Model([input1, input2, input3], outputs)
self.assertIn(layer, model.layers)
model.compile(
'sgd',
'mse',
@ -1902,37 +1833,6 @@ class AddLossTest(keras_parameterized.TestCase):
self.assertAllClose(model.get_weights(), model2.get_weights())
def test_add_loss_crossentropy_backtracking(self):
inputs = input_layer_lib.Input((2,))
labels = input_layer_lib.Input((1,))
outputs = layers.Dense(1, activation='sigmoid')(inputs)
model = functional.Functional([inputs, labels], outputs)
model.add_loss(losses.binary_crossentropy(labels, outputs))
model.compile('adam')
x = np.random.random((2, 2))
y = np.random.random((2, 1))
model.fit([x, y])
inputs = input_layer_lib.Input((2,))
labels = input_layer_lib.Input((2,))
outputs = layers.Dense(2, activation='softmax')(inputs)
model = functional.Functional([inputs, labels], outputs)
model.add_loss(losses.categorical_crossentropy(labels, outputs))
model.compile('adam')
x = np.random.random((2, 2))
y = np.random.random((2, 2))
model.fit([x, y])
inputs = input_layer_lib.Input((2,))
labels = input_layer_lib.Input((1,), dtype='int32')
outputs = layers.Dense(2, activation='softmax')(inputs)
model = functional.Functional([inputs, labels], outputs)
model.add_loss(losses.sparse_categorical_crossentropy(labels, outputs))
model.compile('adam')
x = np.random.random((2, 2))
y = np.random.randint(0, 2, size=(2, 1))
model.fit([x, y])
@combinations.generate(combinations.keras_mode_combinations())
class WeightAccessTest(keras_parameterized.TestCase):


@ -303,7 +303,6 @@ class Model(training_lib.Model):
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
self._assert_built_as_v1()
self._run_eagerly = kwargs.pop('run_eagerly', None)
self._experimental_run_tf_function = kwargs.pop(
'experimental_run_tf_function', True)
@ -774,7 +773,6 @@ class Model(training_lib.Model):
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
self._assert_built_as_v1()
_keras_api_gauge.get_cell('fit_v1').set(True)
# Legacy support
if 'nb_epoch' in kwargs:
@ -895,7 +893,6 @@ class Model(training_lib.Model):
Raises:
ValueError: in case of invalid arguments.
"""
self._assert_built_as_v1()
_keras_api_gauge.get_cell('evaluate_v1').set(True)
self._assert_compile_was_called()
self._check_call_args('evaluate')
@ -975,7 +972,6 @@ class Model(training_lib.Model):
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
self._assert_built_as_v1()
_keras_api_gauge.get_cell('predict_v1').set(True)
self._check_call_args('predict')


@ -132,7 +132,8 @@ class Embedding(Layer):
# right now. Checking for the presence of GPUs to avoid complicating the
# TPU codepaths which can handle sparse optimizers. But if we are within
# a tf.function, we go back the graph mode logic and rely on the placer.
if context.executing_eagerly() and context.context().num_gpus():
if (context.executing_eagerly() and context.context().num_gpus() and
not ops.inside_function()):
with ops.device('cpu:0'):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
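The added `ops.inside_function()` clause narrows the CPU pinning to genuinely eager calls; inside a tf.function the placer decides. The decision, sketched in isolation:

from tensorflow.python.eager import context
from tensorflow.python.framework import ops

def embedding_weight_device():
  # Pin embedding variables to the CPU only when executing eagerly on a
  # GPU host; inside tf.function, return None and let the placer choose.
  if (context.executing_eagerly() and context.context().num_gpus() and
      not ops.inside_function()):
    return 'cpu:0'
  return None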


@ -4579,11 +4579,11 @@ def non_max_suppression_padded(boxes,
Raises:
ValueError: When set pad_to_max_output_size to False for batched input.
"""
# if no new arguments are used and no later than 2020/6/23, use the old
# version to give us time to fix TFLite conversion after the TF 2.3 release.
# if no new arguments are used and no later than 2020/4/20, use the old
# version to give us time to fix TFLite conversion
if (not sorted_input) and \
(not canonicalized_coordinates) and \
tile_size == 512 and not compat.forward_compatible(2020, 6, 23):
tile_size == 512 and not compat.forward_compatible(2020, 4, 20):
return non_max_suppression_padded_v1(
boxes, scores, max_output_size, iou_threshold, score_threshold,
pad_to_max_output_size, name)
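Rolling the horizon date back is the standard forward-compatibility gate: route to the old kernel until the date passes, then switch. The pattern in general form:

from tensorflow.python.compat import compat

def select_impl(old_impl, new_impl, *args, **kwargs):
  # Keep the old kernel until the compatibility horizon (2020-04-20 here)
  # has passed, giving downstream converters time to catch up.
  if not compat.forward_compatible(2020, 4, 20):
    return old_impl(*args, **kwargs)
  return new_impl(*args, **kwargs)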


@ -1870,27 +1870,25 @@ class MutableHashTable(LookupInterface):
return {
"table":
functools.partial(
MutableHashTable._Saveable, table=self, name=self._name,
table_name=self._name)
MutableHashTable._Saveable, table=self, name=self._name)
}
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for DenseHashTable."""
"""SaveableObject implementation for MutableHashTable."""
def __init__(self, table, name, table_name=None):
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
self.table_name = table_name or name
# pylint: disable=protected-access
super(MutableHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
def restore(self, restored_tensors, restored_shapes, name=None):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.name_scope("%s_table_restore" % self.table_name):
with ops.name_scope(name, "%s_table_restore" % self.name):
with ops.colocate_with(self.op.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle,
restored_tensors[0],
@ -2168,27 +2166,25 @@ class DenseHashTable(LookupInterface):
return {
"table":
functools.partial(
DenseHashTable._Saveable, table=self, name=self._name,
table_name=self._name)
DenseHashTable._Saveable, table=self, name=self._name)
}
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for DenseHashTable."""
def __init__(self, table, name, table_name=None):
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
self.table_name = table_name or name
# pylint: disable=protected-access
super(DenseHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
def restore(self, restored_tensors, restored_shapes, name=None):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.name_scope("%s_table_restore" % self.table_name):
with ops.name_scope(name, "%s_table_restore" % self.name):
with ops.colocate_with(self.op.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle,
restored_tensors[0],
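Both `_Saveable`s follow the same two-spec shape: export the table as a (keys, values) pair on save, re-import it on restore. A pared-down sketch of that pattern, simplified from the code above rather than a drop-in replacement:

from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.training.saver import BaseSaverBuilder

class TableSaveable(BaseSaverBuilder.SaveableObject):
  """Two-spec (keys, values) saveable for a lookup table."""

  def __init__(self, table, name):
    keys, values = table.export()
    specs = [
        BaseSaverBuilder.SaveSpec(keys, "", name + "-keys"),
        BaseSaverBuilder.SaveSpec(values, "", name + "-values"),
    ]
    super(TableSaveable, self).__init__(table, specs, name)

  def restore(self, restored_tensors, restored_shapes, name=None):
    del restored_shapes  # unused
    with ops.name_scope(name, "%s_table_restore" % self.name):
      return gen_lookup_ops.lookup_table_import_v2(
          self.op.resource_handle, restored_tensors[0], restored_tensors[1])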


@ -45,7 +45,6 @@ from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
@ -147,18 +146,6 @@ class Loader(object):
self._setup_functions_structures()
self._setup_functions_captures()
self._create_saveable_object_factories()
def _create_saveable_object_factories(self):
for node_id, proto in enumerate(self._proto.nodes):
node = self.get(node_id)
node._self_saveable_object_factories = {} # pylint: disable=protected-access
for name, saveable_object_proto in proto.saveable_objects.items():
node._self_saveable_object_factories[name] = ( # pylint: disable=protected-access
saveable_object_util.restored_saved_object_factory(
self.get(saveable_object_proto.save_function),
self.get(saveable_object_proto.restore_function)))
def _load_edges(self):
"""Adds edges from objects to other objects and functions."""
for node_id, object_proto in enumerate(self._proto.nodes):


@ -1795,22 +1795,6 @@ class LoadTest(test.TestCase, parameterized.TestCase):
options = load_options.LoadOptions(experimental_io_device="/job:localhost")
self.assertEqual("/job:localhost", options.experimental_io_device)
def test_load_custom_saveable_object(self, cycles):
root = tracking.AutoTrackable()
root.table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
root.table.insert("foo", 15)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
def lookup(key):
return root.table.lookup(key)
root.lookup = lookup
imported = cycle(root, cycles)
self.assertEqual(self.evaluate(imported.lookup("foo")), 15)
self.assertEqual(self.evaluate(imported.lookup("idk")), -1)
class SingleCycleTests(test.TestCase, parameterized.TestCase):


@ -19,7 +19,6 @@ from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from tensorflow.core.framework import versions_pb2
@ -54,7 +53,6 @@ from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
@ -138,15 +136,12 @@ class _AugmentedGraphView(graph_view.ObjectGraphView):
return obj._list_extra_dependencies_for_serialization( # pylint: disable=protected-access
self._serialization_cache)
def list_functions(self, obj, extra_functions=None):
def list_functions(self, obj):
obj_functions = self._functions.get(obj, None)
if obj_functions is None:
obj_functions = obj._list_functions_for_serialization( # pylint: disable=protected-access
self._serialization_cache)
self._functions[obj] = obj_functions
if extra_functions:
obj_functions = obj_functions.copy()
obj_functions.update(extra_functions)
return obj_functions
@ -182,12 +177,6 @@ class _SaveableView(object):
self.slot_variables = slot_variables
self.concrete_functions = []
self.saveable_objects_for_node, all_saveable_functions = (
self._add_saveable_objects())
saveable_object_functions = {
"__SAVEABLE_FUNCTION_{}".format(n): fn
for n, fn in enumerate(all_saveable_functions)}
# Maps functions -> wrapped functions that capture variables
self.wrapped_functions = wrapped_functions or {}
# Maps names of concrete functions in the object to names of wrapped
@ -201,8 +190,7 @@ class _SaveableView(object):
nodes_without_functions = list(self.nodes)
seen_function_names = set()
for node in nodes_without_functions:
for function in checkpoint_view.list_functions(
node, saveable_object_functions).values():
for function in checkpoint_view.list_functions(node).values():
if function not in self.node_ids:
self.node_ids[function] = len(self.nodes)
self.nodes.append(function)
@ -221,25 +209,6 @@ class _SaveableView(object):
seen_function_names.add(concrete_function.name)
self.concrete_functions.append(concrete_function)
def _add_saveable_objects(self):
"""Retrieves SaveablesObjects and traces their save/restore functions."""
# Maps node -> local name -> (save function, restore function)
saveable_objects_map = object_identity.ObjectIdentityDictionary()
all_saveable_functions = []
for node in self.nodes:
if resource_variable_ops.is_resource_variable(node):
# Resource (and TPU/Mirrored) variables are automatically revived with
# their saveables defined, so there is no need to trace the save
# and restore functions.
continue
saveable_map = saveable_object_util.trace_save_restore_functions(node)
if saveable_map:
saveable_objects_map[node] = saveable_map
for save_fn, restore_fn in saveable_map.values():
all_saveable_functions.append(save_fn)
all_saveable_functions.append(restore_fn)
return saveable_objects_map, all_saveable_functions
@property
def root(self):
return self.nodes[0]
@ -264,15 +233,6 @@ class _SaveableView(object):
child_proto.node_id = self.node_ids[ref_function]
child_proto.local_name = local_name
if node not in self.saveable_objects_for_node:
continue
for local_name, (save_fn, restore_fn) in (
self.saveable_objects_for_node[node].items()):
saveable_object_proto = object_proto.saveable_objects[local_name]
saveable_object_proto.save_function = self.node_ids[save_fn]
saveable_object_proto.restore_function = self.node_ids[restore_fn]
def map_resources(self):
"""Makes new resource handle ops corresponding to existing resource tensors.
@ -645,9 +605,7 @@ def _fill_meta_graph_def(meta_graph_def, saveable_view, signature_functions,
# the exported graph (thus the `to_graph` argument).
saver = functional_saver.MultiDeviceSaver(
saveable_view.checkpoint_view.frozen_saveable_objects(
object_map=object_map, to_graph=exported_graph,
call_with_mapped_captures=functools.partial(
_call_function_with_mapped_captures, resource_map=resource_map)))
object_map=object_map, to_graph=exported_graph))
with exported_graph.as_default():
signatures = _generate_signatures(signature_functions, resource_map)


@ -1169,9 +1169,7 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
PyCapsule_SetName(pycapsule.ptr(), "used_dltensor");
PyCapsule_SetDestructor(pycapsule.ptr(), nullptr);
PyObject* pyhandle = EagerTensorFromHandle(thandle);
return tensorflow::PyoOrThrow(pyhandle);
return py::handle(EagerTensorFromHandle(thandle));
});
m.def("TFE_Py_RegisterCustomDevice", [](const py::handle& context,


@ -17,26 +17,15 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
@ -290,7 +279,7 @@ def op_list_to_dict(op_list, convert_variable_to_tensor=True):
raise ValueError(
("Two different ResourceVariable objects with the same "
"shared_name '%s' were passed to the Saver. This likely means "
"that they were created in different Graphs or isoWlation "
"that they were created in different Graphs or isolation "
"contexts, and may not be checkpointed together.") %
(var._shared_name,))
else:
@ -360,147 +349,3 @@ def validate_and_slice_inputs(names_to_saveables):
for converted_saveable_object in saveable_objects_for_op(op, name):
_add_saveable(saveables, seen_ops, converted_saveable_object)
return saveables
def trace_save_restore_functions(object_to_save):
"""Gathers all SaveableObjects and traces the save and restore ops."""
saveable_map = {} # Maps name -> (save function, restore function)
for name, saveable_factory in (
object_to_save._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
if not callable(saveable_factory):
if isinstance(saveable_factory, saveable_object.SaveableObject):
logging.debug(
"Trackable {} should return callable factories, not SaveableObjects"
" in `_gather_saveables_for_checkpoint`. This could lead to "
"problems loading the SavedModel back into Python."
.format(object_to_save))
continue
if is_factory_for_restored_saveable_object(saveable_factory):
saveable_map[name] = (saveable_factory.keywords["save_function"],
saveable_factory.keywords["restore_function"])
else:
concrete_save_fn, concrete_restore_fn = _trace_save_and_restore_function(
saveable_factory, object_to_save)
if concrete_save_fn is not None:
saveable_map[name] = (concrete_save_fn, concrete_restore_fn)
return saveable_map
def _trace_save_and_restore_function(saveable_factory, object_to_save):
"""Traces the save and restore concrete functions."""
saveables = []
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def save_fn(checkpoint_key):
maybe_saveable = saveable_factory(name=checkpoint_key)
if isinstance(maybe_saveable, saveable_object.SaveableObject):
maybe_saveable = [maybe_saveable]
saveables[:] = maybe_saveable
# Return list of all SaveSpecs created by the factory.
ret = []
for saveable in saveables:
for spec in saveable.specs:
ret.append({"name": spec.name, "tensor": spec.tensor,
"slice_spec": spec.slice_spec})
return ret
concrete_save_fn = save_fn.get_concrete_function()
if any(isinstance(saveable, trackable.PythonStateSaveable)
for saveable in saveables):
logging.warn(
"Note that object {} stores python values into the checkpoint. "
"These values will not be restored when loading the SavedModel "
"into python.".format(object_to_save))
return None, None
if any(isinstance(saveable, trackable.NoRestoreSaveable)
for saveable in saveables):
return None, None
restored_type_specs = []
tensor_structure = []
for saveable in saveables:
saveable_tensor_structure = []
tensor_structure.append(saveable_tensor_structure)
for spec in saveable.specs:
restored_type_specs.append(type_spec.type_spec_from_value(spec.tensor))
saveable_tensor_structure.append(spec.name)
@def_function.function(input_signature=restored_type_specs)
def restore_fn(*restored_tensors):
structured_restored_tensors = nest.pack_sequence_as(
tensor_structure, restored_tensors)
for saveable, restored_tensors in zip(saveables,
structured_restored_tensors):
saveable.restore(restored_tensors, restored_shapes=None)
return 1
concrete_restore_fn = restore_fn.get_concrete_function()
return concrete_save_fn, concrete_restore_fn
class RestoredSaveableObject(saveable_object.SaveableObject):
"""SaveableObject restored from SavedModel using the traced save/restore."""
def __init__(self, save_function, restore_function, name):
self.save_function = save_function
self.restore_function = restore_function
if tensor_util.is_tensor(name):
name_tensor = name
else:
with ops.init_scope():
name_tensor = constant_op.constant(name)
tensors = save_function(name_tensor)
specs = [saveable_object.SaveSpec(x["tensor"], x["slice_spec"], x["name"])
for x in tensors]
super(RestoredSaveableObject, self).__init__(None, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
return self.restore_function(
*[restored_tensors[i] for i in range(len(self.specs))])
def restored_saved_object_factory(save_function, restore_function):
return functools.partial(RestoredSaveableObject,
save_function=save_function,
restore_function=restore_function)
def create_saveable_object(factory, name, call_with_mapped_captures):
"""Creates a SaveableObject while potentially in a different graph.
When creating the frozen saver for SavedModel, the save and restore ops are
placed in a separate graph. Since RestoredSaveableObject uses tf.functions to
save and restore, the function captures must be mapped to the new graph.
Args:
factory: Factory method for creating the SaveableObject.
name: Checkpoint key of this SaveableObject.
call_with_mapped_captures: Helper that calls a tf.function while remapping
the captures.
Returns:
a SaveableObject.
"""
if (call_with_mapped_captures is None or
not is_factory_for_restored_saveable_object(factory)):
return factory(name=name)
concrete_save_fn = factory.keywords["save_function"]
def save_fn(name):
return call_with_mapped_captures(concrete_save_fn, [name])
concrete_restore_fn = factory.keywords["restore_function"]
def restore_fn(*restored_tensors):
return call_with_mapped_captures(concrete_restore_fn, restored_tensors)
return factory(save_function=save_fn, restore_function=restore_fn, name=name)
def is_factory_for_restored_saveable_object(factory):
return (isinstance(factory, functools.partial) and
factory.func is RestoredSaveableObject)
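The removed helpers all hang off one idea: trace each SaveableObject's save into a concrete tf.function whose sole input is the string checkpoint key, so it can be serialized into the SavedModel. That core, as a hedged standalone sketch (`saveable_factory` is a stand-in for whatever `_gather_saveables_for_checkpoint` returns):

import tensorflow as tf

def trace_save_fn(saveable_factory):
  """Traces a save function parameterized only by the checkpoint key."""

  @tf.function(input_signature=[tf.TensorSpec([], tf.string)])
  def save_fn(checkpoint_key):
    saveable = saveable_factory(name=checkpoint_key)
    # Flatten the SaveSpecs so they can be re-materialized after loading.
    return [{"name": spec.name, "tensor": spec.tensor,
             "slice_spec": spec.slice_spec}
            for spec in saveable.specs]

  return save_fn.get_concrete_function()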


@ -293,10 +293,9 @@ class CheckpointPosition(object):
checkpoint_key = serialized_tensor.checkpoint_key
dtype = self._checkpoint.dtype_map[checkpoint_key]
base_type = dtype.base_dtype
io_device = self._checkpoint.options.experimental_io_device or "cpu:0"
with ops.init_scope():
with ops.device(io_device):
# Run the restore itself on the io_device(CPU or specified).
with ops.device("/cpu:0"):
# Run the restore itself on the CPU.
value, = io_ops.restore_v2(
prefix=self._checkpoint.save_path_tensor,
tensor_names=[checkpoint_key],
@ -612,12 +611,6 @@ class Trackable(object):
# building.
self._self_name_based_restores = set()
# Dictionary of SaveableObjects factories. This dictionary is defined when
# the object is loaded from the SavedModel. When writing a custom class,
# prefer overriding "_gather_saveables_from_checkpoint" to using this
# attribute.
self._self_saveable_object_factories = {}
@property
def _object_identifier(self):
"""String used to identify this object in a SavedModel.
@ -979,7 +972,7 @@ class Trackable(object):
lambda name="global_name_for_this_object":
SaveableObject(name=name, ...)}
"""
return self._self_saveable_object_factories
return {}
def _list_extra_dependencies_for_serialization(self, serialization_cache):
"""Lists extra dependencies to serialize.


@ -208,7 +208,7 @@ class ObjectGraphView(object):
def _add_attributes_to_object_graph(
self, trackable_objects, object_graph_proto, node_ids, object_names,
object_map, call_with_mapped_captures):
object_map):
"""Create SaveableObjects and corresponding SerializedTensor protos."""
named_saveable_objects = []
if self._saveables_cache is None:
@ -253,9 +253,7 @@ class ObjectGraphView(object):
break
if saveables is None:
if callable(saveable_factory):
maybe_saveable = saveable_object_util.create_saveable_object(
saveable_factory, attribute.checkpoint_key,
call_with_mapped_captures)
maybe_saveable = saveable_factory(name=attribute.checkpoint_key)
else:
maybe_saveable = saveable_factory
if isinstance(maybe_saveable, saveable_object_lib.SaveableObject):
@ -334,8 +332,7 @@ class ObjectGraphView(object):
return object_graph_proto
def _serialize_gathered_objects(self, trackable_objects, path_to_root,
object_map=None,
call_with_mapped_captures=None):
object_map=None):
"""Create SaveableObjects and protos for gathered objects."""
object_names = object_identity.ObjectIdentityDictionary()
for obj, path in path_to_root.items():
@ -357,8 +354,7 @@ class ObjectGraphView(object):
object_graph_proto=object_graph_proto,
node_ids=node_ids,
object_names=object_names,
object_map=object_map,
call_with_mapped_captures=call_with_mapped_captures))
object_map=object_map))
return named_saveable_objects, object_graph_proto, feed_additions
def serialize_object_graph(self):
@ -386,8 +382,7 @@ class ObjectGraphView(object):
return self._serialize_gathered_objects(
trackable_objects, path_to_root)
def frozen_saveable_objects(self, object_map=None, to_graph=None,
call_with_mapped_captures=None):
def frozen_saveable_objects(self, object_map=None, to_graph=None):
"""Creates SaveableObjects with the current object graph frozen."""
trackable_objects, path_to_root = self._breadth_first_traversal()
if to_graph:
@ -398,8 +393,7 @@ class ObjectGraphView(object):
named_saveable_objects, graph_proto, _ = self._serialize_gathered_objects(
trackable_objects,
path_to_root,
object_map,
call_with_mapped_captures)
object_map)
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)


@ -1,6 +1,6 @@
path: "tensorflow.data.experimental.service.DispatchServer"
path: "tensorflow.data.experimental.service.MasterServer"
tf_class {
is_instance: "<class \'tensorflow.python.data.experimental.service.server_lib.DispatchServer\'>"
is_instance: "<class \'tensorflow.python.data.experimental.service.server_lib.MasterServer\'>"
is_instance: "<type \'object\'>"
member {
name: "target"


@ -4,7 +4,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
argspec: "args=[\'self\', \'port\', \'dispatcher_address\', \'worker_address\', \'protocol\', \'start\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], "
argspec: "args=[\'self\', \'port\', \'master_address\', \'worker_address\', \'protocol\', \'start\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], "
}
member_method {
name: "join"


@ -1,7 +1,7 @@
path: "tensorflow.data.experimental.service"
tf_module {
member {
name: "DispatchServer"
name: "MasterServer"
mtype: "<type \'type\'>"
}
member {


@ -99,8 +99,8 @@ tensorflow::data::GrpcDataServerBase::Join
tensorflow::data::GrpcDataServerBase::Start
tensorflow::data::GrpcDataServerBase::Stop
tensorflow::data::GrpcDataServerBase::BoundPort
tensorflow::data::DispatchGrpcDataServer::NumWorkers
tensorflow::data::NewDispatchServer
tensorflow::data::MasterGrpcDataServer::NumWorkers
tensorflow::data::NewMasterServer
tensorflow::data::NewWorkerServer
[protos_all] # device_lib, dtypes


@ -49,7 +49,7 @@ from setuptools.dist import Distribution
# result for pip.
# Also update tensorflow/tensorflow.bzl and
# tensorflow/core/public/version.h
_VERSION = '2.3.0'
_VERSION = '2.3.0-rc0'
REQUIRED_PACKAGES = [
'absl-py >= 0.7.0',
@ -63,8 +63,8 @@ REQUIRED_PACKAGES = [
'numpy >= 1.16.0, < 1.19.0',
'opt_einsum >= 2.3.2',
'protobuf >= 3.9.2',
'tensorboard >= 2.3.0, < 3',
'tensorflow_estimator >= 2.3.0, < 2.4.0',
'tensorboard >= 2.2.0, < 2.3.0',
'tf-estimator-nightly >= 2.3.0.dev2020062301, < 2.3.0.dev2020062302',
'termcolor >= 1.1.0',
'wrapt >= 1.11.1',
'wheel >= 0.26',


@ -292,26 +292,6 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
],
)
tf_http_archive(
name = "LinaroArmGcc72",
build_file = clean_dep("//third_party/toolchains/embedded/linaro-gcc72-armeabi:linaro-gcc72-armeabi.BUILD"),
strip_prefix = "gcc-linaro-7.2.1-2017.11-x86_64_arm-linux-gnueabihf/",
urls = [
"https://releases.linaro.org/components/toolchain/binaries/7.2-2017.11/arm-linux-gnueabihf/gcc-linaro-7.2.1-2017.11-x86_64_arm-linux-gnueabihf.tar.xz",
],
sha256 = "cee0087b1f1205b73996651b99acd3a926d136e71047048f1758ffcec69b1ca2",
)
tf_http_archive(
name = "LinaroAarch64Gcc72",
build_file = clean_dep("//third_party/toolchains/embedded/linaro-gcc72-aarch64:linaro-gcc72-aarch64.BUILD"),
strip_prefix = "gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/",
urls = [
"https://releases.linaro.org/components/toolchain/binaries/7.2-2017.11/aarch64-linux-gnu/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz",
],
sha256 = "20181f828e1075f1a493947ff91e82dd578ce9f8638fbdfc39e24b62857d8f8d",
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),


@ -9,7 +9,7 @@ def repo():
third_party_http_archive(
name = "aws",
urls = [
"https://mirror.tensorflow.orgg/github.com/aws/aws-sdk-cpp/archive/1.7.336.tar.gz",
"https://mirror.bazel.build/github.com/aws/aws-sdk-cpp/archive/1.7.336.tar.gz",
"https://github.com/aws/aws-sdk-cpp/archive/1.7.336.tar.gz",
],
sha256 = "758174f9788fed6cc1e266bcecb20bf738bd5ef1c3d646131c9ed15c2d6c5720",


@ -1,14 +1,3 @@
--- ./absl/time/internal/cctz/include/cctz/civil_time_detail.h 2020-08-06 01:33:56.005757145 +0200
+++ ./absl/time/internal/cctz/include/cctz/civil_time_detail.h 2020-08-06 01:33:35.460579387 +0200
@@ -23,7 +23,7 @@
#include "absl/base/config.h"
// Disable constexpr support unless we are in C++14 mode.
-#if __cpp_constexpr >= 201304 || (defined(_MSC_VER) && _MSC_VER >= 1910)
+#if (!defined(NO_CONSTEXPR_FOR_YOU) && __cpp_constexpr >= 201304) || (defined(_MSC_VER) && _MSC_VER >= 1910)
#define CONSTEXPR_D constexpr // data
#define CONSTEXPR_F constexpr // function
#define CONSTEXPR_M constexpr // member
--- ./absl/time/internal/cctz/BUILD.bazel 2019-09-23 13:20:52.000000000 -0700
+++ ./absl/time/internal/cctz/BUILD.bazel.fixed 2019-09-23 13:20:48.000000000 -0700
@@ -74,15 +74,6 @@
@ -312,3 +301,4 @@
+ .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
}
};


@ -1,8 +1,5 @@
# We make everything here private to make any dependencies on ICU become a build
# failure and easier/faster to track down, as it's not needed for DeepSpeech and
# causes linking problems on Windows.
package(
default_visibility = ["//visibility:private"],
default_visibility = ["//visibility:public"],
)
licenses(["notice"]) # Apache 2.0


@ -67,4 +67,5 @@ config_setting(
)
%{PYTHON_INCLUDE_GENRULE}
%{NUMPY_INCLUDE_GENRULE}
%{PYTHON_IMPORT_LIB_GENRULE}


@ -210,7 +210,7 @@ def _create_local_python_repository(repository_ctx):
python_lib = _get_python_lib(repository_ctx, python_bin)
_check_python_lib(repository_ctx, python_lib)
python_include = _get_python_include(repository_ctx, python_bin)
# numpy_include = _get_numpy_include(repository_ctx, python_bin) + "/numpy"
numpy_include = _get_numpy_include(repository_ctx, python_bin) + "/numpy"
python_include_rule = _symlink_genrule_for_dir(
repository_ctx,
python_include,
@ -233,12 +233,12 @@ def _create_local_python_repository(repository_ctx):
[python_import_lib_src],
[python_import_lib_name],
)
#numpy_include_rule = _symlink_genrule_for_dir(
# repository_ctx,
# numpy_include,
# "numpy_include/numpy",
# "numpy_include",
#)
numpy_include_rule = _symlink_genrule_for_dir(
repository_ctx,
numpy_include,
"numpy_include/numpy",
"numpy_include",
)
platform_constraint = ""
if repository_ctx.attr.platform_constraint:
@ -247,7 +247,7 @@ def _create_local_python_repository(repository_ctx):
"%{PYTHON_BIN_PATH}": python_bin,
"%{PYTHON_INCLUDE_GENRULE}": python_include_rule,
"%{PYTHON_IMPORT_LIB_GENRULE}": python_import_lib_genrule,
#"%{NUMPY_INCLUDE_GENRULE}": numpy_include_rule,
"%{NUMPY_INCLUDE_GENRULE}": numpy_include_rule,
"%{PLATFORM_CONSTRAINT}": platform_constraint,
})


@ -16,8 +16,6 @@
_SINGLE_URL_WHITELIST = depset([
"arm_compiler",
"LinaroArmGcc72",
"LinaroAarch64Gcc72",
])
def _is_windows(ctx):


@ -1,67 +0,0 @@
# This is the entry point for --crosstool_top.
#
# The cc_toolchain rule used is found by:
#
# 1. Finding the appropriate toolchain in the CROSSTOOL file based on the --cpu
# and --compiler command line flags (if they exist, otherwise using the
# "default_target_cpu" / "default_toolchain" fields in the CROSSTOOL file)
# 2. Concatenating the "target_cpu" and "compiler" fields of the toolchain in
# use and using that as a key in the map in the "toolchains" attribute
package(default_visibility = ["//visibility:public"])
load(":linaro_toolchain_config.bzl", "linaro_toolchain_config")
cc_toolchain_suite(
name = "toolchain",
toolchains = {
"aarch64": ":cc-compiler-aarch64",
},
)
filegroup(
name = "empty",
srcs = [],
)
filegroup(
name = "gcc_linux_all_files",
srcs = [
"//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:tool-wrappers",
"@LinaroAarch64Gcc72//:compiler_pieces",
],
)
filegroup(
name = "gcc_linux_linker_files",
srcs = [
"//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:ld",
"//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:ar",
"@LinaroAarch64Gcc72//:compiler_pieces",
],
)
filegroup(
name = "gcc_linux_compiler_files",
srcs = [
"//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:gcc",
"//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:as",
],
)
linaro_toolchain_config(name = "linaro_aarch64")
cc_toolchain(
name = "cc-compiler-aarch64",
all_files = ":gcc_linux_all_files",
compiler_files = ":gcc_linux_compiler_files",
toolchain_identifier = "gcc72_linaro_aarch64",
toolchain_config = ":linaro_aarch64",
dwp_files = ":empty",
dynamic_runtime_lib = ":empty",
linker_files = ":gcc_linux_linker_files",
objcopy_files = "//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:objcopy",
static_runtime_lib = ":empty",
strip_files = "//third_party/toolchains/embedded/linaro-gcc72-aarch64/gcc:strip",
supports_param_files = 1,
visibility = ["//visibility:public"],
)


@ -1,79 +0,0 @@
package(default_visibility = ['//third_party/toolchains/embedded/linaro-gcc72-aarch64:__pkg__'])
filegroup(
name = 'gcc',
srcs = [
'@LinaroAarch64Gcc72//:gcc',
'aarch64-linux-gnu-gcc',
],
)
filegroup(
name = 'ar',
srcs = [
'@LinaroAarch64Gcc72//:ar',
'aarch64-linux-gnu-ar',
],
)
filegroup(
name = 'ld',
srcs = [
'@LinaroAarch64Gcc72//:ld',
'aarch64-linux-gnu-ld',
],
)
filegroup(
name = 'nm',
srcs = [
'@LinaroAarch64Gcc72//:nm',
'aarch64-linux-gnu-nm',
],
)
filegroup(
name = 'objcopy',
srcs = [
'@LinaroAarch64Gcc72//:objcopy',
'aarch64-linux-gnu-objcopy',
],
)
filegroup(
name = 'objdump',
srcs = [
'@LinaroAarch64Gcc72//:objdump',
'aarch64-linux-gnu-objdump',
],
)
filegroup(
name = 'strip',
srcs = [
'@LinaroAarch64Gcc72//:strip',
'aarch64-linux-gnu-strip',
],
)
filegroup(
name = 'as',
srcs = [
'@LinaroAarch64Gcc72//:as',
'aarch64-linux-gnu-as',
],
)
filegroup(
name = 'tool-wrappers',
srcs = [
':gcc',
':ar',
':ld',
':nm',
':objcopy',
':objdump',
':strip',
':as',
],
)


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-ar \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-ar \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-as \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-as \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-cpp \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-cpp \
"$@"


@ -1,6 +0,0 @@
#!/bin/bash --norc
PATH="external/LinaroAarch64Gcc72/libexec/gcc/aarch64-linux-gnu/7.2.1/:$PATH" \
exec \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-gcc \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-gcov \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-gcov \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-ld \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-ld \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-nm \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-nm \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-objcopy \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-objcopy \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-objdump \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-objdump \
"$@"


@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a aarch64-linux-gnu-strip \
external/LinaroAarch64Gcc72/bin/aarch64-linux-gnu-strip \
"$@"


@ -1,81 +0,0 @@
package(default_visibility = ['//visibility:public'])
filegroup(
name = 'gcc',
srcs = [
'bin/aarch64-linux-gnu-gcc',
],
)
filegroup(
name = 'ar',
srcs = [
'bin/aarch64-linux-gnu-ar',
],
)
filegroup(
name = 'ld',
srcs = [
'bin/aarch64-linux-gnu-ld',
],
)
filegroup(
name = 'nm',
srcs = [
'bin/aarch64-linux-gnu-nm',
],
)
filegroup(
name = 'objcopy',
srcs = [
'bin/aarch64-linux-gnu-objcopy',
],
)
filegroup(
name = 'objdump',
srcs = [
'bin/aarch64-linux-gnu-objdump',
],
)
filegroup(
name = 'strip',
srcs = [
'bin/aarch64-linux-gnu-strip',
],
)
filegroup(
name = 'as',
srcs = [
'bin/aarch64-linux-gnu-as',
],
)
filegroup(
name = 'compiler_pieces',
srcs = glob([
'aarch64-linux-gnu/**',
'libexec/**',
'lib/gcc/aarch64-linux-gnu/**',
'include/**',
]),
)
filegroup(
name = 'compiler_components',
srcs = [
':gcc',
':ar',
':ld',
':nm',
':objcopy',
':objdump',
':strip',
':as',
],
)


@ -1,484 +0,0 @@
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starlark cc_toolchain configuration rule"""
load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"artifact_name_pattern",
"env_entry",
"env_set",
"feature",
"feature_set",
"flag_group",
"flag_set",
"make_variable",
"tool",
"tool_path",
"variable_with_value",
"with_feature_set",
)
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
ACTION_NAMES.lto_backend,
]
all_cpp_compile_actions = [
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
]
preprocessor_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.clif_match,
]
codegen_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
abi_version = "aarch64"
abi_libc_version = "glibc_2.24"
builtin_sysroot = None
compiler = "gcc"
host_system_name = "aarch64"
needs_pic = True
supports_gold_linker = False
supports_incremental_linker = False
supports_fission = False
supports_interface_shared_objects = False
supports_normalizing_ar = False
supports_start_end_lib = False
supports_thin_archives = False
target_libc = "glibc_2.24"
target_cpu = "armv8"
target_system_name = "armv8"
toolchain_identifier = "gcc72_linaro_aarch64"
cc_target_os = None
action_configs = []
supports_pic_feature = feature(name = "supports_pic", enabled = True)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
user_link_flags_feature = feature(
name = "user_link_flags",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["%{user_link_flags}"],
iterate_over = "user_link_flags",
expand_if_available = "user_link_flags",
),
],
),
],
)
shared_flag_feature = feature(
name = "shared_flag",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.lto_index_for_dynamic_library,
ACTION_NAMES.lto_index_for_nodeps_dynamic_library,
],
flag_groups = [flag_group(flags = ["-shared"])],
),
],
)
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
objcopy_embed_flags_feature = feature(
name = "objcopy_embed_flags",
enabled = True,
flag_sets = [
flag_set(
actions = ["objcopy_embed_data"],
flag_groups = [flag_group(flags = ["-I", "binary"])],
),
],
)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
# Make C++ compilation deterministic. Use linkstamping instead of these
# compiler symbols.
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
# This makes GCC and Clang do what we want when called through symlinks.
"-no-canonical-prefixes",
],
),
],
),
],
)
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-U_FORTIFY_SOURCE",
"-D_FORTIFY_SOURCE=1",
"-fstack-protector",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-std=c++11",
"--sysroot=external/LinaroAarch64Gcc72/aarch64-linux-gnu/libc",
"-pthread",
"-nostdinc",
"-isystem",
"external/LinaroAarch64Gcc72/aarch64-linux-gnu/include/c++/7.2.1/aarch64-linux-gnu",
"-isystem",
"external/LinaroAarch64Gcc72/aarch64-linux-gnu/include/c++/7.2.1",
"-isystem",
"external/LinaroAarch64Gcc72/lib/gcc/aarch64-linux-gnu/7.2.1/include",
"-isystem",
"external/LinaroAarch64Gcc72/aarch64-linux-gnu/libc/usr/include",
"-isystem",
"external/LinaroAarch64Gcc72/lib/gcc/aarch64-linux-gnu/7.2.1/include-fixed",
"-isystem",
"external/LinaroAarch64Gcc72/aarch64-linux-gnu/libc/usr/include",
"-isystem",
"external/LinaroAarch64Gcc72/aarch64-linux-gnu/libc/usr/include/aarch64-linux-gnu",
"-isystem",
"external/LinaroAarch64Gcc72/lib/gcc/aarch64-linux-gnu/7.2.1/include",
"-isystem",
"external/LinaroAarch64Gcc72/include/c++/7.2.1/aarch64-linux-gnu",
"-isystem",
"external/LinaroAarch64Gcc72/include/c++/7.2.1",
# Security hardening on by default.
"-fstack-protector",
"-fPIE",
# All warnings are enabled. Maybe enable -Werror as well?
"-Wall",
# Enable a few more warnings that aren't part of -Wall.
"-Wunused-but-set-parameter",
# But disable some that are problematic.
"-Wno-free-nonheap-object", # has false positives
# Keep stack frames for debugging, even in opt mode.
"-fno-omit-frame-pointer",
# Enable coloring even if there's no attached terminal. Bazel removes the
# escape sequences if --nocolor is specified.
"-fdiagnostics-color=always",
],
),
],
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
# "-target",
# "aarch64-linux-gnu",
"--sysroot=external/LinaroAarch64Gcc72/aarch64-linux-gnu/libc",
"-pass-exit-codes",
"-pie",
"-lstdc++",
"-lm",
"-lpthread",
"-Wl,--dynamic-linker=/lib/ld-linux-aarch64.so.1",
"-Wl,-no-as-needed",
"-Wl,-z,relro,-z,now",
"-no-canonical-prefixes",
# Stamp the binary with a unique identifier.
"-Wl,--build-id=md5",
"-Wl,--hash-style=gnu",
"-Lexternal/LinaroAarch64Gcc72/aarch64-linux-gnu/lib",
"-Lexternal/LinaroAarch64Gcc72/aarch64-linux-gnu/libc/lib",
"-Lexternal/LinaroAarch64Gcc72/aarch64-linux-gnu/libc/usr/lib",
"-Bexternal/LinaroAarch64Gcc72/aarch64-linux-gnu/bin",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
opt_feature = feature(name = "opt")
dbg_feature = feature(name = "dbg")
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
supports_pic_feature,
objcopy_embed_flags_feature,
opt_feature,
dbg_feature,
user_compile_flags_feature,
user_link_flags_feature,
shared_flag_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
cxx_builtin_include_directories = [
"%package(@LinaroAarch64Gcc72//include)%",
"%package(@LinaroAarch64Gcc72//aarch64-linux-gnu/libc/usr/include)%",
"%package(@LinaroAarch64Gcc72//aarch64-linux-gnu/libc/lib/gcc/aarch64-linux-gnu/7.2.1/include-fixed)%",
"%package(@LinaroAarch64Gcc72//include)%/c++/7.2.1",
"%package(@LinaroAarch64Gcc72//aarch64-linux-gnu/libc/lib/gcc/aarch64-linux-gnu/7.2.1/include)%",
"%package(@LinaroAarch64Gcc72//aarch64-linux-gnu/libc/lib/gcc/aarch64-linux-gnu/7.2.1/include-fixed)%",
"%package(@LinaroAarch64Gcc72//lib/gcc/aarch64-linux-gnu/7.2.1/include)%",
"%package(@LinaroAarch64Gcc72//lib/gcc/aarch64-linux-gnu/7.2.1/include-fixed)%",
"%package(@LinaroAarch64Gcc72//aarch64-linux-gnu/include)%/c++/7.2.1",
]
artifact_name_patterns = []
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "gcc/aarch64-linux-gnu-ar"),
tool_path(name = "compat-ld", path = "gcc/aarch64-linux-gnu-ld"),
tool_path(name = "cpp", path = "gcc/aarch64-linux-gnu-cpp"),
tool_path(name = "dwp", path = "gcc/aarch64-linux-gnu-dwp"),
tool_path(name = "gcc", path = "gcc/aarch64-linux-gnu-gcc"),
tool_path(name = "gcov", path = "arm-frc-linux-gnueabi/arm-frc-linux-gnueabi-gcov-4.9"),
        # C(++) compiles invoke the compiler (as that is the one knowing where
        # to find libraries), but we provide LD so other rules can invoke the linker.
tool_path(name = "ld", path = "gcc/aarch64-linux-gnu-ld"),
tool_path(name = "nm", path = "gcc/aarch64-linux-gnu-nm"),
tool_path(name = "objcopy", path = "gcc/aarch64-linux-gnu-objcopy"),
tool_path(name = "objdump", path = "gcc/aarch64-linux-gnu-objdump"),
tool_path(name = "strip", path = "gcc/aarch64-linux-gnu-strip"),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
)
linaro_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
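
# Hypothetical usage sketch (not part of this file): a consuming BUILD file
# would instantiate this rule and wire the result into a cc_toolchain, e.g.
#
#   linaro_toolchain_config(name = "linaro_aarch64")
#
#   cc_toolchain(
#       name = "cc-compiler-aarch64",
#       toolchain_config = ":linaro_aarch64",
#       ...,
#   )
#
# mirroring the armeabi-v7a package that appears later in this diff.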

@ -1,67 +0,0 @@
# This is the entry point for --crosstool_top.
#
# The cc_toolchain rule used is found by:
#
# 1. Finding the appropriate toolchain in the CROSSTOOL file based on the --cpu
# and --compiler command line flags (if they exist, otherwise using the
# "default_target_cpu" / "default_toolchain" fields in the CROSSTOOL file)
# 2. Concatenating the "target_cpu" and "compiler" fields of the toolchain in
# use and using that as a key in the map in the "toolchains" attribute
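#
# As an illustration (hypothetical invocation, not taken from this repository's
# docs), selecting this suite from the command line would look like:
#
#   bazel build //some:target \
#     --crosstool_top=//third_party/toolchains/embedded/linaro-gcc72-armeabi:toolchain \
#     --cpu=armv7a
#
# so that the "armv7a" key below resolves to ":cc-compiler-armv7a".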
load(":linaro_toolchain_config.bzl", "linaro_toolchain_config")

package(default_visibility = ["//visibility:public"])
cc_toolchain_suite(
name = "toolchain",
toolchains = {
"armv7a": ":cc-compiler-armv7a",
},
)
filegroup(
name = "empty",
srcs = [],
)
filegroup(
name = "gcc_linux_all_files",
srcs = [
"//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:tool-wrappers",
"@LinaroArmGcc72//:compiler_pieces",
],
)
filegroup(
name = "gcc_linux_linker_files",
srcs = [
"//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:ld",
"//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:ar",
"@LinaroArmGcc72//:compiler_pieces",
],
)
filegroup(
name = "gcc_linux_compiler_files",
srcs = [
"//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:gcc",
"//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:as",
],
)
linaro_toolchain_config(name = "linaro_armeabi-v7a")
cc_toolchain(
name = "cc-compiler-armv7a",
all_files = ":gcc_linux_all_files",
compiler_files = ":gcc_linux_compiler_files",
toolchain_identifier = "gcc72_linaro_armhf",
toolchain_config = ":linaro_armeabi-v7a",
dwp_files = ":empty",
dynamic_runtime_lib = ":empty",
linker_files = ":gcc_linux_linker_files",
objcopy_files = "//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:objcopy",
static_runtime_lib = ":empty",
strip_files = "//third_party/toolchains/embedded/linaro-gcc72-armeabi/gcc:strip",
supports_param_files = 0,
visibility = ["//visibility:public"],
)

@ -1,79 +0,0 @@
package(default_visibility = ['//third_party/toolchains/embedded/linaro-gcc72-armeabi:__pkg__'])
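# Each filegroup below pairs the real Linaro binary with its local wrapper
# script of the same name, so actions that run the wrapper also depend on the
# underlying tool.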
filegroup(
name = 'gcc',
srcs = [
'@LinaroArmGcc72//:gcc',
'arm-linux-gnueabihf-gcc',
],
)
filegroup(
name = 'ar',
srcs = [
'@LinaroArmGcc72//:ar',
'arm-linux-gnueabihf-ar',
],
)
filegroup(
name = 'ld',
srcs = [
'@LinaroArmGcc72//:ld',
'arm-linux-gnueabihf-ld',
],
)
filegroup(
name = 'nm',
srcs = [
'@LinaroArmGcc72//:nm',
'arm-linux-gnueabihf-nm',
],
)
filegroup(
name = 'objcopy',
srcs = [
'@LinaroArmGcc72//:objcopy',
'arm-linux-gnueabihf-objcopy',
],
)
filegroup(
name = 'objdump',
srcs = [
'@LinaroArmGcc72//:objdump',
'arm-linux-gnueabihf-objdump',
],
)
filegroup(
name = 'strip',
srcs = [
'@LinaroArmGcc72//:strip',
'arm-linux-gnueabihf-strip',
],
)
filegroup(
name = 'as',
srcs = [
'@LinaroArmGcc72//:as',
'arm-linux-gnueabihf-as',
],
)
filegroup(
name = 'tool-wrappers',
srcs = [
':gcc',
':ar',
':ld',
':nm',
':objcopy',
':objdump',
':strip',
':as',
],
)

@ -1,5 +0,0 @@
#!/bin/bash --norc
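# exec -a sets argv[0] to the canonical tool name before replacing this shell
# with the real Linaro binary.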
exec -a arm-linux-gnueabihf-ar \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-ar \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-as \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-as \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-cpp \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-cpp \
"$@"

@ -1,6 +0,0 @@
#!/bin/bash --norc
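# Put GCC's private tool directory (cc1, cc1plus, etc.) on PATH so the driver
# can find its helpers, then exec the real compiler.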
PATH="external/LinaroArmGcc72/libexec/gcc/arm-linux-gnueabihf/7.2.1/:$PATH" \
exec \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-gcc \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-gcov \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-gcov \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-ld \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-ld \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-nm \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-nm \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-objcopy \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-objcopy \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-objdump \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-objdump \
"$@"

@ -1,5 +0,0 @@
#!/bin/bash --norc
exec -a arm-linux-gnueabihf-strip \
external/LinaroArmGcc72/bin/arm-linux-gnueabihf-strip \
"$@"

@ -1,81 +0,0 @@
package(default_visibility = ['//visibility:public'])
filegroup(
name = 'gcc',
srcs = [
'bin/arm-linux-gnueabihf-gcc',
],
)
filegroup(
name = 'ar',
srcs = [
'bin/arm-linux-gnueabihf-ar',
],
)
filegroup(
name = 'ld',
srcs = [
'bin/arm-linux-gnueabihf-ld',
],
)
filegroup(
name = 'nm',
srcs = [
'bin/arm-linux-gnueabihf-nm',
],
)
filegroup(
name = 'objcopy',
srcs = [
'bin/arm-linux-gnueabihf-objcopy',
],
)
filegroup(
name = 'objdump',
srcs = [
'bin/arm-linux-gnueabihf-objdump',
],
)
filegroup(
name = 'strip',
srcs = [
'bin/arm-linux-gnueabihf-strip',
],
)
filegroup(
name = 'as',
srcs = [
'bin/arm-linux-gnueabihf-as',
],
)
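# Everything the cross-compiler needs on disk at build time: target headers and
# libraries, GCC's internal programs under libexec, and the fixed-up includes.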
filegroup(
name = 'compiler_pieces',
srcs = glob([
'arm-linux-gnueabihf/**',
'libexec/**',
'lib/gcc/arm-linux-gnueabihf/**',
'include/**',
]),
)
filegroup(
name = 'compiler_components',
srcs = [
':gcc',
':ar',
':ld',
':nm',
':objcopy',
':objdump',
':strip',
':as',
],
)

@ -1,484 +0,0 @@
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starlark cc_toolchain configuration rule"""
load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"artifact_name_pattern",
"env_entry",
"env_set",
"feature",
"feature_set",
"flag_group",
"flag_set",
"make_variable",
"tool",
"tool_path",
"variable_with_value",
"with_feature_set",
)
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
ACTION_NAMES.lto_backend,
]
all_cpp_compile_actions = [
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
]
preprocessor_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.clif_match,
]
codegen_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
abi_version = "armeabi"
abi_libc_version = "glibc_2.24"
builtin_sysroot = None
compiler = "gcc"
host_system_name = "armeabi"
needs_pic = True
supports_gold_linker = False
supports_incremental_linker = False
supports_fission = False
supports_interface_shared_objects = False
supports_normalizing_ar = False
supports_start_end_lib = False
supports_thin_archives = False
target_libc = "glibc_2.24"
target_cpu = "armv7"
target_system_name = "armeabi-v7a"
toolchain_identifier = "gcc72_linaro_armhf"
cc_target_os = None
action_configs = []
supports_pic_feature = feature(name = "supports_pic", enabled = True)
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
user_link_flags_feature = feature(
name = "user_link_flags",
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = ["%{user_link_flags}"],
iterate_over = "user_link_flags",
expand_if_available = "user_link_flags",
),
],
),
],
)
shared_flag_feature = feature(
name = "shared_flag",
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
ACTION_NAMES.lto_index_for_dynamic_library,
ACTION_NAMES.lto_index_for_nodeps_dynamic_library,
],
flag_groups = [flag_group(flags = ["-shared"])],
),
],
)
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
objcopy_embed_flags_feature = feature(
name = "objcopy_embed_flags",
enabled = True,
flag_sets = [
flag_set(
actions = ["objcopy_embed_data"],
flag_groups = [flag_group(flags = ["-I", "binary"])],
),
],
)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
# Make C++ compilation deterministic. Use linkstamping instead of these
# compiler symbols.
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
# This makes GCC and Clang do what we want when called through symlinks.
"-no-canonical-prefixes",
],
),
],
),
],
)
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-U_FORTIFY_SOURCE",
"-D_FORTIFY_SOURCE=1",
"-fstack-protector",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-std=c++11",
"--sysroot=external/LinaroArmGcc72/arm-linux-gnueabihf/libc",
"-pthread",
"-nostdinc",
"-isystem",
"external/LinaroArmGcc72/arm-linux-gnueabihf/include/c++/7.2.1/arm-linux-gnueabihf",
"-isystem",
"external/LinaroArmGcc72/arm-linux-gnueabihf/include/c++/7.2.1",
"-isystem",
"external/LinaroArmGcc72/lib/gcc/arm-linux-gnueabihf/7.2.1/include",
"-isystem",
"external/LinaroArmGcc72/arm-linux-gnueabihf/libc/usr/include",
"-isystem",
"external/LinaroArmGcc72/lib/gcc/arm-linux-gnueabihf/7.2.1/include-fixed",
"-isystem",
"external/LinaroArmGcc72/arm-linux-gnueabihf/libc/usr/include",
"-isystem",
"external/LinaroArmGcc72/arm-linux-gnueabihf/libc/usr/include/arm-linux-gnueabihf",
"-isystem",
"external/LinaroArmGcc72/lib/gcc/arm-linux-gnueabihf/7.2.1/include",
"-isystem",
"external/LinaroArmGcc72/include/c++/7.2.1/arm-linux-gnueabihf",
"-isystem",
"external/LinaroArmGcc72/include/c++/7.2.1",
# Security hardening on by default.
"-fstack-protector",
"-fPIE",
# All warnings are enabled. Maybe enable -Werror as well?
"-Wall",
# Enable a few more warnings that aren't part of -Wall.
"-Wunused-but-set-parameter",
# But disable some that are problematic.
"-Wno-free-nonheap-object", # has false positives
# Keep stack frames for debugging, even in opt mode.
"-fno-omit-frame-pointer",
# Enable coloring even if there's no attached terminal. Bazel removes the
# escape sequences if --nocolor is specified.
"-fdiagnostics-color=always",
],
),
],
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
# "-target",
# "arm-linux-gnueabihf",
"--sysroot=external/LinaroArmGcc72/arm-linux-gnueabihf/libc",
"-pass-exit-codes",
"-pie",
"-lstdc++",
"-lm",
"-lpthread",
"-Wl,--dynamic-linker=/lib/ld-linux-armhf.so.3",
"-Wl,-no-as-needed",
"-Wl,-z,relro,-z,now",
"-no-canonical-prefixes",
# Stamp the binary with a unique identifier.
"-Wl,--build-id=md5",
"-Wl,--hash-style=gnu",
"-Lexternal/LinaroArmGcc72/arm-linux-gnueabihf/lib",
"-Lexternal/LinaroArmGcc72/arm-linux-gnueabihf/libc/lib",
"-Lexternal/LinaroArmGcc72/arm-linux-gnueabihf/libc/usr/lib",
"-Bexternal/LinaroArmGcc72/arm-linux-gnueabihf/bin",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
opt_feature = feature(name = "opt")
dbg_feature = feature(name = "dbg")
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
supports_pic_feature,
objcopy_embed_flags_feature,
opt_feature,
dbg_feature,
user_compile_flags_feature,
user_link_flags_feature,
shared_flag_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
cxx_builtin_include_directories = [
"%package(@LinaroArmGcc72//include)%",
"%package(@LinaroArmGcc72//arm-linux-gnueabihf/libc/usr/include)%",
"%package(@LinaroArmGcc72//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/7.2.1/include-fixed)%",
"%package(@LinaroArmGcc72//include)%/c++/7.2.1",
"%package(@LinaroArmGcc72//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/7.2.1/include)%",
"%package(@LinaroArmGcc72//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/7.2.1/include-fixed)%",
"%package(@LinaroArmGcc72//lib/gcc/arm-linux-gnueabihf/7.2.1/include)%",
"%package(@LinaroArmGcc72//lib/gcc/arm-linux-gnueabihf/7.2.1/include-fixed)%",
"%package(@LinaroArmGcc72//arm-linux-gnueabihf/include)%/c++/7.2.1",
]
artifact_name_patterns = []
make_variables = []
tool_paths = [
tool_path(name = "ar", path = "gcc/arm-linux-gnueabihf-ar"),
tool_path(name = "compat-ld", path = "gcc/arm-linux-gnueabihf-ld"),
tool_path(name = "cpp", path = "gcc/arm-linux-gnueabihf-cpp"),
tool_path(name = "dwp", path = "gcc/arm-linux-gnueabihf-dwp"),
tool_path(name = "gcc", path = "gcc/arm-linux-gnueabihf-gcc"),
tool_path(name = "gcov", path = "arm-frc-linux-gnueabi/arm-frc-linux-gnueabi-gcov-4.9"),
        # C(++) compiles invoke the compiler (as that is the one knowing where
        # to find libraries), but we provide LD so other rules can invoke the linker.
tool_path(name = "ld", path = "gcc/arm-linux-gnueabihf-ld"),
tool_path(name = "nm", path = "gcc/arm-linux-gnueabihf-nm"),
tool_path(name = "objcopy", path = "gcc/arm-linux-gnueabihf-objcopy"),
tool_path(name = "objdump", path = "gcc/arm-linux-gnueabihf-objdump"),
tool_path(name = "strip", path = "gcc/arm-linux-gnueabihf-strip"),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
)
linaro_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)