From ecfa268e7432ff4befaaf24c135acd80b277965b Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Fri, 29 Mar 2019 19:49:23 +0530 Subject: [PATCH 01/10] Modified tf.math.add_n accordingly. --- .../api_def/base_api/api_def_Bitcast.pbtxt | 24 ------------------- tensorflow/python/ops/math_ops.py | 12 +++++++++- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt index 773df2fbe1c..c51d04230bb 100644 --- a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt +++ b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt @@ -15,30 +15,6 @@ dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from tf.bitcast() is used when you have to cast to an unsigned dtype (e.g. tf.uint8) where tf.cast() will not work effectively. -``` -python -code here -``` -For example, - tf.bitcast(tf.cast(my_tensor, tf.int8), tf.uint8) - -Other example, ->>> import tensorflow as tf ->>> x = [1., 2., 3.] ->>> y = [0., 2., 3.] ->>> equality= tf.equal(x,y) ->>> equality_cast = tf.cast(equality,tf.float32) ->>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) ->>> print(equality) -tf.Tensor([False True True], shape=(3,), dtype=bool) ->>> print(equality_cast) -tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) ->>> print(equality_bitcast) -tf.Tensor( -[[ 0 0 0 0] - [ 0 0 128 63] - [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results. END diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py index fc650dbfd55..69721ce4839 100644 --- a/tensorflow/python/ops/math_ops.py +++ b/tensorflow/python/ops/math_ops.py @@ -2786,7 +2786,17 @@ def add_n(inputs, name=None): """Adds all input tensors element-wise. Converts `IndexedSlices` objects into dense tensors prior to adding. - + tf.math.add_n performs same operation as'tf.math.accumulate_n' but it waits + for all of its inputs to be ready before beginning to sum, that consumes more + memory. + For example: + + ```python + a = tf.constant([[3, 5], [4, 8]]) + b = tf.constant([[1, 6], [2, 9]]) + tf.math.add_n([a, b, a]) # [[7, 16], [10, 25]] + + ``` Args: inputs: A list of `Tensor` or `IndexedSlices` objects, each with same shape and type. From 4b7877cbb28e383a67df505960ed3ee42df171a6 Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Fri, 29 Mar 2019 19:52:54 +0530 Subject: [PATCH 02/10] removed few errors --- tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt | 3 --- 1 file changed, 3 deletions(-) diff --git a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt index c51d04230bb..e4d4f9ea08e 100644 --- a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt +++ b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt @@ -12,9 +12,6 @@ If `T` is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from [..., sizeof(`type`)/sizeof(`T`)] to [...]. -tf.bitcast() is used when you have to cast to an unsigned dtype (e.g. tf.uint8) -where tf.cast() will not work effectively. - *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results. 
END From 1091a9e9152ff366ccc69cd5cf45cbe247700b68 Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Fri, 29 Mar 2019 22:07:49 +0530 Subject: [PATCH 03/10] Done with changes --- tensorflow/python/eager/backprop.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py index 87cc6444f5f..1bbb095036b 100644 --- a/tensorflow/python/eager/backprop.py +++ b/tensorflow/python/eager/backprop.py @@ -211,7 +211,8 @@ def implicit_val_and_grad(f): variables = this_tape.watched_variables() if not variables: raise ValueError("No trainable variables were accessed while the " - "function was being computed.") + "function was being computed.") + sources = [v.handle for v in variables] grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), sources) From c8bdda08cb20f552cc1a54c390dc7cdb04f16ddc Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Sat, 30 Mar 2019 11:42:04 +0530 Subject: [PATCH 04/10] Fixed whitespace --- tensorflow/python/eager/backprop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py index 1bbb095036b..3e1758d063f 100644 --- a/tensorflow/python/eager/backprop.py +++ b/tensorflow/python/eager/backprop.py @@ -212,7 +212,7 @@ def implicit_val_and_grad(f): if not variables: raise ValueError("No trainable variables were accessed while the " "function was being computed.") - + sources = [v.handle for v in variables] grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), sources) From 7856d565c6a2de27b2df8d3a155a53452461d9ee Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Tue, 2 Apr 2019 07:01:48 +0530 Subject: [PATCH 05/10] Done with changes --- .../api_def/base_api/api_def_Bitcast.pbtxt | 27 +++++++++++++++++++ tensorflow/python/eager/backprop.py | 3 +-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt index e4d4f9ea08e..773df2fbe1c 100644 --- a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt +++ b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt @@ -12,6 +12,33 @@ If `T` is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from [..., sizeof(`type`)/sizeof(`T`)] to [...]. +tf.bitcast() is used when you have to cast to an unsigned dtype (e.g. tf.uint8) +where tf.cast() will not work effectively. + +``` +python +code here +``` +For example, + tf.bitcast(tf.cast(my_tensor, tf.int8), tf.uint8) + +Other example, +>>> import tensorflow as tf +>>> x = [1., 2., 3.] +>>> y = [0., 2., 3.] +>>> equality= tf.equal(x,y) +>>> equality_cast = tf.cast(equality,tf.float32) +>>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) +>>> print(equality) +tf.Tensor([False True True], shape=(3,), dtype=bool) +>>> print(equality_cast) +tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) +>>> print(equality_bitcast) +tf.Tensor( +[[ 0 0 0 0] + [ 0 0 128 63] + [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results. 
END diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py index 3e1758d063f..87cc6444f5f 100644 --- a/tensorflow/python/eager/backprop.py +++ b/tensorflow/python/eager/backprop.py @@ -211,8 +211,7 @@ def implicit_val_and_grad(f): variables = this_tape.watched_variables() if not variables: raise ValueError("No trainable variables were accessed while the " - "function was being computed.") - + "function was being computed.") sources = [v.handle for v in variables] grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), sources) From 7d0697826e1278ce013380c557a30715c15c8f59 Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Tue, 2 Apr 2019 07:54:02 +0530 Subject: [PATCH 06/10] Done with changes requested --- tensorflow/python/ops/math_ops.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py index 69721ce4839..d7c47b7bc7d 100644 --- a/tensorflow/python/ops/math_ops.py +++ b/tensorflow/python/ops/math_ops.py @@ -2786,17 +2786,18 @@ def add_n(inputs, name=None): """Adds all input tensors element-wise. Converts `IndexedSlices` objects into dense tensors prior to adding. - tf.math.add_n performs same operation as'tf.math.accumulate_n' but it waits + 'tf.math.add_n' performs same operation as 'tf.math.accumulate_n' but it waits for all of its inputs to be ready before beginning to sum, that consumes more - memory. + memory as inputs are not ready at different times, since minimum temporary storage + is proportional to the output size rather than the inputs size. For example: ```python a = tf.constant([[3, 5], [4, 8]]) b = tf.constant([[1, 6], [2, 9]]) tf.math.add_n([a, b, a]) # [[7, 16], [10, 25]] - - ``` + ``` + Args: inputs: A list of `Tensor` or `IndexedSlices` objects, each with same shape and type. From 8be9d9b76a092d014eed754b57826a9d267e7449 Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Tue, 2 Apr 2019 16:40:03 +0530 Subject: [PATCH 07/10] Final Changes --- .../api_def/base_api/api_def_Bitcast.pbtxt | 29 ++----------------- tensorflow/python/ops/math_ops.py | 3 +- 2 files changed, 5 insertions(+), 27 deletions(-) diff --git a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt index 773df2fbe1c..d555ecc0ab1 100644 --- a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt +++ b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt @@ -12,32 +12,9 @@ If `T` is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from [..., sizeof(`type`)/sizeof(`T`)] to [...]. -tf.bitcast() is used when you have to cast to an unsigned dtype (e.g. tf.uint8) -where tf.cast() will not work effectively. - -``` -python -code here -``` -For example, - tf.bitcast(tf.cast(my_tensor, tf.int8), tf.uint8) - -Other example, ->>> import tensorflow as tf ->>> x = [1., 2., 3.] ->>> y = [0., 2., 3.] ->>> equality= tf.equal(x,y) ->>> equality_cast = tf.cast(equality,tf.float32) ->>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) ->>> print(equality) -tf.Tensor([False True True], shape=(3,), dtype=bool) ->>> print(equality_cast) -tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) ->>> print(equality_bitcast) -tf.Tensor( -[[ 0 0 0 0] - [ 0 0 128 63] - [ 0 0 128 63]], shape=(3, 4), dtype=uint8) +In case ofconverting from a real data type to complex data type(e.g. 
tf.complex64, +tf.complex128) tf.bitcast() give error while tf.cast() make imaginary part of output +0. *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results. diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py index d7c47b7bc7d..ffe53621106 100644 --- a/tensorflow/python/ops/math_ops.py +++ b/tensorflow/python/ops/math_ops.py @@ -2786,7 +2786,8 @@ def add_n(inputs, name=None): """Adds all input tensors element-wise. Converts `IndexedSlices` objects into dense tensors prior to adding. - 'tf.math.add_n' performs same operation as 'tf.math.accumulate_n' but it waits + + `tf.math.add_n` performs the same operation as `tf.math.accumulate_n` but it waits for all of its inputs to be ready before beginning to sum, that consumes more memory as inputs are not ready at different times, since minimum temporary storage is proportional to the output size rather than the inputs size. From 4e37e19e6561e05c74ad62ce2535d1b859511db8 Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Tue, 2 Apr 2019 16:49:30 +0530 Subject: [PATCH 08/10] Final correction --- .../api_def/base_api/api_def_Bitcast.pbtxt | 29 +++++++++++++++++-- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt index d555ecc0ab1..773df2fbe1c 100644 --- a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt +++ b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt @@ -12,9 +12,32 @@ If `T` is smaller than `type`, the operator requires that the rightmost dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from [..., sizeof(`type`)/sizeof(`T`)] to [...]. -In case ofconverting from a real data type to complex data type(e.g. tf.complex64, -tf.complex128) tf.bitcast() give error while tf.cast() make imaginary part of output -0. +tf.bitcast() is used when you have to cast to an unsigned dtype (e.g. tf.uint8) +where tf.cast() will not work effectively. + +``` +python +code here +``` +For example, + tf.bitcast(tf.cast(my_tensor, tf.int8), tf.uint8) + +Other example, +>>> import tensorflow as tf +>>> x = [1., 2., 3.] +>>> y = [0., 2., 3.] +>>> equality= tf.equal(x,y) +>>> equality_cast = tf.cast(equality,tf.float32) +>>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) +>>> print(equality) +tf.Tensor([False True True], shape=(3,), dtype=bool) +>>> print(equality_cast) +tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) +>>> print(equality_bitcast) +tf.Tensor( +[[ 0 0 0 0] + [ 0 0 128 63] + [ 0 0 128 63]], shape=(3, 4), dtype=uint8) *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results. From 433882c1ab31f6038697e5f0b76050632f51aca3 Mon Sep 17 00:00:00 2001 From: shashvatshahi1998 Date: Wed, 3 Apr 2019 08:09:58 +0530 Subject: [PATCH 09/10] Added modifications --- tensorflow/python/ops/math_ops.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py index ffe53621106..69c0846cad9 100644 --- a/tensorflow/python/ops/math_ops.py +++ b/tensorflow/python/ops/math_ops.py @@ -2788,9 +2788,9 @@ def add_n(inputs, name=None): Converts `IndexedSlices` objects into dense tensors prior to adding. 
  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n` but it waits
-  for all of its inputs to be ready before beginning to sum, that consumes more
-  memory as inputs are not ready at different times, since minimum temporary storage
-  is proportional to the output size rather than the inputs size.
+  for all of its inputs to be ready before beginning to sum. This can consume more
+  memory when inputs are ready at different times, since the minimum temporary storage
+  required is proportional to the input size rather than the output size.
   For example:
 
   ```python

From bd5a9665b101f7d0e5d37f7db8381d3189d34927 Mon Sep 17 00:00:00 2001
From: shashvatshahi1998
Date: Wed, 3 Apr 2019 23:49:40 +0530
Subject: [PATCH 10/10] Reduced length of lines

---
 tensorflow/python/ops/math_ops.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 69c0846cad9..6086de02978 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -2787,10 +2787,11 @@ def add_n(inputs, name=None):
 
   Converts `IndexedSlices` objects into dense tensors prior to adding.
 
-  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n` but it waits
-  for all of its inputs to be ready before beginning to sum. This can consume more
-  memory when inputs are ready at different times, since the minimum temporary storage
-  required is proportional to the input size rather than the output size.
+  `add_n` performs the same operation as `accumulate_n` but it waits
+  for all of its inputs to be ready before beginning to sum. This can
+  consume more memory when inputs are ready at different times, since
+  the minimum temporary storage required is proportional to the input
+  size rather than the output size.
   For example:
 
   ```python
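
For reference, the behaviour documented in the final patch can be sanity-checked with a minimal sketch (assuming eager execution, e.g. a TensorFlow 2.x-style build; the tensors reuse the docstring example above, and `tf.math.accumulate_n` is called only for comparison):

```python
import tensorflow as tf

a = tf.constant([[3, 5], [4, 8]])
b = tf.constant([[1, 6], [2, 9]])

# add_n materialises every input before summing; accumulate_n may add
# inputs as they become ready, but both produce the same element-wise sum.
print(tf.math.add_n([a, b, a]).numpy())         # => [[7, 16], [10, 25]]
print(tf.math.accumulate_n([a, b, a]).numpy())  # => [[7, 16], [10, 25]]
```

Either call yields [[7, 16], [10, 25]]; the difference the docstring describes is in peak memory use, not in the result.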