Tomohiro Ubukata 2020-03-11 10:08:05 +00:00
parent 0a0a204598
commit 88ddf3b7a1
4 changed files with 23 additions and 22 deletions


@@ -58,7 +58,7 @@ END
Provide a basic summary of numeric value types, range and distribution.
output: A double tensor of shape [14 + nDimensions], where nDimensions is the
-the number of dimensions of the tensor's shape. The elements of output are:
+number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
@@ -68,7 +68,7 @@ output: A double tensor of shape [14 + nDimensions], where nDimensions is the
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
[6]: positive element count (excluding +inf), if upper_bound is the default
--inf. Otherwise, this is the count of elements < upper_bound and > 0.
++inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
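
As a reading aid for the layout documented above, here is a minimal, standalone C++ sketch that fills elements [0] through [7] for the default bounds (lower_bound = -inf, upper_bound = +inf). It is only an illustration of the documented layout, not the TensorFlow kernel; the elements from [8] onward are not shown in this hunk and are omitted.

#include <cmath>
#include <cstdio>
#include <vector>

// Sketch of the documented layout, default bounds only:
// [0] initialized flag, [1] total count, [2] NaN count, [3] -inf count,
// [4] negative count, [5] zero count, [6] positive count, [7] +inf count.
std::vector<double> NumericSummaryHead(const std::vector<double>& values,
                                       bool initialized) {
  std::vector<double> s(8, 0.0);
  s[0] = initialized ? 1.0 : 0.0;
  if (!initialized) return s;  // elements [1:8] stay zero when uninitialized
  s[1] = static_cast<double>(values.size());
  for (double v : values) {
    if (std::isnan(v)) {
      s[2] += 1;
    } else if (std::isinf(v)) {
      s[v < 0 ? 3 : 7] += 1;  // generalized -inf / +inf counts
    } else if (v < 0) {
      s[4] += 1;  // finite negative
    } else if (v == 0) {
      s[5] += 1;
    } else {
      s[6] += 1;  // finite positive
    }
  }
  return s;
}

int main() {
  const std::vector<double> x = {-INFINITY, -2.0, 0.0, 3.0, NAN, INFINITY};
  for (double e : NumericSummaryHead(x, /*initialized=*/true)) {
    std::printf("%g ", e);  // prints: 1 6 1 1 1 1 1 1
  }
  std::printf("\n");
}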


@@ -656,15 +656,15 @@ bool Tensor::IsInitialized() const {
}
void Tensor::CheckType(DataType expected_dtype) const {
-CHECK_EQ(dtype(), expected_dtype)
-<< " " << DataTypeString(expected_dtype) << " expected, got "
-<< DataTypeString(dtype());
+CHECK_EQ(dtype(), expected_dtype) << " " << DataTypeString(expected_dtype)
+<< " expected, got "
+<< DataTypeString(dtype());
}
void Tensor::CheckTypeAndIsAligned(DataType expected_dtype) const {
-CHECK_EQ(dtype(), expected_dtype)
-<< " " << DataTypeString(expected_dtype) << " expected, got "
-<< DataTypeString(dtype());
+CHECK_EQ(dtype(), expected_dtype) << " " << DataTypeString(expected_dtype)
+<< " expected, got "
+<< DataTypeString(dtype());
CHECK(IsAligned()) << "ptr = " << base<void>();
}
@@ -764,9 +764,10 @@ bool Tensor::RefCountIsOne() const {
break; \
}
-#define CASES(TYPE_ENUM, STMTS) \
-CASES_WITH_DEFAULT(TYPE_ENUM, STMTS, LOG(FATAL) << "Type not set"; \
-, LOG(FATAL) << "Unexpected type: " << TYPE_ENUM;)
+#define CASES(TYPE_ENUM, STMTS) \
+CASES_WITH_DEFAULT(TYPE_ENUM, STMTS, LOG(FATAL) << "Unexpected type: " \
+<< TYPE_ENUM; \
+, LOG(FATAL) << "Type not set";)
Tensor::Tensor(Allocator* a, DataType type, const TensorShape& shape)
: shape_(shape), buf_(nullptr) {
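
Beyond rewrapping, the hunk above swaps which LOG(FATAL) message is passed as the third argument of CASES_WITH_DEFAULT and which as the fourth. The sketch below is a hypothetical, self-contained macro (not TensorFlow's actual definition) showing why that ordering matters when the two trailing arguments feed two different failure branches: one for a type that was never set and one for an out-of-range value.

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins; TensorFlow's real enum and macros are more elaborate.
enum FakeDataType { FAKE_DT_INVALID = 0, FAKE_DT_FLOAT = 1 };

#define FAIL(msg, val)                                             \
  do {                                                             \
    std::fprintf(stderr, "%s (%d)\n", msg, static_cast<int>(val)); \
    std::abort();                                                  \
  } while (0)

// ON_UNSET runs when the enum still holds its zero "not set" value;
// ON_UNKNOWN runs for any value with no matching case.
#define DISPATCH(TYPE_ENUM, STMTS, ON_UNSET, ON_UNKNOWN) \
  switch (TYPE_ENUM) {                                   \
    case FAKE_DT_FLOAT: {                                \
      STMTS;                                             \
      break;                                             \
    }                                                    \
    case FAKE_DT_INVALID: {                              \
      ON_UNSET;                                          \
      break;                                             \
    }                                                    \
    default: {                                           \
      ON_UNKNOWN;                                        \
      break;                                             \
    }                                                    \
  }

void Example(FakeDataType t) {
  // Swapping the last two arguments would report "Unexpected type" for a
  // value that was simply never set, and "Type not set" for a bogus value.
  DISPATCH(t, std::puts("dispatching on float"),
           FAIL("Type not set", t),
           FAIL("Unexpected type", t));
}

int main() { Example(FAKE_DT_FLOAT); }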
@@ -1255,14 +1256,14 @@ bool Tensor::SharesBufferWith(const Tensor& b) const {
}
string Tensor::DebugString(int num_values) const {
-return strings::StrCat("Tensor<type: ", DataTypeString(dtype()),
-" shape: ", shape().DebugString(),
-" values: ", SummarizeValue(num_values), ">");
+return strings::StrCat("Tensor<type: ", DataTypeString(dtype()), " shape: ",
+shape().DebugString(), " values: ",
+SummarizeValue(num_values), ">");
}
string Tensor::DeviceSafeDebugString() const {
-return strings::StrCat("Tensor<type: ", DataTypeString(dtype()),
-" shape: ", shape().DebugString(), ">");
+return strings::StrCat("Tensor<type: ", DataTypeString(dtype()), " shape: ",
+shape().DebugString(), ">");
}
void Tensor::FillDescription(TensorDescription* description) const {


@@ -346,7 +346,7 @@ void Assign(const D& d, Out out, Rhs rhs) {
}
// Partial specialization of BinaryFunctor<Device=CPUDevice, Functor, NDIMS>
-// for functors with with no error checking.
+// for functors with no error checking.
template <typename Functor, int NDIMS>
struct BinaryFunctor<CPUDevice, Functor, NDIMS, false> {
void operator()(const CPUDevice& d, typename Functor::tout_type out,
@@ -405,7 +405,7 @@ struct BinaryFunctor<CPUDevice, Functor, NDIMS, false> {
};
// Partial specialization of BinaryFunctor<Device=CPUDevice, Functor, 2>
-// for functors with with no error checking.
+// for functors with no error checking.
template <typename Functor>
struct BinaryFunctor<CPUDevice, Functor, 2, false> {
enum { NDIMS = 2 };
@@ -472,7 +472,7 @@ struct BinaryFunctor<CPUDevice, Functor, 2, false> {
typename Functor::func func;
if (Functor::use_bcast_optimization && use_bcast_optimization<T>::value) {
// Optimize for speed by using Eigen::type2index and avoid
-// .broadcast() when we know its a no-op.
+// .broadcast() when we know it's a no-op.
//
// Here, we need to handle 6 cases depending on how many "1"
// exist in in0 and in1's shapes (4 numbers in total). It's not
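
As a rough sketch of the optimization described in the comment above (independent of TensorFlow's BinaryFunctor code, and assuming a recent Eigen with the unsupported Tensor module), Eigen::IndexList with Eigen::type2index lets a broadcast factor be a compile-time 1, so Eigen can skip the broadcast work along that dimension:

#include <iostream>
#include "unsupported/Eigen/CXX11/Tensor"  // include path may differ per install

int main() {
  Eigen::Tensor<float, 2> in(1, 3);
  in.setValues({{1.f, 2.f, 3.f}});

  // The broadcast factor along dim 1 is the compile-time constant 1,
  // so only dim 0 is actually replicated at run time.
  Eigen::IndexList<int, Eigen::type2index<1>> bcast;
  bcast.set(0, 4);  // replicate the single row 4 times

  Eigen::Tensor<float, 2> out = in.broadcast(bcast);
  std::cout << out << std::endl;  // 4 x 3 tensor, every row is: 1 2 3
}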


@@ -18,11 +18,11 @@ limitations under the License.
#include <type_traits>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
namespace tensorflow {
@@ -44,7 +44,7 @@ constexpr int kMaxSpaceToBatchBlockDims = 4;
MACRO(2 /**/, ##__VA_ARGS__) \
MACRO(3 /**/, ##__VA_ARGS__) \
MACRO(4 /**/, ##__VA_ARGS__) \
-/**/
+/**/
namespace internal {
namespace spacetobatch {
@@ -80,7 +80,7 @@ namespace functor {
// Functor used by {SpaceToBatch,BatchToSpace}{ND,}Op to do the conversion.
//
-// If B2S is false, then this performs the space-to-batch conversion. If S2B is
+// If B2S is false, then this performs the space-to-batch conversion. If B2S is
// true, then this performs the inverse batch-to-space conversion.
template <typename Device, typename T, int NUM_BLOCK_DIMS, bool B2S = false>
struct SpaceToBatchFunctor {
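
The corrected comment concerns the meaning of the B2S template flag. As a generic, hypothetical C++17 example of the pattern (not the actual SpaceToBatchFunctor implementation), a boolean non-type template parameter like this commonly selects between a transform and its inverse at compile time:

#include <iostream>

// Hypothetical functor: B2S = false applies the "forward" transform,
// B2S = true applies its inverse, mirroring the comment's convention.
template <typename T, bool B2S = false>
struct ShiftFunctor {
  T operator()(T x, T offset) const {
    if constexpr (B2S) {
      return x - offset;  // inverse direction
    } else {
      return x + offset;  // forward direction
    }
  }
};

int main() {
  ShiftFunctor<int> forward;         // B2S defaults to false
  ShiftFunctor<int, true> backward;  // inverse
  std::cout << forward(10, 3) << " " << backward(13, 3) << "\n";  // 13 10
}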