Be more pedantic about type conversions to avoid warnings on Windows.
PiperOrigin-RevId: 297716959 Change-Id: I5bc28c70a988448e90e8acf3ab877ac017879b0a
This commit is contained in:
parent
688b902589
commit
24fe4c1a0d
@ -1885,9 +1885,12 @@ void ProcessSparseToDenseOperator(Model* model, SparseToDenseOperator* op) {
|
||||
} else {
|
||||
const std::vector<int64>& output_shape_data =
|
||||
output_shape_array.GetBuffer<ArrayDataType::kInt64>().data;
|
||||
std::copy(
|
||||
// explicitly cast elements to int in order to avoid MSVC warnings about
|
||||
// narrowing conversion.
|
||||
std::transform(
|
||||
output_shape_data.begin(), output_shape_data.end(),
|
||||
std::back_inserter(*output_array.mutable_shape()->mutable_dims()));
|
||||
std::back_inserter(*output_array.mutable_shape()->mutable_dims()),
|
||||
[](const int64 dim) { return static_cast<int>(dim); });
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -64,8 +64,9 @@ std::pair<int, int> TileOneDimension(const Shape& in_dimensions,
|
||||
CopyMultipleTimes(out_data, total_tiled_stride_size,
|
||||
multipliers[dimension] - 1,
|
||||
out_data + total_tiled_stride_size);
|
||||
return std::make_pair(total_stride_size,
|
||||
total_tiled_stride_size * multipliers[dimension]);
|
||||
return std::make_pair(
|
||||
total_stride_size,
|
||||
static_cast<int>(total_tiled_stride_size * multipliers[dimension]));
|
||||
}
|
||||
|
||||
template <ArrayDataType Type>
|
||||
|
@ -241,8 +241,11 @@ TransposeOperator* TransposeInput(const string& input, Model* model) {
|
||||
|
||||
// Reshape the rank-3 Tensor into the correct output shape.
|
||||
const auto& result_batch_shape = bcast.output_batch_shape().dim_sizes();
|
||||
std::vector<int> result_shape(result_batch_shape.begin(),
|
||||
result_batch_shape.end());
|
||||
std::vector<int> result_shape;
|
||||
// Explicitly cast 64-bit sizes to int in order to avoid MSVC warnings.
|
||||
std::transform(result_batch_shape.begin(), result_batch_shape.end(),
|
||||
std::back_inserter(result_shape),
|
||||
[](const int64 dim) { return static_cast<int>(dim); });
|
||||
result_shape.push_back(input_array_a.shape().dims(dims_a - 2));
|
||||
result_shape.push_back(input_array_b.shape().dims(dims_b - 1));
|
||||
|
||||
|
@ -533,7 +533,8 @@ string CreateConstArray(Model* model, string const& name,
|
||||
string array_name = toco::AvailableArrayName(*model, name);
|
||||
auto& array = model->GetOrCreateArray(array_name);
|
||||
array.data_type = T;
|
||||
array.mutable_shape()->mutable_dims()->emplace_back(data.size());
|
||||
array.mutable_shape()->mutable_dims()->emplace_back(
|
||||
static_cast<int>(data.size()));
|
||||
array.GetMutableBuffer<T>().data = data;
|
||||
return array_name;
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
|
||||
// LogSoftmax requires output with 16/256 as scale and 127 as zero point.
|
||||
TensorProperty tensor_property;
|
||||
tensor_property.restriction = true;
|
||||
tensor_property.restricted_value = {16.0 / 256.0, 127};
|
||||
tensor_property.restricted_value = {16.0f / 256.0f, 127};
|
||||
property.outputs = {{0, tensor_property}};
|
||||
property.version = 2;
|
||||
break;
|
||||
@ -186,7 +186,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
|
||||
// Logistic requires output with 1/256 as scale and -128 as zero point.
|
||||
TensorProperty tensor_property;
|
||||
tensor_property.restriction = true;
|
||||
tensor_property.restricted_value = {1 / 256.0, -128};
|
||||
tensor_property.restricted_value = {1 / 256.0f, -128};
|
||||
property.outputs = {{0, tensor_property}};
|
||||
property.version = 2;
|
||||
break;
|
||||
@ -741,7 +741,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
|
||||
// L2 Norm requires output with 1/128 as scale and 0 as zero point.
|
||||
TensorProperty tensor_property;
|
||||
tensor_property.restriction = true;
|
||||
tensor_property.restricted_value = {1 / 128.0, 0};
|
||||
tensor_property.restricted_value = {1 / 128.0f, 0};
|
||||
property.outputs = {{0, tensor_property}};
|
||||
property.version = 2;
|
||||
break;
|
||||
@ -841,7 +841,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
|
||||
// Softmax requires output with 1/256 as scale and -128 as zero point.
|
||||
TensorProperty tensor_property;
|
||||
tensor_property.restriction = true;
|
||||
tensor_property.restricted_value = {1 / 256.0, -128};
|
||||
tensor_property.restricted_value = {1 / 256.0f, -128};
|
||||
property.outputs = {{0, tensor_property}};
|
||||
property.version = 2;
|
||||
break;
|
||||
@ -867,7 +867,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
|
||||
// Tanh requires output with 1/128 as scale and 0 as zero point.
|
||||
TensorProperty tensor_property;
|
||||
tensor_property.restriction = true;
|
||||
tensor_property.restricted_value = {1 / 128.0, 0};
|
||||
tensor_property.restricted_value = {1 / 128.0f, 0};
|
||||
property.outputs = {{0, tensor_property}};
|
||||
property.version = 2;
|
||||
break;
|
||||
|
@ -43,7 +43,7 @@ struct TensorProperty {
|
||||
// Constraints.
|
||||
bool restriction = false;
|
||||
// scale/zero_point hardcoded.
|
||||
std::pair<float, int> restricted_value = {0.0, 0};
|
||||
std::pair<float, int> restricted_value = {0.0f, 0};
|
||||
|
||||
// Use derived scale.
|
||||
bool use_derived_scale = false;
|
||||
|
@ -304,7 +304,7 @@ void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices,
|
||||
template <typename T>
|
||||
TfLiteStatus FormatConverter<T>::SparseToDense(const T* src_data) {
|
||||
data_.resize(dense_size_);
|
||||
std::fill(data_.begin(), data_.end(), 0);
|
||||
std::fill(data_.begin(), data_.end(), T(0));
|
||||
|
||||
int total_rank = traversal_order_.size();
|
||||
int src_data_ptr = 0;
|
||||
|
Loading…
x
Reference in New Issue
Block a user