Merge pull request #26915 from amitsrivastava78:error_8

PiperOrigin-RevId: 239465456
commit b4ecc7875f
Author: TensorFlower Gardener
Date:   2019-03-20 14:17:04 -07:00
4 changed files with 5 additions and 5 deletions


@@ -64,7 +64,7 @@ class BaseConvolutionOpModel : public SingleOpModel {
           filter.per_channel_quantization_scales.size());
       std::vector<int64_t> bias_zero_points(
           filter.per_channel_quantization_scales.size());
-      for (int i = 0; i < filter.per_channel_quantization_scales.size();
+      for (size_t i = 0; i < filter.per_channel_quantization_scales.size();
            ++i) {
         bias_scale[i] =
             input.scale * filter.per_channel_quantization_scales[i];
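
The pattern fixed in the hunks above and below is the same in each file: a signed int loop index is compared against std::vector::size(), which returns the unsigned size_t, so -Wsign-compare fires and the build fails once warnings are treated as errors. A minimal stand-alone sketch of the before/after pattern (illustrative only, not the TFLite test code itself):

    #include <cstddef>
    #include <vector>

    int SumAll(const std::vector<int>& values) {
      int total = 0;
      // Before: `int i < values.size()` mixes a signed index with an unsigned
      // bound, so -Wsign-compare warns and -Werror turns it into a hard error:
      //
      //   for (int i = 0; i < values.size(); ++i) { ... }
      //
      // After: a size_t index keeps both sides of the comparison unsigned,
      // which is the change made in these hunks.
      for (size_t i = 0; i < values.size(); ++i) {
        total += values[i];
      }
      return total;
    }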


@@ -62,7 +62,7 @@ class BaseDepthwiseConvolutionOpModel : public SingleOpModel {
           filter.per_channel_quantization_scales.size());
       std::vector<int64_t> bias_zero_points(
           filter.per_channel_quantization_scales.size());
-      for (int i = 0; i < filter.per_channel_quantization_scales.size();
+      for (size_t i = 0; i < filter.per_channel_quantization_scales.size();
            ++i) {
         bias_scale[i] =
             input.scale * filter.per_channel_quantization_scales[i];


@@ -142,7 +142,7 @@ class BaseFullyConnectedOpModel : public SingleOpModel {
                             FullyConnectedOptionsWeightsFormat_DEFAULT)
       : batches_(batches), units_(units) {
     int total_input_size = 1;
-    for (int i = 0; i < input.shape.size(); ++i) {
+    for (size_t i = 0; i < input.shape.size(); ++i) {
       total_input_size *= input.shape[i];
     }
     input_size_ = total_input_size / batches_;
@@ -278,7 +278,7 @@ class HybridFullyConnectedOpModel : public SingleOpModel {
       const TensorData& output = {TensorType_FLOAT32})
       : batches_(batches), units_(units) {
     int total_input_size = 1;
-    for (int i = 0; i < input.shape.size(); ++i) {
+    for (size_t i = 0; i < input.shape.size(); ++i) {
       total_input_size *= input.shape[i];
     }
     input_size_ = total_input_size / batches_;


@@ -117,7 +117,7 @@ void SymmetricPerChannelQuantization(const float* const input,
   // Calculate scales per channel
   std::vector<float> scale_invs(channel_dim_size);
   const float half_scale = kMaxQuantizedValue;
-  for (size_t channel_idx = 0; channel_idx < channel_dim_size; channel_idx++) {
+  for (int channel_idx = 0; channel_idx < channel_dim_size; channel_idx++) {
     const float half_range = std::max(std::abs(min_vals[channel_idx]),
                                       std::abs(max_vals[channel_idx]));
     output_scales->at(channel_idx) = half_range / half_scale;
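
This last hunk goes in the opposite direction: the loop bound channel_dim_size appears to be a signed int (inferred from the direction of the change), so a size_t index produces the same signed/unsigned comparison warning, and the fix is to keep the index signed. A small hypothetical sketch of that case:

    #include <vector>

    // Hypothetical illustration only; channel_dim_size is assumed to be a
    // signed int, as the change above suggests.
    void FillOnes(std::vector<float>* out, int channel_dim_size) {
      // Before: `size_t channel_idx < channel_dim_size` compares an unsigned
      // index against a signed bound, which -Wsign-compare flags just like the
      // loops in the earlier hunks. An int index matches the signed bound.
      for (int channel_idx = 0; channel_idx < channel_dim_size; channel_idx++) {
        out->at(channel_idx) = 1.0f;
      }
    }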