Qualify calls to some functions from <cmath>.

PiperOrigin-RevId: 240190307
This commit is contained in:
A. Unique TensorFlower 2019-03-25 12:04:42 -07:00 committed by TensorFlower Gardener
parent 03d5dc4a5e
commit e38c98c650
7 changed files with 20 additions and 16 deletions

View File

@@ -84,7 +84,7 @@ class FractionalAvgPoolOp : public OpKernel {
// Output size.
for (int i = 0; i < tensor_in_and_out_dims; ++i) {
output_size[i] =
static_cast<int>(floor(input_size[i] / pooling_ratio_[i]));
static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i]));
DCHECK_GT(output_size[i], 0);
}

View File

@@ -89,7 +89,7 @@ class FractionalMaxPoolOp : public OpKernel {
// This must match the same logic in the shape function in
// core/ops/nn_ops.cc.
output_size[i] =
static_cast<int>(floor(input_size[i] / pooling_ratio_[i]));
static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i]));
DCHECK_GT(output_size[i], 0);
}

View File

@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <cmath>
#define EIGEN_USE_THREADS
#include <limits>
@@ -499,7 +500,7 @@ void TestAvoidBias() {
const float step_size = (max - min) / 255.0f;
const float tolerance = step_size / 1000.0f;
// This is the smallest perfectly representable float in the range.
float first_float = ceil(min / step_size) * step_size;
float first_float = std::ceil(min / step_size) * step_size;
for (float f = first_float; f <= max; f += step_size) {
const int as_int = FloatToQuantized<quint8>(f, min, max);
const float back_to_float = QuantizedToFloat<quint8>(as_int, min, max);

View File

@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/core/kernels/range_sampler.h"
#include <cmath>
#include <unordered_set>
#include <vector>
@@ -69,7 +70,7 @@ static float ExpectedCountHelper(float p, int batch_size, int num_tries) {
return p * batch_size;
}
// numerically stable version of (1 - (1-p)^num_tries)
return -expm1(num_tries * log1p(-p));
return -expm1(num_tries * std::log1p(-p));
}
} // namespace
@@ -298,7 +299,7 @@ Status FixedUnigramSampler::LoadFromFile(Env* env, const string& vocab_file,
return errors::InvalidArgument("Wrong vocabulary format at line: ",
line);
}
w = pow(w, distortion);
w = std::pow(w, distortion);
total_weight_ += w;
weights_.push_back(w);
}
@@ -313,7 +314,7 @@ void FixedUnigramSampler::LoadFromUnigrams(const std::vector<float>& unigrams,
for (float w : unigrams) {
// Skip entries that do not belong to this shard.
if (word_id % num_shards_ == shard_) {
w = pow(w, distortion);
w = std::pow(w, distortion);
total_weight_ += w;
weights_.push_back(w);
}

View File

@@ -165,14 +165,14 @@ class ResizeAreaOp : public OpKernel {
const float in_x1 = (x + 1) * st.width_scale;
// The start and end width indices of all the cells that could
// contribute to the target cell.
int64 v = floor(in_x);
int64 v = std::floor(in_x);
x_interp.start = v;
// TODO(cwhipkey): simplify this logic.
x_interp.start_scale =
v < in_x ? (v + 1 > in_x1 ? st.width_scale : v + 1 - in_x)
: (v + 1 > in_x1 ? in_x1 - v : 1.0);
v = ceil(in_x1);
v = std::ceil(in_x1);
x_interp.end = v;
v = x_interp.end - 1;
x_interp.end_minus_one_scale =
@@ -226,8 +226,8 @@ class ResizeAreaOp : public OpKernel {
const float in_y1 = (y + 1) * st.height_scale;
// The start and end height indices of all the cells that could
// contribute to the target cell.
const int64 y_start = floor(in_y);
const int64 y_end = ceil(in_y1);
const int64 y_start = std::floor(in_y);
const int64 y_end = std::ceil(in_y1);
y_scales.clear();
y_ptrs.clear();
for (int64 i = y_start; i < y_end; ++i) {

View File

@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <cmath>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
@@ -103,16 +104,16 @@ class ResizeAreaOpTest : public OpsTestBase {
const float in_y1 = (y + 1) * height_scale;
// The start and end height indices of all the cells that could
// contribute to the target cell.
int64 y_start = floor(in_y);
int64 y_end = ceil(in_y1);
int64 y_start = std::floor(in_y);
int64 y_end = std::ceil(in_y1);
for (int64 x = 0; x < out_width; ++x) {
const float in_x = x * width_scale;
const float in_x1 = (x + 1) * width_scale;
// The start and end width indices of all the cells that could
// contribute to the target cell.
int64 x_start = floor(in_x);
int64 x_end = ceil(in_x1);
int64 x_start = std::floor(in_x);
int64 x_end = std::ceil(in_x1);
sum_data.setConstant(0.0);
for (int64 i = y_start; i < y_end; ++i) {

View File

@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
// See docs in ../ops/image_ops.cc.
#include <math.h>
#include <cmath>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
@@ -122,8 +123,8 @@ bool GenerateRandomCrop(int original_width, int original_height,
const float max_area =
max_relative_crop_area * original_width * original_height;
int height = static_cast<int>(lrintf(sqrt(min_area / aspect_ratio)));
int max_height = static_cast<int>(lrintf(sqrt(max_area / aspect_ratio)));
int height = static_cast<int>(lrintf(std::sqrt(min_area / aspect_ratio)));
int max_height = static_cast<int>(lrintf(std::sqrt(max_area / aspect_ratio)));
if (lrintf(max_height * aspect_ratio) > original_width) {
// We must find the smallest max_height satisfying