Remove unneeded copies now that int64 and int64_t are the same type

Just removed a couple of obvious ones; a larger and more uniform update is coming later.

PiperOrigin-RevId: 354637425
Change-Id: If7ae27acf47c81f1a39eb9120ddbfd0bae828a15
This commit is contained in:
Jacques Pienaar 2021-01-29 16:51:37 -08:00 committed by TensorFlower Gardener
parent fcb2fc1ec3
commit 23d9a2b49d
4 changed files with 6 additions and 27 deletions

View File

@ -216,14 +216,8 @@ struct ConvertConst : public OpConversionPattern<TF::ConstOp> {
// If the list is empty, directly create the final result instead of
// creating the tf.Pack op. tf.Pack op requires at least one operand.
if (tensors.empty()) {
absl::InlinedVector<tensorflow::int64, 4> tf_shape;
tf_shape.reserve(result_shape.size());
for (int64_t dim : result_shape) {
tf_shape.push_back(dim);
}
tensorflow::Tensor tensor(list->element_dtype,
tensorflow::TensorShape(tf_shape));
tensorflow::TensorShape(result_shape));
auto attr_or = tensorflow::ConvertTensor(tensor, &rewriter);
if (!attr_or.ok()) return failure();
rewriter.replaceOpWithNewOp<TF::ConstOp>(op, attr_or.ValueOrDie());

View File

@ -56,7 +56,6 @@ limitations under the License.
#define DEBUG_TYPE "tf-shape-inference-utils"
using ::tensorflow::int64;
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
@ -83,12 +82,7 @@ NamedAttrList GetAllAttributesFromOperation(Operation* op) {
// Extracts a PartialTensorShape from the MLIR type.
Optional<tensorflow::PartialTensorShape> GetShapeFromMlirType(Type t) {
if (auto ranked_type = t.dyn_cast<RankedTensorType>()) {
// Convert the MLIR shape indices (int64_t) to TensorFlow indices
// (int64).
ArrayRef<int64_t> shape = ranked_type.getShape();
SmallVector<int64, 8> tf_shape(shape.begin(), shape.end());
return tensorflow::PartialTensorShape(
MutableArrayRefToSpan<int64>(tf_shape));
return tensorflow::PartialTensorShape(ranked_type.getShape());
}
return None;
}

View File

@ -41,11 +41,6 @@ inline absl::Span<const T> ArrayRefToSpan(llvm::ArrayRef<T> ref) {
return absl::Span<const T>(ref.data(), ref.size());
}
template <typename T>
inline absl::Span<T> MutableArrayRefToSpan(llvm::MutableArrayRef<T> ref) {
return absl::Span<T>(ref.data(), ref.size());
}
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_UTILS_ARRAY_CONTAINER_UTILS_H_

View File

@ -49,16 +49,12 @@ static mlir::DenseIntElementsAttr GetI64ElementsAttr(
absl::Span<const int64> values, mlir::Builder* builder) {
auto ty = mlir::RankedTensorType::get({static_cast<int64_t>(values.size())},
builder->getIntegerType(64));
llvm::SmallVector<int64_t, 4> mlir_values;
mlir_values.reserve(values.size());
for (const auto& value : values) {
mlir_values.push_back(value);
}
return mlir::DenseIntElementsAttr::get(ty, mlir_values);
return mlir::DenseIntElementsAttr::get(
ty, llvm::makeArrayRef(values.data(), values.size()));
}
static mlir::DenseIntElementsAttr ConvertPadding(
absl::Span<const std::pair<tensorflow::int64, tensorflow::int64>> padding,
absl::Span<const std::pair<int64_t, int64_t>> padding,
mlir::Builder* builder) {
llvm::SmallVector<int64_t, 8> elements;
elements.reserve(padding.size() * 2);
@ -80,7 +76,7 @@ StatusOr<XlaOp> MlirHloBuilder::MakeXlaOp(mlir::Value val) {
return InvalidArgument("unsupported type: %s", ToString(ty).c_str());
}
int64 handle = reinterpret_cast<int64>(val.getAsOpaquePointer());
int64_t handle = reinterpret_cast<int64_t>(val.getAsOpaquePointer());
handle_to_shape_[handle] = std::move(shape);
return XlaOp(handle, this);
}