throwTypeError -> ThrowTypeError

pyo -> Pyo
pyo_or_throw -> PyoOrThrow

PiperOrigin-RevId: 306876916
Change-Id: Idf846a2b13f93ab504ed277e229f473cf5a8605a
Akshay Modi 2020-04-16 10:43:03 -07:00 committed by TensorFlower Gardener
parent 5c306c4d40
commit ffb230a4b7
12 changed files with 105 additions and 106 deletions


@@ -55,82 +55,82 @@ PYBIND11_MODULE(_pywrap_tensorflow_interpreter_wrapper, m) {
py::class_<InterpreterWrapper>(m, "InterpreterWrapper")
.def("AllocateTensors",
[](InterpreterWrapper& self) {
return tensorflow::pyo_or_throw(self.AllocateTensors());
return tensorflow::PyoOrThrow(self.AllocateTensors());
})
.def("Invoke",
[](InterpreterWrapper& self) {
return tensorflow::pyo_or_throw(self.Invoke());
return tensorflow::PyoOrThrow(self.Invoke());
})
.def("InputIndices",
[](const InterpreterWrapper& self) {
return tensorflow::pyo_or_throw(self.InputIndices());
return tensorflow::PyoOrThrow(self.InputIndices());
})
.def("OutputIndices",
[](InterpreterWrapper& self) {
return tensorflow::pyo_or_throw(self.OutputIndices());
return tensorflow::PyoOrThrow(self.OutputIndices());
})
.def("ResizeInputTensor",
[](InterpreterWrapper& self, int i, py::handle& value) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
self.ResizeInputTensor(i, value.ptr()));
})
.def("NumTensors", &InterpreterWrapper::NumTensors)
.def("TensorName", &InterpreterWrapper::TensorName)
.def("TensorType",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.TensorType(i));
return tensorflow::PyoOrThrow(self.TensorType(i));
})
.def("TensorSize",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.TensorSize(i));
return tensorflow::PyoOrThrow(self.TensorSize(i));
})
.def("TensorSizeSignature",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.TensorSizeSignature(i));
return tensorflow::PyoOrThrow(self.TensorSizeSignature(i));
})
.def("TensorSparsityParameters",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.TensorSparsityParameters(i));
return tensorflow::PyoOrThrow(self.TensorSparsityParameters(i));
})
.def(
"TensorQuantization",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.TensorQuantization(i));
return tensorflow::PyoOrThrow(self.TensorQuantization(i));
},
R"pbdoc(
Deprecated in favor of TensorQuantizationParameters.
)pbdoc")
.def("TensorQuantizationParameters",
[](InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(
self.TensorQuantizationParameters(i));
})
.def(
"TensorQuantizationParameters",
[](InterpreterWrapper& self, int i) {
return tensorflow::PyoOrThrow(self.TensorQuantizationParameters(i));
})
.def("SetTensor",
[](InterpreterWrapper& self, int i, py::handle& value) {
return tensorflow::pyo_or_throw(self.SetTensor(i, value.ptr()));
return tensorflow::PyoOrThrow(self.SetTensor(i, value.ptr()));
})
.def("GetTensor",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.GetTensor(i));
return tensorflow::PyoOrThrow(self.GetTensor(i));
})
.def("ResetVariableTensors",
[](InterpreterWrapper& self) {
return tensorflow::pyo_or_throw(self.ResetVariableTensors());
return tensorflow::PyoOrThrow(self.ResetVariableTensors());
})
.def("NumNodes", &InterpreterWrapper::NumNodes)
.def("NodeName", &InterpreterWrapper::NodeName)
.def("NodeInputs",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.NodeInputs(i));
return tensorflow::PyoOrThrow(self.NodeInputs(i));
})
.def("NodeOutputs",
[](const InterpreterWrapper& self, int i) {
return tensorflow::pyo_or_throw(self.NodeOutputs(i));
return tensorflow::PyoOrThrow(self.NodeOutputs(i));
})
.def(
"tensor",
[](InterpreterWrapper& self, py::handle& base_object, int i) {
return tensorflow::pyo_or_throw(self.tensor(base_object.ptr(), i));
return tensorflow::PyoOrThrow(self.tensor(base_object.ptr(), i));
},
R"pbdoc(
Returns a reference to tensor index i as a numpy array. The
@@ -140,7 +140,7 @@ PYBIND11_MODULE(_pywrap_tensorflow_interpreter_wrapper, m) {
"ModifyGraphWithDelegate",
// Address of the delegate is passed as an argument.
[](InterpreterWrapper& self, uintptr_t delegate_ptr) {
return tensorflow::pyo_or_throw(self.ModifyGraphWithDelegate(
return tensorflow::PyoOrThrow(self.ModifyGraphWithDelegate(
reinterpret_cast<TfLiteDelegate*>(delegate_ptr)));
},
R"pbdoc(


@@ -31,37 +31,36 @@ PYBIND11_MODULE(_pywrap_tensorflow_lite_calibration_wrapper, m) {
}))
.def("Prepare",
[](CalibrationWrapper& self, py::handle& input_shapes) {
return tensorflow::pyo_or_throw(self.Prepare(input_shapes.ptr()));
return tensorflow::PyoOrThrow(self.Prepare(input_shapes.ptr()));
})
.def("Prepare",
[](CalibrationWrapper& self) {
return tensorflow::pyo_or_throw(self.Prepare());
return tensorflow::PyoOrThrow(self.Prepare());
})
.def("FeedTensor",
[](CalibrationWrapper& self, py::handle& input_value) {
return tensorflow::PyoOrThrow(self.FeedTensor(input_value.ptr()));
})
.def(
"FeedTensor",
[](CalibrationWrapper& self, py::handle& input_value) {
return tensorflow::pyo_or_throw(self.FeedTensor(input_value.ptr()));
})
.def("QuantizeModel",
[](CalibrationWrapper& self, int input_py_type, int output_py_type,
bool allow_float, bool enable_mlir_quantizer) {
return tensorflow::pyo_or_throw(self.QuantizeModel(
return tensorflow::PyoOrThrow(self.QuantizeModel(
input_py_type, output_py_type, allow_float));
})
.def("QuantizeModel",
[](CalibrationWrapper& self, int input_py_type, int output_py_type,
bool allow_float) {
return tensorflow::pyo_or_throw(self.QuantizeModel(
return tensorflow::PyoOrThrow(self.QuantizeModel(
input_py_type, output_py_type, allow_float));
})
.def("QuantizeModel",
[](CalibrationWrapper& self, int input_py_type, int output_py_type,
bool allow_float, const char* operator_output_name) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
self.QuantizeModel(input_py_type, output_py_type, allow_float,
operator_output_name));
})
.def("Calibrate", [](CalibrationWrapper& self) {
return tensorflow::pyo_or_throw(self.Calibrate());
return tensorflow::PyoOrThrow(self.Calibrate());
});
}


@@ -30,6 +30,6 @@ PYBIND11_MODULE(_pywrap_tensorflow_lite_sparsification_wrapper, m) {
return ::SparsificationWrapper::CreateWrapperCPPFromBuffer(data.ptr());
}))
.def("SparsifyModel", [](SparsificationWrapper& self) {
return tensorflow::pyo_or_throw(self.SparsifyModel());
return tensorflow::PyoOrThrow(self.SparsifyModel());
});
}


@@ -28,7 +28,7 @@ PYBIND11_MODULE(_pywrap_string_util, m) {
m.def(
"SerializeAsHexString",
[](py::handle& string_tensor) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tflite::testing::python::SerializeAsHexString(string_tensor.ptr()));
},
R"pbdoc(


@@ -377,7 +377,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
auto result = tensorflow::TF_TryEvaluateConstant_wrapper(
graph, output, status.get());
tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get());
return tensorflow::pyo_or_throw(result);
return tensorflow::PyoOrThrow(result);
});
m.def("ExtendSession", [](TF_Session* session) {
@@ -459,7 +459,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
PyList_SET_ITEM(result, i, py_outputs.at(i));
}
return tensorflow::pyo_or_throw(result);
return tensorflow::PyoOrThrow(result);
});
// Do not release GIL.
@@ -509,7 +509,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
PyList_SET_ITEM(result, i, py_outputs.at(i));
}
return tensorflow::pyo_or_throw(result);
return tensorflow::PyoOrThrow(result);
});
// Do not release GIL.
@@ -540,7 +540,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
// Return out_values
py::list py_list;
for (size_t i = 0; i < out_values.size(); ++i) {
py::object obj = tensorflow::pyo(out_values.at(i));
py::object obj = tensorflow::Pyo(out_values.at(i));
py_list.append(obj);
}
return py_list;
@@ -610,7 +610,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
// bool.
// Acquire GIL for returning the output.
pybind11::gil_scoped_acquire acquire;
return tensorflow::pyo(PyLong_FromLongLong(value));
return tensorflow::Pyo(PyLong_FromLongLong(value));
});
m.def("TF_SetAttrValueProto", [](TF_OperationDescription* desc,
@@ -667,7 +667,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
m.def("TF_NewBuffer", TF_NewBuffer, py::return_value_policy::reference);
m.def("TF_GetBuffer", [](TF_Buffer* buf) {
TF_Buffer buffer = TF_GetBuffer(buf);
return tensorflow::pyo_or_throw(PyBytes_FromStringAndSize(
return tensorflow::PyoOrThrow(PyBytes_FromStringAndSize(
reinterpret_cast<const char*>(buffer.data), buffer.length));
});
m.def("TF_DeleteBuffer", &TF_DeleteBuffer);
@@ -713,7 +713,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
m.def("TF_GetOpList", [](TF_Library* lib_handle) {
TF_Buffer output_buffer = TF_GetOpList(lib_handle);
return tensorflow::pyo_or_throw(PyBytes_FromStringAndSize(
return tensorflow::PyoOrThrow(PyBytes_FromStringAndSize(
reinterpret_cast<const char*>(output_buffer.data),
output_buffer.length));
});
@@ -790,7 +790,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
// Returns a (TF_Operation*, int pos) tuple.
py::tuple result_tuple = py::make_tuple(
py::cast(output), tensorflow::pyo(PyLong_FromSize_t(pos)));
py::cast(output), tensorflow::Pyo(PyLong_FromSize_t(pos)));
return result_tuple;
},
py::return_value_policy::reference);
@@ -1094,7 +1094,7 @@ PYBIND11_MODULE(_pywrap_tf_session, m) {
py::gil_scoped_release release;
TF_OperationGetAttrBool(oper, attr_name, &value, status.get());
tensorflow::MaybeRaiseRegisteredFromTFStatusWithGIL(status.get());
return tensorflow::pyo(PyBool_FromLong(value));
return tensorflow::Pyo(PyBool_FromLong(value));
});
m.def("TF_NewStatus", TF_NewStatus, py::return_value_policy::reference);


@@ -64,7 +64,7 @@ PYBIND11_MODULE(custom_device_testutil, m) {
PyCapsule_New(device, "TFE_CustomDevice", &CallDelete_Device));
tensorflow::Safe_PyObjectPtr device_info_capsule(PyCapsule_New(
device_info, "TFE_CustomDevice_DeviceInfo", &CallDelete_DeviceInfo));
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
PyTuple_Pack(4, device_capsule.get(), device_info_capsule.get(),
arrived_capsule.get(), executed_capsule.get()));
});


@@ -41,21 +41,21 @@ namespace tensorflow {
// Convert PyObject* to py::object with no error handling.
inline py::object pyo(PyObject* ptr) {
inline py::object Pyo(PyObject* ptr) {
return py::reinterpret_steal<py::object>(ptr);
}
// Raise an exception if the PyErrOccurred flag is set or else return the Python
// object.
inline py::object pyo_or_throw(PyObject* ptr) {
inline py::object PyoOrThrow(PyObject* ptr) {
if (PyErr_Occurred() || ptr == nullptr) {
throw py::error_already_set();
}
return pyo(ptr);
return Pyo(ptr);
}
void throwTypeError(const char* error_message) {
void ThrowTypeError(const char* error_message) {
PyErr_SetString(PyExc_TypeError, error_message);
throw pybind11::error_already_set();
}
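
For reference, here is a minimal sketch (not part of this commit) of how the renamed helpers are typically used from a pybind11 binding. The module name, bound function names, and error message below are hypothetical, and the sketch assumes the header defining Pyo, PyoOrThrow, and ThrowTypeError is included:

#include "pybind11/pybind11.h"

namespace py = pybind11;

// Hypothetical module; illustrates only the error-propagation pattern.
PYBIND11_MODULE(_example_helpers, m) {
  m.def("ToStr", [](const py::handle& obj) {
    // PyObject_Str returns a new reference, or nullptr with the Python
    // error indicator set. PyoOrThrow steals the reference on success and
    // throws py::error_already_set on failure, so the original Python
    // exception propagates through pybind11 unchanged.
    return tensorflow::PyoOrThrow(PyObject_Str(obj.ptr()));
  });
  m.def("RequireList", [](const py::handle& obj) {
    if (!PyList_Check(obj.ptr())) {
      // ThrowTypeError sets a TypeError and throws error_already_set.
      tensorflow::ThrowTypeError("expected a list");
    }
  });
}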


@@ -26,7 +26,7 @@ PYBIND11_MODULE(_pywrap_toco_api, m) {
py::object toco_flags_proto_txt_raw, py::object input_contents_txt_raw,
bool extended_return, py::object debug_info_txt_raw,
bool enable_mlir_converter) {
return tensorflow::pyo_or_throw(toco::TocoConvert(
return tensorflow::PyoOrThrow(toco::TocoConvert(
model_flags_proto_txt_raw.ptr(), toco_flags_proto_txt_raw.ptr(),
input_contents_txt_raw.ptr(), extended_return,
debug_info_txt_raw.ptr(), enable_mlir_converter));
@@ -49,7 +49,7 @@ PYBIND11_MODULE(_pywrap_toco_api, m) {
m.def(
"TocoGetPotentiallySupportedOps",
[]() {
return tensorflow::pyo_or_throw(toco::TocoGetPotentiallySupportedOps());
return tensorflow::PyoOrThrow(toco::TocoGetPotentiallySupportedOps());
},
R"pbdoc(
Returns a list of names of all ops potentially supported by tflite.
@@ -57,7 +57,7 @@ PYBIND11_MODULE(_pywrap_toco_api, m) {
m.def(
"ExperimentalMlirQuantizeModel",
[](py::object input_contents_txt_raw, bool fully_quantize) {
return tensorflow::pyo_or_throw(toco::MlirQuantizeModel(
return tensorflow::PyoOrThrow(toco::MlirQuantizeModel(
input_contents_txt_raw.ptr(), fully_quantize));
},
py::arg("input_contents_txt_raw"), py::arg("fully_quantize") = true,


@@ -110,14 +110,14 @@ TFE_InputTensorHandles InputTFE_InputTensorHandles(
TFE_InputTensorHandles input_tensor_handles;
if (input_tensors.ptr() != Py_None) {
if (!PyList_Check(input_tensors.ptr())) {
tensorflow::throwTypeError("must provide a list of Tensors as inputs");
tensorflow::ThrowTypeError("must provide a list of Tensors as inputs");
}
Py_ssize_t len = PyList_Size(input_tensors.ptr());
input_tensor_handles.resize(len);
for (Py_ssize_t i = 0; i < len; ++i) {
PyObject* elem = PyList_GetItem(input_tensors.ptr(), i);
if (!elem) {
tensorflow::throwTypeError("Input Tensor does not exist.");
tensorflow::ThrowTypeError("Input Tensor does not exist.");
}
if (EagerTensor_CheckExact(elem)) {
(input_tensor_handles)[i] = EagerTensor_Handle(elem);
@@ -139,7 +139,7 @@ TFE_InputTensorHandles InputTFE_InputTensorHandles(
} else {
// This is a subclass of EagerTensor that we don't support.
PyErr_Clear();
tensorflow::throwTypeError(
tensorflow::ThrowTypeError(
tensorflow::strings::StrCat(
"Saw an object that is an instance of a strict subclass of "
"EagerTensor, which is not supported. Item ",
@@ -151,7 +151,7 @@ TFE_InputTensorHandles InputTFE_InputTensorHandles(
// tensor.
tensorflow::Safe_PyObjectPtr name_attr(
PyObject_GetAttrString(elem, "name"));
tensorflow::throwTypeError(
tensorflow::ThrowTypeError(
tensorflow::strings::StrCat(
"An op outside of the function building code is being passed\n"
"a \"Graph\" tensor. It is possible to have Graph tensors\n"
@@ -166,7 +166,7 @@ TFE_InputTensorHandles InputTFE_InputTensorHandles(
name_attr ? TFE_GetPythonString(name_attr.get()) : "<unknown>")
.c_str());
} else {
tensorflow::throwTypeError(
tensorflow::ThrowTypeError(
tensorflow::strings::StrCat(
"provided list of inputs contains objects other "
"than 'EagerTensor'. Item ",
@@ -234,7 +234,7 @@ py::object TFE_Py_ExecuteCancelable_wrapper(
PyList_SetItem(output_list, i, output);
}
tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get());
return tensorflow::pyo_or_throw(output_list);
return tensorflow::PyoOrThrow(output_list);
}
static py::object TF_ListPhysicalDevices() {
@@ -253,7 +253,7 @@ static py::object TF_ListPhysicalDevices() {
PyList_SetItem(result, i, dev_obj);
++i;
}
return tensorflow::pyo_or_throw(result);
return tensorflow::PyoOrThrow(result);
}
static py::object TFE_ClearScalarCache() {
@@ -323,10 +323,10 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
py::class_<TF_Function> TF_Function_class(m, "TF_Function");
m.def("TFE_Py_RegisterExceptionClass", [](const py::handle& e) {
return tensorflow::pyo_or_throw(TFE_Py_RegisterExceptionClass(e.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_RegisterExceptionClass(e.ptr()));
});
m.def("TFE_Py_RegisterFallbackExceptionClass", [](const py::handle& e) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_RegisterFallbackExceptionClass(e.ptr()));
});
@@ -347,7 +347,7 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
tensorflow::make_safe(TF_NewStatus());
TFE_Context* context = TFE_NewContext(opts, status.get());
tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get());
return tensorflow::pyo_or_throw(tensorflow::OutputTFE_Context(context));
return tensorflow::PyoOrThrow(tensorflow::OutputTFE_Context(context));
},
py::return_value_policy::reference);
m.def("TFE_DeleteContext", [](py::handle& o) {
@@ -540,19 +540,19 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
if (*is_list == 1) {
PyObject* list = PyList_New(1);
PyList_SetItem(list, 0, output_pyo);
return tensorflow::pyo_or_throw(list);
return tensorflow::PyoOrThrow(list);
}
return tensorflow::pyo_or_throw(output_pyo);
return tensorflow::PyoOrThrow(output_pyo);
});
m.def("TFE_Py_InitEagerTensor", [](const py::handle& o) {
return tensorflow::pyo_or_throw(TFE_Py_InitEagerTensor(o.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_InitEagerTensor(o.ptr()));
});
m.def("TFE_Py_SetEagerTensorProfiler", &TFE_Py_SetEagerTensorProfiler);
m.def("TFE_Py_RegisterJVPFunction", [](const py::handle& o) {
return tensorflow::pyo_or_throw(TFE_Py_RegisterJVPFunction(o.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_RegisterJVPFunction(o.ptr()));
});
m.def("TFE_Py_RegisterGradientFunction", [](const py::handle& o) {
return tensorflow::pyo_or_throw(TFE_Py_RegisterGradientFunction(o.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_RegisterGradientFunction(o.ptr()));
});
m.def("TFE_Py_Execute",
[](const py::handle& context, const char* device_name,
@@ -574,22 +574,22 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
});
m.def("TFE_Py_FastPathExecute", [](const py::args args) {
// TFE_Py_FastPathExecute requires error checking prior to returning.
return tensorflow::pyo_or_throw(TFE_Py_FastPathExecute_C(args.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_FastPathExecute_C(args.ptr()));
});
m.def("TFE_Py_RecordGradient",
[](const py::handle& op_name, const py::handle& inputs,
const py::handle& attrs, const py::handle& results,
const py::handle& forward_pass_name_scope) {
return tensorflow::pyo_or_throw(TFE_Py_RecordGradient(
return tensorflow::PyoOrThrow(TFE_Py_RecordGradient(
op_name.ptr(), inputs.ptr(), attrs.ptr(), results.ptr(),
forward_pass_name_scope.ptr()));
});
m.def("TFE_Py_UID", []() { return tensorflow::pyo_or_throw(TFE_Py_UID()); });
m.def("TFE_Py_UID", []() { return tensorflow::PyoOrThrow(TFE_Py_UID()); });
// TFE_Py_Tape Logic
m.def("TFE_Py_TapeSetNew", [](const py::handle& persistent,
const py::handle& watch_accessed_variables) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_TapeSetNew(persistent.ptr(), watch_accessed_variables.ptr()));
});
m.def("TFE_Py_TapeSetAdd",
@@ -599,15 +599,15 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
m.def("TFE_Py_TapeSetStopOnThread", &TFE_Py_TapeSetStopOnThread);
m.def("TFE_Py_TapeSetRestartOnThread", &TFE_Py_TapeSetRestartOnThread);
m.def("TFE_Py_TapeSetIsStopped",
[]() { return tensorflow::pyo_or_throw(TFE_Py_TapeSetIsStopped()); });
[]() { return tensorflow::PyoOrThrow(TFE_Py_TapeSetIsStopped()); });
m.def("TFE_Py_TapeSetIsEmpty",
[]() { return tensorflow::pyo_or_throw(TFE_Py_TapeSetIsEmpty()); });
[]() { return tensorflow::PyoOrThrow(TFE_Py_TapeSetIsEmpty()); });
m.def("TFE_Py_TapeSetShouldRecordBackprop", [](const py::handle& tensors) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_TapeSetShouldRecordBackprop(tensors.ptr()));
});
m.def("TFE_Py_TapeSetPossibleGradientTypes", [](const py::handle& tensors) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_TapeSetPossibleGradientTypes(tensors.ptr()));
});
m.def("TFE_Py_TapeSetDeleteTrace", &TFE_Py_TapeSetDeleteTrace);
@@ -615,7 +615,7 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
[](const py::handle& op_type, const py::handle& output_tensors,
const py::handle& input_tensors, const py::handle& backward_function,
const py::handle& forward_function) {
return tensorflow::pyo_or_throw(TFE_Py_TapeSetRecordOperation(
return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperation(
op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(),
backward_function.ptr(), forward_function.ptr()));
});
@@ -623,19 +623,19 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
"TFE_Py_TapeSetRecordOperationBackprop",
[](const py::handle& op_type, const py::handle& output_tensors,
const py::handle& input_tensors, const py::handle& backward_function) {
return tensorflow::pyo_or_throw(TFE_Py_TapeSetRecordOperationBackprop(
return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationBackprop(
op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(),
backward_function.ptr()));
});
m.def("TFE_Py_TapeSetRecordOperationForwardprop",
[](const py::handle& op_type, const py::handle& output_tensors,
const py::handle& input_tensors, const py::handle& backward_function,
const py::handle& forwardprop_output_indices) {
return tensorflow::pyo_or_throw(
TFE_Py_TapeSetRecordOperationForwardprop(
op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(),
backward_function.ptr(), forwardprop_output_indices.ptr()));
});
m.def(
"TFE_Py_TapeSetRecordOperationForwardprop",
[](const py::handle& op_type, const py::handle& output_tensors,
const py::handle& input_tensors, const py::handle& backward_function,
const py::handle& forwardprop_output_indices) {
return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationForwardprop(
op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(),
backward_function.ptr(), forwardprop_output_indices.ptr()));
});
m.def("TFE_Py_TapeGradient",
[](const py::handle& tape, const py::handle& target,
const py::handle& sources, const py::handle& output_gradients,
@@ -647,7 +647,7 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
tape.ptr(), target.ptr(), sources.ptr(), output_gradients.ptr(),
sources_raw.ptr(), unconnected_gradients.ptr(), status.get());
tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get());
return tensorflow::pyo_or_throw(output);
return tensorflow::PyoOrThrow(output);
});
m.def("TFE_Py_TapeVariableAccessed", [](const py::handle& variable) {
@@ -662,15 +662,15 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
TFE_Py_TapeWatchVariable(tape.ptr(), variable.ptr());
});
m.def("TFE_Py_TapeWatchedVariables", [](const py::handle& tape) {
return tensorflow::pyo_or_throw(TFE_Py_TapeWatchedVariables(tape.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_TapeWatchedVariables(tape.ptr()));
});
m.def("TFE_Py_ForwardAccumulatorNew", []() {
return tensorflow::pyo_or_throw(TFE_Py_ForwardAccumulatorNew());
return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorNew());
});
m.def("TFE_Py_ForwardAccumulatorSetAdd", [](const py::handle& accumulator) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_ForwardAccumulatorSetAdd(accumulator.ptr()));
});
m.def("TFE_Py_ForwardAccumulatorSetRemove",
@@ -686,17 +686,17 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
});
m.def("TFE_Py_ForwardAccumulatorJVP",
[](const py::handle& accumulator, const py::handle& tensor) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_ForwardAccumulatorJVP(accumulator.ptr(), tensor.ptr()));
});
m.def("TFE_Py_ForwardAccumulatorPushState", []() {
return tensorflow::pyo_or_throw(TFE_Py_ForwardAccumulatorPushState());
return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPushState());
});
m.def("TFE_Py_ForwardAccumulatorPopState", []() {
return tensorflow::pyo_or_throw(TFE_Py_ForwardAccumulatorPopState());
return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPopState());
});
m.def("TFE_Py_PackJVPs", [](const py::handle& tensors) {
return tensorflow::pyo_or_throw(TFE_Py_PackJVPs(tensors.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_PackJVPs(tensors.ptr()));
});
// TFE_ContextOptions Logic
@@ -726,30 +726,30 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
// TFE_Py_TensorShape Logic
m.def("TFE_Py_TensorShapeSlice",
[](const py::handle& tensors, int slice_dim) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_TensorShapeSlice(tensors.ptr(), slice_dim));
});
m.def("TFE_Py_TensorShapeOnDevice", [](const py::handle& tensors,
int slice_dim) {
return tensorflow::pyo_or_throw(TFE_Py_TensorShapeOnDevice(tensors.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_TensorShapeOnDevice(tensors.ptr()));
});
m.def("TFE_Py_EnableInteractivePythonLogging",
&TFE_Py_EnableInteractivePythonLogging);
// Additional Context Logic
m.def("TFE_Py_SetEagerContext", [](const py::handle& o) {
return tensorflow::pyo_or_throw(TFE_Py_SetEagerContext(o.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_SetEagerContext(o.ptr()));
});
m.def("TFE_ContextStartStep", [](py::handle& o) {
TFE_ContextStartStep(tensorflow::InputTFE_Context(o.ptr()));
});
m.def("TFE_ContextEndStep", &TFE_ContextEndStep);
m.def("TFE_Py_RegisterVSpace", [](const py::handle& o) {
return tensorflow::pyo_or_throw(TFE_Py_RegisterVSpace(o.ptr()));
return tensorflow::PyoOrThrow(TFE_Py_RegisterVSpace(o.ptr()));
});
m.def("TFE_Py_EncodeArg",
[](const py::handle& o, bool include_tensor_ranks_only) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
TFE_Py_EncodeArg(o.ptr(), include_tensor_ranks_only));
});
m.def("TFE_EnableCollectiveOps", [](const py::handle& ctx, py::str proto) {


@@ -41,7 +41,7 @@ static PyObject* DoQuantizeTrainingOnGraphDefHelper(const string& input_graph,
PYBIND11_MODULE(_pywrap_quantize_training, m) {
m.def("DoQuantizeTrainingOnGraphDefHelper",
[](const py::object input_graph, int num_bits) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tensorflow::DoQuantizeTrainingOnGraphDefHelper(
input_graph.cast<std::string>(), num_bits));
});


@@ -112,7 +112,7 @@ static py::object CheckpointReader_GetTensor(
tensorflow::MaybeRaiseFromStatus(
tensorflow::TensorToNdarray(*tensor, &py_obj));
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
PyArray_Return(reinterpret_cast<PyArrayObject*>(py_obj)));
}


@@ -27,7 +27,7 @@ PYBIND11_MODULE(_pywrap_utils, m) {
)pbdoc";
m.def("RegisterType",
[](const py::handle& type_name, const py::handle& type) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tensorflow::swig::RegisterType(type_name.ptr(), type.ptr()));
});
m.def(
@@ -116,7 +116,7 @@ PYBIND11_MODULE(_pywrap_utils, m) {
m.def(
"IsNamedtuple",
[](const py::handle& o, bool strict) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tensorflow::swig::IsNamedtuple(o.ptr(), strict));
},
R"pbdoc(
@@ -197,7 +197,7 @@ PYBIND11_MODULE(_pywrap_utils, m) {
m.def(
"SameNamedtuples",
[](const py::handle& o1, const py::handle& o2) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tensorflow::swig::SameNamedtuples(o1.ptr(), o2.ptr()));
},
R"pbdoc(
@@ -220,7 +220,7 @@ PYBIND11_MODULE(_pywrap_utils, m) {
m.def(
"Flatten",
[](const py::handle& o, bool expand_composites) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tensorflow::swig::Flatten(o.ptr(), expand_composites));
},
R"pbdoc(
@@ -280,7 +280,7 @@ PYBIND11_MODULE(_pywrap_utils, m) {
m.def(
"FlattenForData",
[](const py::handle& o) {
return tensorflow::pyo_or_throw(
return tensorflow::PyoOrThrow(
tensorflow::swig::FlattenForData(o.ptr()));
},
R"pbdoc(