Update ops-related pbtxt files.

Change: 154378722
A. Unique TensorFlower 2017-04-26 18:49:48 -08:00 committed by TensorFlower Gardener
parent 4f525819b6
commit 5bb419792e
2 changed files with 243 additions and 3 deletions

File 1 of 2

@@ -7251,6 +7251,38 @@ op {
    }
  }
}
op {
  name: "FakeQuantWithMinMaxArgs"
  input_arg {
    name: "inputs"
    type: DT_FLOAT
  }
  output_arg {
    name: "outputs"
    type: DT_FLOAT
  }
  attr {
    name: "min"
    type: "float"
    default_value {
      f: -6
    }
  }
  attr {
    name: "max"
    type: "float"
    default_value {
      f: 6
    }
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
}
op {
  name: "FakeQuantWithMinMaxArgsGradient"
  input_arg {
@@ -7280,6 +7312,42 @@ op {
    }
  }
}
op {
  name: "FakeQuantWithMinMaxArgsGradient"
  input_arg {
    name: "gradients"
    type: DT_FLOAT
  }
  input_arg {
    name: "inputs"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprops"
    type: DT_FLOAT
  }
  attr {
    name: "min"
    type: "float"
    default_value {
      f: -6
    }
  }
  attr {
    name: "max"
    type: "float"
    default_value {
      f: 6
    }
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
}
op {
  name: "FakeQuantWithMinMaxVars"
  input_arg {
@@ -7299,6 +7367,32 @@ op {
    type: DT_FLOAT
  }
}
op {
  name: "FakeQuantWithMinMaxVars"
  input_arg {
    name: "inputs"
    type: DT_FLOAT
  }
  input_arg {
    name: "min"
    type: DT_FLOAT
  }
  input_arg {
    name: "max"
    type: DT_FLOAT
  }
  output_arg {
    name: "outputs"
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
}
op {
  name: "FakeQuantWithMinMaxVarsGradient"
  input_arg {
@@ -7330,6 +7424,44 @@ op {
    type: DT_FLOAT
  }
}
op {
  name: "FakeQuantWithMinMaxVarsGradient"
  input_arg {
    name: "gradients"
    type: DT_FLOAT
  }
  input_arg {
    name: "inputs"
    type: DT_FLOAT
  }
  input_arg {
    name: "min"
    type: DT_FLOAT
  }
  input_arg {
    name: "max"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprops_wrt_input"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprop_wrt_min"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprop_wrt_max"
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
}
op {
  name: "FakeQuantWithMinMaxVarsPerChannel"
  input_arg {
@@ -7349,6 +7481,63 @@ op {
    type: DT_FLOAT
  }
}
op {
  name: "FakeQuantWithMinMaxVarsPerChannel"
  input_arg {
    name: "inputs"
    type: DT_FLOAT
  }
  input_arg {
    name: "min"
    type: DT_FLOAT
  }
  input_arg {
    name: "max"
    type: DT_FLOAT
  }
  output_arg {
    name: "outputs"
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
}
op {
  name: "FakeQuantWithMinMaxVarsPerChannelGradient"
  input_arg {
    name: "gradients"
    type: DT_FLOAT
  }
  input_arg {
    name: "inputs"
    type: DT_FLOAT
  }
  input_arg {
    name: "min"
    type: DT_FLOAT
  }
  input_arg {
    name: "max"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprops_wrt_input"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprop_wrt_min"
    type: DT_FLOAT
  }
  output_arg {
    name: "backprop_wrt_max"
    type: DT_FLOAT
  }
}
op {
  name: "FakeQuantWithMinMaxVarsPerChannelGradient"
  input_arg {
@@ -7379,6 +7568,13 @@ op {
    name: "backprop_wrt_max"
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
}
op {
  name: "FakeQueue"

File 2 of 2

@@ -7420,8 +7420,15 @@ op {
      f: 6
    }
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
  summary: "Fake-quantize the \'inputs\' tensor, type float to \'outputs\' tensor of same type."
  description: "Attributes [min; max] define the clamping range for the \'inputs\' data. Op\ndivides this range into 255 steps (total of 256 values), then replaces each\n\'inputs\' value with the closest of the quantized step values.\n\nQuantization is called fake since the output is still in floating point."
  description: "Attributes [min; max] define the clamping range for the \'inputs\' data. Op\ndivides this range into 255 steps (total of 256 values), then replaces each\n\'inputs\' value with the closest of the quantized step values.\n\'num_bits\' is the bitwidth of the quantization; between 2 and 8, inclusive.\n\nQuantization is called fake since the output is still in floating point."
}
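
The defaults recorded above (min=-6, max=6, num_bits=8) surface in the generated Python wrapper tf.fake_quant_with_min_max_args. A minimal usage sketch, assuming a 2017-era TF 1.x build that includes the new num_bits attribute; the tensor values are illustrative only:

import tensorflow as tf

inputs = tf.constant([-8.0, -1.0, 0.3, 5.0, 10.0])

# Defaults mirror the pbtxt: min=-6, max=6, num_bits=8 (2**8 = 256 levels).
q8 = tf.fake_quant_with_min_max_args(inputs, min=-6.0, max=6.0, num_bits=8)

# With num_bits=4 the same [-6, 6] range is covered by only 2**4 = 16 levels,
# so the quantization step widens from 12/255 to 12/15.
q4 = tf.fake_quant_with_min_max_args(inputs, min=-6.0, max=6.0, num_bits=4)

with tf.Session() as sess:
    print(sess.run([q8, q4]))  # outputs clamped to [-6, 6] and snapped to step values
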
op {
  name: "FakeQuantWithMinMaxArgsGradient"
@@ -7454,6 +7461,13 @@ op {
      f: 6
    }
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
  summary: "Compute gradients for a FakeQuantWithMinMaxArgs operation."
}
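
This gradient op behaves as a straight-through estimator: upstream gradients pass through where 'inputs' fell inside [min, max] and are zeroed where the input was clamped. A small sketch of that behavior via tf.gradients (TF 1.x style; values illustrative):

import tensorflow as tf

x = tf.Variable([0.0, 3.0, 9.0])
y = tf.fake_quant_with_min_max_args(x, min=-6.0, max=6.0, num_bits=8)
dydx = tf.gradients(y, x)[0]  # routed through FakeQuantWithMinMaxArgsGradient
# dydx evaluates to [1, 1, 0]: the clamped element (9.0 > max) gets no gradient.
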
op {
@@ -7474,8 +7488,15 @@ op {
    name: "outputs"
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
  summary: "Fake-quantize the \'inputs\' tensor of type float via global float scalars `min`"
  description: "and `max` to \'outputs\' tensor of same shape as `inputs`.\n\n[min; max] is the clamping range for the \'inputs\' data. Op divides this range\ninto 255 steps (total of 256 values), then replaces each \'inputs\' value with the\nclosest of the quantized step values.\n\nThis operation has a gradient and thus allows for training `min` and `max` values."
  description: "and `max` to \'outputs\' tensor of same shape as `inputs`.\n\n[min; max] is the clamping range for the \'inputs\' data. Op divides this range\ninto 255 steps (total of 256 values), then replaces each \'inputs\' value with the\nclosest of the quantized step values.\n\'num_bits\' is the bitwidth of the quantization; between 2 and 8, inclusive.\n\nThis operation has a gradient and thus allows for training `min` and `max` values."
}
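
Because this op takes `min` and `max` as input tensors rather than attrs, they can be ordinary variables, and the gradient noted in the description makes them trainable. A sketch of that pattern, assuming TF 1.x symbols (the loss is a stand-in):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
min_var = tf.Variable(-6.0)
max_var = tf.Variable(6.0)
y = tf.fake_quant_with_min_max_vars(x, min_var, max_var, num_bits=8)

# backprop_wrt_min / backprop_wrt_max (see the gradient op below) let an
# optimizer adjust the clamping range alongside the model weights.
loss = tf.reduce_mean(tf.square(y - x))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
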
op {
  name: "FakeQuantWithMinMaxVarsGradient"
@@ -7512,6 +7533,14 @@ op {
    description: "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs > max))`."
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
    description: "The bitwidth of the quantization; between 2 and 8, inclusive."
  }
  summary: "Compute gradients for a FakeQuantWithMinMaxVars operation."
}
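
The backprop formula quoted in the output description above reduces to a few lines of NumPy; the min case mirrors the max case with the comparison flipped. A reference sketch of the scalar min/max behavior (not the actual kernel; names are illustrative):

import numpy as np

def fake_quant_vars_grad(gradients, inputs, min_v, max_v):
    inside = (inputs >= min_v) & (inputs <= max_v)
    d_inputs = gradients * inside                  # pass-through inside [min, max]
    d_min = np.sum(gradients * (inputs < min_v))   # backprop_wrt_min
    d_max = np.sum(gradients * (inputs > max_v))   # sum(gradients * (inputs > max))
    return d_inputs, d_min, d_max
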
op {
@@ -7532,8 +7561,15 @@ op {
    name: "outputs"
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
  }
  summary: "Fake-quantize the \'inputs\' tensor of type float and one of the shapes: `[d]`,"
  description: "`[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`\nto \'outputs\' tensor of same shape as `inputs`.\n\n[min; max] is the clamping range for the \'inputs\' data in the corresponding\ndepth channel. Op divides this range into 255 steps (total of 256 values), then\nreplaces each \'inputs\' value with the closest of the quantized step values.\n\nThis operation has a gradient and thus allows for training `min` and `max` values."
  description: "`[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`\nto \'outputs\' tensor of same shape as `inputs`.\n\n[min; max] is the clamping range for the \'inputs\' data in the corresponding\ndepth channel. Op divides this range into 255 steps (total of 256 values), then\nreplaces each \'inputs\' value with the closest of the quantized step values.\n\'num_bits\' is the bitwidth of the quantization; between 2 and 8, inclusive.\n\nThis operation has a gradient and thus allows for training `min` and `max` values."
}
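
In the per-channel variant each depth channel d gets its own clamping range, so `min` and `max` are rank-1 tensors of shape `[d]`. A sketch with a `[b, d]` input, again with illustrative values on the TF 1.x API:

import tensorflow as tf

x = tf.random_normal([8, 3])               # shape [b, d], d = 3 channels
mins = tf.Variable([-6.0, -1.0, -0.5])     # shape [d]: one min per channel
maxs = tf.Variable([6.0, 1.0, 0.5])        # shape [d]: one max per channel
y = tf.fake_quant_with_min_max_vars_per_channel(x, mins, maxs, num_bits=8)
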
op {
  name: "FakeQuantWithMinMaxVarsPerChannelGradient"
@@ -7570,6 +7606,14 @@ op {
    description: "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`."
    type: DT_FLOAT
  }
  attr {
    name: "num_bits"
    type: "int"
    default_value {
      i: 8
    }
    description: "The bitwidth of the quantization; between 2 and 8, inclusive."
  }
  summary: "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation."
}
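
Here `sum_per_d` means the reduction runs over every axis except the channel axis, so backprop_wrt_min and backprop_wrt_max come out with shape `[d]`. The same shapes fall out of tf.gradients applied to the per-channel sketch above:

# Continuing the per-channel sketch: gmin and gmax have shape [d] = [3],
# matching backprop_wrt_min / backprop_wrt_max of this gradient op.
gx, gmin, gmax = tf.gradients(y, [x, mins, maxs])
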
op {