Delete outdated examples.
PiperOrigin-RevId: 265760118
parent 85ef709a6f
commit c6859f92a0
tensorflow/examples/learn/BUILD
@@ -1,39 +0,0 @@
# Description:
#   Examples of tf.learn usage

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
)

exports_files(["LICENSE"])

py_binary(
    name = "iris_custom_decay_dnn",
    srcs = ["iris_custom_decay_dnn.py"],
    python_version = "PY2",
    srcs_version = "PY2AND3",
    deps = ["//tensorflow:tensorflow_py"],
)

py_binary(
    name = "iris_custom_model",
    srcs = ["iris_custom_model.py"],
    python_version = "PY2",
    srcs_version = "PY2AND3",
    deps = ["//tensorflow:tensorflow_py"],
)

sh_test(
    name = "examples_test",
    size = "large",
    srcs = ["examples_test.sh"],
    data = [
        ":iris_custom_decay_dnn",
        ":iris_custom_model",
    ],
    tags = [
        "manual",
        "notap",
    ],
)
tensorflow/examples/learn/README.md
@@ -1,16 +0,0 @@
# Estimator Examples

TensorFlow Estimators are a high-level API for TensorFlow that allows you to
create, train, and use deep learning models easily.

See the [Quickstart tutorial](https://www.tensorflow.org/get_started/estimator)
for an introduction to the API.

## Basics

* [Building a Custom Model](https://www.tensorflow.org/code/tensorflow/examples/learn/iris_custom_model.py)

## Techniques

* [Deep Neural Network with Customized Decay Function](https://www.tensorflow.org/code/tensorflow/examples/learn/iris_custom_decay_dnn.py)
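For context on the API this README describes, the Quickstart it links to used the canned tf.estimator.DNNClassifier rather than a custom model_fn. A minimal sketch of that pattern, assuming the TF 1.x estimator API (this snippet is illustrative and was not part of the deleted files):

# Canned-estimator sketch: a 10-20-10 DNN over the four Iris features.
import numpy as np
import tensorflow as tf
from sklearn import datasets

iris = datasets.load_iris()
feature_columns = [tf.feature_column.numeric_column('x', shape=[4])]
classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': iris.data.astype(np.float32)}, y=iris.target,
    num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)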
tensorflow/examples/learn/examples_test.sh
@@ -1,48 +0,0 @@
#!/bin/bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script exercises the examples of using TF.Learn.

DIR="$TEST_SRCDIR"

# Check if TEST_WORKSPACE is defined, and set as empty string if not.
if [ -z "${TEST_WORKSPACE-}" ]
then
  TEST_WORKSPACE=""
fi

if [ ! -z "$TEST_WORKSPACE" ]
then
  DIR="$DIR"/"$TEST_WORKSPACE"
fi

TFLEARN_EXAMPLE_BASE_DIR=$DIR/tensorflow/examples/learn


function test() {
  echo "Test $1:"
  $TFLEARN_EXAMPLE_BASE_DIR/$1 $2
  if [ $? -eq 0 ]
  then
    echo "Test passed."
    return 0
  else
    echo "Test failed."
    exit 1
  fi
}

test iris_custom_decay_dnn
test iris_custom_model
tensorflow/examples/learn/iris_custom_decay_dnn.py
@@ -1,100 +0,0 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf


X_FEATURE = 'x'  # Name of the input feature.


def my_model(features, labels, mode):
  """DNN with three hidden layers."""
  # Create three fully connected layers respectively of size 10, 20, and 10.
  net = features[X_FEATURE]
  for units in [10, 20, 10]:
    net = tf.layers.dense(net, units=units, activation=tf.nn.relu)

  # Compute logits (1 per class).
  logits = tf.layers.dense(net, 3, activation=None)

  # Compute predictions.
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class': predicted_classes,
        'prob': tf.nn.softmax(logits)
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)

  # Compute loss.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  # Create training op with exponentially decaying learning rate.
  if mode == tf.estimator.ModeKeys.TRAIN:
    global_step = tf.train.get_global_step()
    learning_rate = tf.train.exponential_decay(
        learning_rate=0.1, global_step=global_step,
        decay_steps=100, decay_rate=0.001)
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss, global_step=global_step)
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # Compute evaluation metrics.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_argv):
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  classifier = tf.estimator.Estimator(model_fn=my_model)

  # Train.
  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=1000)

  # Predict.
  test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
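A note on the schedule in the file above: tf.train.exponential_decay computes learning_rate * decay_rate ** (global_step / decay_steps), so with learning_rate=0.1, decay_steps=100 and decay_rate=0.001 the rate shrinks by a factor of 1000 every 100 steps. A plain-Python illustration of that formula (assuming the non-staircase default used by the example):

# Reproduces the exponential_decay formula outside TensorFlow, for illustration only.
def decayed_lr(step, base_lr=0.1, decay_steps=100, decay_rate=0.001):
  return base_lr * decay_rate ** (step / float(decay_steps))

for step in (0, 100, 200, 300):
  print(step, decayed_lr(step))  # 0.1, 1e-4, 1e-7, 1e-10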
tensorflow/examples/learn/iris_custom_model.py
@@ -1,97 +0,0 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf


X_FEATURE = 'x'  # Name of the input feature.


def my_model(features, labels, mode):
  """DNN with three hidden layers, and dropout of 0.1 probability."""
  # Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.1.
  net = features[X_FEATURE]
  for units in [10, 20, 10]:
    net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
    net = tf.layers.dropout(net, rate=0.1)

  # Compute logits (1 per class).
  logits = tf.layers.dense(net, 3, activation=None)

  # Compute predictions.
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class': predicted_classes,
        'prob': tf.nn.softmax(logits)
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)

  # Compute loss.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  # Create training op.
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # Compute evaluation metrics.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_argv):
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  classifier = tf.estimator.Estimator(model_fn=my_model)

  # Train.
  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=1000)

  # Predict.
  test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
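One detail of the model above worth flagging: tf.layers.dropout defaults to training=False, so as written the dropout layers appear to stay inactive in every mode. A sketch of how the mode is usually threaded through to enable dropout only while training (hidden_layers is a hypothetical helper, not part of the deleted file):

import tensorflow as tf

def hidden_layers(features, mode):
  """Sketch: the same 10-20-10 stack, with dropout active only while training."""
  is_training = (mode == tf.estimator.ModeKeys.TRAIN)
  net = features['x']
  for units in [10, 20, 10]:
    net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
    # Passing training= is what switches dropout on; its default is False.
    net = tf.layers.dropout(net, rate=0.1, training=is_training)
  return tf.layers.dense(net, 3, activation=None)  # Logits, one per class.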