Merge changes from github.
Change: 120185825
This commit is contained in:
parent: fc432e37a7
commit: 5c9bc51857
@@ -10,6 +10,9 @@ help and advice.
### Environment info
Operating System:

Installed version of CUDA and cuDNN:
(please attach the output of `ls -l /path/to/cuda/lib/libcud*`):

If installed from binary pip package, provide:

1. Which pip package you installed.
@@ -33,9 +33,9 @@ and discussion.**

People who are a little bit adventurous can also try our nightly binaries:

* Linux CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.7.1-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.7.1-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/))
* Linux GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.7.1-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.7.1-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-slave/))
* Mac CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.7.1-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.7.1-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
* Linux CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0rc0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/))
* Linux GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-working/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0rc0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-working/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-working/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-working/))
* Mac CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0rc0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0rc0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
* [Android](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
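
For example, a nightly wheel can be installed with pip (illustrative; substitute the wheel URL for your platform from the list above):

    pip install --upgrade <wheel-url>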

#### *Try your first TensorFlow program*
RELEASE.md (43 lines changed)
@@ -1,3 +1,46 @@
# Release 0.8.0

## Major Features and Improvements

* Added a distributed runtime using gRPC
* Moved skflow to `contrib/learn`
* Better linear optimizer in `contrib/linear_optimizer`
* Random forest implementation in `contrib/tensor_forest`
* CTC loss and decoders in `contrib/ctc`
* Basic support for `half` data type
* Better support for loading user ops (see examples in `contrib/`)
* Allow use of (non-blocking) Eigen threadpool with `TENSORFLOW_USE_EIGEN_THREADPOOL` define
* Added an extension mechanism for adding network file system support
* TensorBoard displays metadata stats (running time, memory usage and device used) and tensor shapes
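
The skflow move means imports shift from the standalone package to contrib; a minimal sketch (old import shown for contrast):

    # before: import skflow
    from tensorflow.contrib import learn  # skflow now lives here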

## Bug Fixes and Other Changes

* Utility for inspecting checkpoints
* Basic tracing and timeline support
* Allow building against cuDNN 5 (not incl. RNN/LSTM support)
* Added instructions and binaries for ProtoBuf library with fast serialization and without 64MB limit
* Added special functions
* `bool`-strictness: Tensors have to be explicitly compared to `None`
* Shape strictness: all fed values must have a shape that is compatible with the tensor they are replacing
* Exposed `tf.while_loop` (deprecated `control_flow_ops.While`)
* `run()` now takes `RunOptions` and `RunMetadata`, which enable timing stats
* Fixed lots of potential overflow problems in op kernels
* Various performance improvements, especially for RNNs and convolutions
* Many bugfixes
* Nightly builds, tutorial tests, many test improvements
* New examples: transfer learning and deepdream ipython notebook
* Added tutorials, many documentation fixes.
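
Two of these changes are easiest to see in code; a minimal sketch, assuming the protos are exposed as `tf.RunOptions`/`tf.RunMetadata`:

    import tensorflow as tf

    i = tf.constant(0)
    loop = tf.while_loop(lambda i: tf.less(i, 10),  # condition
                         lambda i: tf.add(i, 1),    # body
                         [i])                       # loop variables

    with tf.Session() as sess:
        opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        meta = tf.RunMetadata()
        sess.run(loop, options=opts, run_metadata=meta)
        # meta.step_stats now holds per-op timing for the timeline tools.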

## Thanks to our Contributors

This release contains contributions from many people at Google, as well as:

Abhinav Upadhyay, Aggelos Avgerinos, Alan Wu, Alexander G. de G. Matthews, Aleksandr Yahnev, @amchercashin, Andy Kitchen, Aurelien Geron, Awni Hannun, @BanditCat, Bas Veeling, Cameron Chen, @cg31, Cheng-Lung Sung, Christopher Bonnett, Dan Becker, Dan Van Boxel, Daniel Golden, Danijar Hafner, Danny Goodman, Dave Decker, David Dao, David Kretch, Dongjoon Hyun, Dustin Dorroh, @e-lin, Eurico Doirado, Erik Erwitt, Fabrizio Milo, @gaohuazuo, Iblis Lin, Igor Babuschkin, Isaac Hodes, Isaac Turner, Iván Vallés, J Yegerlehner, Jack Zhang, James Wexler, Jan Zikes, Jay Young, Jeff Hodges, @jmtatsch, Johnny Lim, Jonas Meinertz Hansen, Kanit Wongsuphasawat, Kashif Rasul, Ken Shirriff, Kenneth Mitchner, Kenta Yonekura, Konrad Magnusson, Konstantin Lopuhin, @lahwran, @lekaha, @liyongsea, Lucas Adams, @makseq, Mandeep Singh, @manipopopo, Mark Amery, Memo Akten, Michael Heilman, Michael Peteuil, Nathan Daly, Nicolas Fauchereau, @ninotoshi, Olav Nymoen, @panmari, @papelita1234, Pedro Lopes, Pranav Sailesh Mani, RJ Ryan, Rob Culliton, Robert DiPietro, @ronrest, Sam Abrahams, Sarath Shekkizhar, Scott Graham, Sebastian Raschka, Sung Kim, Surya Bhupatiraju, Syed Ahmed, Till Hoffmann, @timsl, @urimend, @vesnica, Vlad Frolov, Vlad Zagorodniy, Wei-Ting Kuo, Wenjian Huang, William Dmitri Breaden Madden, Wladimir Schmidt, Yuwen Yan, Yuxin Wu, Yuya Kusakabe, @zhongzyd, @znah.

We are also grateful to all who filed issues or helped resolve them, asked and answered questions, and were part of inspiring discussions.


# Release 0.7.1

## Bug Fixes and Other Changes
@@ -69,12 +69,12 @@ filegroup(
        "//tensorflow/contrib/distributions:all_files",
        "//tensorflow/contrib/framework:all_files",
        "//tensorflow/contrib/layers:all_files",
        "//tensorflow/contrib/learn:all_files",
        "//tensorflow/contrib/linear_optimizer:all_files",
        "//tensorflow/contrib/linear_optimizer/kernels:all_files",
        "//tensorflow/contrib/lookup:all_files",
        "//tensorflow/contrib/losses:all_files",
        "//tensorflow/contrib/metrics:all_files",
        "//tensorflow/contrib/skflow:all_files",
        "//tensorflow/contrib/tensor_forest:all_files",
        "//tensorflow/contrib/testing:all_files",
        "//tensorflow/contrib/util:all_files",

@@ -89,6 +89,7 @@ filegroup(
        "//tensorflow/examples/how_tos/reading_data:all_files",
        "//tensorflow/examples/image_retraining:all_files",
        "//tensorflow/examples/label_image:all_files",
        "//tensorflow/examples/skflow:all_files",
        "//tensorflow/examples/tutorials/mnist:all_files",
        "//tensorflow/examples/tutorials/word2vec:all_files",
        "//tensorflow/g3doc/how_tos/adding_an_op:all_files",
@@ -16,7 +16,9 @@ py_library(
        "//tensorflow/contrib/ctc:ctc_py",
        "//tensorflow/contrib/distributions:distributions_py",
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/contrib/grid_rnn:grid_rnn_py",
        "//tensorflow/contrib/layers:layers_py",
        "//tensorflow/contrib/learn",
        "//tensorflow/contrib/linear_optimizer:sdca_ops_py",
        "//tensorflow/contrib/lookup:lookup_py",
        "//tensorflow/contrib/losses:losses_py",
@@ -22,10 +22,13 @@ from __future__ import print_function
from tensorflow.contrib import ctc
from tensorflow.contrib import distributions
from tensorflow.contrib import framework
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import metrics
from tensorflow.contrib import skflow
from tensorflow.contrib import testing
from tensorflow.contrib import util
@@ -1,5 +1,5 @@
# Minimum CMake required
cmake_minimum_required(VERSION 2.8)
cmake_minimum_required(VERSION 3.1)

# Project
project(tensorflow C CXX)

@@ -123,6 +123,7 @@ foreach(tf_cc_op_lib_name ${tf_cc_op_lib_names})
    ${jpeg_STATIC_LIBRARIES}
    ${png_STATIC_LIBRARIES}
    ${ZLIB_LIBRARIES}
    ${CMAKE_DL_LIBS}
)

target_compile_options(${tf_cc_op_lib_name}_gen_cc PRIVATE

@@ -201,4 +202,4 @@ target_compile_features(tf_cc_ops PRIVATE
# "ops/const_op.cc",
# ] + glob(["ops/*_grad.cc"]),
# pkg = "//tensorflow/core",
#)
#)

@@ -41,6 +41,7 @@ target_link_libraries(tf_tutorials_example_trainer PUBLIC
    ${jpeg_STATIC_LIBRARIES}
    ${png_STATIC_LIBRARIES}
    ${ZLIB_LIBRARIES}
    ${CMAKE_DL_LIBS}
)

target_compile_options(tf_tutorials_example_trainer PRIVATE
tensorflow/contrib/ffmpeg/BUILD (new file, 9 lines)
@@ -0,0 +1,9 @@
# Description:
#   Ops that process audio and/or video files using FFmpeg.
#   (https://www.ffmpeg.org/)

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

package(default_visibility = ["//tensorflow:__subpackages__"])
tensorflow/contrib/ffmpeg/kernels/BUILD (new file, 61 lines)
@@ -0,0 +1,61 @@
# Description:
#   Libraries and kernels for manipulating audio and video using FFmpeg.
#   (https://www.ffmpeg.org)

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

package(default_visibility = ["//tensorflow:__subpackages__"])

cc_library(
    name = "ffmpeg_lib",
    srcs = ["ffmpeg_lib.cc"],
    hdrs = ["ffmpeg_lib.h"],
    deps = [
        "//tensorflow/core:framework",
        "//tensorflow/core:lib",
    ],
)

cc_test(
    name = "ffmpeg_lib_installed_test",
    srcs = ["ffmpeg_lib_test.cc"],
    args = [
        "--should_ffmpeg_be_installed=true",
    ],
    data = [
        ":testdata/test_sound1.mp3",
    ],
    tags = [
        "local",
        "manual",
    ],
    deps = [
        ":ffmpeg_lib",
        "//tensorflow/core:framework_internal",
        "//tensorflow/core:lib",
        "//tensorflow/core:test",
    ],
)

cc_test(
    name = "ffmpeg_lib_uninstalled_test",
    srcs = ["ffmpeg_lib_test.cc"],
    args = [
        "--should_ffmpeg_be_installed=false",
    ],
    data = [
        ":testdata/test_sound1.mp3",
    ],
    tags = [
        "local",
        "manual",
    ],
    deps = [
        ":ffmpeg_lib",
        "//tensorflow/core:framework_internal",
        "//tensorflow/core:lib",
        "//tensorflow/core:test",
    ],
)
tensorflow/contrib/ffmpeg/kernels/ffmpeg_lib.cc (new file, 119 lines)
@@ -0,0 +1,119 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#include "tensorflow/contrib/ffmpeg/kernels/ffmpeg_lib.h"

#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>  // for std::transform below (missing in the original)
#include <string>
#include <tuple>
#include <vector>

#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"

using tensorflow::strings::StrCat;

namespace tensorflow {
namespace ffmpeg {
namespace {

const char kFfmpegExecutable[] = "ffmpeg";
const int32 kDefaultProbeSize = 5000000;  // 5MB

string GetTempFilename(const string& extension) {
  for (const char* dir : std::vector<const char*>(
           {getenv("TEST_TMPDIR"), getenv("TMPDIR"), getenv("TMP"), "/tmp"})) {
    if (!dir || !dir[0]) {
      continue;
    }
    struct stat statbuf;
    if (!stat(dir, &statbuf) && S_ISDIR(statbuf.st_mode)) {
      return io::JoinPath(dir, StrCat("tmp_file_", getpid(), ".", extension));
    }
  }
  LOG(FATAL) << "No temp directory found.";
}

std::vector<string> FfmpegCommandLine(const string& input_filename,
                                      const string& output_filename,
                                      const string& input_format_id,
                                      int32 samples_per_second,
                                      int32 channel_count) {
  return {"-nostats",             // No additional progress display.
          "-nostdin",             // No interactive commands accepted.
          "-f", input_format_id,  // eg: "mp3"
          "-probesize", StrCat(kDefaultProbeSize), "-i", input_filename,
          "-loglevel", "info",  // Enable verbose logging to support debugging.
          "-map_metadata", "-1",  // Strip global metadata; do not copy it to the output.
          "-vn",                  // No video recording.
          "-ac:a:0", StrCat(channel_count), "-ar:a:0",
          StrCat(samples_per_second),
          // Output set (in several ways) to signed 16-bit little-endian ints.
          "-codec:a:0", "pcm_s16le", "-sample_fmt", "s16", "-f", "s16le",
          "-sn",  // No subtitle recording.
          "-y",   // Overwrite output file.
          StrCat(output_filename)};
}
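
// For illustration (comment added here, not in the original file): for an
// mp3 input the argument vector above corresponds roughly to a command like
//   ffmpeg -nostats -nostdin -f mp3 -probesize 5000000 -i input.mp3 \
//     -loglevel info -map_metadata -1 -vn -ac:a:0 1 -ar:a:0 16000 \
//     -codec:a:0 pcm_s16le -sample_fmt s16 -f s16le -sn -y output.mp3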

[[noreturn]] int ExecuteFfmpeg(const std::vector<string>& args) {
  std::vector<char*> args_chars;
  // argv[0] is conventionally the program name; `args` holds only the flags,
  // so the executable name is prepended here (missing in the original, which
  // would have made ffmpeg treat "-nostats" as argv[0]).
  args_chars.push_back(const_cast<char*>(kFfmpegExecutable));
  std::transform(args.begin(), args.end(), std::back_inserter(args_chars),
                 [](const string& s) { return const_cast<char*>(s.c_str()); });
  args_chars.push_back(nullptr);

  ::execvp(kFfmpegExecutable, args_chars.data());
  // exec only returns on error.
  const int error = errno;
  LOG(ERROR) << "FFmpeg could not be executed: " << error;
  ::_exit(error);
}

}  // namespace

Status ReadAudioFile(const string& filename, const string& audio_format_id,
                     int32 samples_per_second, int32 channel_count,
                     std::vector<float>* output_samples) {
  // Create an argument list.
  string output_filename = GetTempFilename(audio_format_id);
  const std::vector<string> args =
      FfmpegCommandLine(filename, output_filename, audio_format_id,
                        samples_per_second, channel_count);

  // Execute ffmpeg and report errors.
  pid_t child_pid = ::fork();
  if (child_pid < 0) {
    return Status(error::Code::UNKNOWN, StrCat("fork failed: ", errno));
  }
  if (child_pid == 0) {
    ExecuteFfmpeg(args);
  } else {
    int status_code;
    ::waitpid(child_pid, &status_code, 0);
    if (!status_code) {
      return Status::OK();
    } else {
      return Status(error::Code::NOT_FOUND,
                    StrCat("FFmpeg execution failed: ", status_code));
    }
  }
}

}  // namespace ffmpeg
}  // namespace tensorflow
tensorflow/contrib/ffmpeg/kernels/ffmpeg_lib.h (new file, 33 lines)
@@ -0,0 +1,33 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#ifndef THIRD_PARTY_TENSORFLOW_CONTRIB_FFMPEG_KERNELS_FFMPEG_LIB_H_
#define THIRD_PARTY_TENSORFLOW_CONTRIB_FFMPEG_KERNELS_FFMPEG_LIB_H_

#include <string>
#include <vector>  // needed for std::vector in the declaration below (missing in the original)

#include "tensorflow/core/lib/core/status.h"

namespace tensorflow {
namespace ffmpeg {

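// Decodes `filename` (of format `audio_format_id`, e.g. "mp3") by invoking an
// external ffmpeg process and returns the decoded samples in
// `output_samples`. (Descriptive comment added here; contract inferred from
// the signature and ffmpeg_lib.cc, not stated in the original.)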
Status ReadAudioFile(const string& filename, const string& audio_format_id,
                     int32 samples_per_second, int32 channel_count,
                     std::vector<float>* output_samples);

}  // namespace ffmpeg
}  // namespace tensorflow

#endif  // THIRD_PARTY_TENSORFLOW_CONTRIB_FFMPEG_KERNELS_FFMPEG_LIB_H_
tensorflow/contrib/ffmpeg/kernels/ffmpeg_lib_test.cc (new file, 86 lines)
@@ -0,0 +1,86 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#include "tensorflow/contrib/ffmpeg/kernels/ffmpeg_lib.h"

#include <stdlib.h>
#include <vector>

#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/util/command_line_flags.h"

using tensorflow::testing::TensorFlowSrcRoot;

namespace tensorflow {
namespace ffmpeg {
namespace {

const char kTestSoundFilename[] =
    "contrib/ffmpeg/kernels/testdata/test_sound1.mp3";

// Set to true via a command line flag iff the test is expected to have FFmpeg
// installed.
mutex mu;
bool should_ffmpeg_be_installed GUARDED_BY(mu) = false;

void ParseTestFlags(int* argc, char** argv) {
  mutex_lock l(mu);
  CHECK(ParseFlags(argc, argv, {Flag("should_ffmpeg_be_installed",
                                     &should_ffmpeg_be_installed)}));
}

TEST(FfmpegLibTest, TestUninstalled) {
  {
    mutex_lock l(mu);
    if (should_ffmpeg_be_installed) {
      return;
    }
    LOG(INFO) << "Assuming FFmpeg is uninstalled.";
  }

  string filename = io::JoinPath(TensorFlowSrcRoot(), kTestSoundFilename);
  std::vector<float> output_samples;
  Status status = ReadAudioFile(filename, "mp3", 5000, 1, &output_samples);
  ASSERT_EQ(status.code(), error::Code::NOT_FOUND);
}

TEST(FfmpegLibTest, TestInstalled) {
  {
    mutex_lock l(mu);
    if (!should_ffmpeg_be_installed) {
      return;
    }
    LOG(INFO) << "Assuming FFmpeg is installed.";
  }

  string filename = io::JoinPath(TensorFlowSrcRoot(), kTestSoundFilename);
  std::vector<float> output_samples;
  Status status = ReadAudioFile(filename, "mp3", 5000, 1, &output_samples);
  ASSERT_TRUE(status.ok());
}

}  // namespace
}  // namespace ffmpeg
}  // namespace tensorflow

int main(int argc, char** argv) {
  tensorflow::ffmpeg::ParseTestFlags(&argc, argv);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
tensorflow/contrib/ffmpeg/kernels/testdata/test_sound1.mp3 (new binary file, vendored)
Binary file not shown.
tensorflow/contrib/grid_rnn/BUILD (new file, 39 lines)
@@ -0,0 +1,39 @@
# Description:
#   Contains classes to construct GridRNN cells
#   APIs here are meant to evolve over time.

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

package(default_visibility = ["//tensorflow:__subpackages__"])

load("//tensorflow:tensorflow.bzl", "cuda_py_tests")

py_library(
    name = "grid_rnn_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
)

cuda_py_tests(
    name = "grid_rnn_test",
    srcs = ["python/kernel_tests/grid_rnn_test.py"],
    additional_deps = [
        ":grid_rnn_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
)

filegroup(
    name = "all_files",
    srcs = glob(
        ["**/*"],
        exclude = [
            "**/METADATA",
            "**/OWNERS",
        ],
    ),
    visibility = ["//tensorflow:__subpackages__"],
)
tensorflow/contrib/grid_rnn/__init__.py (new file, 27 lines)
@@ -0,0 +1,27 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""GridRNN cells.

## This package provides classes for GridRNN.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell import *
@@ -12,8 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from skflow import *
tensorflow/contrib/grid_rnn/python/kernel_tests/grid_rnn_test.py (new file, 489 lines)
@@ -0,0 +1,489 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for GridRNN cells."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf


class GridRNNCellTest(tf.test.TestCase):

  def testGrid2BasicLSTMCell(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.2)) as root_scope:
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 8])
        cell = tf.contrib.grid_rnn.Grid2BasicLSTMCell(2)
        self.assertEqual(cell.state_size, 8)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 8))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 8))
        self.assertAllClose(res[0], [[0.36617181, 0.36617181]])
        self.assertAllClose(res[1], [[0.71053141, 0.71053141, 0.36617181, 0.36617181,
                                      0.72320831, 0.80555487, 0.39102408, 0.42150158]])

        # emulate a loop through the input sequence, where we call cell() multiple times
        root_scope.reuse_variables()
        g2, s2 = cell(x, m)
        self.assertEqual(g2.get_shape(), (1, 2))
        self.assertEqual(s2.get_shape(), (1, 8))

        res = sess.run([g2, s2], {x: np.array([[2., 2., 2.]]), m: res[1]})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 8))
        self.assertAllClose(res[0], [[0.58847463, 0.58847463]])
        self.assertAllClose(res[1], [[1.40469193, 1.40469193, 0.58847463, 0.58847463,
                                      0.97726452, 1.04626071, 0.4927212, 0.51137757]])

  def testGrid2BasicLSTMCellTied(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.2)):
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 8])
        cell = tf.contrib.grid_rnn.Grid2BasicLSTMCell(2, tied=True)
        self.assertEqual(cell.state_size, 8)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 8))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 8))
        self.assertAllClose(res[0], [[0.36617181, 0.36617181]])
        self.assertAllClose(res[1], [[0.71053141, 0.71053141, 0.36617181, 0.36617181,
                                      0.72320831, 0.80555487, 0.39102408, 0.42150158]])

        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]), m: res[1]})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 8))
        self.assertAllClose(res[0], [[0.36703536, 0.36703536]])
        self.assertAllClose(res[1], [[0.71200621, 0.71200621, 0.36703536, 0.36703536,
                                      0.80941606, 0.87550586, 0.40108523, 0.42199609]])

  def testGrid2BasicLSTMCellWithRelu(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.2)):
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 4])
        cell = tf.contrib.grid_rnn.Grid2BasicLSTMCell(2, tied=False, non_recurrent_fn=tf.nn.relu)
        self.assertEqual(cell.state_size, 4)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 4))
        self.assertAllClose(res[0], [[0.31667367, 0.31667367]])
        self.assertAllClose(res[1], [[0.29530135, 0.37520045, 0.17044567, 0.21292259]])

  """
  LSTMCell
  """

  def testGrid2LSTMCell(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 8])
        cell = tf.contrib.grid_rnn.Grid2LSTMCell(2, use_peepholes=True)
        self.assertEqual(cell.state_size, 8)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 8))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 8))
        self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
        self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918, 0.95686918,
                                      1.38917875, 1.49043763, 0.83884692, 0.86036491]])

  def testGrid2LSTMCellTied(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 8])
        cell = tf.contrib.grid_rnn.Grid2LSTMCell(2, tied=True, use_peepholes=True)
        self.assertEqual(cell.state_size, 8)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 8))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 8))
        self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
        self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918, 0.95686918,
                                      1.38917875, 1.49043763, 0.83884692, 0.86036491]])

  def testGrid2LSTMCellWithRelu(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 4])
        cell = tf.contrib.grid_rnn.Grid2LSTMCell(2, use_peepholes=True, non_recurrent_fn=tf.nn.relu)
        self.assertEqual(cell.state_size, 4)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 4))
        self.assertAllClose(res[0], [[2.1831727, 2.1831727]])
        self.assertAllClose(res[1], [[0.92270052, 1.02325559, 0.66159075, 0.70475441]])

  """
  RNNCell
  """

  def testGrid2BasicRNNCell(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([2, 2])
        m = tf.zeros([2, 4])
        cell = tf.contrib.grid_rnn.Grid2BasicRNNCell(2)
        self.assertEqual(cell.state_size, 4)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (2, 2))
        self.assertEqual(s.get_shape(), (2, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1.], [2., 2.]]),
                                m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])})
        self.assertEqual(res[0].shape, (2, 2))
        self.assertEqual(res[1].shape, (2, 4))
        self.assertAllClose(res[0], [[0.94685763, 0.94685763],
                                     [0.99480951, 0.99480951]])
        self.assertAllClose(res[1], [[0.94685763, 0.94685763, 0.80049908, 0.80049908],
                                     [0.99480951, 0.99480951, 0.97574311, 0.97574311]])

  def testGrid2BasicRNNCellTied(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([2, 2])
        m = tf.zeros([2, 4])
        cell = tf.contrib.grid_rnn.Grid2BasicRNNCell(2, tied=True)
        self.assertEqual(cell.state_size, 4)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (2, 2))
        self.assertEqual(s.get_shape(), (2, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1.], [2., 2.]]),
                                m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])})
        self.assertEqual(res[0].shape, (2, 2))
        self.assertEqual(res[1].shape, (2, 4))
        self.assertAllClose(res[0], [[0.94685763, 0.94685763],
                                     [0.99480951, 0.99480951]])
        self.assertAllClose(res[1], [[0.94685763, 0.94685763, 0.80049908, 0.80049908],
                                     [0.99480951, 0.99480951, 0.97574311, 0.97574311]])

  def testGrid2BasicRNNCellWithRelu(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 2])
        m = tf.zeros([1, 2])
        cell = tf.contrib.grid_rnn.Grid2BasicRNNCell(2, non_recurrent_fn=tf.nn.relu)
        self.assertEqual(cell.state_size, 2)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 2))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1.]]), m: np.array([[0.1, 0.1]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 2))
        self.assertAllClose(res[0], [[1.80049896, 1.80049896]])
        self.assertAllClose(res[1], [[0.80049896, 0.80049896]])

  """
  1-LSTM
  """

  def testGrid1LSTMCell(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)) as root_scope:
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 4])
        cell = tf.contrib.grid_rnn.Grid1LSTMCell(2, use_peepholes=True)
        self.assertEqual(cell.state_size, 4)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 4))
        self.assertAllClose(res[0], [[0.91287315, 0.91287315]])
        self.assertAllClose(res[1], [[2.26285243, 2.26285243, 0.91287315, 0.91287315]])

        root_scope.reuse_variables()

        x2 = tf.zeros([0, 0])
        g2, s2 = cell(x2, m)
        self.assertEqual(g2.get_shape(), (1, 2))
        self.assertEqual(s2.get_shape(), (1, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g2, s2], {m: res[1]})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 4))
        self.assertAllClose(res[0], [[0.9032144, 0.9032144]])
        self.assertAllClose(res[1], [[2.79966092, 2.79966092, 0.9032144, 0.9032144]])

        g3, s3 = cell(x2, m)
        self.assertEqual(g3.get_shape(), (1, 2))
        self.assertEqual(s3.get_shape(), (1, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g3, s3], {m: res[1]})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 4))
        self.assertAllClose(res[0], [[0.92727238, 0.92727238]])
        self.assertAllClose(res[1], [[3.3529923, 3.3529923, 0.92727238, 0.92727238]])

  """
  3-LSTM
  """

  def testGrid3LSTMCell(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 3])
        m = tf.zeros([1, 12])
        cell = tf.contrib.grid_rnn.Grid3LSTMCell(2, use_peepholes=True)
        self.assertEqual(cell.state_size, 12)

        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (1, 2))
        self.assertEqual(s.get_shape(), (1, 12))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1., 1.]]),
                                m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, -0.1, -0.2, -0.3, -0.4]])})
        self.assertEqual(res[0].shape, (1, 2))
        self.assertEqual(res[1].shape, (1, 12))

        self.assertAllClose(res[0], [[0.96892911, 0.96892911]])
        self.assertAllClose(res[1], [[2.45227885, 2.45227885, 0.96892911, 0.96892911,
                                      1.33592629, 1.4373529, 0.80867189, 0.83247656,
                                      0.7317788, 0.63205892, 0.56548983, 0.50446129]])

  """
  Edge cases
  """

  def testGridRNNEdgeCasesLikeRelu(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([3, 2])
        m = tf.zeros([0, 0])

        # this is equivalent to relu
        cell = tf.contrib.grid_rnn.GridRNNCell(num_units=2, num_dims=1, input_dims=0, output_dims=0,
                                               non_recurrent_dims=0, non_recurrent_fn=tf.nn.relu)
        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (3, 2))
        self.assertEqual(s.get_shape(), (0, 0))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., -1.], [-2, 1], [2, -1]])})
        self.assertEqual(res[0].shape, (3, 2))
        self.assertEqual(res[1].shape, (0, 0))
        self.assertAllClose(res[0], [[0, 0], [0, 0], [0.5, 0.5]])

  def testGridRNNEdgeCasesNoOutput(self):
    with self.test_session() as sess:
      with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 2])
        m = tf.zeros([1, 4])

        # This cell produces no output
        cell = tf.contrib.grid_rnn.GridRNNCell(num_units=2, num_dims=2, input_dims=0, output_dims=None,
                                               non_recurrent_dims=0, non_recurrent_fn=tf.nn.relu)
        g, s = cell(x, m)
        self.assertEqual(g.get_shape(), (0, 0))
        self.assertEqual(s.get_shape(), (1, 4))

        sess.run([tf.initialize_all_variables()])
        res = sess.run([g, s], {x: np.array([[1., 1.]]),
                                m: np.array([[0.1, 0.1, 0.1, 0.1]])})
        self.assertEqual(res[0].shape, (0, 0))
        self.assertEqual(res[1].shape, (1, 4))

  """
  Test with tf.nn.rnn
  """

  def testGrid2LSTMCellWithRNN(self):
    batch_size = 3
    input_size = 5
    max_length = 6  # unrolled up to this length
    num_units = 2

    with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
      cell = tf.contrib.grid_rnn.Grid2LSTMCell(num_units=num_units)

      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(batch_size, input_size))]

      outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)

    self.assertEqual(len(outputs), len(inputs))
    self.assertEqual(state.get_shape(), (batch_size, 8))

    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
      self.assertEqual(out.get_shape()[1], num_units)
      self.assertEqual(out.dtype, inp.dtype)

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())

      input_value = np.ones((batch_size, input_size))
      values = sess.run(outputs + [state],
                        feed_dict={inputs[0]: input_value})
      for v in values:
        self.assertTrue(np.all(np.isfinite(v)))

  def testGrid2LSTMCellReLUWithRNN(self):
    batch_size = 3
    input_size = 5
    max_length = 6  # unrolled up to this length
    num_units = 2

    with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
      cell = tf.contrib.grid_rnn.Grid2LSTMCell(num_units=num_units, non_recurrent_fn=tf.nn.relu)

      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(batch_size, input_size))]

      outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)

    self.assertEqual(len(outputs), len(inputs))
    self.assertEqual(state.get_shape(), (batch_size, 4))

    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
      self.assertEqual(out.get_shape()[1], num_units)
      self.assertEqual(out.dtype, inp.dtype)

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())

      input_value = np.ones((batch_size, input_size))
      values = sess.run(outputs + [state],
                        feed_dict={inputs[0]: input_value})
      for v in values:
        self.assertTrue(np.all(np.isfinite(v)))

  def testGrid3LSTMCellReLUWithRNN(self):
    batch_size = 3
    input_size = 5
    max_length = 6  # unrolled up to this length
    num_units = 2

    with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
      cell = tf.contrib.grid_rnn.Grid3LSTMCell(num_units=num_units, non_recurrent_fn=tf.nn.relu)

      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(batch_size, input_size))]

      outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)

    self.assertEqual(len(outputs), len(inputs))
    self.assertEqual(state.get_shape(), (batch_size, 8))

    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape()[0], inp.get_shape()[0])
      self.assertEqual(out.get_shape()[1], num_units)
      self.assertEqual(out.dtype, inp.dtype)

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())

      input_value = np.ones((batch_size, input_size))
      values = sess.run(outputs + [state],
                        feed_dict={inputs[0]: input_value})
      for v in values:
        self.assertTrue(np.all(np.isfinite(v)))

  def testGrid1LSTMCellWithRNN(self):
    batch_size = 3
    input_size = 5
    max_length = 6  # unrolled up to this length
    num_units = 2

    with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)):
      cell = tf.contrib.grid_rnn.Grid1LSTMCell(num_units=num_units)

      # for 1-LSTM, we only feed the first step
      inputs = [tf.placeholder(tf.float32, shape=(batch_size, input_size))] \
          + (max_length - 1) * [tf.zeros([0, 0])]

      outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)

    self.assertEqual(len(outputs), len(inputs))
    self.assertEqual(state.get_shape(), (batch_size, 4))

    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape(), (3, num_units))
      self.assertEqual(out.dtype, inp.dtype)

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())

      input_value = np.ones((batch_size, input_size))
      values = sess.run(outputs + [state],
                        feed_dict={inputs[0]: input_value})
      for v in values:
        self.assertTrue(np.all(np.isfinite(v)))


if __name__ == "__main__":
  tf.test.main()
tensorflow/contrib/grid_rnn/python/ops/__init__.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py (new file, 352 lines)
@@ -0,0 +1,352 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Module for constructing GridRNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import rnn_cell
from tensorflow.contrib import layers


class GridRNNCell(rnn_cell.RNNCell):
  """Grid recurrent cell.

  This implementation is based on:

    http://arxiv.org/pdf/1507.01526v3.pdf

  This is the generic implementation of GridRNN. Users can specify an
  arbitrary number of dimensions, set some of them to be priority
  (section 3.2) or non-recurrent (section 3.3), and choose the input/output
  dimensions (section 3.4). Weight sharing can also be specified using the
  `tied` parameter. The type of recurrent units can be specified via
  `cell_fn`.
  """

  def __init__(self, num_units, num_dims=1, input_dims=None, output_dims=None, priority_dims=None,
               non_recurrent_dims=None, tied=False, cell_fn=None, non_recurrent_fn=None):
    """Initialize the parameters of a Grid RNN cell.

    Args:
      num_units: int, The number of units in all dimensions of this GridRNN cell
      num_dims: int, Number of dimensions of this grid.
      input_dims: int or list, List of dimensions which will receive input data.
      output_dims: int or list, List of dimensions from which the output will be recorded.
      priority_dims: int or list, List of dimensions to be considered as priority dimensions.
        If None, no dimension is prioritized.
      non_recurrent_dims: int or list, List of dimensions that are not recurrent.
        The transfer function for non-recurrent dimensions is specified via `non_recurrent_fn`,
        which defaults to `tensorflow.nn.relu`.
      tied: bool, Whether to share the weights among the dimensions of this GridRNN cell.
        If there are non-recurrent dimensions in the grid, weights are shared between each
        group of recurrent and non-recurrent dimensions.
      cell_fn: function, a function which returns the recurrent cell object. It must have the
        following signature:
          def cell_func(num_units, input_size):
            # ...

        and return an object of type `RNNCell`. If None, LSTMCell with default parameters will be used.
      non_recurrent_fn: a tensorflow Op that will be the transfer function of the non-recurrent dimensions
    """
    if num_dims < 1:
      raise ValueError('dims must be >= 1: {}'.format(num_dims))

    self._config = _parse_rnn_config(num_dims, input_dims, output_dims, priority_dims,
                                     non_recurrent_dims, non_recurrent_fn or nn.relu, tied, num_units)

    cell_input_size = (self._config.num_dims - 1) * num_units
    if cell_fn is None:
      self._cell = rnn_cell.LSTMCell(num_units=num_units, input_size=cell_input_size)
    else:
      self._cell = cell_fn(num_units, cell_input_size)
      if not isinstance(self._cell, rnn_cell.RNNCell):
        raise ValueError('cell_fn must return an object of type RNNCell')

  @property
  def input_size(self):
    # Temporarily using num_units as the input_size of each dimension.
    # The actual input size is only determined when this cell is invoked,
    # so this information can be considered unreliable.
    return self._config.num_units * len(self._config.inputs)

  @property
  def output_size(self):
    return self._cell.output_size * len(self._config.outputs)

  @property
  def state_size(self):
    return self._cell.state_size * len(self._config.recurrents)

  def __call__(self, inputs, state, scope=None):
    """Run one step of GridRNN.

    Args:
      inputs: input Tensor, 2D, batch x input_size. Or None.
      state: state Tensor, 2D, batch x state_size. Note that state_size = cell_state_size * recurrent_dims.
      scope: VariableScope for the created subgraph; defaults to "GridRNNCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_size, Tensor representing the output of the cell
        after reading "inputs" when previous state was "state".
      - A 2D, batch x state_size, Tensor representing the new state of the cell
        after reading "inputs" when previous state was "state".
    """
|
||||
state_sz = state.get_shape().as_list()[1]
|
||||
if self.state_size != state_sz:
|
||||
raise ValueError('Actual state size not same as specified: {} vs {}.'.format(state_sz, self.state_size))
|
||||
|
||||
conf = self._config
|
||||
dtype = inputs.dtype if inputs is not None else state.dtype
|
||||
|
||||
# c_prev is `m`, and m_prev is `h` in the paper. Keep c and m here for consistency with the codebase
|
||||
c_prev = [None] * self._config.num_dims
|
||||
m_prev = [None] * self._config.num_dims
|
||||
cell_output_size = self._cell.state_size - conf.num_units
|
||||
|
||||
# for LSTM : state = memory cell + output, hence cell_output_size > 0
|
||||
# for GRU/RNN: state = output (whose size is equal to _num_units), hence cell_output_size = 0
|
||||
for recurrent_dim, start_idx in zip(self._config.recurrents, range(0, self.state_size, self._cell.state_size)):
|
||||
if cell_output_size > 0:
|
||||
c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx], [-1, conf.num_units])
|
||||
m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx + conf.num_units], [-1, cell_output_size])
|
||||
else:
|
||||
m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx], [-1, conf.num_units])
|
||||
|
||||
new_output = [None] * conf.num_dims
|
||||
new_state = [None] * conf.num_dims
|
||||
|
||||
with vs.variable_scope(scope or type(self).__name__): # GridRNNCell
|
||||
|
||||
# project input
|
||||
if inputs is not None and sum(inputs.get_shape().as_list()) > 0 and len(conf.inputs) > 0:
|
||||
input_splits = array_ops.split(1, len(conf.inputs), inputs)
|
||||
input_sz = input_splits[0].get_shape().as_list()[1]
|
||||
|
||||
for i, j in enumerate(conf.inputs):
|
||||
input_project_m = vs.get_variable('project_m_{}'.format(j), [input_sz, conf.num_units], dtype=dtype)
|
||||
m_prev[j] = math_ops.matmul(input_splits[i], input_project_m)
|
||||
|
||||
if cell_output_size > 0:
|
||||
input_project_c = vs.get_variable('project_c_{}'.format(j), [input_sz, conf.num_units], dtype=dtype)
|
||||
c_prev[j] = math_ops.matmul(input_splits[i], input_project_c)
|
||||
|
||||
|
||||
_propagate(conf.non_priority, conf, self._cell, c_prev, m_prev, new_output, new_state, True)
|
||||
_propagate(conf.priority, conf, self._cell, c_prev, m_prev, new_output, new_state, False)
|
||||
|
||||
output_tensors = [new_output[i] for i in self._config.outputs]
|
||||
output = array_ops.zeros([0, 0], dtype) if len(output_tensors) == 0 else array_ops.concat(1,
|
||||
output_tensors)
|
||||
|
||||
state_tensors = [new_state[i] for i in self._config.recurrents]
|
||||
states = array_ops.zeros([0, 0], dtype) if len(state_tensors) == 0 else array_ops.concat(1, state_tensors)
|
||||
|
||||
return output, states
|
||||
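# Worked size example (illustrative sketch, not part of the original file): for an
# LSTM-backed 2D grid cell with num_units=8 and both dimensions recurrent,
# self._cell.state_size == 16 (memory cell + output), so self.state_size == 32 and
# the slicing loop in __call__ reads the incoming `state` as:
#   c_prev[0] = state[:, 0:8],   m_prev[0] = state[:, 8:16]
#   c_prev[1] = state[:, 16:24], m_prev[1] = state[:, 24:32]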
"""
Specialized cells, for convenience
"""


class Grid1BasicRNNCell(GridRNNCell):
  """1D BasicRNN cell"""

  def __init__(self, num_units):
    super(Grid1BasicRNNCell, self).__init__(num_units=num_units, num_dims=1,
                                            input_dims=0, output_dims=0, priority_dims=0, tied=False,
                                            cell_fn=lambda n, i: rnn_cell.BasicRNNCell(num_units=n, input_size=i))


class Grid2BasicRNNCell(GridRNNCell):
  """2D BasicRNN cell

  This creates a 2D cell which receives input and gives output in the first dimension.
  The first dimension can optionally be non-recurrent if `non_recurrent_fn` is specified.
  """

  def __init__(self, num_units, tied=False, non_recurrent_fn=None):
    super(Grid2BasicRNNCell, self).__init__(num_units=num_units, num_dims=2,
                                            input_dims=0, output_dims=0, priority_dims=0, tied=tied,
                                            non_recurrent_dims=None if non_recurrent_fn is None else 0,
                                            cell_fn=lambda n, i: rnn_cell.BasicRNNCell(num_units=n, input_size=i),
                                            non_recurrent_fn=non_recurrent_fn)


class Grid1BasicLSTMCell(GridRNNCell):
  """1D BasicLSTM cell"""

  def __init__(self, num_units, forget_bias=1):
    super(Grid1BasicLSTMCell, self).__init__(num_units=num_units, num_dims=1,
                                             input_dims=0, output_dims=0, priority_dims=0, tied=False,
                                             cell_fn=lambda n, i: rnn_cell.BasicLSTMCell(num_units=n,
                                                                                         forget_bias=forget_bias, input_size=i))


class Grid2BasicLSTMCell(GridRNNCell):
  """2D BasicLSTM cell

  This creates a 2D cell which receives input and gives output in the first dimension.
  The first dimension can optionally be non-recurrent if `non_recurrent_fn` is specified.
  """

  def __init__(self, num_units, tied=False, non_recurrent_fn=None, forget_bias=1):
    super(Grid2BasicLSTMCell, self).__init__(num_units=num_units, num_dims=2,
                                             input_dims=0, output_dims=0, priority_dims=0, tied=tied,
                                             non_recurrent_dims=None if non_recurrent_fn is None else 0,
                                             cell_fn=lambda n, i: rnn_cell.BasicLSTMCell(
                                                 num_units=n, forget_bias=forget_bias, input_size=i),
                                             non_recurrent_fn=non_recurrent_fn)


class Grid1LSTMCell(GridRNNCell):
  """1D LSTM cell

  This is different from Grid1BasicLSTMCell because it allows specifying the forget bias and enabling peepholes.
  """

  def __init__(self, num_units, use_peepholes=False, forget_bias=1.0):
    super(Grid1LSTMCell, self).__init__(num_units=num_units, num_dims=1,
                                        input_dims=0, output_dims=0, priority_dims=0,
                                        cell_fn=lambda n, i: rnn_cell.LSTMCell(
                                            num_units=n, input_size=i, use_peepholes=use_peepholes,
                                            forget_bias=forget_bias))


class Grid2LSTMCell(GridRNNCell):
  """2D LSTM cell

  This creates a 2D cell which receives input and gives output in the first dimension.
  The first dimension can optionally be non-recurrent if `non_recurrent_fn` is specified.
  """

  def __init__(self, num_units, tied=False, non_recurrent_fn=None,
               use_peepholes=False, forget_bias=1.0):
    super(Grid2LSTMCell, self).__init__(num_units=num_units, num_dims=2,
                                        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
                                        non_recurrent_dims=None if non_recurrent_fn is None else 0,
                                        cell_fn=lambda n, i: rnn_cell.LSTMCell(
                                            num_units=n, input_size=i, forget_bias=forget_bias,
                                            use_peepholes=use_peepholes),
                                        non_recurrent_fn=non_recurrent_fn)


class Grid3LSTMCell(GridRNNCell):
  """3D LSTM cell

  This creates a 3D cell which receives input and gives output in the first dimension.
  The first dimension can optionally be non-recurrent if `non_recurrent_fn` is specified.
  The second and third dimensions are LSTM.
  """

  def __init__(self, num_units, tied=False, non_recurrent_fn=None,
               use_peepholes=False, forget_bias=1.0):
    super(Grid3LSTMCell, self).__init__(num_units=num_units, num_dims=3,
                                        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
                                        non_recurrent_dims=None if non_recurrent_fn is None else 0,
                                        cell_fn=lambda n, i: rnn_cell.LSTMCell(
                                            num_units=n, input_size=i, forget_bias=forget_bias,
                                            use_peepholes=use_peepholes),
                                        non_recurrent_fn=non_recurrent_fn)


class Grid2GRUCell(GridRNNCell):
  """2D GRU cell

  This creates a 2D cell which receives input and gives output in the first dimension.
  The first dimension can optionally be non-recurrent if `non_recurrent_fn` is specified.
  """

  def __init__(self, num_units, tied=False, non_recurrent_fn=None):
    super(Grid2GRUCell, self).__init__(num_units=num_units, num_dims=2,
                                       input_dims=0, output_dims=0, priority_dims=0, tied=tied,
                                       non_recurrent_dims=None if non_recurrent_fn is None else 0,
                                       cell_fn=lambda n, i: rnn_cell.GRUCell(num_units=n, input_size=i),
                                       non_recurrent_fn=non_recurrent_fn)

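# Usage sketch for the specialized cells above (illustrative only, not part of
# the original file; assumes `import tensorflow as tf` and placeholder values
# for batch_size and input_size):
#
#   cell = Grid2LSTMCell(num_units=8)
#   x = tf.placeholder(tf.float32, [batch_size, input_size])
#   s = tf.placeholder(tf.float32, [batch_size, cell.state_size])
#   output, new_state = cell(x, s)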
"""
|
||||
Helpers
|
||||
"""
|
||||
|
||||
_GridRNNDimension = namedtuple('_GridRNNDimension', ['idx', 'is_input', 'is_output', 'is_priority', 'non_recurrent_fn'])
|
||||
|
||||
_GridRNNConfig = namedtuple('_GridRNNConfig', ['num_dims', 'dims',
|
||||
'inputs', 'outputs', 'recurrents',
|
||||
'priority', 'non_priority', 'tied', 'num_units'])
|
||||
|
||||
|
||||
def _parse_rnn_config(num_dims, ls_input_dims, ls_output_dims, ls_priority_dims, ls_non_recurrent_dims,
|
||||
non_recurrent_fn, tied, num_units):
|
||||
def check_dim_list(ls):
|
||||
if ls is None:
|
||||
ls = []
|
||||
if not isinstance(ls, (list, tuple)):
|
||||
ls = [ls]
|
||||
ls = sorted(set(ls))
|
||||
if any(_ < 0 or _ >= num_dims for _ in ls):
|
||||
raise ValueError('Invalid dims: {}. Must be in [0, {})'.format(ls, num_dims))
|
||||
return ls
|
||||
|
||||
input_dims = check_dim_list(ls_input_dims)
|
||||
output_dims = check_dim_list(ls_output_dims)
|
||||
priority_dims = check_dim_list(ls_priority_dims)
|
||||
non_recurrent_dims = check_dim_list(ls_non_recurrent_dims)
|
||||
|
||||
rnn_dims = []
|
||||
for i in range(num_dims):
|
||||
rnn_dims.append(_GridRNNDimension(idx=i, is_input=(i in input_dims), is_output=(i in output_dims),
|
||||
is_priority=(i in priority_dims),
|
||||
non_recurrent_fn=non_recurrent_fn if i in non_recurrent_dims else None))
|
||||
return _GridRNNConfig(num_dims=num_dims, dims=rnn_dims, inputs=input_dims, outputs=output_dims,
|
||||
recurrents=[x for x in range(num_dims) if x not in non_recurrent_dims],
|
||||
priority=priority_dims,
|
||||
non_priority=[x for x in range(num_dims) if x not in priority_dims],
|
||||
tied=tied, num_units=num_units)
|
||||
|
||||
|
||||
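# Example (illustrative sketch, not part of the original file): the arguments
# passed by the 2D cells above, e.g. Grid2BasicLSTMCell(num_units=8), parse to:
#
#   conf = _parse_rnn_config(2, 0, 0, 0, None, nn.relu, False, 8)
#   conf.inputs, conf.outputs          # => [0], [0]
#   conf.priority, conf.non_priority   # => [0], [1]
#   conf.recurrents                    # => [0, 1]  (no non-recurrent dims)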
def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state, first_call):
  """Propagates through all the cells in dim_indices dimensions."""
  if len(dim_indices) == 0:
    return

  # Because of the way RNNCells are implemented, we take the last dimension (H_{N-1}) out
  # and feed it as the state of the RNN cell (in `last_dim_output`).
  # The inputs of the cell (H_0 to H_{N-2}) are concatenated into `cell_inputs`.
  if conf.num_dims > 1:
    ls_cell_inputs = [None] * (conf.num_dims - 1)
    for d in conf.dims[:-1]:
      ls_cell_inputs[d.idx] = new_output[d.idx] if new_output[d.idx] is not None else m_prev[d.idx]
    cell_inputs = array_ops.concat(1, ls_cell_inputs)
  else:
    cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0], m_prev[0].dtype)

  last_dim_output = new_output[-1] if new_output[-1] is not None else m_prev[-1]

  for i in dim_indices:
    d = conf.dims[i]
    if d.non_recurrent_fn:
      linear_args = array_ops.concat(1, [cell_inputs, last_dim_output]) if conf.num_dims > 1 else last_dim_output
      with vs.variable_scope('non_recurrent' if conf.tied else 'non_recurrent/cell_{}'.format(i)):
        if conf.tied and not (first_call and i == dim_indices[0]):
          vs.get_variable_scope().reuse_variables()
        new_output[d.idx] = layers.fully_connected(linear_args, num_output_units=conf.num_units,
                                                   activation_fn=d.non_recurrent_fn,
                                                   weight_init=vs.get_variable_scope().initializer or
                                                   layers.initializers.xavier_initializer)
    else:
      if c_prev[i] is not None:
        cell_state = array_ops.concat(1, [c_prev[i], last_dim_output])
      else:
        # for GRU/RNN, the state is just the previous output
        cell_state = last_dim_output

      with vs.variable_scope('recurrent' if conf.tied else 'recurrent/cell_{}'.format(i)):
        if conf.tied and not (first_call and i == dim_indices[0]):
          vs.get_variable_scope().reuse_variables()
        new_output[d.idx], new_state[d.idx] = cell(cell_inputs, cell_state)
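# Note on propagation order (illustrative, not part of the original file): for a
# 2D cell with priority=[0] and non_priority=[1], __call__ runs
# _propagate([1], ...) first and _propagate([0], ...) second, so the priority
# dimension is computed last and sees the freshly updated outputs of the others.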
@ -87,10 +87,16 @@ def optimize_loss(loss,
    loss = control_flow_ops.with_dependencies([loss_averages_op], loss)

  # Learning rate variable, with possible decay.
  lr = vs.get_variable("learning_rate",
                       [],
                       trainable=False,
                       initializer=init_ops.constant_initializer(learning_rate))
  if isinstance(learning_rate, ops.Tensor) and len(learning_rate.get_shape()) == 0:
    lr = learning_rate
  elif isinstance(learning_rate, float):
    lr = vs.get_variable("learning_rate",
                         [],
                         trainable=False,
                         initializer=init_ops.constant_initializer(learning_rate))
  else:
    raise ValueError("Learning rate should be 0d Tensor or float. Got %s" %
                     str(learning_rate))
  if learning_rate_decay_fn is not None:
    lr = learning_rate_decay_fn(lr, global_step)

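# Hedged usage sketch (not part of this diff; the string optimizer name and the
# decay helper below are assumptions about the surrounding optimize_loss API):
#
#   decay_fn = lambda lr, step: tf.train.exponential_decay(lr, step, 10000, 0.96)
#   train_op = optimize_loss(loss, global_step, learning_rate=0.1,
#                            optimizer="SGD", learning_rate_decay_fn=decay_fn)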
@ -149,3 +155,4 @@ def optimize_loss(loss,
  train_tensor = control_flow_ops.with_dependencies([grad_updates], final_loss)

  return train_tensor

@ -88,3 +88,4 @@ class OptimizersTest(tf.test.TestCase):

if __name__ == "__main__":
  tf.test.main()

tensorflow/contrib/learn/BUILD (new file, 259 lines)
@ -0,0 +1,259 @@
# Description:
#   Contains TF Learn (aka Scikit Flow) sub-project with high level tensorflow API.

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

package(default_visibility = ["//tensorflow:__subpackages__"])

filegroup(
    name = "datasets",
    srcs = glob(["**/*.csv"]),
)

py_library(
    name = "learn",
    srcs = glob([
        "python/learn/**/*.py",
    ]) + [
        "__init__.py",
        "python/__init__.py",
    ],
    data = [":datasets"],
    srcs_version = "PY2AND3",
    deps = ["//tensorflow/python:framework"],
)

py_test(
    name = "test_base",
    size = "medium",
    srcs = ["python/learn/tests/test_base.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_custom_decay",
    size = "small",
    srcs = ["python/learn/tests/test_custom_decay.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_data_feeder",
    size = "small",
    srcs = ["python/learn/tests/test_data_feeder.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_early_stopping",
    size = "medium",
    srcs = ["python/learn/tests/test_early_stopping.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_estimators",
    size = "small",
    srcs = ["python/learn/tests/test_estimators.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_grid_search",
    size = "small",
    srcs = ["python/learn/tests/test_grid_search.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_io",
    size = "small",
    srcs = ["python/learn/tests/test_io.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_multioutput",
    size = "small",
    srcs = ["python/learn/tests/test_multioutput.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_nonlinear",
    size = "medium",
    srcs = ["python/learn/tests/test_nonlinear.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_regression",
    size = "small",
    srcs = ["python/learn/tests/test_regression.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_saver",
    size = "small",
    srcs = ["python/learn/tests/test_saver.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_ops",
    size = "small",
    srcs = ["python/learn/ops/tests/test_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_dropout_ops",
    size = "small",
    srcs = ["python/learn/ops/tests/test_dropout_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_seq2seq_ops",
    size = "small",
    srcs = ["python/learn/ops/tests/test_seq2seq_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_categorical",
    size = "small",
    srcs = ["python/learn/preprocessing/tests/test_categorical.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_categorical_vocabulary",
    size = "small",
    srcs = ["python/learn/preprocessing/tests/test_categorical_vocabulary.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_text",
    size = "small",
    srcs = ["python/learn/preprocessing/tests/test_text.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
    ],
)

filegroup(
    name = "all_files",
    srcs = glob(
        ["**/*"],
        exclude = [
            "**/METADATA",
            "**/OWNERS",
        ],
    ),
    visibility = ["//tensorflow:__subpackages__"],
)
tensorflow/contrib/learn/__init__.py (new file, 21 lines)
@ -0,0 +1,21 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.learn.python.learn import *

tensorflow/contrib/learn/python/__init__.py (new file, 20 lines)
@ -0,0 +1,20 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.learn.python.learn import *
@ -1,7 +1,6 @@
|Travis-CI Build Status| |Codecov Status| |License| |PyPI version| |Join the chat at
https://gitter.im/tensorflow/skflow|
|License| |Join the chat at [https://gitter.im/tensorflow/skflow](https://gitter.im/tensorflow/skflow)|

Scikit Flow
TF Learn (aka Scikit Flow)
===========

This is a simplified interface for TensorFlow, to get people started on predictive analytics and data mining.
@ -13,7 +12,7 @@ Why *TensorFlow*?
- TensorFlow provides a good backbone for building different shapes of machine learning applications.
- It will continue to evolve both in the distributed direction and as general pipelining machinery.

Why *Scikit Flow*?
Why *TensorFlow Learn* (Scikit Flow)?
-----------------
- To smooth the transition from the Scikit Learn world of one-liner machine learning into the more open world of building different shapes of ML models. You can start by using fit/predict and slide into TensorFlow APIs as you are getting comfortable.
- To provide a set of reference models that would be easy to integrate with existing code.
@ -21,25 +20,10 @@ Why *Scikit Flow*?
Installation
============

Dependencies
-----------
- Python: 2.7, 3.4+
- Scikit learn: 0.16, 0.17, 0.18+
- Tensorflow: 0.7+
Optionally you can install Scikit Learn and Pandas for additional functionality.

First, you need to make sure you have `TensorFlow <https://github.com/tensorflow/tensorflow#installation>`__ and `Scikit Learn <http://scikit-learn.org/stable/install.html>`__ installed.
Then you can simply import `learn` via `from tensorflow.contrib import learn` or use `tf.contrib.learn`.

Run the following to install the stable version from PyPI:

.. code:: bash

    pip install skflow

Or run the following to install the development version from GitHub:

.. code:: bash

    pip install git+git://github.com/tensorflow/skflow.git

Tutorial
--------
@ -57,13 +41,13 @@ Community
---------
- Twitter `#skflow <https://twitter.com/search?q=skflow&src=typd>`__.
- StackOverflow with `skflow tag <http://stackoverflow.com/questions/tagged/skflow>`__ for questions and struggles.
- Github `issues <https://github.com/tensorflow/skflow/issues>`__ for technical discussions and feature requests.
- Github `issues <https://github.com/tensorflow/tensorflow/issues>`__ for technical discussions and feature requests.
- `Gitter channel <https://gitter.im/tensorflow/skflow>`__ for non-trivial discussions.

Usage
-----

Below are a few simple examples of the API. For more examples, please see `examples <https://github.com/tensorflow/skflow/tree/master/examples>`__.
Below are a few simple examples of the API. For more examples, please see `examples <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/skflow>`__.

General tips
~~~~~~~~~~~~
@ -79,11 +63,10 @@ Simple linear classification:

.. code:: python

    import skflow
    from sklearn import datasets, metrics

    iris = datasets.load_iris()
    classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, iris.target)
    score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))
    print("Accuracy: %f" % score)
@ -95,12 +78,11 @@ Simple linear regression:

.. code:: python

    import skflow
    from sklearn import datasets, metrics, preprocessing

    boston = datasets.load_boston()
    X = preprocessing.StandardScaler().fit_transform(boston.data)
    regressor = skflow.TensorFlowLinearRegressor()
    regressor = learn.TensorFlowLinearRegressor()
    regressor.fit(X, boston.target)
    score = metrics.mean_squared_error(regressor.predict(X), boston.target)
    print("MSE: %f" % score)
@ -112,11 +94,10 @@ Example of 3 layer network with 10, 20 and 10 hidden units respectively:

.. code:: python

    import skflow
    from sklearn import datasets, metrics

    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier.fit(iris.data, iris.target)
    score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))
    print("Accuracy: %f" % score)
@ -128,17 +109,16 @@ Example of how to pass a custom model to the TensorFlowEstimator:

.. code:: python

    import skflow
    from sklearn import datasets, metrics

    iris = datasets.load_iris()

    def my_model(X, y):
        """This is DNN with 10, 20, 10 hidden layers, and dropout of 0.5 probability."""
        layers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.5)
        return skflow.models.logistic_regression(layers, y)
        layers = learn.ops.dnn(X, [10, 20, 10], keep_prob=0.5)
        return learn.models.logistic_regression(layers, y)

    classifier = skflow.TensorFlowEstimator(model_fn=my_model, n_classes=3)
    classifier = learn.TensorFlowEstimator(model_fn=my_model, n_classes=3)
    classifier.fit(iris.data, iris.target)
    score = metrics.accuracy_score(iris.target, classifier.predict(iris.data))
    print("Accuracy: %f" % score)
@ -146,15 +126,13 @@ Example of how to pass a custom model to the TensorFlowEstimator:
Saving / Restoring models
~~~~~~~~~~~~~~~~~~~~~~~~~

Each estimator has a ``save`` method which takes a folder path where all model information will be saved. For restoring you can just call ``skflow.TensorFlowEstimator.restore(path)`` and it will return an object of your class.
Each estimator has a ``save`` method which takes a folder path where all model information will be saved. For restoring you can just call ``learn.TensorFlowEstimator.restore(path)`` and it will return an object of your class.

Some example code:

.. code:: python

    import skflow

    classifier = skflow.TensorFlowLinearRegression()
    classifier = learn.TensorFlowLinearRegression()
    classifier.fit(...)
    classifier.save('/tmp/tf_examples/my_model_1/')

@ -168,7 +146,7 @@ To get nice visualizations and summaries you can use ``logdir`` parameter on ``fit``

.. code:: python

    classifier = skflow.TensorFlowLinearRegression()
    classifier = learn.TensorFlowLinearRegression()
    classifier.fit(X, y, logdir='/tmp/tf_examples/my_model_1/')

Then run the following command in the command line:
@ -186,7 +164,7 @@ Loss visualization: |Text classification RNN Loss|

More examples
-------------

See examples folder for:
See `examples folder <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/skflow>`__ for:

- Easy way to handle categorical variables - words are just an example of categorical variable.
- Text Classification - see examples for RNN, CNN on word and characters.
@ -194,10 +172,6 @@ See examples folder for:
- Images (CNNs) - see example for digit recognition.
- More & deeper - different examples showing DNNs and CNNs

.. |Travis-CI Build Status| image:: https://travis-ci.org/tensorflow/skflow.svg?branch=master
   :target: https://travis-ci.org/tensorflow/skflow
.. |Codecov Status| image:: https://codecov.io/github/tensorflow/skflow/coverage.svg?precision=2
   :target: https://codecov.io/github/tensorflow/skflow
.. |License| image:: https://img.shields.io/badge/license-Apache%202.0-blue.svg
   :target: http://www.apache.org/licenses/LICENSE-2.0.html
.. |Join the chat at https://gitter.im/tensorflow/skflow| image:: https://badges.gitter.im/Join%20Chat.svg
@ -1,4 +1,4 @@
"""Scikit Flow Addons."""
"""Main Scikit Flow module."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -12,8 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.skflow.python.skflow.addons.config_addon import ConfigAddon
import numpy as np

from tensorflow.contrib.learn.python.learn.io import *
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import models
tensorflow/contrib/learn/python/learn/datasets/__init__.py (new file, 56 lines)
@ -0,0 +1,56 @@
"""Module includes reference datasets and utilities to load datasets."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import collections
from os import path

import numpy as np

from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets import mnist

# Export load_iris and load_boston.
load_iris = base.load_iris
load_boston = base.load_boston

# List of all available datasets.
# Note, currently they may return different types.
DATASETS = {
    # Returns base.Dataset.
    'iris': base.load_iris,
    'boston': base.load_boston,
    # Returns mnist.Dataset.
    'mnist': mnist.load_mnist,
}


def load_dataset(name):
  """Loads dataset by name.

  Args:
    name: Name of the dataset to load.

  Returns:
    Features and targets for given dataset. Can be numpy or iterator.
  """
  if name not in DATASETS:
    raise ValueError("Name of dataset is not found: %s" % name)
  return DATASETS[name]()

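# Usage sketch for load_dataset (illustrative, not part of the commit; assumes
# the module is importable under this path):
#
#   from tensorflow.contrib.learn.python.learn import datasets
#   iris = datasets.load_dataset('iris')       # same as datasets.load_iris()
#   iris.data.shape, iris.target.shape         # => (150, 4), (150,)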
tensorflow/contrib/learn/python/learn/datasets/base.py (new file, 87 lines)
@ -0,0 +1,87 @@
"""Base utilities for loading datasets."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import collections
import os
from os import path
import tempfile
from six.moves import urllib

import numpy as np
from tensorflow.python.platform.default import _gfile as gfile


Dataset = collections.namedtuple('Dataset', ['data', 'target'])


def load_csv(filename, target_dtype):
  with gfile.Open(filename) as csv_file:
    data_file = csv.reader(csv_file)
    header = next(data_file)
    n_samples = int(header[0])
    n_features = int(header[1])
    target_names = np.array(header[2:])
    data = np.empty((n_samples, n_features))
    target = np.empty((n_samples,), dtype=np.int)

    for i, ir in enumerate(data_file):
      data[i] = np.asarray(ir[:-1], dtype=np.float64)
      target[i] = np.asarray(ir[-1], dtype=target_dtype)

  return Dataset(data=data, target=target)


def load_iris():
  """Load Iris dataset.

  Returns:
    Dataset object containing data in-memory.
  """
  module_path = path.dirname(__file__)
  return load_csv(path.join(module_path, 'data', 'iris.csv'),
                  target_dtype=np.int)


def load_boston():
  """Load Boston housing dataset.

  Returns:
    Dataset object containing data in-memory.
  """
  module_path = path.dirname(__file__)
  return load_csv(path.join(module_path, 'data', 'boston_house_prices.csv'),
                  target_dtype=np.float)


def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here."""
  if not gfile.Exists(work_directory):
    gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not gfile.Exists(filepath):
    with tempfile.NamedTemporaryFile() as tmpfile:
      temp_file_name = tmpfile.name
      urllib.request.urlretrieve(source_url, temp_file_name)
      gfile.Copy(temp_file_name, filepath)
      with gfile.GFile(filepath) as f:
        size = f.Size()
      print('Successfully downloaded', filename, size, 'bytes.')
  return filepath

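# Usage sketch for maybe_download (illustrative, not part of the commit; the
# filename, directory, and URL are placeholders):
#
#   filepath = maybe_download('train-images-idx3-ubyte.gz', '/tmp/mnist',
#                             'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')
#   # A second call finds the cached copy and skips the download.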
@ -0,0 +1,507 @@
506,13,"CRIM","ZN","INDUS","CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","B","LSTAT","MEDV"
0.00632,18,2.31,0,0.538,6.575,65.2,4.09,1,296,15.3,396.9,4.98,24
0.02731,0,7.07,0,0.469,6.421,78.9,4.9671,2,242,17.8,396.9,9.14,21.6
0.02729,0,7.07,0,0.469,7.185,61.1,4.9671,2,242,17.8,392.83,4.03,34.7
0.03237,0,2.18,0,0.458,6.998,45.8,6.0622,3,222,18.7,394.63,2.94,33.4
0.06905,0,2.18,0,0.458,7.147,54.2,6.0622,3,222,18.7,396.9,5.33,36.2
0.02985,0,2.18,0,0.458,6.43,58.7,6.0622,3,222,18.7,394.12,5.21,28.7
0.08829,12.5,7.87,0,0.524,6.012,66.6,5.5605,5,311,15.2,395.6,12.43,22.9
0.14455,12.5,7.87,0,0.524,6.172,96.1,5.9505,5,311,15.2,396.9,19.15,27.1
0.21124,12.5,7.87,0,0.524,5.631,100,6.0821,5,311,15.2,386.63,29.93,16.5
0.17004,12.5,7.87,0,0.524,6.004,85.9,6.5921,5,311,15.2,386.71,17.1,18.9
0.22489,12.5,7.87,0,0.524,6.377,94.3,6.3467,5,311,15.2,392.52,20.45,15
0.11747,12.5,7.87,0,0.524,6.009,82.9,6.2267,5,311,15.2,396.9,13.27,18.9
0.09378,12.5,7.87,0,0.524,5.889,39,5.4509,5,311,15.2,390.5,15.71,21.7
0.62976,0,8.14,0,0.538,5.949,61.8,4.7075,4,307,21,396.9,8.26,20.4
0.63796,0,8.14,0,0.538,6.096,84.5,4.4619,4,307,21,380.02,10.26,18.2
0.62739,0,8.14,0,0.538,5.834,56.5,4.4986,4,307,21,395.62,8.47,19.9
1.05393,0,8.14,0,0.538,5.935,29.3,4.4986,4,307,21,386.85,6.58,23.1
0.7842,0,8.14,0,0.538,5.99,81.7,4.2579,4,307,21,386.75,14.67,17.5
0.80271,0,8.14,0,0.538,5.456,36.6,3.7965,4,307,21,288.99,11.69,20.2
0.7258,0,8.14,0,0.538,5.727,69.5,3.7965,4,307,21,390.95,11.28,18.2
1.25179,0,8.14,0,0.538,5.57,98.1,3.7979,4,307,21,376.57,21.02,13.6
0.85204,0,8.14,0,0.538,5.965,89.2,4.0123,4,307,21,392.53,13.83,19.6
1.23247,0,8.14,0,0.538,6.142,91.7,3.9769,4,307,21,396.9,18.72,15.2
0.98843,0,8.14,0,0.538,5.813,100,4.0952,4,307,21,394.54,19.88,14.5
0.75026,0,8.14,0,0.538,5.924,94.1,4.3996,4,307,21,394.33,16.3,15.6
0.84054,0,8.14,0,0.538,5.599,85.7,4.4546,4,307,21,303.42,16.51,13.9
0.67191,0,8.14,0,0.538,5.813,90.3,4.682,4,307,21,376.88,14.81,16.6
0.95577,0,8.14,0,0.538,6.047,88.8,4.4534,4,307,21,306.38,17.28,14.8
0.77299,0,8.14,0,0.538,6.495,94.4,4.4547,4,307,21,387.94,12.8,18.4
1.00245,0,8.14,0,0.538,6.674,87.3,4.239,4,307,21,380.23,11.98,21
1.13081,0,8.14,0,0.538,5.713,94.1,4.233,4,307,21,360.17,22.6,12.7
1.35472,0,8.14,0,0.538,6.072,100,4.175,4,307,21,376.73,13.04,14.5
1.38799,0,8.14,0,0.538,5.95,82,3.99,4,307,21,232.6,27.71,13.2
1.15172,0,8.14,0,0.538,5.701,95,3.7872,4,307,21,358.77,18.35,13.1
1.61282,0,8.14,0,0.538,6.096,96.9,3.7598,4,307,21,248.31,20.34,13.5
0.06417,0,5.96,0,0.499,5.933,68.2,3.3603,5,279,19.2,396.9,9.68,18.9
0.09744,0,5.96,0,0.499,5.841,61.4,3.3779,5,279,19.2,377.56,11.41,20
0.08014,0,5.96,0,0.499,5.85,41.5,3.9342,5,279,19.2,396.9,8.77,21
0.17505,0,5.96,0,0.499,5.966,30.2,3.8473,5,279,19.2,393.43,10.13,24.7
0.02763,75,2.95,0,0.428,6.595,21.8,5.4011,3,252,18.3,395.63,4.32,30.8
0.03359,75,2.95,0,0.428,7.024,15.8,5.4011,3,252,18.3,395.62,1.98,34.9
0.12744,0,6.91,0,0.448,6.77,2.9,5.7209,3,233,17.9,385.41,4.84,26.6
0.1415,0,6.91,0,0.448,6.169,6.6,5.7209,3,233,17.9,383.37,5.81,25.3
0.15936,0,6.91,0,0.448,6.211,6.5,5.7209,3,233,17.9,394.46,7.44,24.7
0.12269,0,6.91,0,0.448,6.069,40,5.7209,3,233,17.9,389.39,9.55,21.2
0.17142,0,6.91,0,0.448,5.682,33.8,5.1004,3,233,17.9,396.9,10.21,19.3
0.18836,0,6.91,0,0.448,5.786,33.3,5.1004,3,233,17.9,396.9,14.15,20
0.22927,0,6.91,0,0.448,6.03,85.5,5.6894,3,233,17.9,392.74,18.8,16.6
0.25387,0,6.91,0,0.448,5.399,95.3,5.87,3,233,17.9,396.9,30.81,14.4
0.21977,0,6.91,0,0.448,5.602,62,6.0877,3,233,17.9,396.9,16.2,19.4
0.08873,21,5.64,0,0.439,5.963,45.7,6.8147,4,243,16.8,395.56,13.45,19.7
0.04337,21,5.64,0,0.439,6.115,63,6.8147,4,243,16.8,393.97,9.43,20.5
0.0536,21,5.64,0,0.439,6.511,21.1,6.8147,4,243,16.8,396.9,5.28,25
0.04981,21,5.64,0,0.439,5.998,21.4,6.8147,4,243,16.8,396.9,8.43,23.4
0.0136,75,4,0,0.41,5.888,47.6,7.3197,3,469,21.1,396.9,14.8,18.9
0.01311,90,1.22,0,0.403,7.249,21.9,8.6966,5,226,17.9,395.93,4.81,35.4
0.02055,85,0.74,0,0.41,6.383,35.7,9.1876,2,313,17.3,396.9,5.77,24.7
0.01432,100,1.32,0,0.411,6.816,40.5,8.3248,5,256,15.1,392.9,3.95,31.6
0.15445,25,5.13,0,0.453,6.145,29.2,7.8148,8,284,19.7,390.68,6.86,23.3
0.10328,25,5.13,0,0.453,5.927,47.2,6.932,8,284,19.7,396.9,9.22,19.6
0.14932,25,5.13,0,0.453,5.741,66.2,7.2254,8,284,19.7,395.11,13.15,18.7
0.17171,25,5.13,0,0.453,5.966,93.4,6.8185,8,284,19.7,378.08,14.44,16
0.11027,25,5.13,0,0.453,6.456,67.8,7.2255,8,284,19.7,396.9,6.73,22.2
0.1265,25,5.13,0,0.453,6.762,43.4,7.9809,8,284,19.7,395.58,9.5,25
0.01951,17.5,1.38,0,0.4161,7.104,59.5,9.2229,3,216,18.6,393.24,8.05,33
0.03584,80,3.37,0,0.398,6.29,17.8,6.6115,4,337,16.1,396.9,4.67,23.5
0.04379,80,3.37,0,0.398,5.787,31.1,6.6115,4,337,16.1,396.9,10.24,19.4
0.05789,12.5,6.07,0,0.409,5.878,21.4,6.498,4,345,18.9,396.21,8.1,22
0.13554,12.5,6.07,0,0.409,5.594,36.8,6.498,4,345,18.9,396.9,13.09,17.4
0.12816,12.5,6.07,0,0.409,5.885,33,6.498,4,345,18.9,396.9,8.79,20.9
0.08826,0,10.81,0,0.413,6.417,6.6,5.2873,4,305,19.2,383.73,6.72,24.2
0.15876,0,10.81,0,0.413,5.961,17.5,5.2873,4,305,19.2,376.94,9.88,21.7
0.09164,0,10.81,0,0.413,6.065,7.8,5.2873,4,305,19.2,390.91,5.52,22.8
0.19539,0,10.81,0,0.413,6.245,6.2,5.2873,4,305,19.2,377.17,7.54,23.4
0.07896,0,12.83,0,0.437,6.273,6,4.2515,5,398,18.7,394.92,6.78,24.1
0.09512,0,12.83,0,0.437,6.286,45,4.5026,5,398,18.7,383.23,8.94,21.4
0.10153,0,12.83,0,0.437,6.279,74.5,4.0522,5,398,18.7,373.66,11.97,20
0.08707,0,12.83,0,0.437,6.14,45.8,4.0905,5,398,18.7,386.96,10.27,20.8
0.05646,0,12.83,0,0.437,6.232,53.7,5.0141,5,398,18.7,386.4,12.34,21.2
0.08387,0,12.83,0,0.437,5.874,36.6,4.5026,5,398,18.7,396.06,9.1,20.3
0.04113,25,4.86,0,0.426,6.727,33.5,5.4007,4,281,19,396.9,5.29,28
0.04462,25,4.86,0,0.426,6.619,70.4,5.4007,4,281,19,395.63,7.22,23.9
0.03659,25,4.86,0,0.426,6.302,32.2,5.4007,4,281,19,396.9,6.72,24.8
0.03551,25,4.86,0,0.426,6.167,46.7,5.4007,4,281,19,390.64,7.51,22.9
0.05059,0,4.49,0,0.449,6.389,48,4.7794,3,247,18.5,396.9,9.62,23.9
0.05735,0,4.49,0,0.449,6.63,56.1,4.4377,3,247,18.5,392.3,6.53,26.6
0.05188,0,4.49,0,0.449,6.015,45.1,4.4272,3,247,18.5,395.99,12.86,22.5
0.07151,0,4.49,0,0.449,6.121,56.8,3.7476,3,247,18.5,395.15,8.44,22.2
0.0566,0,3.41,0,0.489,7.007,86.3,3.4217,2,270,17.8,396.9,5.5,23.6
0.05302,0,3.41,0,0.489,7.079,63.1,3.4145,2,270,17.8,396.06,5.7,28.7
0.04684,0,3.41,0,0.489,6.417,66.1,3.0923,2,270,17.8,392.18,8.81,22.6
0.03932,0,3.41,0,0.489,6.405,73.9,3.0921,2,270,17.8,393.55,8.2,22
0.04203,28,15.04,0,0.464,6.442,53.6,3.6659,4,270,18.2,395.01,8.16,22.9
0.02875,28,15.04,0,0.464,6.211,28.9,3.6659,4,270,18.2,396.33,6.21,25
0.04294,28,15.04,0,0.464,6.249,77.3,3.615,4,270,18.2,396.9,10.59,20.6
0.12204,0,2.89,0,0.445,6.625,57.8,3.4952,2,276,18,357.98,6.65,28.4
0.11504,0,2.89,0,0.445,6.163,69.6,3.4952,2,276,18,391.83,11.34,21.4
0.12083,0,2.89,0,0.445,8.069,76,3.4952,2,276,18,396.9,4.21,38.7
0.08187,0,2.89,0,0.445,7.82,36.9,3.4952,2,276,18,393.53,3.57,43.8
0.0686,0,2.89,0,0.445,7.416,62.5,3.4952,2,276,18,396.9,6.19,33.2
0.14866,0,8.56,0,0.52,6.727,79.9,2.7778,5,384,20.9,394.76,9.42,27.5
0.11432,0,8.56,0,0.52,6.781,71.3,2.8561,5,384,20.9,395.58,7.67,26.5
0.22876,0,8.56,0,0.52,6.405,85.4,2.7147,5,384,20.9,70.8,10.63,18.6
0.21161,0,8.56,0,0.52,6.137,87.4,2.7147,5,384,20.9,394.47,13.44,19.3
0.1396,0,8.56,0,0.52,6.167,90,2.421,5,384,20.9,392.69,12.33,20.1
0.13262,0,8.56,0,0.52,5.851,96.7,2.1069,5,384,20.9,394.05,16.47,19.5
0.1712,0,8.56,0,0.52,5.836,91.9,2.211,5,384,20.9,395.67,18.66,19.5
0.13117,0,8.56,0,0.52,6.127,85.2,2.1224,5,384,20.9,387.69,14.09,20.4
0.12802,0,8.56,0,0.52,6.474,97.1,2.4329,5,384,20.9,395.24,12.27,19.8
0.26363,0,8.56,0,0.52,6.229,91.2,2.5451,5,384,20.9,391.23,15.55,19.4
0.10793,0,8.56,0,0.52,6.195,54.4,2.7778,5,384,20.9,393.49,13,21.7
0.10084,0,10.01,0,0.547,6.715,81.6,2.6775,6,432,17.8,395.59,10.16,22.8
0.12329,0,10.01,0,0.547,5.913,92.9,2.3534,6,432,17.8,394.95,16.21,18.8
0.22212,0,10.01,0,0.547,6.092,95.4,2.548,6,432,17.8,396.9,17.09,18.7
0.14231,0,10.01,0,0.547,6.254,84.2,2.2565,6,432,17.8,388.74,10.45,18.5
0.17134,0,10.01,0,0.547,5.928,88.2,2.4631,6,432,17.8,344.91,15.76,18.3
0.13158,0,10.01,0,0.547,6.176,72.5,2.7301,6,432,17.8,393.3,12.04,21.2
0.15098,0,10.01,0,0.547,6.021,82.6,2.7474,6,432,17.8,394.51,10.3,19.2
0.13058,0,10.01,0,0.547,5.872,73.1,2.4775,6,432,17.8,338.63,15.37,20.4
0.14476,0,10.01,0,0.547,5.731,65.2,2.7592,6,432,17.8,391.5,13.61,19.3
0.06899,0,25.65,0,0.581,5.87,69.7,2.2577,2,188,19.1,389.15,14.37,22
0.07165,0,25.65,0,0.581,6.004,84.1,2.1974,2,188,19.1,377.67,14.27,20.3
0.09299,0,25.65,0,0.581,5.961,92.9,2.0869,2,188,19.1,378.09,17.93,20.5
0.15038,0,25.65,0,0.581,5.856,97,1.9444,2,188,19.1,370.31,25.41,17.3
0.09849,0,25.65,0,0.581,5.879,95.8,2.0063,2,188,19.1,379.38,17.58,18.8
0.16902,0,25.65,0,0.581,5.986,88.4,1.9929,2,188,19.1,385.02,14.81,21.4
0.38735,0,25.65,0,0.581,5.613,95.6,1.7572,2,188,19.1,359.29,27.26,15.7
0.25915,0,21.89,0,0.624,5.693,96,1.7883,4,437,21.2,392.11,17.19,16.2
0.32543,0,21.89,0,0.624,6.431,98.8,1.8125,4,437,21.2,396.9,15.39,18
0.88125,0,21.89,0,0.624,5.637,94.7,1.9799,4,437,21.2,396.9,18.34,14.3
0.34006,0,21.89,0,0.624,6.458,98.9,2.1185,4,437,21.2,395.04,12.6,19.2
1.19294,0,21.89,0,0.624,6.326,97.7,2.271,4,437,21.2,396.9,12.26,19.6
0.59005,0,21.89,0,0.624,6.372,97.9,2.3274,4,437,21.2,385.76,11.12,23
0.32982,0,21.89,0,0.624,5.822,95.4,2.4699,4,437,21.2,388.69,15.03,18.4
0.97617,0,21.89,0,0.624,5.757,98.4,2.346,4,437,21.2,262.76,17.31,15.6
0.55778,0,21.89,0,0.624,6.335,98.2,2.1107,4,437,21.2,394.67,16.96,18.1
0.32264,0,21.89,0,0.624,5.942,93.5,1.9669,4,437,21.2,378.25,16.9,17.4
0.35233,0,21.89,0,0.624,6.454,98.4,1.8498,4,437,21.2,394.08,14.59,17.1
0.2498,0,21.89,0,0.624,5.857,98.2,1.6686,4,437,21.2,392.04,21.32,13.3
0.54452,0,21.89,0,0.624,6.151,97.9,1.6687,4,437,21.2,396.9,18.46,17.8
0.2909,0,21.89,0,0.624,6.174,93.6,1.6119,4,437,21.2,388.08,24.16,14
1.62864,0,21.89,0,0.624,5.019,100,1.4394,4,437,21.2,396.9,34.41,14.4
3.32105,0,19.58,1,0.871,5.403,100,1.3216,5,403,14.7,396.9,26.82,13.4
4.0974,0,19.58,0,0.871,5.468,100,1.4118,5,403,14.7,396.9,26.42,15.6
2.77974,0,19.58,0,0.871,4.903,97.8,1.3459,5,403,14.7,396.9,29.29,11.8
2.37934,0,19.58,0,0.871,6.13,100,1.4191,5,403,14.7,172.91,27.8,13.8
2.15505,0,19.58,0,0.871,5.628,100,1.5166,5,403,14.7,169.27,16.65,15.6
2.36862,0,19.58,0,0.871,4.926,95.7,1.4608,5,403,14.7,391.71,29.53,14.6
2.33099,0,19.58,0,0.871,5.186,93.8,1.5296,5,403,14.7,356.99,28.32,17.8
2.73397,0,19.58,0,0.871,5.597,94.9,1.5257,5,403,14.7,351.85,21.45,15.4
1.6566,0,19.58,0,0.871,6.122,97.3,1.618,5,403,14.7,372.8,14.1,21.5
1.49632,0,19.58,0,0.871,5.404,100,1.5916,5,403,14.7,341.6,13.28,19.6
1.12658,0,19.58,1,0.871,5.012,88,1.6102,5,403,14.7,343.28,12.12,15.3
2.14918,0,19.58,0,0.871,5.709,98.5,1.6232,5,403,14.7,261.95,15.79,19.4
1.41385,0,19.58,1,0.871,6.129,96,1.7494,5,403,14.7,321.02,15.12,17
3.53501,0,19.58,1,0.871,6.152,82.6,1.7455,5,403,14.7,88.01,15.02,15.6
2.44668,0,19.58,0,0.871,5.272,94,1.7364,5,403,14.7,88.63,16.14,13.1
1.22358,0,19.58,0,0.605,6.943,97.4,1.8773,5,403,14.7,363.43,4.59,41.3
1.34284,0,19.58,0,0.605,6.066,100,1.7573,5,403,14.7,353.89,6.43,24.3
1.42502,0,19.58,0,0.871,6.51,100,1.7659,5,403,14.7,364.31,7.39,23.3
1.27346,0,19.58,1,0.605,6.25,92.6,1.7984,5,403,14.7,338.92,5.5,27
1.46336,0,19.58,0,0.605,7.489,90.8,1.9709,5,403,14.7,374.43,1.73,50
1.83377,0,19.58,1,0.605,7.802,98.2,2.0407,5,403,14.7,389.61,1.92,50
1.51902,0,19.58,1,0.605,8.375,93.9,2.162,5,403,14.7,388.45,3.32,50
2.24236,0,19.58,0,0.605,5.854,91.8,2.422,5,403,14.7,395.11,11.64,22.7
2.924,0,19.58,0,0.605,6.101,93,2.2834,5,403,14.7,240.16,9.81,25
2.01019,0,19.58,0,0.605,7.929,96.2,2.0459,5,403,14.7,369.3,3.7,50
1.80028,0,19.58,0,0.605,5.877,79.2,2.4259,5,403,14.7,227.61,12.14,23.8
2.3004,0,19.58,0,0.605,6.319,96.1,2.1,5,403,14.7,297.09,11.1,23.8
2.44953,0,19.58,0,0.605,6.402,95.2,2.2625,5,403,14.7,330.04,11.32,22.3
1.20742,0,19.58,0,0.605,5.875,94.6,2.4259,5,403,14.7,292.29,14.43,17.4
2.3139,0,19.58,0,0.605,5.88,97.3,2.3887,5,403,14.7,348.13,12.03,19.1
0.13914,0,4.05,0,0.51,5.572,88.5,2.5961,5,296,16.6,396.9,14.69,23.1
0.09178,0,4.05,0,0.51,6.416,84.1,2.6463,5,296,16.6,395.5,9.04,23.6
0.08447,0,4.05,0,0.51,5.859,68.7,2.7019,5,296,16.6,393.23,9.64,22.6
0.06664,0,4.05,0,0.51,6.546,33.1,3.1323,5,296,16.6,390.96,5.33,29.4
0.07022,0,4.05,0,0.51,6.02,47.2,3.5549,5,296,16.6,393.23,10.11,23.2
0.05425,0,4.05,0,0.51,6.315,73.4,3.3175,5,296,16.6,395.6,6.29,24.6
0.06642,0,4.05,0,0.51,6.86,74.4,2.9153,5,296,16.6,391.27,6.92,29.9
0.0578,0,2.46,0,0.488,6.98,58.4,2.829,3,193,17.8,396.9,5.04,37.2
0.06588,0,2.46,0,0.488,7.765,83.3,2.741,3,193,17.8,395.56,7.56,39.8
0.06888,0,2.46,0,0.488,6.144,62.2,2.5979,3,193,17.8,396.9,9.45,36.2
0.09103,0,2.46,0,0.488,7.155,92.2,2.7006,3,193,17.8,394.12,4.82,37.9
0.10008,0,2.46,0,0.488,6.563,95.6,2.847,3,193,17.8,396.9,5.68,32.5
0.08308,0,2.46,0,0.488,5.604,89.8,2.9879,3,193,17.8,391,13.98,26.4
0.06047,0,2.46,0,0.488,6.153,68.8,3.2797,3,193,17.8,387.11,13.15,29.6
0.05602,0,2.46,0,0.488,7.831,53.6,3.1992,3,193,17.8,392.63,4.45,50
0.07875,45,3.44,0,0.437,6.782,41.1,3.7886,5,398,15.2,393.87,6.68,32
0.12579,45,3.44,0,0.437,6.556,29.1,4.5667,5,398,15.2,382.84,4.56,29.8
0.0837,45,3.44,0,0.437,7.185,38.9,4.5667,5,398,15.2,396.9,5.39,34.9
0.09068,45,3.44,0,0.437,6.951,21.5,6.4798,5,398,15.2,377.68,5.1,37
0.06911,45,3.44,0,0.437,6.739,30.8,6.4798,5,398,15.2,389.71,4.69,30.5
0.08664,45,3.44,0,0.437,7.178,26.3,6.4798,5,398,15.2,390.49,2.87,36.4
0.02187,60,2.93,0,0.401,6.8,9.9,6.2196,1,265,15.6,393.37,5.03,31.1
0.01439,60,2.93,0,0.401,6.604,18.8,6.2196,1,265,15.6,376.7,4.38,29.1
0.01381,80,0.46,0,0.422,7.875,32,5.6484,4,255,14.4,394.23,2.97,50
0.04011,80,1.52,0,0.404,7.287,34.1,7.309,2,329,12.6,396.9,4.08,33.3
0.04666,80,1.52,0,0.404,7.107,36.6,7.309,2,329,12.6,354.31,8.61,30.3
0.03768,80,1.52,0,0.404,7.274,38.3,7.309,2,329,12.6,392.2,6.62,34.6
0.0315,95,1.47,0,0.403,6.975,15.3,7.6534,3,402,17,396.9,4.56,34.9
0.01778,95,1.47,0,0.403,7.135,13.9,7.6534,3,402,17,384.3,4.45,32.9
0.03445,82.5,2.03,0,0.415,6.162,38.4,6.27,2,348,14.7,393.77,7.43,24.1
0.02177,82.5,2.03,0,0.415,7.61,15.7,6.27,2,348,14.7,395.38,3.11,42.3
0.0351,95,2.68,0,0.4161,7.853,33.2,5.118,4,224,14.7,392.78,3.81,48.5
0.02009,95,2.68,0,0.4161,8.034,31.9,5.118,4,224,14.7,390.55,2.88,50
0.13642,0,10.59,0,0.489,5.891,22.3,3.9454,4,277,18.6,396.9,10.87,22.6
0.22969,0,10.59,0,0.489,6.326,52.5,4.3549,4,277,18.6,394.87,10.97,24.4
0.25199,0,10.59,0,0.489,5.783,72.7,4.3549,4,277,18.6,389.43,18.06,22.5
0.13587,0,10.59,1,0.489,6.064,59.1,4.2392,4,277,18.6,381.32,14.66,24.4
0.43571,0,10.59,1,0.489,5.344,100,3.875,4,277,18.6,396.9,23.09,20
0.17446,0,10.59,1,0.489,5.96,92.1,3.8771,4,277,18.6,393.25,17.27,21.7
0.37578,0,10.59,1,0.489,5.404,88.6,3.665,4,277,18.6,395.24,23.98,19.3
0.21719,0,10.59,1,0.489,5.807,53.8,3.6526,4,277,18.6,390.94,16.03,22.4
0.14052,0,10.59,0,0.489,6.375,32.3,3.9454,4,277,18.6,385.81,9.38,28.1
0.28955,0,10.59,0,0.489,5.412,9.8,3.5875,4,277,18.6,348.93,29.55,23.7
0.19802,0,10.59,0,0.489,6.182,42.4,3.9454,4,277,18.6,393.63,9.47,25
0.0456,0,13.89,1,0.55,5.888,56,3.1121,5,276,16.4,392.8,13.51,23.3
0.07013,0,13.89,0,0.55,6.642,85.1,3.4211,5,276,16.4,392.78,9.69,28.7
0.11069,0,13.89,1,0.55,5.951,93.8,2.8893,5,276,16.4,396.9,17.92,21.5
0.11425,0,13.89,1,0.55,6.373,92.4,3.3633,5,276,16.4,393.74,10.5,23
0.35809,0,6.2,1,0.507,6.951,88.5,2.8617,8,307,17.4,391.7,9.71,26.7
0.40771,0,6.2,1,0.507,6.164,91.3,3.048,8,307,17.4,395.24,21.46,21.7
0.62356,0,6.2,1,0.507,6.879,77.7,3.2721,8,307,17.4,390.39,9.93,27.5
0.6147,0,6.2,0,0.507,6.618,80.8,3.2721,8,307,17.4,396.9,7.6,30.1
0.31533,0,6.2,0,0.504,8.266,78.3,2.8944,8,307,17.4,385.05,4.14,44.8
0.52693,0,6.2,0,0.504,8.725,83,2.8944,8,307,17.4,382,4.63,50
0.38214,0,6.2,0,0.504,8.04,86.5,3.2157,8,307,17.4,387.38,3.13,37.6
0.41238,0,6.2,0,0.504,7.163,79.9,3.2157,8,307,17.4,372.08,6.36,31.6
0.29819,0,6.2,0,0.504,7.686,17,3.3751,8,307,17.4,377.51,3.92,46.7
0.44178,0,6.2,0,0.504,6.552,21.4,3.3751,8,307,17.4,380.34,3.76,31.5
0.537,0,6.2,0,0.504,5.981,68.1,3.6715,8,307,17.4,378.35,11.65,24.3
0.46296,0,6.2,0,0.504,7.412,76.9,3.6715,8,307,17.4,376.14,5.25,31.7
0.57529,0,6.2,0,0.507,8.337,73.3,3.8384,8,307,17.4,385.91,2.47,41.7
0.33147,0,6.2,0,0.507,8.247,70.4,3.6519,8,307,17.4,378.95,3.95,48.3
0.44791,0,6.2,1,0.507,6.726,66.5,3.6519,8,307,17.4,360.2,8.05,29
0.33045,0,6.2,0,0.507,6.086,61.5,3.6519,8,307,17.4,376.75,10.88,24
0.52058,0,6.2,1,0.507,6.631,76.5,4.148,8,307,17.4,388.45,9.54,25.1
0.51183,0,6.2,0,0.507,7.358,71.6,4.148,8,307,17.4,390.07,4.73,31.5
0.08244,30,4.93,0,0.428,6.481,18.5,6.1899,6,300,16.6,379.41,6.36,23.7
0.09252,30,4.93,0,0.428,6.606,42.2,6.1899,6,300,16.6,383.78,7.37,23.3
0.11329,30,4.93,0,0.428,6.897,54.3,6.3361,6,300,16.6,391.25,11.38,22
0.10612,30,4.93,0,0.428,6.095,65.1,6.3361,6,300,16.6,394.62,12.4,20.1
0.1029,30,4.93,0,0.428,6.358,52.9,7.0355,6,300,16.6,372.75,11.22,22.2
0.12757,30,4.93,0,0.428,6.393,7.8,7.0355,6,300,16.6,374.71,5.19,23.7
0.20608,22,5.86,0,0.431,5.593,76.5,7.9549,7,330,19.1,372.49,12.5,17.6
0.19133,22,5.86,0,0.431,5.605,70.2,7.9549,7,330,19.1,389.13,18.46,18.5
0.33983,22,5.86,0,0.431,6.108,34.9,8.0555,7,330,19.1,390.18,9.16,24.3
0.19657,22,5.86,0,0.431,6.226,79.2,8.0555,7,330,19.1,376.14,10.15,20.5
0.16439,22,5.86,0,0.431,6.433,49.1,7.8265,7,330,19.1,374.71,9.52,24.5
0.19073,22,5.86,0,0.431,6.718,17.5,7.8265,7,330,19.1,393.74,6.56,26.2
0.1403,22,5.86,0,0.431,6.487,13,7.3967,7,330,19.1,396.28,5.9,24.4
0.21409,22,5.86,0,0.431,6.438,8.9,7.3967,7,330,19.1,377.07,3.59,24.8
0.08221,22,5.86,0,0.431,6.957,6.8,8.9067,7,330,19.1,386.09,3.53,29.6
0.36894,22,5.86,0,0.431,8.259,8.4,8.9067,7,330,19.1,396.9,3.54,42.8
0.04819,80,3.64,0,0.392,6.108,32,9.2203,1,315,16.4,392.89,6.57,21.9
0.03548,80,3.64,0,0.392,5.876,19.1,9.2203,1,315,16.4,395.18,9.25,20.9
0.01538,90,3.75,0,0.394,7.454,34.2,6.3361,3,244,15.9,386.34,3.11,44
0.61154,20,3.97,0,0.647,8.704,86.9,1.801,5,264,13,389.7,5.12,50
0.66351,20,3.97,0,0.647,7.333,100,1.8946,5,264,13,383.29,7.79,36
0.65665,20,3.97,0,0.647,6.842,100,2.0107,5,264,13,391.93,6.9,30.1
0.54011,20,3.97,0,0.647,7.203,81.8,2.1121,5,264,13,392.8,9.59,33.8
0.53412,20,3.97,0,0.647,7.52,89.4,2.1398,5,264,13,388.37,7.26,43.1
0.52014,20,3.97,0,0.647,8.398,91.5,2.2885,5,264,13,386.86,5.91,48.8
0.82526,20,3.97,0,0.647,7.327,94.5,2.0788,5,264,13,393.42,11.25,31
0.55007,20,3.97,0,0.647,7.206,91.6,1.9301,5,264,13,387.89,8.1,36.5
0.76162,20,3.97,0,0.647,5.56,62.8,1.9865,5,264,13,392.4,10.45,22.8
0.7857,20,3.97,0,0.647,7.014,84.6,2.1329,5,264,13,384.07,14.79,30.7
0.57834,20,3.97,0,0.575,8.297,67,2.4216,5,264,13,384.54,7.44,50
0.5405,20,3.97,0,0.575,7.47,52.6,2.872,5,264,13,390.3,3.16,43.5
0.09065,20,6.96,1,0.464,5.92,61.5,3.9175,3,223,18.6,391.34,13.65,20.7
0.29916,20,6.96,0,0.464,5.856,42.1,4.429,3,223,18.6,388.65,13,21.1
0.16211,20,6.96,0,0.464,6.24,16.3,4.429,3,223,18.6,396.9,6.59,25.2
0.1146,20,6.96,0,0.464,6.538,58.7,3.9175,3,223,18.6,394.96,7.73,24.4
0.22188,20,6.96,1,0.464,7.691,51.8,4.3665,3,223,18.6,390.77,6.58,35.2
0.05644,40,6.41,1,0.447,6.758,32.9,4.0776,4,254,17.6,396.9,3.53,32.4
0.09604,40,6.41,0,0.447,6.854,42.8,4.2673,4,254,17.6,396.9,2.98,32
0.10469,40,6.41,1,0.447,7.267,49,4.7872,4,254,17.6,389.25,6.05,33.2
0.06127,40,6.41,1,0.447,6.826,27.6,4.8628,4,254,17.6,393.45,4.16,33.1
0.07978,40,6.41,0,0.447,6.482,32.1,4.1403,4,254,17.6,396.9,7.19,29.1
0.21038,20,3.33,0,0.4429,6.812,32.2,4.1007,5,216,14.9,396.9,4.85,35.1
0.03578,20,3.33,0,0.4429,7.82,64.5,4.6947,5,216,14.9,387.31,3.76,45.4
0.03705,20,3.33,0,0.4429,6.968,37.2,5.2447,5,216,14.9,392.23,4.59,35.4
0.06129,20,3.33,1,0.4429,7.645,49.7,5.2119,5,216,14.9,377.07,3.01,46
0.01501,90,1.21,1,0.401,7.923,24.8,5.885,1,198,13.6,395.52,3.16,50
0.00906,90,2.97,0,0.4,7.088,20.8,7.3073,1,285,15.3,394.72,7.85,32.2
0.01096,55,2.25,0,0.389,6.453,31.9,7.3073,1,300,15.3,394.72,8.23,22
0.01965,80,1.76,0,0.385,6.23,31.5,9.0892,1,241,18.2,341.6,12.93,20.1
0.03871,52.5,5.32,0,0.405,6.209,31.3,7.3172,6,293,16.6,396.9,7.14,23.2
0.0459,52.5,5.32,0,0.405,6.315,45.6,7.3172,6,293,16.6,396.9,7.6,22.3
0.04297,52.5,5.32,0,0.405,6.565,22.9,7.3172,6,293,16.6,371.72,9.51,24.8
0.03502,80,4.95,0,0.411,6.861,27.9,5.1167,4,245,19.2,396.9,3.33,28.5
0.07886,80,4.95,0,0.411,7.148,27.7,5.1167,4,245,19.2,396.9,3.56,37.3
0.03615,80,4.95,0,0.411,6.63,23.4,5.1167,4,245,19.2,396.9,4.7,27.9
0.08265,0,13.92,0,0.437,6.127,18.4,5.5027,4,289,16,396.9,8.58,23.9
0.08199,0,13.92,0,0.437,6.009,42.3,5.5027,4,289,16,396.9,10.4,21.7
0.12932,0,13.92,0,0.437,6.678,31.1,5.9604,4,289,16,396.9,6.27,28.6
0.05372,0,13.92,0,0.437,6.549,51,5.9604,4,289,16,392.85,7.39,27.1
0.14103,0,13.92,0,0.437,5.79,58,6.32,4,289,16,396.9,15.84,20.3
0.06466,70,2.24,0,0.4,6.345,20.1,7.8278,5,358,14.8,368.24,4.97,22.5
0.05561,70,2.24,0,0.4,7.041,10,7.8278,5,358,14.8,371.58,4.74,29
0.04417,70,2.24,0,0.4,6.871,47.4,7.8278,5,358,14.8,390.86,6.07,24.8
0.03537,34,6.09,0,0.433,6.59,40.4,5.4917,7,329,16.1,395.75,9.5,22
0.09266,34,6.09,0,0.433,6.495,18.4,5.4917,7,329,16.1,383.61,8.67,26.4
0.1,34,6.09,0,0.433,6.982,17.7,5.4917,7,329,16.1,390.43,4.86,33.1
0.05515,33,2.18,0,0.472,7.236,41.1,4.022,7,222,18.4,393.68,6.93,36.1
0.05479,33,2.18,0,0.472,6.616,58.1,3.37,7,222,18.4,393.36,8.93,28.4
0.07503,33,2.18,0,0.472,7.42,71.9,3.0992,7,222,18.4,396.9,6.47,33.4
0.04932,33,2.18,0,0.472,6.849,70.3,3.1827,7,222,18.4,396.9,7.53,28.2
0.49298,0,9.9,0,0.544,6.635,82.5,3.3175,4,304,18.4,396.9,4.54,22.8
0.3494,0,9.9,0,0.544,5.972,76.7,3.1025,4,304,18.4,396.24,9.97,20.3
2.63548,0,9.9,0,0.544,4.973,37.8,2.5194,4,304,18.4,350.45,12.64,16.1
0.79041,0,9.9,0,0.544,6.122,52.8,2.6403,4,304,18.4,396.9,5.98,22.1
0.26169,0,9.9,0,0.544,6.023,90.4,2.834,4,304,18.4,396.3,11.72,19.4
0.26938,0,9.9,0,0.544,6.266,82.8,3.2628,4,304,18.4,393.39,7.9,21.6
0.3692,0,9.9,0,0.544,6.567,87.3,3.6023,4,304,18.4,395.69,9.28,23.8
0.25356,0,9.9,0,0.544,5.705,77.7,3.945,4,304,18.4,396.42,11.5,16.2
0.31827,0,9.9,0,0.544,5.914,83.2,3.9986,4,304,18.4,390.7,18.33,17.8
0.24522,0,9.9,0,0.544,5.782,71.7,4.0317,4,304,18.4,396.9,15.94,19.8
0.40202,0,9.9,0,0.544,6.382,67.2,3.5325,4,304,18.4,395.21,10.36,23.1
0.47547,0,9.9,0,0.544,6.113,58.8,4.0019,4,304,18.4,396.23,12.73,21
0.1676,0,7.38,0,0.493,6.426,52.3,4.5404,5,287,19.6,396.9,7.2,23.8
0.18159,0,7.38,0,0.493,6.376,54.3,4.5404,5,287,19.6,396.9,6.87,23.1
0.35114,0,7.38,0,0.493,6.041,49.9,4.7211,5,287,19.6,396.9,7.7,20.4
0.28392,0,7.38,0,0.493,5.708,74.3,4.7211,5,287,19.6,391.13,11.74,18.5
0.34109,0,7.38,0,0.493,6.415,40.1,4.7211,5,287,19.6,396.9,6.12,25
0.19186,0,7.38,0,0.493,6.431,14.7,5.4159,5,287,19.6,393.68,5.08,24.6
0.30347,0,7.38,0,0.493,6.312,28.9,5.4159,5,287,19.6,396.9,6.15,23
0.24103,0,7.38,0,0.493,6.083,43.7,5.4159,5,287,19.6,396.9,12.79,22.2
0.06617,0,3.24,0,0.46,5.868,25.8,5.2146,4,430,16.9,382.44,9.97,19.3
0.06724,0,3.24,0,0.46,6.333,17.2,5.2146,4,430,16.9,375.21,7.34,22.6
0.04544,0,3.24,0,0.46,6.144,32.2,5.8736,4,430,16.9,368.57,9.09,19.8
0.05023,35,6.06,0,0.4379,5.706,28.4,6.6407,1,304,16.9,394.02,12.43,17.1
0.03466,35,6.06,0,0.4379,6.031,23.3,6.6407,1,304,16.9,362.25,7.83,19.4
0.05083,0,5.19,0,0.515,6.316,38.1,6.4584,5,224,20.2,389.71,5.68,22.2
|
||||
0.03738,0,5.19,0,0.515,6.31,38.5,6.4584,5,224,20.2,389.4,6.75,20.7
|
||||
0.03961,0,5.19,0,0.515,6.037,34.5,5.9853,5,224,20.2,396.9,8.01,21.1
|
||||
0.03427,0,5.19,0,0.515,5.869,46.3,5.2311,5,224,20.2,396.9,9.8,19.5
|
||||
0.03041,0,5.19,0,0.515,5.895,59.6,5.615,5,224,20.2,394.81,10.56,18.5
|
||||
0.03306,0,5.19,0,0.515,6.059,37.3,4.8122,5,224,20.2,396.14,8.51,20.6
|
||||
0.05497,0,5.19,0,0.515,5.985,45.4,4.8122,5,224,20.2,396.9,9.74,19
|
||||
0.06151,0,5.19,0,0.515,5.968,58.5,4.8122,5,224,20.2,396.9,9.29,18.7
|
||||
0.01301,35,1.52,0,0.442,7.241,49.3,7.0379,1,284,15.5,394.74,5.49,32.7
|
||||
0.02498,0,1.89,0,0.518,6.54,59.7,6.2669,1,422,15.9,389.96,8.65,16.5
|
||||
0.02543,55,3.78,0,0.484,6.696,56.4,5.7321,5,370,17.6,396.9,7.18,23.9
|
||||
0.03049,55,3.78,0,0.484,6.874,28.1,6.4654,5,370,17.6,387.97,4.61,31.2
|
||||
0.03113,0,4.39,0,0.442,6.014,48.5,8.0136,3,352,18.8,385.64,10.53,17.5
|
||||
0.06162,0,4.39,0,0.442,5.898,52.3,8.0136,3,352,18.8,364.61,12.67,17.2
|
||||
0.0187,85,4.15,0,0.429,6.516,27.7,8.5353,4,351,17.9,392.43,6.36,23.1
|
||||
0.01501,80,2.01,0,0.435,6.635,29.7,8.344,4,280,17,390.94,5.99,24.5
|
||||
0.02899,40,1.25,0,0.429,6.939,34.5,8.7921,1,335,19.7,389.85,5.89,26.6
|
||||
0.06211,40,1.25,0,0.429,6.49,44.4,8.7921,1,335,19.7,396.9,5.98,22.9
|
||||
0.0795,60,1.69,0,0.411,6.579,35.9,10.7103,4,411,18.3,370.78,5.49,24.1
|
||||
0.07244,60,1.69,0,0.411,5.884,18.5,10.7103,4,411,18.3,392.33,7.79,18.6
|
||||
0.01709,90,2.02,0,0.41,6.728,36.1,12.1265,5,187,17,384.46,4.5,30.1
|
||||
0.04301,80,1.91,0,0.413,5.663,21.9,10.5857,4,334,22,382.8,8.05,18.2
|
||||
0.10659,80,1.91,0,0.413,5.936,19.5,10.5857,4,334,22,376.04,5.57,20.6
|
||||
8.98296,0,18.1,1,0.77,6.212,97.4,2.1222,24,666,20.2,377.73,17.6,17.8
|
||||
3.8497,0,18.1,1,0.77,6.395,91,2.5052,24,666,20.2,391.34,13.27,21.7
|
||||
5.20177,0,18.1,1,0.77,6.127,83.4,2.7227,24,666,20.2,395.43,11.48,22.7
|
||||
4.26131,0,18.1,0,0.77,6.112,81.3,2.5091,24,666,20.2,390.74,12.67,22.6
|
||||
4.54192,0,18.1,0,0.77,6.398,88,2.5182,24,666,20.2,374.56,7.79,25
|
||||
3.83684,0,18.1,0,0.77,6.251,91.1,2.2955,24,666,20.2,350.65,14.19,19.9
|
||||
3.67822,0,18.1,0,0.77,5.362,96.2,2.1036,24,666,20.2,380.79,10.19,20.8
|
||||
4.22239,0,18.1,1,0.77,5.803,89,1.9047,24,666,20.2,353.04,14.64,16.8
|
||||
3.47428,0,18.1,1,0.718,8.78,82.9,1.9047,24,666,20.2,354.55,5.29,21.9
|
||||
4.55587,0,18.1,0,0.718,3.561,87.9,1.6132,24,666,20.2,354.7,7.12,27.5
|
||||
3.69695,0,18.1,0,0.718,4.963,91.4,1.7523,24,666,20.2,316.03,14,21.9
|
||||
13.5222,0,18.1,0,0.631,3.863,100,1.5106,24,666,20.2,131.42,13.33,23.1
|
||||
4.89822,0,18.1,0,0.631,4.97,100,1.3325,24,666,20.2,375.52,3.26,50
|
||||
5.66998,0,18.1,1,0.631,6.683,96.8,1.3567,24,666,20.2,375.33,3.73,50
|
||||
6.53876,0,18.1,1,0.631,7.016,97.5,1.2024,24,666,20.2,392.05,2.96,50
|
||||
9.2323,0,18.1,0,0.631,6.216,100,1.1691,24,666,20.2,366.15,9.53,50
|
||||
8.26725,0,18.1,1,0.668,5.875,89.6,1.1296,24,666,20.2,347.88,8.88,50
|
||||
11.1081,0,18.1,0,0.668,4.906,100,1.1742,24,666,20.2,396.9,34.77,13.8
|
||||
18.4982,0,18.1,0,0.668,4.138,100,1.137,24,666,20.2,396.9,37.97,13.8
|
||||
19.6091,0,18.1,0,0.671,7.313,97.9,1.3163,24,666,20.2,396.9,13.44,15
|
||||
15.288,0,18.1,0,0.671,6.649,93.3,1.3449,24,666,20.2,363.02,23.24,13.9
|
||||
9.82349,0,18.1,0,0.671,6.794,98.8,1.358,24,666,20.2,396.9,21.24,13.3
|
||||
23.6482,0,18.1,0,0.671,6.38,96.2,1.3861,24,666,20.2,396.9,23.69,13.1
|
||||
17.8667,0,18.1,0,0.671,6.223,100,1.3861,24,666,20.2,393.74,21.78,10.2
|
||||
88.9762,0,18.1,0,0.671,6.968,91.9,1.4165,24,666,20.2,396.9,17.21,10.4
|
||||
15.8744,0,18.1,0,0.671,6.545,99.1,1.5192,24,666,20.2,396.9,21.08,10.9
|
||||
9.18702,0,18.1,0,0.7,5.536,100,1.5804,24,666,20.2,396.9,23.6,11.3
|
||||
7.99248,0,18.1,0,0.7,5.52,100,1.5331,24,666,20.2,396.9,24.56,12.3
|
||||
20.0849,0,18.1,0,0.7,4.368,91.2,1.4395,24,666,20.2,285.83,30.63,8.8
|
||||
16.8118,0,18.1,0,0.7,5.277,98.1,1.4261,24,666,20.2,396.9,30.81,7.2
|
||||
24.3938,0,18.1,0,0.7,4.652,100,1.4672,24,666,20.2,396.9,28.28,10.5
|
||||
22.5971,0,18.1,0,0.7,5,89.5,1.5184,24,666,20.2,396.9,31.99,7.4
|
||||
14.3337,0,18.1,0,0.7,4.88,100,1.5895,24,666,20.2,372.92,30.62,10.2
|
||||
8.15174,0,18.1,0,0.7,5.39,98.9,1.7281,24,666,20.2,396.9,20.85,11.5
|
||||
6.96215,0,18.1,0,0.7,5.713,97,1.9265,24,666,20.2,394.43,17.11,15.1
|
||||
5.29305,0,18.1,0,0.7,6.051,82.5,2.1678,24,666,20.2,378.38,18.76,23.2
|
||||
11.5779,0,18.1,0,0.7,5.036,97,1.77,24,666,20.2,396.9,25.68,9.7
|
||||
8.64476,0,18.1,0,0.693,6.193,92.6,1.7912,24,666,20.2,396.9,15.17,13.8
|
||||
13.3598,0,18.1,0,0.693,5.887,94.7,1.7821,24,666,20.2,396.9,16.35,12.7
|
||||
8.71675,0,18.1,0,0.693,6.471,98.8,1.7257,24,666,20.2,391.98,17.12,13.1
|
||||
5.87205,0,18.1,0,0.693,6.405,96,1.6768,24,666,20.2,396.9,19.37,12.5
|
||||
7.67202,0,18.1,0,0.693,5.747,98.9,1.6334,24,666,20.2,393.1,19.92,8.5
|
||||
38.3518,0,18.1,0,0.693,5.453,100,1.4896,24,666,20.2,396.9,30.59,5
|
||||
9.91655,0,18.1,0,0.693,5.852,77.8,1.5004,24,666,20.2,338.16,29.97,6.3
|
||||
25.0461,0,18.1,0,0.693,5.987,100,1.5888,24,666,20.2,396.9,26.77,5.6
|
||||
14.2362,0,18.1,0,0.693,6.343,100,1.5741,24,666,20.2,396.9,20.32,7.2
|
||||
9.59571,0,18.1,0,0.693,6.404,100,1.639,24,666,20.2,376.11,20.31,12.1
|
||||
24.8017,0,18.1,0,0.693,5.349,96,1.7028,24,666,20.2,396.9,19.77,8.3
|
||||
41.5292,0,18.1,0,0.693,5.531,85.4,1.6074,24,666,20.2,329.46,27.38,8.5
|
||||
67.9208,0,18.1,0,0.693,5.683,100,1.4254,24,666,20.2,384.97,22.98,5
|
||||
20.7162,0,18.1,0,0.659,4.138,100,1.1781,24,666,20.2,370.22,23.34,11.9
|
||||
11.9511,0,18.1,0,0.659,5.608,100,1.2852,24,666,20.2,332.09,12.13,27.9
|
||||
7.40389,0,18.1,0,0.597,5.617,97.9,1.4547,24,666,20.2,314.64,26.4,17.2
|
||||
14.4383,0,18.1,0,0.597,6.852,100,1.4655,24,666,20.2,179.36,19.78,27.5
|
||||
51.1358,0,18.1,0,0.597,5.757,100,1.413,24,666,20.2,2.6,10.11,15
|
||||
14.0507,0,18.1,0,0.597,6.657,100,1.5275,24,666,20.2,35.05,21.22,17.2
|
||||
18.811,0,18.1,0,0.597,4.628,100,1.5539,24,666,20.2,28.79,34.37,17.9
|
||||
28.6558,0,18.1,0,0.597,5.155,100,1.5894,24,666,20.2,210.97,20.08,16.3
|
||||
45.7461,0,18.1,0,0.693,4.519,100,1.6582,24,666,20.2,88.27,36.98,7
|
||||
18.0846,0,18.1,0,0.679,6.434,100,1.8347,24,666,20.2,27.25,29.05,7.2
|
||||
10.8342,0,18.1,0,0.679,6.782,90.8,1.8195,24,666,20.2,21.57,25.79,7.5
|
||||
25.9406,0,18.1,0,0.679,5.304,89.1,1.6475,24,666,20.2,127.36,26.64,10.4
|
||||
73.5341,0,18.1,0,0.679,5.957,100,1.8026,24,666,20.2,16.45,20.62,8.8
|
||||
11.8123,0,18.1,0,0.718,6.824,76.5,1.794,24,666,20.2,48.45,22.74,8.4
|
||||
11.0874,0,18.1,0,0.718,6.411,100,1.8589,24,666,20.2,318.75,15.02,16.7
|
||||
7.02259,0,18.1,0,0.718,6.006,95.3,1.8746,24,666,20.2,319.98,15.7,14.2
|
||||
12.0482,0,18.1,0,0.614,5.648,87.6,1.9512,24,666,20.2,291.55,14.1,20.8
|
||||
7.05042,0,18.1,0,0.614,6.103,85.1,2.0218,24,666,20.2,2.52,23.29,13.4
|
||||
8.79212,0,18.1,0,0.584,5.565,70.6,2.0635,24,666,20.2,3.65,17.16,11.7
|
||||
15.8603,0,18.1,0,0.679,5.896,95.4,1.9096,24,666,20.2,7.68,24.39,8.3
|
||||
12.2472,0,18.1,0,0.584,5.837,59.7,1.9976,24,666,20.2,24.65,15.69,10.2
|
||||
37.6619,0,18.1,0,0.679,6.202,78.7,1.8629,24,666,20.2,18.82,14.52,10.9
|
||||
7.36711,0,18.1,0,0.679,6.193,78.1,1.9356,24,666,20.2,96.73,21.52,11
|
||||
9.33889,0,18.1,0,0.679,6.38,95.6,1.9682,24,666,20.2,60.72,24.08,9.5
|
||||
8.49213,0,18.1,0,0.584,6.348,86.1,2.0527,24,666,20.2,83.45,17.64,14.5
|
||||
10.0623,0,18.1,0,0.584,6.833,94.3,2.0882,24,666,20.2,81.33,19.69,14.1
|
||||
6.44405,0,18.1,0,0.584,6.425,74.8,2.2004,24,666,20.2,97.95,12.03,16.1
|
||||
5.58107,0,18.1,0,0.713,6.436,87.9,2.3158,24,666,20.2,100.19,16.22,14.3
|
||||
13.9134,0,18.1,0,0.713,6.208,95,2.2222,24,666,20.2,100.63,15.17,11.7
|
||||
11.1604,0,18.1,0,0.74,6.629,94.6,2.1247,24,666,20.2,109.85,23.27,13.4
|
||||
14.4208,0,18.1,0,0.74,6.461,93.3,2.0026,24,666,20.2,27.49,18.05,9.6
|
||||
15.1772,0,18.1,0,0.74,6.152,100,1.9142,24,666,20.2,9.32,26.45,8.7
|
||||
13.6781,0,18.1,0,0.74,5.935,87.9,1.8206,24,666,20.2,68.95,34.02,8.4
|
||||
9.39063,0,18.1,0,0.74,5.627,93.9,1.8172,24,666,20.2,396.9,22.88,12.8
|
||||
22.0511,0,18.1,0,0.74,5.818,92.4,1.8662,24,666,20.2,391.45,22.11,10.5
|
||||
9.72418,0,18.1,0,0.74,6.406,97.2,2.0651,24,666,20.2,385.96,19.52,17.1
|
||||
5.66637,0,18.1,0,0.74,6.219,100,2.0048,24,666,20.2,395.69,16.59,18.4
|
||||
9.96654,0,18.1,0,0.74,6.485,100,1.9784,24,666,20.2,386.73,18.85,15.4
|
||||
12.8023,0,18.1,0,0.74,5.854,96.6,1.8956,24,666,20.2,240.52,23.79,10.8
|
||||
0.6718,0,18.1,0,0.74,6.459,94.8,1.9879,24,666,20.2,43.06,23.98,11.8
|
||||
6.28807,0,18.1,0,0.74,6.341,96.4,2.072,24,666,20.2,318.01,17.79,14.9
|
||||
9.92485,0,18.1,0,0.74,6.251,96.6,2.198,24,666,20.2,388.52,16.44,12.6
|
||||
9.32909,0,18.1,0,0.713,6.185,98.7,2.2616,24,666,20.2,396.9,18.13,14.1
|
||||
7.52601,0,18.1,0,0.713,6.417,98.3,2.185,24,666,20.2,304.21,19.31,13
|
||||
6.71772,0,18.1,0,0.713,6.749,92.6,2.3236,24,666,20.2,0.32,17.44,13.4
|
||||
5.44114,0,18.1,0,0.713,6.655,98.2,2.3552,24,666,20.2,355.29,17.73,15.2
|
||||
5.09017,0,18.1,0,0.713,6.297,91.8,2.3682,24,666,20.2,385.09,17.27,16.1
|
||||
8.24809,0,18.1,0,0.713,7.393,99.3,2.4527,24,666,20.2,375.87,16.74,17.8
|
||||
9.51363,0,18.1,0,0.713,6.728,94.1,2.4961,24,666,20.2,6.68,18.71,14.9
|
||||
4.75237,0,18.1,0,0.713,6.525,86.5,2.4358,24,666,20.2,50.92,18.13,14.1
|
||||
4.66883,0,18.1,0,0.713,5.976,87.9,2.5806,24,666,20.2,10.48,19.01,12.7
|
||||
8.20058,0,18.1,0,0.713,5.936,80.3,2.7792,24,666,20.2,3.5,16.94,13.5
|
||||
7.75223,0,18.1,0,0.713,6.301,83.7,2.7831,24,666,20.2,272.21,16.23,14.9
|
||||
6.80117,0,18.1,0,0.713,6.081,84.4,2.7175,24,666,20.2,396.9,14.7,20
|
||||
4.81213,0,18.1,0,0.713,6.701,90,2.5975,24,666,20.2,255.23,16.42,16.4
|
||||
3.69311,0,18.1,0,0.713,6.376,88.4,2.5671,24,666,20.2,391.43,14.65,17.7
|
||||
6.65492,0,18.1,0,0.713,6.317,83,2.7344,24,666,20.2,396.9,13.99,19.5
|
||||
5.82115,0,18.1,0,0.713,6.513,89.9,2.8016,24,666,20.2,393.82,10.29,20.2
|
||||
7.83932,0,18.1,0,0.655,6.209,65.4,2.9634,24,666,20.2,396.9,13.22,21.4
|
||||
3.1636,0,18.1,0,0.655,5.759,48.2,3.0665,24,666,20.2,334.4,14.13,19.9
|
||||
3.77498,0,18.1,0,0.655,5.952,84.7,2.8715,24,666,20.2,22.01,17.15,19
|
||||
4.42228,0,18.1,0,0.584,6.003,94.5,2.5403,24,666,20.2,331.29,21.32,19.1
|
||||
15.5757,0,18.1,0,0.58,5.926,71,2.9084,24,666,20.2,368.74,18.13,19.1
|
||||
13.0751,0,18.1,0,0.58,5.713,56.7,2.8237,24,666,20.2,396.9,14.76,20.1
|
||||
4.34879,0,18.1,0,0.58,6.167,84,3.0334,24,666,20.2,396.9,16.29,19.9
|
||||
4.03841,0,18.1,0,0.532,6.229,90.7,3.0993,24,666,20.2,395.33,12.87,19.6
|
||||
3.56868,0,18.1,0,0.58,6.437,75,2.8965,24,666,20.2,393.37,14.36,23.2
|
||||
4.64689,0,18.1,0,0.614,6.98,67.6,2.5329,24,666,20.2,374.68,11.66,29.8
|
||||
8.05579,0,18.1,0,0.584,5.427,95.4,2.4298,24,666,20.2,352.58,18.14,13.8
|
||||
6.39312,0,18.1,0,0.584,6.162,97.4,2.206,24,666,20.2,302.76,24.1,13.3
|
||||
4.87141,0,18.1,0,0.614,6.484,93.6,2.3053,24,666,20.2,396.21,18.68,16.7
|
||||
15.0234,0,18.1,0,0.614,5.304,97.3,2.1007,24,666,20.2,349.48,24.91,12
|
||||
10.233,0,18.1,0,0.614,6.185,96.7,2.1705,24,666,20.2,379.7,18.03,14.6
|
||||
14.3337,0,18.1,0,0.614,6.229,88,1.9512,24,666,20.2,383.32,13.11,21.4
|
||||
5.82401,0,18.1,0,0.532,6.242,64.7,3.4242,24,666,20.2,396.9,10.74,23
|
||||
5.70818,0,18.1,0,0.532,6.75,74.9,3.3317,24,666,20.2,393.07,7.74,23.7
|
||||
5.73116,0,18.1,0,0.532,7.061,77,3.4106,24,666,20.2,395.28,7.01,25
|
||||
2.81838,0,18.1,0,0.532,5.762,40.3,4.0983,24,666,20.2,392.92,10.42,21.8
|
||||
2.37857,0,18.1,0,0.583,5.871,41.9,3.724,24,666,20.2,370.73,13.34,20.6
|
||||
3.67367,0,18.1,0,0.583,6.312,51.9,3.9917,24,666,20.2,388.62,10.58,21.2
|
||||
5.69175,0,18.1,0,0.583,6.114,79.8,3.5459,24,666,20.2,392.68,14.98,19.1
|
||||
4.83567,0,18.1,0,0.583,5.905,53.2,3.1523,24,666,20.2,388.22,11.45,20.6
|
||||
0.15086,0,27.74,0,0.609,5.454,92.7,1.8209,4,711,20.1,395.09,18.06,15.2
|
||||
0.18337,0,27.74,0,0.609,5.414,98.3,1.7554,4,711,20.1,344.05,23.97,7
|
||||
0.20746,0,27.74,0,0.609,5.093,98,1.8226,4,711,20.1,318.43,29.68,8.1
|
||||
0.10574,0,27.74,0,0.609,5.983,98.8,1.8681,4,711,20.1,390.11,18.07,13.6
|
||||
0.11132,0,27.74,0,0.609,5.983,83.5,2.1099,4,711,20.1,396.9,13.35,20.1
|
||||
0.17331,0,9.69,0,0.585,5.707,54,2.3817,6,391,19.2,396.9,12.01,21.8
|
||||
0.27957,0,9.69,0,0.585,5.926,42.6,2.3817,6,391,19.2,396.9,13.59,24.5
|
||||
0.17899,0,9.69,0,0.585,5.67,28.8,2.7986,6,391,19.2,393.29,17.6,23.1
|
||||
0.2896,0,9.69,0,0.585,5.39,72.9,2.7986,6,391,19.2,396.9,21.14,19.7
|
||||
0.26838,0,9.69,0,0.585,5.794,70.6,2.8927,6,391,19.2,396.9,14.1,18.3
|
||||
0.23912,0,9.69,0,0.585,6.019,65.3,2.4091,6,391,19.2,396.9,12.92,21.2
|
||||
0.17783,0,9.69,0,0.585,5.569,73.5,2.3999,6,391,19.2,395.77,15.1,17.5
|
||||
0.22438,0,9.69,0,0.585,6.027,79.7,2.4982,6,391,19.2,396.9,14.33,16.8
|
||||
0.06263,0,11.93,0,0.573,6.593,69.1,2.4786,1,273,21,391.99,9.67,22.4
|
||||
0.04527,0,11.93,0,0.573,6.12,76.7,2.2875,1,273,21,396.9,9.08,20.6
|
||||
0.06076,0,11.93,0,0.573,6.976,91,2.1675,1,273,21,396.9,5.64,23.9
|
||||
0.10959,0,11.93,0,0.573,6.794,89.3,2.3889,1,273,21,393.45,6.48,22
|
||||
0.04741,0,11.93,0,0.573,6.03,80.8,2.505,1,273,21,396.9,7.88,11.9
|
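The columns above appear to follow the standard Boston housing layout, CRIM through LSTAT with the median home value MEDV last. A minimal loading sketch for poking at the file directly, assuming it is saved locally as boston.csv and carries a one-line header like iris.csv below (the header is an assumption; adjust skiprows if the file starts directly with data):

import numpy as np

# Skip the assumed header row; each remaining row is 13 features plus MEDV.
data = np.loadtxt('boston.csv', delimiter=',', skiprows=1)
features, target = data[:, :-1], data[:, -1]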
151
tensorflow/contrib/learn/python/learn/datasets/data/iris.csv
Normal file
@ -0,0 +1,151 @@
150,4,setosa,versicolor,virginica
5.1,3.5,1.4,0.2,0
4.9,3.0,1.4,0.2,0
4.7,3.2,1.3,0.2,0
4.6,3.1,1.5,0.2,0
5.0,3.6,1.4,0.2,0
5.4,3.9,1.7,0.4,0
4.6,3.4,1.4,0.3,0
5.0,3.4,1.5,0.2,0
4.4,2.9,1.4,0.2,0
4.9,3.1,1.5,0.1,0
5.4,3.7,1.5,0.2,0
4.8,3.4,1.6,0.2,0
4.8,3.0,1.4,0.1,0
4.3,3.0,1.1,0.1,0
5.8,4.0,1.2,0.2,0
5.7,4.4,1.5,0.4,0
5.4,3.9,1.3,0.4,0
5.1,3.5,1.4,0.3,0
5.7,3.8,1.7,0.3,0
5.1,3.8,1.5,0.3,0
5.4,3.4,1.7,0.2,0
5.1,3.7,1.5,0.4,0
4.6,3.6,1.0,0.2,0
5.1,3.3,1.7,0.5,0
4.8,3.4,1.9,0.2,0
5.0,3.0,1.6,0.2,0
5.0,3.4,1.6,0.4,0
5.2,3.5,1.5,0.2,0
5.2,3.4,1.4,0.2,0
4.7,3.2,1.6,0.2,0
4.8,3.1,1.6,0.2,0
5.4,3.4,1.5,0.4,0
5.2,4.1,1.5,0.1,0
5.5,4.2,1.4,0.2,0
4.9,3.1,1.5,0.1,0
5.0,3.2,1.2,0.2,0
5.5,3.5,1.3,0.2,0
4.9,3.1,1.5,0.1,0
4.4,3.0,1.3,0.2,0
5.1,3.4,1.5,0.2,0
5.0,3.5,1.3,0.3,0
4.5,2.3,1.3,0.3,0
4.4,3.2,1.3,0.2,0
5.0,3.5,1.6,0.6,0
5.1,3.8,1.9,0.4,0
4.8,3.0,1.4,0.3,0
5.1,3.8,1.6,0.2,0
4.6,3.2,1.4,0.2,0
5.3,3.7,1.5,0.2,0
5.0,3.3,1.4,0.2,0
7.0,3.2,4.7,1.4,1
6.4,3.2,4.5,1.5,1
6.9,3.1,4.9,1.5,1
5.5,2.3,4.0,1.3,1
6.5,2.8,4.6,1.5,1
5.7,2.8,4.5,1.3,1
6.3,3.3,4.7,1.6,1
4.9,2.4,3.3,1.0,1
6.6,2.9,4.6,1.3,1
5.2,2.7,3.9,1.4,1
5.0,2.0,3.5,1.0,1
5.9,3.0,4.2,1.5,1
6.0,2.2,4.0,1.0,1
6.1,2.9,4.7,1.4,1
5.6,2.9,3.6,1.3,1
6.7,3.1,4.4,1.4,1
5.6,3.0,4.5,1.5,1
5.8,2.7,4.1,1.0,1
6.2,2.2,4.5,1.5,1
5.6,2.5,3.9,1.1,1
5.9,3.2,4.8,1.8,1
6.1,2.8,4.0,1.3,1
6.3,2.5,4.9,1.5,1
6.1,2.8,4.7,1.2,1
6.4,2.9,4.3,1.3,1
6.6,3.0,4.4,1.4,1
6.8,2.8,4.8,1.4,1
6.7,3.0,5.0,1.7,1
6.0,2.9,4.5,1.5,1
5.7,2.6,3.5,1.0,1
5.5,2.4,3.8,1.1,1
5.5,2.4,3.7,1.0,1
5.8,2.7,3.9,1.2,1
6.0,2.7,5.1,1.6,1
5.4,3.0,4.5,1.5,1
6.0,3.4,4.5,1.6,1
6.7,3.1,4.7,1.5,1
6.3,2.3,4.4,1.3,1
5.6,3.0,4.1,1.3,1
5.5,2.5,4.0,1.3,1
5.5,2.6,4.4,1.2,1
6.1,3.0,4.6,1.4,1
5.8,2.6,4.0,1.2,1
5.0,2.3,3.3,1.0,1
5.6,2.7,4.2,1.3,1
5.7,3.0,4.2,1.2,1
5.7,2.9,4.2,1.3,1
6.2,2.9,4.3,1.3,1
5.1,2.5,3.0,1.1,1
5.7,2.8,4.1,1.3,1
6.3,3.3,6.0,2.5,2
5.8,2.7,5.1,1.9,2
7.1,3.0,5.9,2.1,2
6.3,2.9,5.6,1.8,2
6.5,3.0,5.8,2.2,2
7.6,3.0,6.6,2.1,2
4.9,2.5,4.5,1.7,2
7.3,2.9,6.3,1.8,2
6.7,2.5,5.8,1.8,2
7.2,3.6,6.1,2.5,2
6.5,3.2,5.1,2.0,2
6.4,2.7,5.3,1.9,2
6.8,3.0,5.5,2.1,2
5.7,2.5,5.0,2.0,2
5.8,2.8,5.1,2.4,2
6.4,3.2,5.3,2.3,2
6.5,3.0,5.5,1.8,2
7.7,3.8,6.7,2.2,2
7.7,2.6,6.9,2.3,2
6.0,2.2,5.0,1.5,2
6.9,3.2,5.7,2.3,2
5.6,2.8,4.9,2.0,2
7.7,2.8,6.7,2.0,2
6.3,2.7,4.9,1.8,2
6.7,3.3,5.7,2.1,2
7.2,3.2,6.0,1.8,2
6.2,2.8,4.8,1.8,2
6.1,3.0,4.9,1.8,2
6.4,2.8,5.6,2.1,2
7.2,3.0,5.8,1.6,2
7.4,2.8,6.1,1.9,2
7.9,3.8,6.4,2.0,2
6.4,2.8,5.6,2.2,2
6.3,2.8,5.1,1.5,2
6.1,2.6,5.6,1.4,2
7.7,3.0,6.1,2.3,2
6.3,3.4,5.6,2.4,2
6.4,3.1,5.5,1.8,2
6.0,3.0,4.8,1.8,2
6.9,3.1,5.4,2.1,2
6.7,3.1,5.6,2.4,2
6.9,3.1,5.1,2.3,2
5.8,2.7,5.1,1.9,2
6.8,3.2,5.9,2.3,2
6.7,3.3,5.7,2.5,2
6.7,3.0,5.2,2.3,2
6.3,2.5,5.0,1.9,2
6.5,3.0,5.2,2.0,2
6.2,3.4,5.4,2.3,2
5.9,3.0,5.1,1.8,2
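The first line of this file is a small self-describing header: sample count, feature count, then the class names in label order (setosa=0, versicolor=1, virginica=2). A hedged loading sketch that honors that convention:

import csv
import numpy as np

with open('iris.csv') as f:
    rows = list(csv.reader(f))
n_samples, n_features = int(rows[0][0]), int(rows[0][1])  # header: 150, 4, names
data = np.array(rows[1:], dtype=float)
features, labels = data[:, :n_features], data[:, n_features].astype(int)
assert features.shape == (n_samples, n_features)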
210
tensorflow/contrib/learn/python/learn/datasets/mnist.py
Normal file
@ -0,0 +1,210 @@
"""Functions for downloading and reading MNIST data."""
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import tempfile

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.base import maybe_download

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with tf.gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot
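# Worked example of the indexing above: dense_to_one_hot(numpy.array([0, 2]),
# num_classes=3) computes index_offset [0, 3], sets flat positions 0 and 5,
# and returns [[1., 0., 0.], [0., 0., 1.]].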
def extract_labels(filename, one_hot=False, num_classes=10):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with tf.gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels, num_classes)
    return labels


class DataSet(object):

  def __init__(self, images, labels, fake_data=False, one_hot=False,
               dtype=tf.float32):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      if dtype == tf.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
  class DataSets(object):
    pass
  data_sets = DataSets()

  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    data_sets.train = fake()
    data_sets.validation = fake()
    data_sets.test = fake()
    return data_sets

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = maybe_download(TRAIN_IMAGES, train_dir, SOURCE_URL + TRAIN_IMAGES)
  train_images = extract_images(local_file)

  local_file = maybe_download(TRAIN_LABELS, train_dir, SOURCE_URL + TRAIN_LABELS)
  train_labels = extract_labels(local_file, one_hot=one_hot)

  local_file = maybe_download(TEST_IMAGES, train_dir, SOURCE_URL + TEST_IMAGES)
  test_images = extract_images(local_file)

  local_file = maybe_download(TEST_LABELS, train_dir, SOURCE_URL + TEST_LABELS)
  test_labels = extract_labels(local_file, one_hot=one_hot)

  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]

  data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
  data_sets.validation = DataSet(validation_images, validation_labels,
                                 dtype=dtype)
  data_sets.test = DataSet(test_images, test_labels, dtype=dtype)

  return data_sets


def load_mnist():
  return read_data_sets("MNIST_data")
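Typical end-to-end use of this module, a minimal sketch (the first call downloads the four IDX files from SOURCE_URL into the given directory):

mnist = read_data_sets('MNIST_data', one_hot=True)
batch_images, batch_labels = mnist.train.next_batch(100)
# batch_images: (100, 784) floats in [0, 1]; batch_labels: (100, 10) one-hot rows.
# mnist.validation holds the first VALIDATION_SIZE (5000) training examples.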
28
tensorflow/contrib/learn/python/learn/estimators/__init__.py
Normal file
@ -0,0 +1,28 @@
"""Scikit Flow Estimators."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowLinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowLinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
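Pulling the pieces together, a hedged sketch of the intended workflow (constructor arguments per the dnn.py diff later in this commit; features/labels as loaded from iris.csv above, hidden_units values chosen for illustration):

from tensorflow.contrib.learn.python.learn import estimators

classifier = estimators.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                n_classes=3)
classifier.fit(features, labels)
predictions = classifier.predict(features)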
186
tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
Normal file
@ -0,0 +1,186 @@
"""sklearn cross-support."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import six  # used by _BaseEstimator.set_params


def _pprint(params, offset=0, printer=repr):
  """Minimal stand-in for sklearn.base._pprint, used by __repr__ below."""
  return ', '.join('%s=%s' % (k, printer(v)) for k, v in sorted(params.items()))


class _BaseEstimator(object):
  """This is a cross-import when sklearn is not available.

  Adapted from the sklearn.BaseEstimator implementation.
  https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
  """

  def get_params(self, deep=True):
    """Get parameters for this estimator.

    Parameters
    ----------
    deep: boolean, optional
        If True, will return the parameters for this estimator and
        contained subobjects that are estimators.

    Returns
    -------
    params : mapping of string to any
        Parameter names mapped to their values.
    """
    out = dict()
    param_names = [name for name in self.__dict__ if not name.startswith('_')]
    for key in param_names:
      value = getattr(self, key, None)

      # XXX: should we rather test if instance of estimator?
      if deep and hasattr(value, 'get_params'):
        deep_items = value.get_params().items()
        out.update((key + '__' + k, val) for k, val in deep_items)
      out[key] = value
    return out

  def set_params(self, **params):
    """Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects
    (such as pipelines). The former have parameters of the form
    ``<component>__<parameter>`` so that it's possible to update each
    component of a nested object.

    Returns
    -------
    self
    """
    if not params:
      # Simple optimisation to gain speed (inspect is slow)
      return self
    valid_params = self.get_params(deep=True)
    for key, value in six.iteritems(params):
      split = key.split('__', 1)
      if len(split) > 1:
        # nested objects case
        name, sub_name = split
        if name not in valid_params:
          raise ValueError('Invalid parameter %s for estimator %s. '
                           'Check the list of available parameters '
                           'with `estimator.get_params().keys()`.' %
                           (name, self))
        sub_object = valid_params[name]
        sub_object.set_params(**{sub_name: value})
      else:
        # simple objects case
        if key not in valid_params:
          raise ValueError('Invalid parameter %s for estimator %s. '
                           'Check the list of available parameters '
                           'with `estimator.get_params().keys()`.' %
                           (key, self.__class__.__name__))
        setattr(self, key, value)
    return self

  def __repr__(self):
    class_name = self.__class__.__name__
    return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                           offset=len(class_name),),)


class _ClassifierMixin():
  """Mixin class for all classifiers."""
  pass


class _RegressorMixin():
  """Mixin class for all regression estimators."""
  pass


class _NotFittedError(ValueError, AttributeError):
  """Exception class to raise if estimator is used before fitting.

  This class inherits from both ValueError and AttributeError to help with
  exception handling and backward compatibility.

  Examples
  --------
  >>> from sklearn.svm import LinearSVC
  >>> from sklearn.exceptions import NotFittedError
  >>> try:
  ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
  ... except NotFittedError as e:
  ...     print(repr(e))
  ...     # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
  NotFittedError('This LinearSVC instance is not fitted yet',)

  Copied from https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
  """


def _accuracy_score(y_true, y_pred):
  score = y_true == y_pred
  return np.average(score)


def _mean_squared_error(y_true, y_pred):
  if len(y_true.shape) > 1:
    y_true = np.squeeze(y_true)
  if len(y_pred.shape) > 1:
    y_pred = np.squeeze(y_pred)
  return np.average((y_true - y_pred) ** 2)


def _train_test_split(*args, **options):
  n_array = len(args)

  test_size = options.pop('test_size', None)
  train_size = options.pop('train_size', None)
  random_state = options.pop('random_state', None)

  if test_size is None and train_size is None:
    train_size = 0.75
  elif train_size is None:
    train_size = 1 - test_size
  train_size = int(train_size * args[0].shape[0])  # number of training rows

  indices = np.random.permutation(args[0].shape[0])
  train_idx, test_idx = indices[:train_size], indices[train_size:]  # disjoint split
  result = []
  for x in args:
    result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
  return tuple(result)


# Try to import sklearn, if fail - use _BaseEstimator.
try:
  from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
except ImportError:
  BaseEstimator = _BaseEstimator
  ClassifierMixin = _ClassifierMixin
  RegressorMixin = _RegressorMixin

# Try to import exception for not fitted error.
try:
  from sklearn.exceptions import NotFittedError
except ImportError:
  NotFittedError = _NotFittedError


# Try to import metrics
try:
  from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
except ImportError:
  accuracy_score = _accuracy_score
  log_loss = None
  mean_squared_error = _mean_squared_error


# Try to import train_test_split
try:
  from sklearn.cross_validation import train_test_split
except ImportError:
  train_test_split = _train_test_split
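Whichever import branch wins, callers see one consistent set of names. A small sketch exercising the pure-numpy fallback splitter directly (argument semantics assumed to mirror sklearn's, with train_size a fraction of rows):

import numpy as np
from tensorflow.contrib.learn.python.learn.estimators import _sklearn

X, y = np.arange(20).reshape(10, 2), np.arange(10)
X_train, X_test, y_train, y_test = _sklearn._train_test_split(X, y, train_size=0.8)
# X_train: (8, 2), X_test: (2, 2); all four share one random permutation of rows.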
@ -23,40 +23,53 @@ import shutil
from six import string_types

import numpy as np
import tensorflow as tf

from google.protobuf import text_format
from tensorflow.python.platform.default import _gfile as gfile

from sklearn.base import BaseEstimator
try:
  from sklearn.exceptions import NotFittedError
except ImportError:
  from sklearn.utils.validation import NotFittedError  # pylint: disable=ungrouped-imports
from tensorflow.python.client import session
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import training as train

from ..trainer import TensorFlowTrainer, RestoredTrainer
from ..io.data_feeder import setup_train_data_feeder
from ..io.data_feeder import setup_predict_data_feeder
from ..ops.dropout_ops import DROPOUTS
from .. import monitors
from tensorflow.contrib.layers import optimizers
from tensorflow.contrib.learn.python.learn import trainer
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_train_data_feeder
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_predict_data_feeder
from tensorflow.contrib.learn.python.learn.ops.dropout_ops import DROPOUTS
from tensorflow.contrib.learn.python.learn import monitors

from ..addons.config_addon import ConfigAddon
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig


def _write_with_backup(filename, content):
  if os.path.exists(filename):
    shutil.move(filename, filename + '.old')
  with open(filename, 'w') as f:
  if gfile.Exists(filename):
    gfile.Rename(filename, filename + '.old', overwrite=True)
  with gfile.Open(filename, 'w') as f:
    f.write(content)


class TensorFlowEstimator(BaseEstimator):
class TensorFlowEstimator(_sklearn.BaseEstimator):
  """Base class for all TensorFlow estimators.

  Parameters:
    model_fn: Model function, that takes input X, y tensors and outputs
      prediction and loss tensors.
    n_classes: Number of classes in the target.
    tf_master: TensorFlow master. Empty string is default for local.
    batch_size: Mini batch size.
    steps: Number of steps to run over data.
    optimizer: Optimizer name (or class), for example "SGD", "Adam",
@ -69,115 +82,111 @@ class TensorFlowEstimator(BaseEstimator):
        return tf.train.exponential_decay(
            0.1, global_step,
            decay_steps=2, decay_rate=0.001)
    clip_gradients: Clip norm of the gradients to this value to stop
      gradient explosion.
    class_weight: None or list of n_classes floats. Weight associated with
      classes for loss computation. If not given, all classes are supposed to have
      weight one.
    tf_random_seed: Random seed for TensorFlow initializers.
      Setting this value allows consistency between reruns.
    continue_training: when continue_training is True, once initialized
      model will be continually trained on every call of fit.
    config_addon: ConfigAddon object that controls the configurations of the session,
    config: RunConfig object that controls the configurations of the session,
      e.g. num_cores, gpu_memory_fraction, etc.
    verbose: Controls the verbosity, possible values:
      0: the algorithm and debug information is muted.
      1: trainer prints the progress.
      2: log device placement is printed.
    max_to_keep: The maximum number of recent checkpoint files to keep.
      As new files are created, older files are deleted.
      If None or 0, all checkpoint files are kept.
      Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
    keep_checkpoint_every_n_hours: Number of hours between each checkpoint
      to be saved. The default value of 10,000 hours effectively disables the feature.
  """

  def __init__(self, model_fn, n_classes, tf_master="", batch_size=32,
  def __init__(self, model_fn, n_classes, batch_size=32,
               steps=200, optimizer="Adagrad",
               learning_rate=0.1, class_weight=None,
               tf_random_seed=42, continue_training=False,
               config_addon=None, verbose=1,
               max_to_keep=5, keep_checkpoint_every_n_hours=10000):

               learning_rate=0.1, clip_gradients=5.0, class_weight=None,
               continue_training=False,
               config=None, verbose=1):
    self.model_fn = model_fn
    self.n_classes = n_classes
    self.tf_master = tf_master
    self.batch_size = batch_size
    self.steps = steps
    self.verbose = verbose
    self.optimizer = optimizer
    self.learning_rate = learning_rate
    self.tf_random_seed = tf_random_seed
    self.model_fn = model_fn
    self.clip_gradients = clip_gradients
    self.continue_training = continue_training
    self._initialized = False
    self.max_to_keep = max_to_keep
    self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
    self.class_weight = class_weight
    self.config_addon = config_addon
    self._config = config

  def _setup_training(self):
    """Sets up graph, model and trainer."""
    self._graph = tf.Graph()
    # Create config if not given.
    if self._config is None:
      self._config = RunConfig(verbose=self.verbose)
    # Create new graph.
    self._graph = ops.Graph()
    self._graph.add_to_collection("IS_TRAINING", True)
    with self._graph.as_default():
      tf.set_random_seed(self.tf_random_seed)
      self._global_step = tf.Variable(
      random_seed.set_random_seed(self._config.tf_random_seed)
      self._global_step = variables.Variable(
          0, name="global_step", trainable=False)

      # Setting up input and output placeholders.
      input_shape = [None] + self._data_feeder.input_shape[1:]
      output_shape = [None] + self._data_feeder.output_shape[1:]
      self._inp = tf.placeholder(
          tf.as_dtype(self._data_feeder.input_dtype), input_shape,
          name="input")
      self._out = tf.placeholder(
          tf.as_dtype(self._data_feeder.output_dtype), output_shape,
          name="output")
      # Setting up inputs and outputs.
      self._inp, self._out = self._data_feeder.input_builder()

      # If class weights are provided, add them to the graph.
      # Different loss functions can use this tensor by name.
      if self.class_weight:
        self._class_weight_node = tf.constant(
        self._class_weight_node = constant_op.constant(
            self.class_weight, name='class_weight')

      # Add histograms for X and y if they are floats.
      if self._data_feeder.input_dtype in (np.float32, np.float64):
        tf.histogram_summary("X", self._inp)
        logging_ops.histogram_summary("X", self._inp)
      if self._data_feeder.output_dtype in (np.float32, np.float64):
        tf.histogram_summary("y", self._out)
        logging_ops.histogram_summary("y", self._out)

      # Create model's graph.
      self._model_predictions, self._model_loss = self.model_fn(
          self._inp, self._out)

      # Create summary to monitor loss
      tf.scalar_summary("loss", self._model_loss)

      # Set up a single operator to merge all the summaries
      self._summaries = tf.merge_all_summaries()
      self._summaries = logging_ops.merge_all_summaries()

      # Create trainer and augment graph with gradients and optimizer.
      # Additionally creates initialization ops.
      self._trainer = TensorFlowTrainer(
          loss=self._model_loss, global_step=self._global_step,
          optimizer=self.optimizer, learning_rate=self.learning_rate)
      learning_rate = self.learning_rate
      optimizer = self.optimizer
      if callable(learning_rate):
        learning_rate = learning_rate(self._global_step)
      if callable(optimizer):
        optimizer = optimizer(learning_rate)
      self._train = optimizers.optimize_loss(self._model_loss, self._global_step,
          learning_rate=learning_rate,
          optimizer=optimizer, clip_gradients=self.clip_gradients)

      # Update ops during training, e.g. batch_norm_ops
      self._train = control_flow_ops.group(self._train, *ops.get_collection('update_ops'))

      # Get all initializers for all trainable variables.
      self._initializers = variables.initialize_all_variables()

      # Create model's saver capturing all the nodes created up until now.
      self._saver = tf.train.Saver(
          max_to_keep=self.max_to_keep,
          keep_checkpoint_every_n_hours=self.keep_checkpoint_every_n_hours)
      self._saver = train.Saver(
          max_to_keep=self._config.keep_checkpoint_max,
          keep_checkpoint_every_n_hours=self._config.keep_checkpoint_every_n_hours)

      # Enable monitor to create validation data dict with appropriate tf placeholders
      self._monitor.create_val_feed_dict(self._inp, self._out)

      # Create session to run model with.
      if self.config_addon is None:
        self.config_addon = ConfigAddon(verbose=self.verbose)
      self._session = tf.Session(self.tf_master, config=self.config_addon.config)
      self._session = session.Session(self._config.tf_master, config=self._config.tf_config)

      # Run parameter initializers.
      self._session.run(self._initializers)

  def _setup_summary_writer(self, logdir):
    """Sets up the summary writer to prepare for later optional visualization."""
    self._summary_writer = tf.train.SummaryWriter(
    self._summary_writer = train.SummaryWriter(
        os.path.join(logdir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')),
        graph_def=self._session.graph_def)
        graph=self._session.graph)

  def fit(self, X, y, monitor=None, logdir=None):
    """Builds a neural network model given provided `model_fn` and training
@ -216,9 +225,9 @@ class TensorFlowEstimator(BaseEstimator):
    if not self.continue_training or not self._initialized:
      # Sets up model and trainer.
      self._setup_training()
      # Initialize model parameters.
      self._trainer.initialize(self._session)
      self._initialized = True
    else:
      self._data_feeder.set_placeholders(self._inp, self._out)

    # Sets up summary writer for later optional visualization.
    # Due to not able to setup _summary_writer in __init__ as it's not a
@ -234,14 +243,15 @@ class TensorFlowEstimator(BaseEstimator):
      self._summary_writer = None

    # Train model for given number of steps.
    self._trainer.train(self._session,
                        self._data_feeder.get_feed_dict_fn(
                            self._inp, self._out),
                        self.steps,
                        self._monitor,
                        self._summary_writer,
                        self._summaries,
                        feed_params_fn=self._data_feeder.get_feed_params)
    trainer.train(
        self._session, self._train,
        self._model_loss, self._global_step,
        self._data_feeder.get_feed_dict_fn(),
        steps=self.steps,
        monitor=self._monitor,
        summary_writer=self._summary_writer,
        summaries=self._summaries,
        feed_params_fn=self._data_feeder.get_feed_params)
    return self

  def partial_fit(self, X, y):
@ -270,7 +280,7 @@ class TensorFlowEstimator(BaseEstimator):

  def _predict(self, X, axis=-1, batch_size=None):
    if not self._initialized:
      raise NotFittedError()
      raise _sklearn.NotFittedError()

    # Use the batch size for fitting if the user did not specify one.
    if batch_size is None:
@ -361,7 +371,7 @@ class TensorFlowEstimator(BaseEstimator):
      path: Folder to save model to.
    """
    if not self._initialized:
      raise NotFittedError()
      raise _sklearn.NotFittedError()

    # Currently Saver requires absolute path to work correctly.
    path = os.path.abspath(path)
@ -394,7 +404,7 @@ class TensorFlowEstimator(BaseEstimator):
    # Save graph definition.
    _write_with_backup(os.path.join(path, 'graph.pbtxt'), str(self._graph.as_graph_def()))

    # Save saver defintion.
    # Save saver definition.
    _write_with_backup(os.path.join(path, 'saver.pbtxt'), str(self._saver.as_saver_def()))

    # Save checkpoints.
@ -413,46 +423,44 @@ class TensorFlowEstimator(BaseEstimator):
    # Currently Saver requires absolute path to work correctly.
    path = os.path.abspath(path)

    self._graph = tf.Graph()
    self._graph = ops.Graph()
    with self._graph.as_default():
      endpoints_filename = os.path.join(path, 'endpoints')
      if not os.path.exists(endpoints_filename):
        raise ValueError("Restore folder doesn't contain endpoints.")
      with open(endpoints_filename) as foutputs:
      with gfile.Open(endpoints_filename) as foutputs:
        endpoints = foutputs.read().split('\n')
      graph_filename = os.path.join(path, 'graph.pbtxt')
      if not os.path.exists(graph_filename):
        raise ValueError("Restore folder doesn't contain graph definition.")
      with open(graph_filename) as fgraph:
        graph_def = tf.GraphDef()
      with gfile.Open(graph_filename) as fgraph:
        graph_def = graph_pb2.GraphDef()
        text_format.Merge(fgraph.read(), graph_def)
        (self._inp, self._out,
         self._model_predictions, self._model_loss) = tf.import_graph_def(
         self._model_predictions, self._model_loss) = importer.import_graph_def(
             graph_def, name='', return_elements=endpoints)
      saver_filename = os.path.join(path, 'saver.pbtxt')
      if not os.path.exists(saver_filename):
        raise ValueError("Restore folder doesn't contain saver defintion.")
      with open(saver_filename) as fsaver:
        saver_def = tf.train.SaverDef()
        raise ValueError("Restore folder doesn't contain saver definition.")
      with gfile.Open(saver_filename) as fsaver:
        saver_def = train.SaverDef()
        text_format.Merge(fsaver.read(), saver_def)
        self._saver = tf.train.Saver(saver_def=saver_def)
        self._saver = train.Saver(saver_def=saver_def)

      # Restore trainer
      self._global_step = self._graph.get_tensor_by_name('global_step:0')
      trainer_op = self._graph.get_operation_by_name('train')
      self._trainer = RestoredTrainer(
          self._model_loss, self._global_step, trainer_op)
      self._train = self._graph.get_operation_by_name('train')

      # Restore summaries.
      self._summaries = self._graph.get_operation_by_name('MergeSummary/MergeSummary')

      # Restore session.
      if not isinstance(self.config_addon, ConfigAddon):
        self.config_addon = ConfigAddon(verbose=self.verbose)
      self._session = tf.Session(
          self.tf_master,
          config=self.config_addon.config)
      checkpoint_path = tf.train.latest_checkpoint(path)
      if not isinstance(self._config, RunConfig):
        self._config = RunConfig(verbose=self.verbose)
      self._session = session.Session(
          self._config.tf_master,
          config=self._config.tf_config)
      checkpoint_path = train.latest_checkpoint(path)
      if checkpoint_path is None:
        raise ValueError("Missing checkpoint files in the %s. Please "
                         "make sure you have a checkpoint file that describes "
@ -465,12 +473,12 @@ class TensorFlowEstimator(BaseEstimator):

  # pylint: disable=unused-argument
  @classmethod
  def restore(cls, path, config_addon=None):
  def restore(cls, path, config=None):
    """Restores model from given path.

    Args:
      path: Path to the checkpoints and other model information.
      config_addon: ConfigAddon object that controls the configurations of the session,
      config: RunConfig object that controls the configurations of the session,
        e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured.

    Returns:
@ -480,8 +488,9 @@ class TensorFlowEstimator(BaseEstimator):
    if not os.path.exists(model_def_filename):
      raise ValueError("Restore folder doesn't contain model definition.")
    # list of parameters that are allowed to be reconfigured
    reconfigurable_params = ['config_addon']
    with open(model_def_filename) as fmodel:
    reconfigurable_params = ['_config']
    _config = config
    with gfile.Open(model_def_filename) as fmodel:
      model_def = json.loads(fmodel.read())
      # TensorFlow binding requires parameters to be strings not unicode.
      # Only issue in Python2.
@ -490,9 +499,9 @@ class TensorFlowEstimator(BaseEstimator):
            not isinstance(value, str)):
          model_def[key] = str(value)
        if key in reconfigurable_params:
          newValue = locals()[key]
          if newValue is not None:
            model_def[key] = newValue
          new_value = locals()[key]
          if new_value is not None:
            model_def[key] = new_value
      class_name = model_def.pop('class_name')
      if class_name == 'TensorFlowEstimator':
        custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
@ -501,8 +510,9 @@ class TensorFlowEstimator(BaseEstimator):

      # To avoid cyclical dependencies, import inside the function instead of
      # the beginning of the file.
      from tensorflow.contrib.skflow.python.skflow import estimators
      from tensorflow.contrib.learn.python.learn import estimators
      # Estimator must be one of the defined estimators in the __init__ file.
      estimator = getattr(estimators, class_name)(**model_def)
      estimator._restore(path)
      return estimator
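The save/restore pair above round-trips a model through graph.pbtxt, saver.pbtxt, endpoints, and checkpoint files. A hedged usage sketch, continuing the classifier example earlier (the directory path is hypothetical):

classifier.save('/tmp/iris_model')
restored = TensorFlowEstimator.restore('/tmp/iris_model')  # dispatches on saved class_name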
@@ -16,19 +16,17 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from sklearn.base import ClassifierMixin, RegressorMixin
-
-from .base import TensorFlowEstimator
-from .. import models
+from tensorflow.contrib.learn.python.learn.estimators import _sklearn
+from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
+from tensorflow.contrib.learn.python.learn import models


-class TensorFlowDNNClassifier(TensorFlowEstimator, ClassifierMixin):
+class TensorFlowDNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
     """TensorFlow DNN Classifier model.

     Parameters:
         hidden_units: List of hidden units per layer.
         n_classes: Number of classes in the target.
-        tf_master: TensorFlow master. Empty string is default for local.
         batch_size: Mini batch size.
         steps: Number of steps to run over data.
         optimizer: Optimizer name (or class), for example "SGD", "Adam",
@@ -44,41 +42,29 @@ class TensorFlowDNNClassifier(TensorFlowEstimator, ClassifierMixin):
         class_weight: None or list of n_classes floats. Weight associated with
             classes for loss computation. If not given, all classes are supposed to have
             weight one.
-        tf_random_seed: Random seed for TensorFlow initializers.
-            Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
-        config_addon: ConfigAddon object that controls the configurations of the session,
+        config: RunConfig object that controls the configurations of the session,
             e.g. num_cores, gpu_memory_fraction, etc.
-        max_to_keep: The maximum number of recent checkpoint files to keep.
-            As new files are created, older files are deleted.
-            If None or 0, all checkpoint files are kept.
-            Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
-        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
-            to be saved. The default value of 10,000 hours effectively disables the feature.
         dropout: When not None, the probability we will drop out a given
             coordinate.
     """

-    def __init__(self, hidden_units, n_classes, tf_master="", batch_size=32,
+    def __init__(self, hidden_units, n_classes, batch_size=32,
                  steps=200, optimizer="Adagrad", learning_rate=0.1,
-                 class_weight=None,
-                 tf_random_seed=42, continue_training=False, config_addon=None,
-                 verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000,
-                 dropout=None):
+                 class_weight=None, clip_gradients=5.0,
+                 continue_training=False, config=None,
+                 verbose=1, dropout=None):
         self.hidden_units = hidden_units
         self.dropout = dropout
         super(TensorFlowDNNClassifier, self).__init__(
             model_fn=self._model_fn,
-            n_classes=n_classes, tf_master=tf_master,
+            n_classes=n_classes,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
-            tf_random_seed=tf_random_seed,
+            clip_gradients=clip_gradients,
             continue_training=continue_training,
-            config_addon=config_addon, verbose=verbose,
-            max_to_keep=max_to_keep,
-            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
+            config=config, verbose=verbose)

     def _model_fn(self, X, y):
         return models.get_dnn_model(self.hidden_units,
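Note: session-level knobs (tf_master, num_cores, gpu_memory_fraction, checkpoint retention) now travel through a single `config` object instead of per-estimator keyword arguments, and `clip_gradients` is newly exposed. A hedged sketch of the updated call site (the import path is assumed from this migration's new layout):

    import numpy as np
    from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNClassifier

    X = np.random.rand(100, 4).astype(np.float32)   # toy features
    y = np.random.randint(0, 3, 100)                # toy labels for n_classes=3

    clf = TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3,
                                  steps=200, clip_gradients=5.0,
                                  config=None)      # or a RunConfig instance
    clf.fit(X, y)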
@@ -104,12 +90,11 @@ class TensorFlowDNNClassifier(TensorFlowEstimator, ClassifierMixin):
         return biases


-class TensorFlowDNNRegressor(TensorFlowEstimator, RegressorMixin):
+class TensorFlowDNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
     """TensorFlow DNN Regressor model.

     Parameters:
         hidden_units: List of hidden units per layer.
-        tf_master: TensorFlow master. Empty string is default for local.
         batch_size: Mini batch size.
         steps: Number of steps to run over data.
         optimizer: Optimizer name (or class), for example "SGD", "Adam",
@@ -122,43 +107,32 @@ class TensorFlowDNNRegressor(TensorFlowEstimator, RegressorMixin):
                 return tf.train.exponential_decay(
                     learning_rate=0.1, global_step=global_step,
                     decay_steps=2, decay_rate=0.001)
-        tf_random_seed: Random seed for TensorFlow initializers.
-            Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
-        config_addon: ConfigAddon object that controls the configurations of the session,
+        config: RunConfig object that controls the configurations of the session,
             e.g. num_cores, gpu_memory_fraction, etc.
         verbose: Controls the verbosity, possible values:
             0: the algorithm and debug information is muted.
             1: trainer prints the progress.
             2: log device placement is printed.
-        max_to_keep: The maximum number of recent checkpoint files to keep.
-            As new files are created, older files are deleted.
-            If None or 0, all checkpoint files are kept.
-            Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
-        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
-            to be saved. The default value of 10,000 hours effectively disables the feature.
         dropout: When not None, the probability we will drop out a given
             coordinate.
     """

-    def __init__(self, hidden_units, n_classes=0, tf_master="", batch_size=32,
+    def __init__(self, hidden_units, n_classes=0, batch_size=32,
                  steps=200, optimizer="Adagrad", learning_rate=0.1,
-                 tf_random_seed=42, continue_training=False, config_addon=None,
-                 verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000,
-                 dropout=None):
+                 clip_gradients=5.0,
+                 continue_training=False, config=None,
+                 verbose=1, dropout=None):
         self.hidden_units = hidden_units
         self.dropout = dropout
         super(TensorFlowDNNRegressor, self).__init__(
             model_fn=self._model_fn,
-            n_classes=n_classes, tf_master=tf_master,
+            n_classes=n_classes,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
-            learning_rate=learning_rate, tf_random_seed=tf_random_seed,
+            learning_rate=learning_rate, clip_gradients=clip_gradients,
             continue_training=continue_training,
-            config_addon=config_addon, verbose=verbose,
-            max_to_keep=max_to_keep,
-            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
+            config=config, verbose=verbose)

     def _model_fn(self, X, y):
         return models.get_dnn_model(self.hidden_units,
@@ -16,28 +16,24 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from sklearn.base import ClassifierMixin, RegressorMixin
-
-from .base import TensorFlowEstimator
-from .. import models
+from tensorflow.contrib.learn.python.learn.estimators import _sklearn
+from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
+from tensorflow.contrib.learn.python.learn import models


-class TensorFlowLinearRegressor(TensorFlowEstimator, RegressorMixin):
+class TensorFlowLinearRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
     """TensorFlow Linear Regression model."""

-    def __init__(self, n_classes=0, tf_master="", batch_size=32, steps=200, optimizer="Adagrad",
-                 learning_rate=0.1, tf_random_seed=42, continue_training=False,
-                 config_addon=None, verbose=1,
-                 max_to_keep=5, keep_checkpoint_every_n_hours=10000):
+    def __init__(self, n_classes=0, batch_size=32, steps=200, optimizer="Adagrad",
+                 learning_rate=0.1, clip_gradients=5.0, continue_training=False,
+                 config=None, verbose=1):
         super(TensorFlowLinearRegressor, self).__init__(
             model_fn=models.linear_regression_zero_init, n_classes=n_classes,
-            tf_master=tf_master,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
-            learning_rate=learning_rate, tf_random_seed=tf_random_seed,
-            continue_training=continue_training, config_addon=config_addon,
-            verbose=verbose, max_to_keep=max_to_keep,
-            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
+            learning_rate=learning_rate, clip_gradients=clip_gradients,
+            continue_training=continue_training, config=config,
+            verbose=verbose)

     @property
     def weights_(self):
@@ -50,23 +46,21 @@ class TensorFlowLinearRegressor(TensorFlowEstimator, RegressorMixin):
         return self.get_tensor_value('linear_regression/bias:0')


-class TensorFlowLinearClassifier(TensorFlowEstimator, ClassifierMixin):
+class TensorFlowLinearClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
     """TensorFlow Linear Classifier model."""

-    def __init__(self, n_classes, tf_master="", batch_size=32, steps=200, optimizer="Adagrad",
-                 learning_rate=0.1, class_weight=None,
-                 tf_random_seed=42, continue_training=False, config_addon=None,
-                 verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000):
+    def __init__(self, n_classes, batch_size=32, steps=200, optimizer="Adagrad",
+                 learning_rate=0.1, class_weight=None, clip_gradients=5.0,
+                 continue_training=False, config=None,
+                 verbose=1):
         super(TensorFlowLinearClassifier, self).__init__(
             model_fn=models.logistic_regression_zero_init, n_classes=n_classes,
-            tf_master=tf_master,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
-            tf_random_seed=tf_random_seed,
-            continue_training=continue_training, config_addon=config_addon,
-            verbose=verbose, max_to_keep=max_to_keep,
-            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
+            clip_gradients=clip_gradients,
+            continue_training=continue_training, config=config,
+            verbose=verbose)

     @property
     def weights_(self):
@@ -16,10 +16,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from sklearn.base import ClassifierMixin, RegressorMixin
-
-from .base import TensorFlowEstimator
-from .. import models
+from tensorflow.contrib.learn.python.learn.estimators import _sklearn
+from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
+from tensorflow.contrib.learn.python.learn import models


 def null_input_op_fn(X):
@@ -27,7 +26,7 @@ def null_input_op_fn(X):
     return X


-class TensorFlowRNNClassifier(TensorFlowEstimator, ClassifierMixin):
+class TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
     """TensorFlow RNN Classifier model.

     Parameters:
@@ -43,7 +42,6 @@ class TensorFlowRNNClassifier(TensorFlowEstimator, ClassifierMixin):
         initial_state: An initial state for the RNN. This must be a tensor of appropriate type
             and shape [batch_size x cell.state_size].
         n_classes: Number of classes in the target.
-        tf_master: TensorFlow master. Empty string is default for local.
         batch_size: Mini batch size.
         steps: Number of steps to run over data.
         optimizer: Optimizer name (or class), for example "SGD", "Adam",
@@ -59,28 +57,20 @@ class TensorFlowRNNClassifier(TensorFlowEstimator, ClassifierMixin):
         class_weight: None or list of n_classes floats. Weight associated with
             classes for loss computation. If not given, all classes are supposed to have
             weight one.
-        tf_random_seed: Random seed for TensorFlow initializers.
-            Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
-        num_cores: Number of cores to be used. (default: 4)
-        max_to_keep: The maximum number of recent checkpoint files to keep.
-            As new files are created, older files are deleted.
-            If None or 0, all checkpoint files are kept.
-            Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
-        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
-            to be saved. The default value of 10,000 hours effectively disables the feature.
+        config: RunConfig object that controls the configurations of the session,
+            e.g. num_cores, gpu_memory_fraction, etc.
     """

     def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
                  input_op_fn=null_input_op_fn,
                  initial_state=None, bidirectional=False,
-                 sequence_length=None, tf_master="", batch_size=32,
+                 sequence_length=None, batch_size=32,
                  steps=50, optimizer="Adagrad", learning_rate=0.1,
-                 class_weight=None,
-                 tf_random_seed=42, continue_training=False,
-                 config_addon=None, verbose=1,
-                 max_to_keep=5, keep_checkpoint_every_n_hours=10000):
+                 class_weight=None, clip_gradients=5.0,
+                 continue_training=False,
+                 config=None, verbose=1):
         self.rnn_size = rnn_size
         self.cell_type = cell_type
@@ -91,14 +81,12 @@ class TensorFlowRNNClassifier(TensorFlowEstimator, ClassifierMixin):
         self.initial_state = initial_state
         super(TensorFlowRNNClassifier, self).__init__(
             model_fn=self._model_fn,
-            n_classes=n_classes, tf_master=tf_master,
+            n_classes=n_classes,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
             learning_rate=learning_rate, class_weight=class_weight,
-            tf_random_seed=tf_random_seed,
-            continue_training=continue_training, config_addon=config_addon,
-            verbose=verbose,
-            max_to_keep=max_to_keep,
-            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
+            clip_gradients=clip_gradients,
+            continue_training=continue_training, config=config,
+            verbose=verbose)

     def _model_fn(self, X, y):
         return models.get_rnn_model(self.rnn_size, self.cell_type,
@@ -119,7 +107,7 @@ class TensorFlowRNNClassifier(TensorFlowEstimator, ClassifierMixin):
         return self.get_tensor_value('logistic_regression/weights:0')


-class TensorFlowRNNRegressor(TensorFlowEstimator, RegressorMixin):
+class TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
     """TensorFlow RNN Regressor model.

     Parameters:
@@ -134,7 +122,6 @@ class TensorFlowRNNRegressor(TensorFlowEstimator, RegressorMixin):
             This saves computational time when unrolling past max sequence length.
         initial_state: An initial state for the RNN. This must be a tensor of appropriate type
             and shape [batch_size x cell.state_size].
-        tf_master: TensorFlow master. Empty string is default for local.
         batch_size: Mini batch size.
         steps: Number of steps to run over data.
         optimizer: Optimizer name (or class), for example "SGD", "Adam",
@@ -147,32 +134,24 @@ class TensorFlowRNNRegressor(TensorFlowEstimator, RegressorMixin):
                 return tf.train.exponential_decay(
                     learning_rate=0.1, global_step=global_step,
                     decay_steps=2, decay_rate=0.001)
-        tf_random_seed: Random seed for TensorFlow initializers.
-            Setting this value allows consistency between reruns.
         continue_training: when continue_training is True, once initialized
             model will be continually trained on every call of fit.
-        num_cores: Number of cores to be used. (default: 4)
+        config: RunConfig object that controls the configurations of the session,
+            e.g. num_cores, gpu_memory_fraction, etc.
         verbose: Controls the verbosity, possible values:
             0: the algorithm and debug information is muted.
             1: trainer prints the progress.
             2: log device placement is printed.
-        max_to_keep: The maximum number of recent checkpoint files to keep.
-            As new files are created, older files are deleted.
-            If None or 0, all checkpoint files are kept.
-            Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
-        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
-            to be saved. The default value of 10,000 hours effectively disables the feature.
     """

     def __init__(self, rnn_size, cell_type='gru', num_layers=1,
                  input_op_fn=null_input_op_fn, initial_state=None,
                  bidirectional=False, sequence_length=None,
-                 n_classes=0, tf_master="", batch_size=32,
+                 n_classes=0, batch_size=32,
                  steps=50, optimizer="Adagrad", learning_rate=0.1,
-                 tf_random_seed=42, continue_training=False,
-                 config_addon=None, verbose=1,
-                 max_to_keep=5, keep_checkpoint_every_n_hours=10000):
+                 clip_gradients=5.0,
+                 continue_training=False,
+                 config=None, verbose=1):
         self.rnn_size = rnn_size
         self.cell_type = cell_type
         self.input_op_fn = input_op_fn
@@ -182,12 +161,11 @@ class TensorFlowRNNRegressor(TensorFlowEstimator, RegressorMixin):
         self.initial_state = initial_state
         super(TensorFlowRNNRegressor, self).__init__(
             model_fn=self._model_fn,
-            n_classes=n_classes, tf_master=tf_master,
+            n_classes=n_classes,
             batch_size=batch_size, steps=steps, optimizer=optimizer,
-            learning_rate=learning_rate, tf_random_seed=tf_random_seed,
-            continue_training=continue_training, config_addon=config_addon,
-            verbose=verbose, max_to_keep=max_to_keep,
-            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
+            learning_rate=learning_rate, clip_gradients=clip_gradients,
+            continue_training=continue_training, config=config,
+            verbose=verbose)

     def _model_fn(self, X, y):
         return models.get_rnn_model(self.rnn_size, self.cell_type,
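Note: the RNN estimators lose the same per-estimator session arguments as the DNN and linear ones. A constructor call after this change would look roughly like the following sketch (import path assumed from the new layout; the dataset-specific arguments such as input_op_fn are left at their defaults):

    from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNClassifier

    clf = TensorFlowRNNClassifier(rnn_size=16, n_classes=2, cell_type='gru',
                                  num_layers=1, steps=50,
                                  clip_gradients=5.0,   # new argument
                                  config=None)          # session knobs now via RunConfig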
@@ -0,0 +1,64 @@
+"""Run Config."""
+#  Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python import GPUOptions, ConfigProto
+
+
+class RunConfig(object):
+    """This class specifies the specific configurations for the run.
+
+    Parameters:
+        tf_master: TensorFlow master. Empty string is default for local.
+        num_cores: Number of cores to be used. (default: 4)
+        verbose: Controls the verbosity, possible values:
+            0: the algorithm and debug information is muted.
+            1: trainer prints the progress.
+            2: log device placement is printed.
+        gpu_memory_fraction: Fraction of GPU memory used by the process on
+            each GPU uniformly on the same machine.
+        tf_random_seed: Random seed for TensorFlow initializers.
+            Setting this value allows consistency between reruns.
+        keep_checkpoint_max: The maximum number of recent checkpoint files to keep.
+            As new files are created, older files are deleted.
+            If None or 0, all checkpoint files are kept.
+            Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
+        keep_checkpoint_every_n_hours: Number of hours between each checkpoint
+            to be saved. The default value of 10,000 hours effectively disables the feature.
+
+    Attributes:
+        tf_master: Tensorflow master.
+        tf_config: Tensorflow Session Config proto.
+        tf_random_seed: Tensorflow random seed.
+        keep_checkpoint_max: Maximum number of checkpoints to keep.
+        keep_checkpoint_every_n_hours: Number of hours between each checkpoint.
+    """
+
+    def __init__(self, tf_master='', num_cores=4, verbose=1,
+                 gpu_memory_fraction=1, tf_random_seed=42,
+                 keep_checkpoint_max=5,
+                 keep_checkpoint_every_n_hours=10000):
+        self.tf_master = tf_master
+        gpu_options = GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
+        self.tf_config = ConfigProto(log_device_placement=(verbose > 1),
+                                     inter_op_parallelism_threads=num_cores,
+                                     intra_op_parallelism_threads=num_cores,
+                                     gpu_options=gpu_options)
+        self.tf_random_seed = tf_random_seed
+        self.keep_checkpoint_max = keep_checkpoint_max
+        self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
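Note: RunConfig folds the session flags into one object and materializes them as a ConfigProto eagerly in `__init__`. A small sketch of the resulting mapping (the import location is an assumption, as above):

    # Assumed import location for the RunConfig class added above.
    from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig

    config = RunConfig(num_cores=8, verbose=2, gpu_memory_fraction=0.5)
    # Both parallelism thread pools are sized from num_cores:
    assert config.tf_config.inter_op_parallelism_threads == 8
    assert config.tf_config.intra_op_parallelism_threads == 8
    assert config.tf_config.log_device_placement  # True because verbose > 1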
@@ -16,5 +16,5 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.contrib.skflow.python.skflow.io.pandas_io import *
-from tensorflow.contrib.skflow.python.skflow.io.dask_io import *
+from tensorflow.contrib.learn.python.learn.io.pandas_io import *
+from tensorflow.contrib.learn.python.learn.io.dask_io import *
@@ -24,7 +24,9 @@ import six
 from six.moves import xrange  # pylint: disable=redefined-builtin

 import numpy as np
-from sklearn.utils import check_array
+
+from tensorflow.python.ops import array_ops
+from tensorflow.python.framework import dtypes

 from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
 from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
@@ -36,16 +38,15 @@ def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size):
     input_shape = [batch_size] + x_shape
     if y_shape is None:
         return input_shape, None
-    else:
-        y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
-        # Skip first dimension if it is 1.
-        if y_shape and y_shape[0] == 1:
-            y_shape = y_shape[1:]
-        if n_classes > 1:
-            output_shape = [batch_size] + y_shape + [n_classes]
-        else:
-            output_shape = [batch_size] + y_shape
-        return input_shape, output_shape
+    y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
+    # Skip first dimension if it is 1.
+    if y_shape and y_shape[0] == 1:
+        y_shape = y_shape[1:]
+    if n_classes > 1:
+        output_shape = [batch_size] + y_shape + [n_classes]
+    else:
+        output_shape = [batch_size] + y_shape
+    return input_shape, output_shape


 def _data_type_filter(X, y):
@@ -147,6 +148,20 @@ def setup_processor_data_feeder(X):
     return X


+def check_array(array, dtype):
+    """Checks array on dtype and converts it if different.
+
+    Args:
+        array: Input array.
+        dtype: Expected dtype.
+
+    Returns:
+        Original array or converted.
+    """
+    array = np.array(array, dtype=dtype, order=None, copy=False)
+    return array
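Note: this local `check_array` replaces the sklearn import dropped above; it only casts, without sklearn's 2-D/ndim validation. A runnable NumPy analogue of its behavior:

    import numpy as np

    a = np.arange(4, dtype=np.int64)
    b = np.array(a, dtype=np.int64, order=None, copy=False)
    assert b is a                  # same dtype: no copy is made
    c = np.asarray(a, dtype=np.float32)
    assert c.dtype == np.float32   # differing dtype: a converted copy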

 class DataFeeder(object):
     """Data feeder is an example class to sample data for TF trainer.

@@ -173,10 +188,8 @@ class DataFeeder(object):
     def __init__(self, X, y, n_classes, batch_size, random_state=None):
         x_dtype = np.int64 if X.dtype == np.int64 else np.float32
         y_dtype = np.int64 if n_classes > 1 else np.float32
-        self.X = check_array(X, ensure_2d=False,
-                             allow_nd=True, dtype=x_dtype)
-        self.y = (None if y is None
-                  else check_array(y, ensure_2d=False, dtype=y_dtype))
+        self.X = check_array(X, dtype=x_dtype)
+        self.y = (None if y is None else check_array(y, dtype=y_dtype))
         self.n_classes = n_classes
         self.batch_size = batch_size
         self.input_shape, self.output_shape = _get_in_out_shape(
@@ -194,6 +207,36 @@ class DataFeeder(object):
         self.offset = 0
         self.epoch = 0

+    def input_builder(self):
+        """Builds inputs in the graph.
+
+        Returns:
+            Two placeholders for inputs and outputs.
+        """
+        input_shape = [None] + self.input_shape[1:]
+        self._input_placeholder = array_ops.placeholder(dtypes.as_dtype(self.input_dtype), input_shape,
+                                                        name="input")
+        if self.output_shape is None:
+            self._output_placeholder = None
+        else:
+            output_shape = [None] + self.output_shape[1:]
+            self._output_placeholder = array_ops.placeholder(dtypes.as_dtype(self.output_dtype), output_shape,
+                                                             name="output")
+        return self._input_placeholder, self._output_placeholder
+
+    def set_placeholders(self, input_placeholder, output_placeholder):
+        """Sets placeholders for this data feeder.
+
+        Args:
+            input_placeholder: Placeholder for `X` variable. Should match shape
+                of the examples in the X dataset.
+            output_placeholder: Placeholder for `y` variable. Should match
+                shape of the examples in the y dataset. Can be None.
+        """
+        self._input_placeholder = input_placeholder
+        self._output_placeholder = output_placeholder
+
     def get_feed_params(self):
         """Function returns a dict with data feed params while training.
         Returns:
@@ -205,7 +248,7 @@ class DataFeeder(object):
             'batch_size': self.batch_size
         }

-    def get_feed_dict_fn(self, input_placeholder, output_placeholder=None):
+    def get_feed_dict_fn(self):
         """Returns a function that will sample data and provide it to given
         placeholders.

@@ -216,6 +259,7 @@ class DataFeeder(object):
         A function that when called samples a random subset of batch size
         from X and y.
         """
+        assert self._input_placeholder != None
         def _feed_dict_fn():
             # take random indices
             batch_indices = self.indices[self.offset: self.offset+self.batch_size]
@@ -224,38 +268,36 @@ class DataFeeder(object):
             inp = np.array(self.X[batch_indices]).reshape((batch_indices.shape[0], 1)) \
                 if len(self.X.shape) == 1 else self.X[batch_indices]

-            if output_placeholder is None:
-                return {input_placeholder.name: inp}
-            else:
-                assert self.y is not None
-                assert self.output_shape is not None
+            if self._output_placeholder is None:
+                return {self._input_placeholder.name: inp}

-                # assign labels from random indices
-                self.output_shape[0] = batch_indices.shape[0]
-                out = np.zeros(self.output_shape, dtype=self.output_dtype)
-                for i in xrange(out.shape[0]):
-                    sample = batch_indices[i]
-                    if self.n_classes > 1:
-                        if len(self.output_shape) == 2:
-                            out.itemset((i, self.y[sample]), 1.0)
-                        else:
-                            for idx, value in enumerate(self.y[sample]):
-                                out.itemset(tuple([i, idx, value]), 1.0)
-                    else:
-                        out[i] = self.y[sample]
+            # assign labels from random indices
+            self.output_shape[0] = batch_indices.shape[0]
+            out = np.zeros(self.output_shape, dtype=self.output_dtype)
+            for i in xrange(out.shape[0]):
+                sample = batch_indices[i]
+                if self.n_classes > 1:
+                    if len(self.output_shape) == 2:
+                        out.itemset((i, self.y[sample]), 1.0)
+                    else:
+                        for idx, value in enumerate(self.y[sample]):
+                            out.itemset(tuple([i, idx, value]), 1.0)
+                else:
+                    out[i] = self.y[sample]

-                # move offset and reset it if necessary
-                self.offset += self.batch_size
-                if self.offset >= self.X.shape[0]:
-                    self.indices = self.random_state.permutation(self.X.shape[0])
-                    self.offset = 0
-                    self.epoch += 1
+            # move offset and reset it if necessary
+            self.offset += self.batch_size
+            if self.offset >= self.X.shape[0]:
+                self.indices = self.random_state.permutation(self.X.shape[0])
+                self.offset = 0
+                self.epoch += 1

-                return {input_placeholder.name: inp, output_placeholder.name: out}
+            return {self._input_placeholder.name: inp,
+                    self._output_placeholder.name: out}
         return _feed_dict_fn
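Note: `get_feed_dict_fn` now reads the placeholders stored by `input_builder`/`set_placeholders` instead of taking them as arguments. The label-assembly loop above, restated as a runnable NumPy sketch for the common rank-2 classification case (plain indexing stands in for `itemset`):

    import numpy as np

    y = np.array([0, 2, 1])        # class ids for one sampled batch
    n_classes = 3
    out = np.zeros((len(y), n_classes), dtype=np.float32)
    for i in range(out.shape[0]):
        out[i, y[i]] = 1.0         # one-hot row per sample
    # out == [[1, 0, 0], [0, 0, 1], [0, 1, 0]]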

-class StreamingDataFeeder(object):
+class StreamingDataFeeder(DataFeeder):
     """Data feeder for TF trainer that reads data from iterator.

     Streaming data feeder allows to read data as it comes in from disk or
@@ -305,7 +347,7 @@ class StreamingDataFeeder(object):
         """
         return {'batch_size': self.batch_size}

-    def get_feed_dict_fn(self, input_placeholder, output_placeholder):
+    def get_feed_dict_fn(self):
         """Returns a function that will sample data and provide it to given
         placeholders.
@@ -331,7 +373,7 @@ class StreamingDataFeeder(object):
                     out.itemset(tuple([i, idx, value]), 1.0)
                 else:
                     out[i] = y
-            return {input_placeholder.name: inp, output_placeholder.name: out}
+            return {self._input_placeholder.name: inp, self._output_placeholder.name: out}
         return _feed_dict_fn

@@ -16,9 +16,16 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops as array_ops_
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import variable_scope as vs

-from .ops import mean_squared_error_regressor, softmax_classifier, dnn
+from tensorflow.contrib.learn.python.learn.ops import mean_squared_error_regressor, softmax_classifier, dnn


 def linear_regression_zero_init(X, y):
@@ -71,9 +78,9 @@ def linear_regression(X, y, init_mean=None, init_stddev=1.0):
         is desirable for convex use cases.) If init_mean is None, then the
         uniform_unit_scaling_initializer will be used.
     """
-    with tf.variable_scope('linear_regression'):
-        tf.histogram_summary('linear_regression.X', X)
-        tf.histogram_summary('linear_regression.y', y)
+    with vs.variable_scope('linear_regression'):
+        logging_ops.histogram_summary('linear_regression.X', X)
+        logging_ops.histogram_summary('linear_regression.y', y)
         y_shape = y.get_shape()
         if len(y_shape) == 1:
             output_shape = 1
@@ -81,21 +88,21 @@ def linear_regression(X, y, init_mean=None, init_stddev=1.0):
             output_shape = y_shape[1]
         # Set up the requested initialization.
         if (init_mean is None):
-            weights = tf.get_variable('weights',
+            weights = vs.get_variable('weights',
                                       [X.get_shape()[1], output_shape])
-            bias = tf.get_variable('bias',
+            bias = vs.get_variable('bias',
                                    [output_shape])
         else:
-            weights = tf.get_variable('weights',
+            weights = vs.get_variable('weights',
                                       [X.get_shape()[1], output_shape],
-                                      initializer=tf.random_normal_initializer(
+                                      initializer=init_ops.random_normal_initializer(
                                           init_mean, init_stddev))
-            bias = tf.get_variable('bias',
+            bias = vs.get_variable('bias',
                                    [output_shape],
-                                   initializer=tf.random_normal_initializer(
+                                   initializer=init_ops.random_normal_initializer(
                                        init_mean, init_stddev))
-        tf.histogram_summary('linear_regression.weights', weights)
-        tf.histogram_summary('linear_regression.bias', bias)
+        logging_ops.histogram_summary('linear_regression.weights', weights)
+        logging_ops.histogram_summary('linear_regression.bias', bias)
         return mean_squared_error_regressor(X, y, weights, bias)


@@ -126,31 +133,31 @@ def logistic_regression(X, y, class_weight=None, init_mean=None,
         is desirable for convex use cases.) If init_mean is None, then the
         uniform_unit_scaling_initializer will be used.
     """
-    with tf.variable_scope('logistic_regression'):
-        tf.histogram_summary('logistic_regression.X', X)
-        tf.histogram_summary('logistic_regression.y', y)
+    with vs.variable_scope('logistic_regression'):
+        logging_ops.histogram_summary('logistic_regression.X', X)
+        logging_ops.histogram_summary('logistic_regression.y', y)
         # Set up the requested initialization.
         if (init_mean is None):
-            weights = tf.get_variable('weights',
+            weights = vs.get_variable('weights',
                                       [X.get_shape()[1], y.get_shape()[-1]])
-            bias = tf.get_variable('bias',
+            bias = vs.get_variable('bias',
                                    [y.get_shape()[-1]])
         else:
-            weights = tf.get_variable('weights',
+            weights = vs.get_variable('weights',
                                       [X.get_shape()[1], y.get_shape()[-1]],
-                                      initializer=tf.random_normal_initializer(
+                                      initializer=init_ops.random_normal_initializer(
                                           init_mean, init_stddev))
-            bias = tf.get_variable('bias',
+            bias = vs.get_variable('bias',
                                    [y.get_shape()[-1]],
-                                   initializer=tf.random_normal_initializer(
+                                   initializer=init_ops.random_normal_initializer(
                                        init_mean, init_stddev))
-        tf.histogram_summary('logistic_regression.weights', weights)
-        tf.histogram_summary('logistic_regression.bias', bias)
+        logging_ops.histogram_summary('logistic_regression.weights', weights)
+        logging_ops.histogram_summary('logistic_regression.bias', bias)
         # If no class weight provided, try to retrieve one from pre-defined
         # tensor name in the graph.
         if not class_weight:
             try:
-                class_weight = tf.get_default_graph().get_tensor_by_name('class_weight:0')
+                class_weight = ops.get_default_graph().get_tensor_by_name('class_weight:0')
             except KeyError:
                 pass

@@ -200,12 +207,12 @@ def _reverse_seq(input_seq, lengths):
         input_.set_shape(input_.get_shape().with_rank(2))

     # Join into (time, batch_size, depth)
-    s_joined = tf.pack(input_seq)
+    s_joined = array_ops_.pack(input_seq)

     # Reverse along dimension 0
-    s_reversed = tf.reverse_sequence(s_joined, lengths, 0, 1)
+    s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
     # Split again into list
-    result = tf.unpack(s_reversed)
+    result = array_ops_.unpack(s_reversed)
     return result


@@ -245,9 +252,9 @@ def bidirectional_rnn(cell_fw, cell_bw, inputs,
         ValueError: If inputs is None or an empty list.
     """

-    if not isinstance(cell_fw, tf.nn.rnn_cell.RNNCell):
+    if not isinstance(cell_fw, nn.rnn_cell.RNNCell):
         raise TypeError("cell_fw must be an instance of RNNCell")
-    if not isinstance(cell_bw, tf.nn.rnn_cell.RNNCell):
+    if not isinstance(cell_bw, nn.rnn_cell.RNNCell):
         raise TypeError("cell_bw must be an instance of RNNCell")
     if not isinstance(inputs, list):
         raise TypeError("inputs must be a list")
@@ -256,20 +263,20 @@ def bidirectional_rnn(cell_fw, cell_bw, inputs,

     name = scope or "BiRNN"
     # Forward direction
-    with tf.variable_scope(name + "_FW"):
-        output_fw, state_fw = tf.nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
-                                        sequence_length)
+    with vs.variable_scope(name + "_FW"):
+        output_fw, state_fw = nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
+                                     sequence_length)

     # Backward direction
-    with tf.variable_scope(name + "_BW"):
-        tmp, state_bw = tf.nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length),
-                                  initial_state_bw, dtype, sequence_length)
+    with vs.variable_scope(name + "_BW"):
+        tmp, state_bw = nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length),
+                               initial_state_bw, dtype, sequence_length)
     output_bw = _reverse_seq(tmp, sequence_length)
     # Concat each of the forward/backward outputs
-    outputs = [tf.concat(1, [fw, bw])
+    outputs = [array_ops_.concat(1, [fw, bw])
                for fw, bw in zip(output_fw, output_bw)]

-    return outputs, tf.concat(1, [state_fw, state_bw])
+    return outputs, array_ops_.concat(1, [state_fw, state_bw])

 # End of Tensorflow 0.7

@@ -305,27 +312,27 @@ def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn,
     """RNN estimator with target predictor function on top."""
     X = input_op_fn(X)
     if cell_type == 'rnn':
-        cell_fn = tf.nn.rnn_cell.BasicRNNCell
+        cell_fn = nn.rnn_cell.BasicRNNCell
     elif cell_type == 'gru':
-        cell_fn = tf.nn.rnn_cell.GRUCell
+        cell_fn = nn.rnn_cell.GRUCell
     elif cell_type == 'lstm':
-        cell_fn = tf.nn.rnn_cell.BasicLSTMCell
+        cell_fn = nn.rnn_cell.BasicLSTMCell
     else:
         raise ValueError("cell_type {} is not supported. ".format(cell_type))
     if bidirectional:
         # forward direction cell
-        rnn_fw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
+        rnn_fw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
         # backward direction cell
-        rnn_bw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
+        rnn_bw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
         # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
         _, encoding = bidirectional_rnn(rnn_fw_cell, rnn_bw_cell, X,
-                                        dtype=tf.float32,
+                                        dtype=dtypes.float32,
                                         sequence_length=sequence_length,
                                         initial_state_fw=initial_state,
                                         initial_state_bw=initial_state)
     else:
-        cell = tf.nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
-        _, encoding = tf.nn.rnn(cell, X, dtype=tf.float32,
+        cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
+        _, encoding = nn.rnn(cell, X, dtype=dtypes.float32,
                              sequence_length=sequence_length,
                              initial_state=initial_state)
     return target_predictor_fn(encoding, y)
@@ -20,7 +20,7 @@ from __future__ import print_function
 import sys
 import numpy as np

-from tensorflow.contrib.skflow.python.skflow.io.data_feeder import setup_train_data_feeder
+from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_train_data_feeder


 # pylint: disable=too-many-instance-attributes
@@ -162,7 +162,8 @@ class ValidationMonitor(BaseMonitor):

     def create_val_feed_dict(self, inp, out):
         """Set tensorflow placeholders and create validation data feed"""
-        self.val_dict = self.val_feeder.get_feed_dict_fn(inp, out)()
+        self.val_feeder.set_placeholders(inp, out)
+        self.val_dict = self.val_feeder.get_feed_dict_fn()()

     def _set_last_loss_seen(self):
         """Sets self.last_loss_seen to most recent validation loss
@@ -16,11 +16,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.contrib.skflow.python.skflow.ops.array_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.conv_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.dnn_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.dropout_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.embeddings_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.losses_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.seq2seq_ops import *
-from tensorflow.contrib.skflow.python.skflow.ops.batch_norm_ops import *
+from tensorflow.contrib.learn.python.learn.ops.array_ops import *
+from tensorflow.contrib.learn.python.learn.ops.conv_ops import *
+from tensorflow.contrib.learn.python.learn.ops.dnn_ops import *
+from tensorflow.contrib.learn.python.learn.ops.dropout_ops import *
+from tensorflow.contrib.learn.python.learn.ops.embeddings_ops import *
+from tensorflow.contrib.learn.python.learn.ops.losses_ops import *
+from tensorflow.contrib.learn.python.learn.ops.seq2seq_ops import *
+from tensorflow.contrib.learn.python.learn.ops.batch_norm_ops import *
@@ -16,7 +16,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops as array_ops_
+from tensorflow.python.ops import math_ops


 def split_squeeze(dim, num_split, tensor_in):
@@ -30,7 +32,8 @@ def split_squeeze(dim, num_split, tensor_in):
     Returns:
         List of tensors [N1, N2, .. Ndim-1, Ndim+1, .. Nx].
     """
-    return [tf.squeeze(t, squeeze_dims=[dim]) for t in tf.split(dim, num_split, tensor_in)]
+    return [array_ops_.squeeze(t, squeeze_dims=[dim])
+            for t in array_ops_.split(dim, num_split, tensor_in)]


 def expand_concat(dim, inputs):
@@ -43,7 +46,7 @@ def expand_concat(dim, inputs):
     Returns:
         A tensor of shape [N1, .. Ndim, ... Nx]
     """
-    return tf.concat(dim, [tf.expand_dims(t, dim) for t in inputs])
+    return array_ops_.concat(dim, [array_ops_.expand_dims(t, dim) for t in inputs])


 def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
@@ -61,14 +64,5 @@ def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
         Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
         tensor.
     """
-    tensor_in = tf.convert_to_tensor(tensor_in)
-    sparse_values = tf.to_int64(tf.reshape(tensor_in, [-1, 1]))
-    size = tf.shape(sparse_values)[0]
-    dims = tf.shape(tensor_in)
-    indices = tf.to_int64(tf.reshape(tf.range(0, size), [-1, 1]))
-    indices_values = tf.concat(1, [indices, sparse_values])
-    outshape = tf.to_int64(expand_concat(0, [size, num_classes]))
-    one_hot_vector = tf.sparse_to_dense(indices_values, outshape, on_value, off_value)
-    ret = tf.reshape(one_hot_vector, tf.concat(0, [dims, [num_classes]]))
-    ret.set_shape(tensor_in.get_shape().concatenate(num_classes))
-    return ret
+    return array_ops_.one_hot(math_ops.cast(tensor_in, dtypes.int64), num_classes, on_value, off_value)
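Note: the hand-rolled sparse_to_dense construction collapses into the dedicated one-hot op; the cast keeps integer ids as int64. The shape contract (an extra trailing num_classes axis) as a runnable NumPy sketch:

    import numpy as np

    ids = np.array([[0, 2], [1, 1]])            # shape [N1, N2]
    num_classes, on_value, off_value = 3, 1.0, 0.0
    one_hot = np.full(ids.shape + (num_classes,), off_value)
    for idx in np.ndindex(ids.shape):
        one_hot[idx + (ids[idx],)] = on_value   # result shape [N1, N2, num_classes]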
@@ -16,10 +16,16 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops as array_ops_
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.python.training import moving_averages


-def batch_normalize(tensor_in, epsilon=1e-5, convnet=True, decay=0.9,
+def batch_normalize(tensor_in, epsilon=1e-5, convnet=False, decay=0.9,
                     scale_after_normalization=True):
     """Batch Normalization

@@ -34,25 +40,25 @@ def batch_normalize(tensor_in, epsilon=1e-5, convnet=False, decay=0.9,
     """
     shape = tensor_in.get_shape().as_list()

-    with tf.variable_scope("batch_norm"):
-        gamma = tf.get_variable("gamma", [shape[-1]],
-                                initializer=tf.random_normal_initializer(1., 0.02))
-        beta = tf.get_variable("beta", [shape[-1]],
-                               initializer=tf.constant_initializer(0.))
-        ema = tf.train.ExponentialMovingAverage(decay=decay)
+    with vs.variable_scope("batch_norm"):
+        gamma = vs.get_variable("gamma", [shape[-1]],
+                                initializer=init_ops.random_normal_initializer(1., 0.02))
+        beta = vs.get_variable("beta", [shape[-1]],
+                               initializer=init_ops.constant_initializer(0.))
+        ema = moving_averages.ExponentialMovingAverage(decay=decay)
         if convnet:
-            assign_mean, assign_var = tf.nn.moments(tensor_in, [0, 1, 2])
+            assign_mean, assign_var = nn.moments(tensor_in, [0, 1, 2])
         else:
-            assign_mean, assign_var = tf.nn.moments(tensor_in, [0])
+            assign_mean, assign_var = nn.moments(tensor_in, [0])
         ema_assign_op = ema.apply([assign_mean, assign_var])
         ema_mean, ema_var = ema.average(assign_mean), ema.average(assign_var)
         def update_mean_var():
             """Internal function that updates mean and variance during training"""
-            with tf.control_dependencies([ema_assign_op]):
-                return tf.identity(assign_mean), tf.identity(assign_var)
-        is_training = tf.squeeze(tf.get_collection("IS_TRAINING"))
-        mean, variance = tf.cond(
+            with ops.control_dependencies([ema_assign_op]):
+                return array_ops_.identity(assign_mean), array_ops_.identity(assign_var)
+        is_training = array_ops_.squeeze(ops.get_collection("IS_TRAINING"))
+        mean, variance = control_flow_ops.cond(
             is_training, update_mean_var, lambda: (ema_mean, ema_var))
-        return tf.nn.batch_norm_with_global_normalization(
+        return nn.batch_norm_with_global_normalization(
             tensor_in, mean, variance, beta, gamma, epsilon,
             scale_after_normalization=scale_after_normalization)
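Note: the `convnet` default flips to False so the generic dense case is the default; conv2d below now passes `convnet=True` explicitly. What the flag changes is the set of axes the moments are computed over, illustrated in NumPy:

    import numpy as np

    conv_act = np.random.rand(8, 4, 4, 16)      # NHWC conv activations
    dense_act = np.random.rand(8, 32)           # [batch, features]
    conv_mean = conv_act.mean(axis=(0, 1, 2))   # per channel -> shape (16,)
    dense_mean = dense_act.mean(axis=0)         # per feature -> shape (32,)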
@@ -16,8 +16,10 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
-from .batch_norm_ops import batch_normalize
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.contrib.learn.python.learn.ops.batch_norm_ops import batch_normalize


 def conv2d(tensor_in, n_filters, filter_shape, strides=None, padding='SAME',
@@ -45,19 +47,19 @@ def conv2d(tensor_in, n_filters, filter_shape, strides=None, padding='SAME',
     Returns:
         A Tensor with resulting convolution.
     """
-    with tf.variable_scope('convolution'):
+    with vs.variable_scope('convolution'):
         if strides is None:
             strides = [1, 1, 1, 1]
         input_shape = tensor_in.get_shape()
         filter_shape = list(filter_shape) + [input_shape[3], n_filters]
-        filters = tf.get_variable('filters', filter_shape, tf.float32)
-        output = tf.nn.conv2d(tensor_in, filters, strides, padding)
+        filters = vs.get_variable('filters', filter_shape, dtypes.float32)
+        output = nn.conv2d(tensor_in, filters, strides, padding)
         if bias:
-            bias_var = tf.get_variable('bias', [1, 1, 1, n_filters],
-                                       tf.float32)
+            bias_var = vs.get_variable('bias', [1, 1, 1, n_filters],
+                                       dtypes.float32)
             output = output + bias_var
         if batch_norm:
-            output = batch_normalize(output)
+            output = batch_normalize(output, convnet=True)
         if activation:
             output = activation(output)
         return output
@@ -16,12 +16,13 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
-
-from tensorflow.contrib.skflow.python.skflow.ops import dropout_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import rnn_cell
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.contrib.learn.python.learn.ops import dropout_ops


-def dnn(tensor_in, hidden_units, activation=tf.nn.relu, dropout=None):
+def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
     """Creates fully connected deep neural network subgraph.

     Args:
@@ -34,10 +35,10 @@ def dnn(tensor_in, hidden_units, activation=tf.nn.relu, dropout=None):
     Returns:
         A tensor which would be a deep neural network.
     """
-    with tf.variable_scope('dnn'):
+    with vs.variable_scope('dnn'):
         for i, n_units in enumerate(hidden_units):
-            with tf.variable_scope('layer%d' % i):
-                tensor_in = tf.nn.rnn_cell.linear(tensor_in, n_units, True)
+            with vs.variable_scope('layer%d' % i):
+                tensor_in = rnn_cell.linear(tensor_in, n_units, True)
                 if activation is not None:
                     tensor_in = activation(tensor_in)
                 if dropout is not None:
@@ -16,7 +16,10 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import variable_scope as vs


 # Key to collect dropout probabilities.
@@ -36,11 +39,11 @@ def dropout(tensor_in, prob, name=None):
     Raises:
         ValueError: If `keep_prob` is not in `(0, 1]`.
     """
-    with tf.op_scope([tensor_in], name, "dropout") as name:
+    with ops.op_scope([tensor_in], name, "dropout") as name:
         if isinstance(prob, float):
-            prob = tf.get_variable("prob", [],
-                                   initializer=tf.constant_initializer(prob),
+            prob = vs.get_variable("prob", [],
+                                   initializer=init_ops.constant_initializer(prob),
                                    trainable=False)
-        tf.add_to_collection(DROPOUTS, prob)
-        return tf.nn.dropout(tensor_in, prob)
+        ops.add_to_collection(DROPOUTS, prob)
+        return nn.dropout(tensor_in, prob)
@@ -20,7 +20,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops as array_ops_
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import variable_scope as vs


 def embedding_lookup(params, ids, name="embedding_lookup"):
@@ -43,14 +47,14 @@ def embedding_lookup(params, ids, name="embedding_lookup"):
     Raises:
         ValueError: if some parameters are invalid.
     """
-    with tf.op_scope([params, ids], name, "embedding_lookup"):
-        params = tf.convert_to_tensor(params)
-        ids = tf.convert_to_tensor(ids)
-        shape = tf.shape(ids)
-        ids_flat = tf.reshape(ids, tf.reduce_prod(shape, keep_dims=True))
-        embeds_flat = tf.nn.embedding_lookup(params, ids_flat, name)
-        embed_shape = tf.concat(0, [shape, [-1]])
-        embeds = tf.reshape(embeds_flat, embed_shape)
+    with ops.op_scope([params, ids], name, "embedding_lookup"):
+        params = ops.convert_to_tensor(params)
+        ids = ops.convert_to_tensor(ids)
+        shape = array_ops_.shape(ids)
+        ids_flat = array_ops_.reshape(ids, math_ops.reduce_prod(shape, keep_dims=True))
+        embeds_flat = nn.embedding_lookup(params, ids_flat, name)
+        embed_shape = array_ops_.concat(0, [shape, [-1]])
+        embeds = array_ops_.reshape(embeds_flat, embed_shape)
         embeds.set_shape(ids.get_shape().concatenate(params.get_shape()[1:]))
         return embeds

@@ -72,7 +76,7 @@ def categorical_variable(tensor_in, n_classes, embedding_size, name):
     Calling categorical_variable([1, 2], 5, 10, "my_cat"), will return 2 x 10
     tensor, where each row is a representation of the class.
     """
-    with tf.variable_scope(name):
-        embeddings = tf.get_variable(
+    with vs.variable_scope(name):
+        embeddings = vs.get_variable(
             name + "_embeddings", [n_classes, embedding_size])
         return embedding_lookup(embeddings, tensor_in)
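Note: `embedding_lookup` here generalizes the stock op to ids of arbitrary rank, and `categorical_variable` then owns the table. The shape contract as a runnable NumPy sketch:

    import numpy as np

    params = np.random.rand(5, 10)       # [n_classes, embedding_size]
    ids = np.array([[1, 2], [4, 0]])     # arbitrary-rank class ids
    embeds = params[ids]                 # NumPy fancy indexing mirrors the lookup
    assert embeds.shape == (2, 2, 10)    # ids.shape + (embedding_size,)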
@@ -16,18 +16,20 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops as array_ops_
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.contrib.losses.python.losses import loss_ops


 def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
     """Returns prediction and loss for mean squared error regression."""
-    with tf.op_scope([tensor_in, labels], name, "mean_squared_error_regressor"):
-        predictions = tf.nn.xw_plus_b(tensor_in, weights, biases)
+    with ops.op_scope([tensor_in, labels], name, "mean_squared_error_regressor"):
+        predictions = nn.xw_plus_b(tensor_in, weights, biases)
         if len(labels.get_shape()) == 1:
-            labels = tf.reshape(labels, [-1, 1])
-        diff = labels - predictions
-        loss = tf.reduce_mean(tf.mul(diff, diff))
-        return predictions, loss
+            labels = array_ops_.reshape(labels, [-1, 1])
+        return predictions, loss_ops.sum_of_squares(predictions, labels)


 def softmax_classifier(tensor_in, labels, weights, biases, class_weight=None, name=None):
@@ -45,13 +47,9 @@ def softmax_classifier(tensor_in, labels, weights, biases, class_weight=None, name=None):
     Returns:
         Prediction and loss tensors.
     """
-    with tf.op_scope([tensor_in, labels], name, "softmax_classifier"):
-        logits = tf.nn.xw_plus_b(tensor_in, weights, biases)
+    with ops.op_scope([tensor_in, labels], name, "softmax_classifier"):
+        logits = nn.xw_plus_b(tensor_in, weights, biases)
         if class_weight is not None:
-            logits = tf.mul(logits, class_weight)
-        xent = tf.nn.softmax_cross_entropy_with_logits(logits,
-                                                       labels,
-                                                       name="xent_raw")
-        loss = tf.reduce_mean(xent, name="xent")
-        predictions = tf.nn.softmax(logits, name=name)
-        return predictions, loss
+            logits = math_ops.mul(logits, class_weight)
+        return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
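Note: the rewrite delegates the loss to `loss_ops.softmax_cross_entropy` and returns the softmax alongside it, dropping the hand-built xent reduction. The math being delegated, as a runnable NumPy sketch:

    import numpy as np

    logits = np.array([[2.0, 0.5], [0.1, 1.2]])
    labels = np.array([[1.0, 0.0], [0.0, 1.0]])            # one-hot targets
    shifted = logits - logits.max(axis=1, keepdims=True)   # for numerical stability
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    loss = -(labels * np.log(probs)).sum(axis=1).mean()    # mean cross-entropy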
@@ -16,9 +16,14 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops as array_ops_
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import variable_scope as vs

-from . import array_ops
+from tensorflow.contrib.learn.python.learn.ops import array_ops


 def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
@@ -34,18 +39,18 @@ def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
     Returns:
         Predictions and losses tensors.
     """
-    with tf.op_scope([decoding, labels], name, "sequence_classifier"):
+    with ops.op_scope([decoding, labels], name, "sequence_classifier"):
         predictions, xent_list = [], []
         for i, pred in enumerate(decoding):
             xent_list.append(
-                tf.nn.softmax_cross_entropy_with_logits(
+                nn.softmax_cross_entropy_with_logits(
                     pred, labels[i], name="sequence_loss/xent_raw{0}".format(i)))
             if sampling_decoding:
-                predictions.append(tf.nn.softmax(sampling_decoding[i]))
+                predictions.append(nn.softmax(sampling_decoding[i]))
             else:
-                predictions.append(tf.nn.softmax(pred))
-        xent = tf.add_n(xent_list, name="sequence_loss/xent")
-        loss = tf.reduce_sum(xent, name="sequence_loss")
+                predictions.append(nn.softmax(pred))
+        xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
+        loss = math_ops.reduce_sum(xent, name="sequence_loss")
         return array_ops.expand_concat(1, predictions), loss


@@ -65,13 +70,13 @@ def seq2seq_inputs(X, y, input_length, output_length, sentinel=None, name=None):
     Returns:
         Encoder input from X, and decoder inputs and outputs from y.
     """
-    with tf.op_scope([X, y], name, "seq2seq_inputs"):
+    with ops.op_scope([X, y], name, "seq2seq_inputs"):
         in_X = array_ops.split_squeeze(1, input_length, X)
         y = array_ops.split_squeeze(1, output_length, y)
         if not sentinel:
             # Set to zeros of shape of y[0], using X for batch size.
-            sentinel_shape = tf.pack([tf.shape(X)[0], y[0].get_shape()[1]])
-            sentinel = tf.zeros(sentinel_shape)
+            sentinel_shape = array_ops_.pack([array_ops_.shape(X)[0], y[0].get_shape()[1]])
+            sentinel = array_ops_.zeros(sentinel_shape)
             sentinel.set_shape(y[0].get_shape())
         in_y = [sentinel] + y
         out_y = y + [sentinel]
@@ -91,17 +96,17 @@ def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):
     Returns:
         List of tensors for outputs and states for training and sampling sub-graphs.
     """
-    with tf.variable_scope(scope or "dnn_decoder"):
+    with vs.variable_scope(scope or "dnn_decoder"):
         states, sampling_states = [initial_state], [initial_state]
         outputs, sampling_outputs = [], []
-        with tf.op_scope([decoder_inputs, initial_state], "training"):
+        with ops.op_scope([decoder_inputs, initial_state], "training"):
             for i, inp in enumerate(decoder_inputs):
                 if i > 0:
-                    tf.get_variable_scope().reuse_variables()
+                    vs.get_variable_scope().reuse_variables()
                 output, new_state = cell(inp, states[-1])
                 outputs.append(output)
                 states.append(new_state)
-        with tf.op_scope([initial_state], "sampling"):
+        with ops.op_scope([initial_state], "sampling"):
             for i, _ in enumerate(decoder_inputs):
                 if i == 0:
                     sampling_outputs.append(outputs[i])
@@ -115,7 +120,7 @@ def rnn_decoder(decoder_inputs, initial_state, cell, scope=None):


 def rnn_seq2seq(encoder_inputs, decoder_inputs, encoder_cell, decoder_cell=None,
-                dtype=tf.float32, scope=None):
+                dtype=dtypes.float32, scope=None):
     """RNN Sequence to Sequence model.

     Args:
@@ -129,6 +134,7 @@ def rnn_seq2seq(encoder_inputs, decoder_inputs, encoder_cell, decoder_cell=None,
     Returns:
         List of tensors for outputs and states for training and sampling sub-graphs.
     """
-    with tf.variable_scope(scope or "rnn_seq2seq"):
-        _, last_enc_state = tf.nn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
+    with vs.variable_scope(scope or "rnn_seq2seq"):
+        _, last_enc_state = nn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
         return rnn_decoder(decoder_inputs, last_enc_state, decoder_cell or encoder_cell)
@ -18,7 +18,7 @@ from __future__ import print_function
import numpy as np
import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow import ops
from tensorflow.contrib.learn.python.learn import ops


class DropoutTest(tf.test.TestCase):
@ -18,7 +18,7 @@ from __future__ import print_function
import numpy as np
import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow import ops
from tensorflow.contrib.learn.python.learn import ops


class OpsTest(tf.test.TestCase):
@ -18,7 +18,7 @@ from __future__ import print_function
import numpy as np
import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow import ops
from tensorflow.contrib.learn.python.learn import ops


class Seq2SeqOpsTest(tf.test.TestCase):
@ -16,5 +16,5 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.skflow.python.skflow.preprocessing.text import *
from tensorflow.contrib.skflow.python.skflow.preprocessing.categorical import *
from tensorflow.contrib.learn.python.learn.preprocessing.text import *
from tensorflow.contrib.learn.python.learn.preprocessing.categorical import *
@ -20,8 +20,8 @@ from __future__ import print_function
import numpy as np
import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow.preprocessing import categorical
from tensorflow.contrib.skflow.python.skflow.io import *
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.contrib.learn.python.learn.io import *


class CategoricalTest(tf.test.TestCase):
@ -19,7 +19,7 @@ from __future__ import print_function

import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow.preprocessing import categorical_vocabulary
from tensorflow.contrib.learn.python.learn.preprocessing import categorical_vocabulary


class CategoricalVocabularyTest(tf.test.TestCase):
@ -20,8 +20,8 @@ from __future__ import unicode_literals

import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow.preprocessing import text
from tensorflow.contrib.skflow.python.skflow.preprocessing import CategoricalVocabulary
from tensorflow.contrib.learn.python.learn.preprocessing import text
from tensorflow.contrib.learn.python.learn.preprocessing import CategoricalVocabulary


class TextTest(tf.test.TestCase):
@ -26,6 +26,8 @@ except ImportError:

import numpy as np

from tensorflow.python.platform.default import _gfile as gfile

from .categorical_vocabulary import CategoricalVocabulary

TOKENIZER_RE = re.compile(
@ -207,7 +209,7 @@ class VocabularyProcessor(object):
    Args:
      filename: Path to output file.
    """
    with open(filename, 'wb') as f:
    with gfile.Open(filename, 'wb') as f:
      f.write(pickle.dumps(self))

  @classmethod
@ -220,6 +222,6 @@ class VocabularyProcessor(object):
    Returns:
      VocabularyProcessor object.
    """
    with open(filename, 'rb') as f:
    with gfile.Open(filename, 'rb') as f:
      return pickle.loads(f.read())
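The change above swaps the builtin open for gfile.Open so pickled vocabularies can live on any filesystem TensorFlow supports. A hedged round-trip sketch with illustrative arguments:

# Sketch: pickle a fitted VocabularyProcessor and load it back.
from tensorflow.contrib.learn.python.learn.preprocessing.text import VocabularyProcessor

processor = VocabularyProcessor(max_document_length=10)
list(processor.fit_transform(["a small example document"]))
processor.save("/tmp/vocab.pkl")                      # writes via gfile.Open
restored = VocabularyProcessor.restore("/tmp/vocab.pkl")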
@ -17,14 +17,15 @@ from __future__ import print_function

import random

from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss

import numpy as np
import tensorflow as tf

from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.skflow.python.skflow.estimators import base
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import base
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import log_loss
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error


class BaseTest(tf.test.TestCase):
@ -33,21 +34,21 @@ class BaseTest(tf.test.TestCase):
    random.seed(42)
    X = np.random.rand(1000)
    y = 2 * X + 3
    regressor = skflow.TensorFlowLinearRegressor()
    regressor = learn.TensorFlowLinearRegressor()
    regressor.fit(X, y)
    score = mean_squared_error(regressor.predict(X), y)
    self.assertLess(score, 0.5, "Failed with score = {0}".format(score))
    score = mean_squared_error(y, regressor.predict(X))
    self.assertLess(score, 1.0, "Failed with score = {0}".format(score))

  def testIris(self):
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, [float(x) for x in iris.target])
    score = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))


  def testIrisClassWeight(self):
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowLinearClassifier(
    classifier = learn.TensorFlowLinearClassifier(
        n_classes=3, class_weight=[0.1, 0.8, 0.1])
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
@ -55,15 +56,15 @@ class BaseTest(tf.test.TestCase):

  def testIrisSummaries(self):
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, iris.target, logdir='/tmp/skflow_tests/')
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, iris.target, logdir='/tmp/learn_tests/')
    score = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))


  def testIrisContinueTraining(self):
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowLinearClassifier(n_classes=3,
    classifier = learn.TensorFlowLinearClassifier(n_classes=3,
        learning_rate=0.01, continue_training=True, steps=250)
    classifier.fit(iris.data, iris.target)
    score1 = accuracy_score(iris.target, classifier.predict(iris.data))
@ -89,7 +90,7 @@ class BaseTest(tf.test.TestCase):
      for y in iris.target:
        yield y

    classifier = skflow.TensorFlowLinearClassifier(n_classes=3, steps=100)
    classifier = learn.TensorFlowLinearClassifier(n_classes=3, steps=100)
    classifier.fit(iris_data(), iris_target())
    score1 = accuracy_score(iris.target, classifier.predict(iris.data))
    score2 = accuracy_score(iris.target, classifier.predict(iris_predict_data()))
@ -99,17 +100,19 @@ class BaseTest(tf.test.TestCase):
                "data.".format(score2, score1))

  def testIris_proba(self):
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowClassifier(n_classes=3, steps=250)
    classifier.fit(iris.data, iris.target)
    score = log_loss(iris.target, classifier.predict_proba(iris.data))
    self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
    # If sklearn available.
    if log_loss:
      random.seed(42)
      iris = datasets.load_iris()
      classifier = learn.TensorFlowClassifier(n_classes=3, steps=250)
      classifier.fit(iris.data, iris.target)
      score = log_loss(iris.target, classifier.predict_proba(iris.data))
      self.assertLess(score, 0.8, "Failed with score = {0}".format(score))

  def testBoston(self):
    random.seed(42)
    boston = datasets.load_boston()
    regressor = skflow.TensorFlowLinearRegressor(
    regressor = learn.TensorFlowLinearRegressor(
        batch_size=boston.data.shape[0],
        steps=500,
        learning_rate=0.001)
@ -119,7 +122,7 @@ class BaseTest(tf.test.TestCase):
    self.assertLess(score, 150, "Failed with score = {0}".format(score))

  def testUnfitted(self):
    estimator = skflow.TensorFlowEstimator(model_fn=None, n_classes=1)
    estimator = learn.TensorFlowEstimator(model_fn=None, n_classes=1)
    with self.assertRaises(base.NotFittedError):
      estimator.predict([1, 2, 3])
    with self.assertRaises(base.NotFittedError):
@ -19,10 +19,11 @@ import tensorflow as tf

import random

from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split

from tensorflow.contrib.skflow.python import skflow

class CustomDecayTest(tf.test.TestCase):

@ -34,16 +35,17 @@ class CustomDecayTest(tf.test.TestCase):
        iris.target,
        test_size=0.2,
        random_state=42)

    # setup exponential decay function
    def exp_decay(global_step):
      return tf.train.exponential_decay(
          learning_rate=0.1, global_step=global_step,
          decay_steps=100, decay_rate=0.001)
    classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
    classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                n_classes=3, steps=800,
                                                learning_rate=exp_decay)
    classifier.fit(X_train, y_train)
    score = metrics.accuracy_score(y_test, classifier.predict(X_test))
    score = accuracy_score(y_test, classifier.predict(X_test))

    self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
@ -21,50 +21,50 @@ import six

import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow.io import *


class MockPlaceholder(object):

  def __init__(self, name):
    self.name = name
from tensorflow.contrib.learn.python.learn.io import *


class DataFeederTest(tf.test.TestCase):

  def test_unsupervised(self):
    data = np.matrix([[1, 2], [2, 3], [3, 4]])
    feeder = data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1)
    with self.test_session():
      inp, out = feeder.input_builder()
      feed_dict_fn = feeder.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self.assertAllClose(feed_dict[inp.name], [[1, 2]])

  def test_data_feeder_regression(self):
    X = np.matrix([[1, 2], [3, 4]])
    y = np.array([1, 2])
    df = data_feeder.DataFeeder(X, y, n_classes=0, batch_size=3)
    feed_dict_fn = df.get_feed_dict_fn(
        MockPlaceholder(name='input'),
        MockPlaceholder(name='output'))
    inp, out = df.input_builder()
    feed_dict_fn = df.get_feed_dict_fn()
    feed_dict = feed_dict_fn()

    self.assertAllClose(feed_dict['input'], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict['output'], [2, 1])
    self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict[out.name], [2, 1])

  def test_data_feeder_multioutput_regression(self):
    X = np.matrix([[1, 2], [3, 4]])
    y = np.array([[1, 2], [3, 4]])
    df = data_feeder.DataFeeder(X, y, n_classes=0, batch_size=2)
    feed_dict_fn = df.get_feed_dict_fn(
        MockPlaceholder(name='input'),
        MockPlaceholder(name='output'))
    inp, out = df.input_builder()
    feed_dict_fn = df.get_feed_dict_fn()
    feed_dict = feed_dict_fn()
    self.assertAllClose(feed_dict['input'], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict['output'], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict[out.name], [[3, 4], [1, 2]])

  def test_data_feeder_multioutput_classification(self):
    X = np.matrix([[1, 2], [3, 4]])
    y = np.array([[0, 1, 2], [2, 3, 4]])
    df = data_feeder.DataFeeder(X, y, n_classes=5, batch_size=2)
    feed_dict_fn = df.get_feed_dict_fn(
        MockPlaceholder(name='input'),
        MockPlaceholder(name='output'))
    inp, out = df.input_builder()
    feed_dict_fn = df.get_feed_dict_fn()
    feed_dict = feed_dict_fn()
    self.assertAllClose(feed_dict['input'], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict['output'], [[[0, 0, 1, 0, 0],
    self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
    self.assertAllClose(feed_dict[out.name], [[[0, 0, 1, 0, 0],
                                               [0, 0, 0, 1, 0],
                                               [0, 0, 0, 0, 1]],
                                              [[1, 0, 0, 0, 0],
@ -81,12 +81,11 @@ class DataFeederTest(tf.test.TestCase):
      yield np.array([2])
    df = data_feeder.StreamingDataFeeder(X_iter(), y_iter(), n_classes=0,
                                         batch_size=2)
    feed_dict_fn = df.get_feed_dict_fn(
        MockPlaceholder(name='input'),
        MockPlaceholder(name='output'))
    inp, out = df.input_builder()
    feed_dict_fn = df.get_feed_dict_fn()
    feed_dict = feed_dict_fn()
    self.assertAllClose(feed_dict['input'], [[1, 2], [3, 4]])
    self.assertAllClose(feed_dict['output'], [1, 2])
    self.assertAllClose(feed_dict[inp.name], [[1, 2], [3, 4]])
    self.assertAllClose(feed_dict[out.name], [1, 2])

  def test_dask_data_feeder(self):
    if HAS_PANDAS and HAS_DASK:
@ -98,13 +97,12 @@ class DataFeederTest(tf.test.TestCase):
      # X = extract_dask_data(X)
      # y = extract_dask_labels(y)
      df = data_feeder.DaskDataFeeder(X, y, n_classes=2, batch_size=2)
      feed_dict_fn = df.get_feed_dict_fn(
          MockPlaceholder(name='input'),
          MockPlaceholder(name='output'))
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self.assertAllClose(feed_dict['input'], [[ 0.40000001, 0.1],
      self.assertAllClose(feed_dict[inp.name], [[ 0.40000001, 0.1],
                                                [ 0.60000002, 0.2]])
      self.assertAllClose(feed_dict['output'], [[ 0., 0., 1.],
      self.assertAllClose(feed_dict[out.name], [[ 0., 0., 1.],
                                                [ 0., 1., 0.]])
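The tests above all migrate from handing MockPlaceholder objects into get_feed_dict_fn() to letting the feeder build real placeholders via input_builder(). A hedged sketch of the new pattern, with illustrative data (the direct data_feeder import path is an assumption; the tests reach it via a star import):

# Sketch: the feeder now owns its placeholders; feed dicts are keyed by name.
import numpy as np
from tensorflow.contrib.learn.python.learn.io import data_feeder  # assumed path

X = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
df = data_feeder.DataFeeder(X, y, n_classes=0, batch_size=2)
inp, out = df.input_builder()         # placeholders created by the feeder
feed_dict = df.get_feed_dict_fn()()   # keys are inp.name and out.name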
@ -19,10 +19,10 @@ import tensorflow as tf

import random

from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split

from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split


class EarlyStoppingTest(tf.test.TestCase):
@ -37,20 +37,20 @@ class EarlyStoppingTest(tf.test.TestCase):
        random_state=42)

    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2)
    val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val, n_classes=3)
    val_monitor = learn.monitors.ValidationMonitor(X_val, y_val, n_classes=3)

    # classifier without early stopping - overfitting
    classifier1 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
    classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                 n_classes=3, steps=1000)
    classifier1.fit(X_train, y_train)
    score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
    score1 = accuracy_score(y_test, classifier1.predict(X_test))

    # classifier with early stopping - improved accuracy on testing set
    classifier2 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
    classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                 n_classes=3, steps=1000)

    classifier2.fit(X_train, y_train, val_monitor)
    score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
    score2 = accuracy_score(y_test, classifier2.predict(X_test))

    # self.assertGreater(score2, score1, "No improvement using early stopping.")
@ -19,10 +19,10 @@ import tensorflow as tf

import random

from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split

from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split


class CustomOptimizer(tf.test.TestCase):
@ -40,13 +40,13 @@ class CustomOptimizer(tf.test.TestCase):
      return tf.train.exponential_decay(
          learning_rate=0.1, global_step=global_step,
          decay_steps=100, decay_rate=0.001)
    custom_optimizer = lambda x: tf.train.MomentumOptimizer(x, 0.9)
    classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
    custom_optimizer = lambda learning_rate: tf.train.MomentumOptimizer(learning_rate, 0.9)
    classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                               n_classes=3, steps=800,
                                               learning_rate=exp_decay,
                                               optimizer=custom_optimizer)
    classifier.fit(X_train, y_train)
    score = metrics.accuracy_score(y_test, classifier.predict(X_test))
    score = accuracy_score(y_test, classifier.predict(X_test))

    self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
@ -17,28 +17,33 @@ from __future__ import print_function

import random

from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score, mean_squared_error
try:
  from sklearn import datasets
  from sklearn.grid_search import GridSearchCV
  from sklearn.metrics import accuracy_score, mean_squared_error
  HAS_SKLEARN = True
except ImportError:
  HAS_SKLEARN = False

import tensorflow as tf

from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.learn.python import learn


class GridSearchTest(tf.test.TestCase):

  def testIrisDNN(self):
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, steps=50)
    grid_search = GridSearchCV(classifier,
                               {'hidden_units': [[5, 5], [10, 10]],
                                'learning_rate': [0.1, 0.01]})
    grid_search.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, grid_search.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    if HAS_SKLEARN:
      random.seed(42)
      iris = datasets.load_iris()
      classifier = learn.TensorFlowDNNClassifier(
          hidden_units=[10, 20, 10], n_classes=3, steps=50)
      grid_search = GridSearchCV(classifier,
                                 {'hidden_units': [[5, 5], [10, 10]],
                                  'learning_rate': [0.1, 0.01]})
      grid_search.fit(iris.data, iris.target)
      score = accuracy_score(iris.target, grid_search.predict(iris.data))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))


if __name__ == "__main__":
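The grid-search test now degrades gracefully when scikit-learn is absent. The guard pattern in isolation, as a hedged sketch (the helper name is made up):

# Sketch: optional-dependency guard used by the migrated tests.
try:
  from sklearn.grid_search import GridSearchCV
  HAS_SKLEARN = True
except ImportError:
  HAS_SKLEARN = False

def maybe_grid_search(classifier, data, target):
  # Becomes a no-op instead of an ImportError when sklearn is unavailable.
  if not HAS_SKLEARN:
    return None
  gs = GridSearchCV(classifier, {'learning_rate': [0.1, 0.01]})
  gs.fit(data, target)
  return gs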
@ -17,13 +17,12 @@ from __future__ import print_function

import random

from sklearn import datasets
from sklearn.metrics import accuracy_score

import tensorflow as tf

from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.skflow.python.skflow.io import *
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.io import *
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score


class IOTest(tf.test.TestCase):
@ -34,9 +33,9 @@ class IOTest(tf.test.TestCase):
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.DataFrame(iris.target)
      classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
      classifier = learn.TensorFlowLinearClassifier(n_classes=3)
      classifier.fit(data, labels)
      score = accuracy_score(labels, classifier.predict(data))
      score = accuracy_score(labels[0], classifier.predict(data))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    else:
      print("No pandas installed. pandas-related tests are skipped.")
@ -47,7 +46,7 @@ class IOTest(tf.test.TestCase):
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.Series(iris.target)
      classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
      classifier = learn.TensorFlowLinearClassifier(n_classes=3)
      classifier.fit(data, labels)
      score = accuracy_score(labels, classifier.predict(data))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
@ -55,9 +54,9 @@ class IOTest(tf.test.TestCase):
  def test_string_data_formats(self):
    if HAS_PANDAS:
      with self.assertRaises(ValueError):
        skflow.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
        learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
      with self.assertRaises(ValueError):
        skflow.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
        learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))

  def test_dask_io(self):
    if HAS_DASK and HAS_PANDAS:
@ -89,7 +88,7 @@ class IOTest(tf.test.TestCase):
      data = dd.from_pandas(data, npartitions=2)
      labels = pd.DataFrame(iris.target)
      labels = dd.from_pandas(labels, npartitions=2)
      classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
      classifier = learn.TensorFlowLinearClassifier(n_classes=3)
      classifier.fit(data, labels)
      predictions = data.map_partitions(classifier.predict).compute()
      score = accuracy_score(labels.compute(), predictions)
@ -19,11 +19,10 @@ import random

import numpy as np

from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error

import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error


class MultiOutputTest(tf.test.TestCase):
@ -33,7 +32,7 @@ class MultiOutputTest(tf.test.TestCase):
    rng = np.random.RandomState(1)
    X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
    y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
    regressor = skflow.TensorFlowLinearRegressor(learning_rate=0.01)
    regressor = learn.TensorFlowLinearRegressor(learning_rate=0.01)
    regressor.fit(X, y)
    score = mean_squared_error(regressor.predict(X), y)
    self.assertLess(score, 10, "Failed with score = {0}".format(score))
@ -17,11 +17,11 @@ from __future__ import print_function

import random

from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error

import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error


class NonLinearTest(tf.test.TestCase):
@ -29,7 +29,7 @@ class NonLinearTest(tf.test.TestCase):
  def testIrisDNN(self):
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(
    classifier = learn.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3)
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
@ -45,14 +45,14 @@ class NonLinearTest(tf.test.TestCase):
  def testBostonDNN(self):
    random.seed(42)
    boston = datasets.load_boston()
    regressor = skflow.TensorFlowDNNRegressor(
    regressor = learn.TensorFlowDNNRegressor(
        hidden_units=[10, 20, 10], n_classes=0,
        batch_size=boston.data.shape[0],
        steps=200, learning_rate=0.001)
    regressor.fit(boston.data, boston.target)
    score = mean_squared_error(
        boston.target, regressor.predict(boston.data))
    self.assertLess(score, 100, "Failed with score = {0}".format(score))
    self.assertLess(score, 110, "Failed with score = {0}".format(score))
    weights = regressor.weights_
    self.assertEqual(weights[0].shape, (13, 10))
    self.assertEqual(weights[1].shape, (10, 20))
@ -64,7 +64,7 @@ class NonLinearTest(tf.test.TestCase):
  def testDNNDropout0(self):
    # Dropout prob == 0.
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(
    classifier = learn.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, dropout=0.0)
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
@ -73,7 +73,7 @@ class NonLinearTest(tf.test.TestCase):
  def testDNNDropout0_1(self):
    # Dropping only a little.
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(
    classifier = learn.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, dropout=0.1)
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
@ -82,7 +82,7 @@ class NonLinearTest(tf.test.TestCase):
  def testDNNDropout0_9(self):
    # Dropping out most of it.
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(
    classifier = learn.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, dropout=0.9)
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
@ -105,7 +105,7 @@ class NonLinearTest(tf.test.TestCase):
      return tf.split(1, 5, X)

    # Classification
    classifier = skflow.TensorFlowRNNClassifier(
    classifier = learn.TensorFlowRNNClassifier(
        rnn_size=2, cell_type='lstm', n_classes=2, input_op_fn=input_fn)
    classifier.fit(data, labels)
    classifier.weights_
@ -113,18 +113,18 @@ class NonLinearTest(tf.test.TestCase):
    predictions = classifier.predict(test_data)
    self.assertAllClose(predictions, np.array([1, 0]))

    classifier = skflow.TensorFlowRNNClassifier(
    classifier = learn.TensorFlowRNNClassifier(
        rnn_size=2, cell_type='rnn', n_classes=2,
        input_op_fn=input_fn, num_layers=2)
    classifier.fit(data, labels)
    classifier = skflow.TensorFlowRNNClassifier(
    classifier = learn.TensorFlowRNNClassifier(
        rnn_size=2, cell_type='invalid_cell_type', n_classes=2,
        input_op_fn=input_fn, num_layers=2)
    with self.assertRaises(ValueError):
      classifier.fit(data, labels)

    # Regression
    regressor = skflow.TensorFlowRNNRegressor(
    regressor = learn.TensorFlowRNNRegressor(
        rnn_size=2, cell_type='gru', input_op_fn=input_fn)
    regressor.fit(data, targets)
    regressor.weights_
@ -143,7 +143,7 @@ class NonLinearTest(tf.test.TestCase):
      return tf.split(1, 5, X)

    # Classification
    classifier = skflow.TensorFlowRNNClassifier(
    classifier = learn.TensorFlowRNNClassifier(
        rnn_size=2, cell_type='lstm', n_classes=2, input_op_fn=input_fn,
        bidirectional=True)
    classifier.fit(data, labels)
@ -17,13 +17,11 @@ from __future__ import print_function

import random

from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss

import numpy as np

import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow

from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets


class RegressionTest(tf.test.TestCase):
@ -37,7 +35,7 @@ class RegressionTest(tf.test.TestCase):
    self.weights = 10 * rng.randn(n_weights)
    self.y = np.dot(self.X, self.weights)
    self.y += rng.randn(len(self.X)) * 0.05 + rng.normal(self.bias, 0.01)
    regressor = skflow.TensorFlowLinearRegressor(optimizer="SGD")
    regressor = learn.TensorFlowLinearRegressor(optimizer="SGD")
    regressor.fit(self.X, self.y)
    # Have to flatten weights since they come in (X, 1) shape
    self.assertAllClose(self.weights, regressor.weights_.flatten(), rtol=0.01)
@ -18,11 +18,10 @@ from __future__ import print_function
import os
import random

from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss

import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score


class SaverTest(tf.test.TestCase):
@ -31,10 +30,10 @@ class SaverTest(tf.test.TestCase):
    path = tf.test.get_temp_dir() + '/tmp.saver'
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
    classifier = learn.TensorFlowLinearClassifier(n_classes=3)
    classifier.fit(iris.data, iris.target)
    classifier.save(path)
    new_classifier = skflow.TensorFlowEstimator.restore(path)
    new_classifier = learn.TensorFlowEstimator.restore(path)
    self.assertEqual(type(new_classifier), type(classifier))
    score = accuracy_score(iris.target, new_classifier.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
@ -44,12 +43,12 @@ class SaverTest(tf.test.TestCase):
    random.seed(42)
    iris = datasets.load_iris()
    def custom_model(X, y):
      return skflow.models.logistic_regression(X, y)
    classifier = skflow.TensorFlowEstimator(model_fn=custom_model,
      return learn.models.logistic_regression(X, y)
    classifier = learn.TensorFlowEstimator(model_fn=custom_model,
                                           n_classes=3)
    classifier.fit(iris.data, iris.target)
    classifier.save(path)
    new_classifier = skflow.TensorFlowEstimator.restore(path)
    new_classifier = learn.TensorFlowEstimator.restore(path)
    self.assertEqual(type(new_classifier), type(classifier))
    score = accuracy_score(iris.target, new_classifier.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
@ -58,28 +57,28 @@ class SaverTest(tf.test.TestCase):
    path = tf.test.get_temp_dir() + '/tmp_saver3'
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier.fit(iris.data, iris.target)
    classifier.save(path)
    new_classifier = skflow.TensorFlowEstimator.restore(path)
    new_classifier = learn.TensorFlowEstimator.restore(path)
    self.assertEqual(type(new_classifier), type(classifier))
    score = accuracy_score(iris.target, new_classifier.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))

  def testNoFolder(self):
    with self.assertRaises(ValueError):
      skflow.TensorFlowEstimator.restore('no_model_path')
      learn.TensorFlowEstimator.restore('no_model_path')

  def testNoCheckpoints(self):
    path = tf.test.get_temp_dir() + '/tmp/tmp.saver4'
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier.fit(iris.data, iris.target)
    classifier.save(path)
    os.remove(os.path.join(path, 'checkpoint'))
    with self.assertRaises(ValueError):
      skflow.TensorFlowEstimator.restore(path)
      learn.TensorFlowEstimator.restore(path)


if __name__ == "__main__":
64 tensorflow/contrib/learn/python/learn/trainer.py Normal file
@ -0,0 +1,64 @@
"""Generic trainer for TensorFlow models."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.contrib.layers import optimizers


def train(session, train_op, loss, global_step, feed_dict_fn, steps, monitor,
          summary_writer=None, summaries=None,
          feed_params_fn=None):
  """Trains a model for given number of steps, given feed_dict function.

  Args:
    session: Session object.
    train_op: Tensor, trains model.
    loss: Tensor, loss value.
    global_step: Tensor, global step of the model.
    feed_dict_fn: Function that will return a feed dictionary.
    summary_writer: SummaryWriter object to use for writing summaries.
    steps: Number of steps to run.
    monitor: Monitor object to track training progress and induce early stopping.
    summaries: Joined object of all summaries that should be run.
  """
  for step in xrange(steps):
    feed_dict = feed_dict_fn()
    if summaries is not None:
      global_step_value, loss_value, summ, _ = session.run(
          [global_step, loss, summaries, train_op],
          feed_dict=feed_dict)
    else:
      global_step_value, loss_value, _ = session.run(
          [global_step, loss, train_op],
          feed_dict=feed_dict)
    monitor.update(step, global_step_value, loss_value, session,
                   feed_params_fn, loss_expression_tensor=loss)
    if summaries is not None and summary_writer and summ is not None:
      summary_writer.add_summary(summ, global_step_value)
    if monitor.monitor_inducing_stop():
      break
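A hedged sketch of driving the new trainer.train() loop end to end. The tiny linear model and the no-op monitor are stand-ins; only the train() signature comes from the file above.

# Sketch: minimal model + stand-in monitor run through learn's trainer.train.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import trainer

x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.Variable(tf.zeros([1, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))
global_step = tf.Variable(0, trainable=False)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=global_step)

def feed_fn():
  return {x: np.array([[1.], [2.]]), y: np.array([[2.], [4.]])}

class NoOpMonitor(object):
  # Minimal stand-in for the monitor interface train() calls into.
  def update(self, step, global_step_value, loss_value, session,
             feed_params_fn, loss_expression_tensor=None):
    pass
  def monitor_inducing_stop(self):
    return False

with tf.Session() as session:
  session.run(tf.initialize_all_variables())
  trainer.train(session, train_op, loss, global_step, feed_fn,
                steps=10, monitor=NoOpMonitor())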
@ -18,7 +18,7 @@ py_library(

cuda_py_tests(
    name = "histogram_ops_test",
    size = "small",
    size = "medium",
    srcs = ["python/kernel_tests/histogram_ops_test.py"],
    additional_deps = [
        ":metrics_py",
@ -1,5 +1,5 @@
# Description:
#   contains Scikit Flow sub-project with high level tensorflow API.
#   This is an old build rule for Scikit Flow (moved to learn).

licenses(["notice"])  # Apache 2.0

@ -9,186 +9,14 @@ package(default_visibility = ["//tensorflow:__subpackages__"])

py_library(
    name = "skflow",
    srcs = glob([
        "python/skflow/**/*.py",
    ]),
    srcs_version = "PY2AND3",
    deps = ["//tensorflow/python:framework"],
)

py_test(
    name = "test_base",
    srcs = ["python/skflow/tests/test_base.py"],
    srcs = ["__init__.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_custom_decay",
    srcs = ["python/skflow/tests/test_custom_decay.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_data_feeder",
    srcs = ["python/skflow/tests/test_data_feeder.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_estimators",
    srcs = ["python/skflow/tests/test_estimators.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_grid_search",
    srcs = ["python/skflow/tests/test_grid_search.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_io",
    srcs = ["python/skflow/tests/test_io.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_multioutput",
    srcs = ["python/skflow/tests/test_multioutput.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_nonlinear",
    srcs = ["python/skflow/tests/test_nonlinear.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_regression",
    srcs = ["python/skflow/tests/test_regression.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_saver",
    srcs = ["python/skflow/tests/test_saver.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_ops",
    srcs = ["python/skflow/ops/tests/test_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_dropout_ops",
    srcs = ["python/skflow/ops/tests/test_dropout_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_seq2seq_ops",
    srcs = ["python/skflow/ops/tests/test_seq2seq_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_categorical",
    srcs = ["python/skflow/preprocessing/tests/test_categorical.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_categorical_vocabulary",
    srcs = ["python/skflow/preprocessing/tests/test_categorical_vocabulary.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

py_test(
    name = "test_text",
    srcs = ["python/skflow/preprocessing/tests/test_text.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":skflow",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//third_party/py/numpy",
        "//third_party/py/pandas",
        "//third_party/py/setuptools",
        "//third_party/py/sklearn",
        "//tensorflow/contrib/learn",
    ],
)

@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from python import *
from tensorflow.contrib.learn.python.learn import *
@ -1,39 +0,0 @@
"""Main Scikit Flow module."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

## Check existence of sklearn and its version
try:
  import sklearn
except ImportError:
  raise ImportError("Please install sklearn (pip install sklearn) to use "
                    "skflow.")

if sklearn.__version__ < '0.16.0':
  raise ImportError("Your scikit-learn version needs to be at least 0.16. "
                    "Your current version is %s. " % sklearn.VERSION)

import numpy as np
import tensorflow as tf

from tensorflow.contrib.skflow.python.skflow.io import *
from tensorflow.contrib.skflow.python.skflow.estimators import *
from tensorflow.contrib.skflow.python.skflow import ops
from tensorflow.contrib.skflow.python.skflow import preprocessing
from tensorflow.contrib.skflow.python.skflow import models
from tensorflow.contrib.skflow.python.skflow.trainer import TensorFlowTrainer
@ -1,40 +0,0 @@
"""Configuration Addon."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

class ConfigAddon(object):
  """This class specifies the specific configurations for a session.

  Parameters:
    num_cores: Number of cores to be used. (default: 4)
    verbose: Controls the verbosity, possible values:
      0: the algorithm and debug information is muted.
      1: trainer prints the progress.
      2: log device placement is printed.
    gpu_memory_fraction: Fraction of GPU memory used by the process on
      each GPU uniformly on the same machine.
  """

  def __init__(self, num_cores=4, verbose=1, gpu_memory_fraction=1):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
    self.config = tf.ConfigProto(log_device_placement=(verbose > 1),
                                 inter_op_parallelism_threads=num_cores,
                                 intra_op_parallelism_threads=num_cores,
                                 gpu_options=gpu_options)
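The deleted ConfigAddon is a thin wrapper over tf.ConfigProto, so its removal costs nothing: the same session configuration can be built directly. A sketch grounded in the constructor above:

# Sketch: direct equivalent of ConfigAddon(num_cores=4, verbose=1, gpu_memory_fraction=0.5).
import tensorflow as tf

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
config = tf.ConfigProto(log_device_placement=False,  # verbose > 1 would enable this
                        inter_op_parallelism_threads=4,
                        intra_op_parallelism_threads=4,
                        gpu_options=gpu_options)
session = tf.Session(config=config)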
@ -1,27 +0,0 @@
"""Scikit Flow Estimators."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.skflow.python.skflow.estimators.base import TensorFlowEstimator
from tensorflow.contrib.skflow.python.skflow.estimators.linear import TensorFlowLinearClassifier
from tensorflow.contrib.skflow.python.skflow.estimators.linear import TensorFlowClassifier
from tensorflow.contrib.skflow.python.skflow.estimators.linear import TensorFlowLinearRegressor
from tensorflow.contrib.skflow.python.skflow.estimators.linear import TensorFlowRegressor
from tensorflow.contrib.skflow.python.skflow.estimators.dnn import TensorFlowDNNClassifier
from tensorflow.contrib.skflow.python.skflow.estimators.dnn import TensorFlowDNNRegressor
from tensorflow.contrib.skflow.python.skflow.estimators.rnn import TensorFlowRNNClassifier
from tensorflow.contrib.skflow.python.skflow.estimators.rnn import TensorFlowRNNRegressor
@ -1,150 +0,0 @@
"""Generic trainer for TensorFlow models."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.training import training as train
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs


OPTIMIZER_CLS_NAMES = {
    "SGD": train.GradientDescentOptimizer,
    "Adagrad": train.AdagradOptimizer,
    "Adam": train.AdamOptimizer,
}


class TensorFlowTrainer(object):
  """General trainer class.

  Attributes:
    model: Model object.
    gradients: Gradients tensor.
  """

  def __init__(self, loss, global_step, optimizer,
               learning_rate, clip_gradients=5.0):
    """Build a trainer part of graph.

    Args:
      loss: Tensor that evaluates to model's loss.
      global_step: Tensor with global step of the model.
      optimizer: Name of the optimizer class (SGD, Adam, Adagrad) or class.
      learning_rate: If this is constant float value, no decay function is used.
        Instead, a customized decay function can be passed that accepts
        global_step as parameter and returns a Tensor.
        e.g. exponential decay function:
        def exp_decay(global_step):
          return tf.train.exponential_decay(
              learning_rate=0.1, global_step=global_step,
              decay_steps=2, decay_rate=0.001)
    Raises:
      ValueError: if learning_rate is not a float or a callable.
    """
    self.loss = loss
    self.global_step = global_step
    # pylint: disable=redefined-variable-type
    if isinstance(learning_rate, float):
      self._learning_rate = vs.get_variable(
          "learning_rate",
          [],
          initializer=init_ops.constant_initializer(learning_rate))
    elif callable(learning_rate):
      self._learning_rate = learning_rate(self.global_step)
    else:
      raise ValueError("learning_rate should be a float or a callable function.")
    params = variables.trainable_variables()
    self.gradients = gradients.gradients(loss, params)
    if clip_gradients > 0.0:
      self.gradients, self.gradients_norm = clip_ops.clip_by_global_norm(
          self.gradients, clip_gradients)
    grads_and_vars = zip(self.gradients, params)
    if isinstance(optimizer, str):
      self._optimizer = OPTIMIZER_CLS_NAMES[
          optimizer](self._learning_rate)
    else:
      self._optimizer = optimizer(self._learning_rate)
    self.trainer = self._optimizer.apply_gradients(grads_and_vars,
                                                   global_step=global_step,
                                                   name="train")
    # Update ops during training, e.g. batch_norm_ops
    self.trainer = control_flow_ops.group(self.trainer, *ops.get_collection('update_ops'))
    # Get all initializers for all trainable variables.
    self._initializers = variables.initialize_all_variables()

  def initialize(self, sess):
    """Initializes all variables.

    Args:
      sess: Session object.

    Returns:
      Values of initializers.
    """
    return sess.run(self._initializers)

  def train(self, sess, feed_dict_fn, steps, monitor,
            summary_writer=None, summaries=None,
            feed_params_fn=None):
    """Trains a model for given number of steps, given feed_dict function.

    Args:
      sess: Session object.
      feed_dict_fn: Function that will return a feed dictionary.
      summary_writer: SummaryWriter object to use for writing summaries.
      steps: Number of steps to run.
      monitor: Monitor object to track training progress and induce early stopping.
      summaries: Joined object of all summaries that should be run.

    Returns:
      List of losses for each step.
    """
    for step in xrange(steps):
      feed_dict = feed_dict_fn()
      if summaries is not None:
        global_step, loss, summ, _ = sess.run(
            [self.global_step, self.loss, summaries, self.trainer],
            feed_dict=feed_dict)
      else:
        global_step, loss, _ = sess.run(
            [self.global_step, self.loss, self.trainer],
            feed_dict=feed_dict)
      monitor.update(step, global_step, loss, sess,
                     feed_params_fn, loss_expression_tensor=self.loss)
      if summaries is not None and summary_writer and summ is not None:
        summary_writer.add_summary(summ, global_step)
      if monitor.monitor_inducing_stop():
        break
    return


class RestoredTrainer(TensorFlowTrainer):
  """Trainer class that takes already existing graph."""

  # pylint: disable=super-init-not-called
  def __init__(self, loss, global_step, trainer):
    self.global_step = global_step
    self.loss = loss
    self.trainer = trainer
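The core of the removed TensorFlowTrainer is a clipped-gradient update, which the public API can express in a few lines. A hedged sketch with an illustrative variable and loss:

# Sketch: gradients -> global-norm clipping -> apply_gradients, as in __init__ above.
import tensorflow as tf

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
grads, global_norm = tf.clip_by_global_norm(grads, 5.0)  # clip_gradients=5.0
train_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(
    zip(grads, params))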
@ -9,6 +9,8 @@ package(default_visibility = ["//visibility:public"])

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

load(
    "//tensorflow:tensorflow.bzl",
    "tf_cc_test",
@ -777,6 +779,7 @@ tf_kernel_libraries(
    name = "linalg",
    prefixes = [
        "cholesky_op",
        "cholesky_grad",
        "determinant_op",
        "self_adjoint_eig_op",
        "matrix_inverse_op",

173 tensorflow/core/kernels/cholesky_grad.cc Normal file
@ -0,0 +1,173 @@
/* Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "third_party/eigen3/Eigen/Core"
|
||||
#include "tensorflow/core/framework/op.h"
|
||||
|
||||
#include "tensorflow/core/framework/op_kernel.h"
|
||||
|
||||
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
|
||||
#include "tensorflow/core/framework/tensor_types.h"
|
||||
#include "tensorflow/core/framework/types.h"
|
||||
#include "tensorflow/core/kernels/linalg_ops_common.h"
|
||||
|
||||
namespace tensorflow {

template <typename T>
class CholeskyGrad : public OpKernel {
 public:
  explicit CholeskyGrad(OpKernelConstruction* context) : OpKernel(context) {}
  using Matrix =
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
  using ConstMatrixMap = Eigen::Map<const Matrix>;
  using MatrixMap = Eigen::Map<Matrix>;
  using ConstRef = Eigen::Ref<const Matrix>;
  using Ref = Eigen::Ref<Matrix>;

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor_l = context->input(0);
    const Tensor& input_tensor_grad = context->input(1);
    // Check that the input tensors represent matrices.
    OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_tensor_l.shape()),
                errors::InvalidArgument("In[0] is not a matrix"));
    OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_tensor_grad.shape()),
                errors::InvalidArgument("In[1] is not a matrix"));
    // Check that the input matrices are square.
    OP_REQUIRES(context,
                input_tensor_l.dim_size(0) == input_tensor_l.dim_size(1),
                errors::InvalidArgument("Input matrix must be square."));
    OP_REQUIRES(context,
                input_tensor_grad.dim_size(0) == input_tensor_grad.dim_size(1),
                errors::InvalidArgument("Input matrix must be square."));

    // Check that the input matrices are of the same size.
    OP_REQUIRES(context,
                input_tensor_l.dim_size(0) == input_tensor_grad.dim_size(0),
                errors::InvalidArgument("Input matrices must be same size."));

    // Create an output tensor.
    Tensor* output_tensor = NULL;
    OP_REQUIRES_OK(context, context->allocate_output(
                                0, input_tensor_grad.shape(), &output_tensor));

    if (output_tensor->NumElements() == 0) {
      // The output shape is a 0-element matrix, so there is nothing to do.
      return;
    }
    // The next lines are necessary to get Eigen matrix behaviour.
    const ConstMatrixMap input_matrix_l_full(input_tensor_l.flat<T>().data(),
                                             input_tensor_l.dim_size(0),
                                             input_tensor_l.dim_size(1));
    const ConstMatrixMap input_matrix_grad(input_tensor_grad.flat<T>().data(),
                                           input_tensor_grad.dim_size(0),
                                           input_tensor_grad.dim_size(1));
    MatrixMap output_matrix(output_tensor->template flat<T>().data(),
                            input_tensor_l.dim_size(0),
                            input_tensor_l.dim_size(1));

    // The algorithm only depends on the lower triangular half of
    // input_matrix_l_full.
    const Matrix input_matrix_l =
        input_matrix_l_full.template triangularView<Eigen::Lower>();
    // The algorithm only depends on the lower triangular half of
    // input_matrix_grad.
    output_matrix = input_matrix_grad.template triangularView<Eigen::Lower>();

    const int64 kMatrixSize = input_matrix_l.rows();
    const int64 kMaxBlockSize = 32;

    for (int64 block_end = kMatrixSize; block_end > 0;
         block_end -= kMaxBlockSize) {
      /* This shows the block structure.

        / \
        |         |
        | R D     |
        \ B C     /

        Variable names representing the derivative matrix have a trailing
        '_bar'.
      */

      const int64 block_begin = std::max(0ll, block_end - kMaxBlockSize);
      const int64 block_size = block_end - block_begin;
      const int64 trailing_size = kMatrixSize - block_end;

      auto B = input_matrix_l.block(block_end, 0, trailing_size, block_begin);
      auto B_bar =
          output_matrix.block(block_end, 0, trailing_size, block_begin);

      auto C = input_matrix_l.block(block_end, block_begin, trailing_size,
                                    block_size);
      auto C_bar = output_matrix.block(block_end, block_begin, trailing_size,
                                       block_size);

      auto D = input_matrix_l.block(block_begin, block_begin, block_size,
                                    block_size);
      auto D_bar =
          output_matrix.block(block_begin, block_begin, block_size, block_size);

      auto R = input_matrix_l.block(block_begin, 0, block_size, block_begin);
      auto R_bar = output_matrix.block(block_begin, 0, block_size, block_begin);

      C_bar = D.adjoint()
                  .template triangularView<Eigen::Upper>()
                  .solve(C_bar.adjoint())
                  .adjoint();
      D_bar -= (C_bar.adjoint() * C).template triangularView<Eigen::Lower>();
      B_bar -= C_bar * R;
      R_bar -= C_bar.adjoint() * B;
      CholeskyGradUnblocked(D, D_bar);
      R_bar -= (D_bar + D_bar.adjoint()) * R;
    }
    output_matrix = (0.5 * (output_matrix + output_matrix.transpose())).eval();
  }

  void CholeskyGradUnblocked(const ConstRef l_block, Ref grad_block) {
    const int64 kMatrixSize = l_block.rows();
    for (int64 k = kMatrixSize - 1; k >= 0; k--) {
      /* This shows the block structure.

        / \
        |         |
        | r d     |
        \ B c     /

        Variable names representing the derivative matrix have a trailing
        '_bar'.
      */

      const int64 number_rows_B = kMatrixSize - (k + 1);
      const int64 number_rows_r_stack_B = number_rows_B + 1;

      auto r = l_block.block(k, 0, 1, k);
      auto r_bar = grad_block.block(k, 0, 1, k);
      auto d = l_block(k, k);  // This needs to be a scalar rather than a view.
      auto d_bar = grad_block.block(k, k, 1, 1);
      // B is not included explicitly because it is not used on its own.
      auto B_bar = grad_block.block(k + 1, 0, number_rows_B, k);
      auto c = l_block.block(k + 1, k, number_rows_B, 1);
      auto c_bar = grad_block.block(k + 1, k, number_rows_B, 1);
      // Result of vertically stacking d_bar and c_bar.
      auto d_stack_c_bar = grad_block.block(k, k, number_rows_r_stack_B, 1);
      // Result of vertically stacking r and B.
      auto r_stack_B = l_block.block(k, 0, number_rows_r_stack_B, k);
      d_bar -= (c.adjoint() * c_bar) / d;
      d_stack_c_bar /= d;
      r_bar -= d_stack_c_bar.adjoint() * r_stack_B;
      B_bar -= c_bar * r;
      d_bar /= 2.;
    }
  }
};

REGISTER_LINALG_OP("CholeskyGrad", (CholeskyGrad<float>), float);
REGISTER_LINALG_OP("CholeskyGrad", (CholeskyGrad<double>), double);
}  // namespace tensorflow
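For readers following the algebra, here is a minimal NumPy sketch (illustrative, not the kernel's code) of the same scalar recurrence as `CholeskyGradUnblocked`, plus the final symmetrization from `Compute`. `L` is the lower-triangular Cholesky factor and `L_bar` the sensitivity of the loss with respect to `L`:

```python
# NumPy sketch of the unblocked recurrence above; function name is ours.
import numpy as np

def cholesky_grad_unblocked(L, L_bar):
  L_bar = np.tril(L_bar).astype(float)  # only the lower triangle is used
  n = L.shape[0]
  for k in range(n - 1, -1, -1):
    r = L[k, :k]              # row to the left of the pivot
    d = L[k, k]               # the pivot, a scalar
    c = L[k + 1:, k]          # column below the pivot
    c_bar = L_bar[k + 1:, k]  # view into L_bar: rescaled in place below
    L_bar[k, k] -= c @ c_bar / d
    L_bar[k:, k] /= d         # rescales d_bar and c_bar together
    L_bar[k, :k] -= L_bar[k:, k] @ L[k:, :k]
    L_bar[k + 1:, :k] -= np.outer(c_bar, r)
    L_bar[k, k] /= 2.0
  # Compute() finishes by symmetrizing the accumulated sensitivity.
  return 0.5 * (L_bar + L_bar.T)
```

The in-place view semantics mirror the Eigen block expressions: dividing `L_bar[k:, k]` by the pivot rescales `c_bar` before the outer-product update, the same order of operations as in the kernel.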
Some files were not shown because too many files have changed in this diff.