Merge changes from github.

Change: 133874452
Martin Wicke 2016-09-21 13:16:48 -08:00 committed by TensorFlower Gardener
parent 8e308a8493
commit 999b794c13
86 changed files with 5702 additions and 3902 deletions

View File

@ -33,10 +33,10 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/))
* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/))
* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* [Android](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
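As a quick, non-authoritative sketch, any of the wheels above can be installed straight from its CI URL (here the Linux CPU / Python 2 link from the list; nightly artifact names change as versions are bumped):

```bash
# Install the nightly Linux CPU wheel for Python 2 (URL copied from the
# list above; swap in the link matching your platform and Python version).
pip install --upgrade https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
```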
#### *Try your first TensorFlow program*

View File

@ -320,6 +320,41 @@ filegroup(
],
)
filegroup(
name = "numericjs",
srcs = [
"benchmark.html",
"benchmark2.html",
"demo.html",
"documentation.html",
"myworker.js",
"resources/style.css",
"resources/style-ie.css",
"src/documentation.html",
"src/numeric.js",
"src/quadprog.js",
"src/seedrandom.js",
"src/sparse2.js",
"src/svd.js",
"tools/XMLHttpRequest.js",
"tools/closurelib.js",
"tools/excanvas.min.js",
"tools/goog-require.js",
"tools/jquery.flot.image.js",
"tools/jquery.flot.image.min.js",
"tools/jquery.flot.js",
"tools/jquery.flot.min.js",
"tools/jquery-1.7.1.js",
"tools/jquery-1.7.1.min.js",
"tools/json2.js",
"tools/megalib.js",
"tools/mytest.html",
"tools/sylvester.js",
"tools/unit2.js",
"tools/workshop.html",
],
)
filegroup(
name = "paper_behaviors",
srcs = [
@ -581,6 +616,275 @@ filegroup(
],
)
filegroup(
name = "three_js",
srcs = [
"three.js-r77/build/three.js",
"three.js-r77/build/three.min.js",
"three.js-r77/examples/js/AnimationClipCreator.js",
"three.js-r77/examples/js/BlendCharacter.js",
"three.js-r77/examples/js/BlendCharacterGui.js",
"three.js-r77/examples/js/BufferGeometryUtils.js",
"three.js-r77/examples/js/Car.js",
"three.js-r77/examples/js/Cloth.js",
"three.js-r77/examples/js/CurveExtras.js",
"three.js-r77/examples/js/Detector.js",
"three.js-r77/examples/js/Encodings.js",
"three.js-r77/examples/js/GPUParticleSystem.js",
"three.js-r77/examples/js/Gyroscope.js",
"three.js-r77/examples/js/Half.js",
"three.js-r77/examples/js/ImprovedNoise.js",
"three.js-r77/examples/js/MD2Character.js",
"three.js-r77/examples/js/MD2CharacterComplex.js",
"three.js-r77/examples/js/MarchingCubes.js",
"three.js-r77/examples/js/Mirror.js",
"three.js-r77/examples/js/MorphAnimMesh.js",
"three.js-r77/examples/js/MorphAnimation.js",
"three.js-r77/examples/js/Ocean.js",
"three.js-r77/examples/js/Octree.js",
"three.js-r77/examples/js/PRNG.js",
"three.js-r77/examples/js/ParametricGeometries.js",
"three.js-r77/examples/js/RollerCoaster.js",
"three.js-r77/examples/js/ShaderGodRays.js",
"three.js-r77/examples/js/ShaderSkin.js",
"three.js-r77/examples/js/ShaderTerrain.js",
"three.js-r77/examples/js/ShaderToon.js",
"three.js-r77/examples/js/SimplexNoise.js",
"three.js-r77/examples/js/SimulationRenderer.js",
"three.js-r77/examples/js/SkyShader.js",
"three.js-r77/examples/js/TimelinerController.js",
"three.js-r77/examples/js/TypedArrayUtils.js",
"three.js-r77/examples/js/UCSCharacter.js",
"three.js-r77/examples/js/Volume.js",
"three.js-r77/examples/js/VolumeSlice.js",
"three.js-r77/examples/js/WaterShader.js",
"three.js-r77/examples/js/WebVR.js",
"three.js-r77/examples/js/animation/CCDIKSolver.js",
"three.js-r77/examples/js/animation/MMDPhysics.js",
"three.js-r77/examples/js/cameras/CinematicCamera.js",
"three.js-r77/examples/js/cameras/CombinedCamera.js",
"three.js-r77/examples/js/controls/DeviceOrientationControls.js",
"three.js-r77/examples/js/controls/DragControls.js",
"three.js-r77/examples/js/controls/EditorControls.js",
"three.js-r77/examples/js/controls/FirstPersonControls.js",
"three.js-r77/examples/js/controls/FlyControls.js",
"three.js-r77/examples/js/controls/MouseControls.js",
"three.js-r77/examples/js/controls/OrbitControls.js",
"three.js-r77/examples/js/controls/OrthographicTrackballControls.js",
"three.js-r77/examples/js/controls/PointerLockControls.js",
"three.js-r77/examples/js/controls/TrackballControls.js",
"three.js-r77/examples/js/controls/TransformControls.js",
"three.js-r77/examples/js/controls/VRControls.js",
"three.js-r77/examples/js/crossfade/gui.js",
"three.js-r77/examples/js/crossfade/scenes.js",
"three.js-r77/examples/js/crossfade/transition.js",
"three.js-r77/examples/js/curves/NURBSCurve.js",
"three.js-r77/examples/js/curves/NURBSSurface.js",
"three.js-r77/examples/js/curves/NURBSUtils.js",
"three.js-r77/examples/js/effects/AnaglyphEffect.js",
"three.js-r77/examples/js/effects/AsciiEffect.js",
"three.js-r77/examples/js/effects/ParallaxBarrierEffect.js",
"three.js-r77/examples/js/effects/PeppersGhostEffect.js",
"three.js-r77/examples/js/effects/StereoEffect.js",
"three.js-r77/examples/js/effects/VREffect.js",
"three.js-r77/examples/js/exporters/OBJExporter.js",
"three.js-r77/examples/js/exporters/STLBinaryExporter.js",
"three.js-r77/examples/js/exporters/STLExporter.js",
"three.js-r77/examples/js/exporters/TypedGeometryExporter.js",
"three.js-r77/examples/js/geometries/ConvexGeometry.js",
"three.js-r77/examples/js/geometries/DecalGeometry.js",
"three.js-r77/examples/js/geometries/TeapotBufferGeometry.js",
"three.js-r77/examples/js/geometries/hilbert2D.js",
"three.js-r77/examples/js/geometries/hilbert3D.js",
"three.js-r77/examples/js/libs/ammo.js",
"three.js-r77/examples/js/libs/charsetencoder.min.js",
"three.js-r77/examples/js/libs/dat.gui.min.js",
"three.js-r77/examples/js/libs/earcut.js",
"three.js-r77/examples/js/libs/inflate.min.js",
"three.js-r77/examples/js/libs/jszip.min.js",
"three.js-r77/examples/js/libs/msgpack-js.js",
"three.js-r77/examples/js/libs/pnltri.min.js",
"three.js-r77/examples/js/libs/stats.min.js",
"three.js-r77/examples/js/libs/system.min.js",
"three.js-r77/examples/js/libs/timeliner_gui.min.js",
"three.js-r77/examples/js/libs/tween.min.js",
"three.js-r77/examples/js/libs/zlib_and_gzip.min.js",
"three.js-r77/examples/js/loaders/3MFLoader.js",
"three.js-r77/examples/js/loaders/AMFLoader.js",
"three.js-r77/examples/js/loaders/AWDLoader.js",
"three.js-r77/examples/js/loaders/AssimpJSONLoader.js",
"three.js-r77/examples/js/loaders/BabylonLoader.js",
"three.js-r77/examples/js/loaders/BinaryLoader.js",
"three.js-r77/examples/js/loaders/ColladaLoader.js",
"three.js-r77/examples/js/loaders/ColladaLoader2.js",
"three.js-r77/examples/js/loaders/DDSLoader.js",
"three.js-r77/examples/js/loaders/FBXLoader.js",
"three.js-r77/examples/js/loaders/HDRCubeTextureLoader.js",
"three.js-r77/examples/js/loaders/KMZLoader.js",
"three.js-r77/examples/js/loaders/MD2Loader.js",
"three.js-r77/examples/js/loaders/MMDLoader.js",
"three.js-r77/examples/js/loaders/MTLLoader.js",
"three.js-r77/examples/js/loaders/NRRDLoader.js",
"three.js-r77/examples/js/loaders/OBJLoader.js",
"three.js-r77/examples/js/loaders/PCDLoader.js",
"three.js-r77/examples/js/loaders/PDBLoader.js",
"three.js-r77/examples/js/loaders/PLYLoader.js",
"three.js-r77/examples/js/loaders/PVRLoader.js",
"three.js-r77/examples/js/loaders/PlayCanvasLoader.js",
"three.js-r77/examples/js/loaders/RGBELoader.js",
"three.js-r77/examples/js/loaders/STLLoader.js",
"three.js-r77/examples/js/loaders/SVGLoader.js",
"three.js-r77/examples/js/loaders/TGALoader.js",
"three.js-r77/examples/js/loaders/UTF8Loader.js",
"three.js-r77/examples/js/loaders/VRMLLoader.js",
"three.js-r77/examples/js/loaders/VTKLoader.js",
"three.js-r77/examples/js/loaders/collada/Animation.js",
"three.js-r77/examples/js/loaders/collada/AnimationHandler.js",
"three.js-r77/examples/js/loaders/collada/KeyFrameAnimation.js",
"three.js-r77/examples/js/loaders/ctm/CTMLoader.js",
"three.js-r77/examples/js/loaders/ctm/CTMWorker.js",
"three.js-r77/examples/js/loaders/ctm/ctm.js",
"three.js-r77/examples/js/loaders/ctm/lzma.js",
"three.js-r77/examples/js/loaders/deprecated/SceneLoader.js",
"three.js-r77/examples/js/loaders/gltf/glTF-parser.js",
"three.js-r77/examples/js/loaders/gltf/glTFAnimation.js",
"three.js-r77/examples/js/loaders/gltf/glTFLoader.js",
"three.js-r77/examples/js/loaders/gltf/glTFLoaderUtils.js",
"three.js-r77/examples/js/loaders/gltf/glTFShaders.js",
"three.js-r77/examples/js/loaders/gltf/gltfUtilities.js",
"three.js-r77/examples/js/loaders/sea3d/SEA3D.js",
"three.js-r77/examples/js/loaders/sea3d/SEA3DDeflate.js",
"three.js-r77/examples/js/loaders/sea3d/SEA3DLZMA.js",
"three.js-r77/examples/js/loaders/sea3d/SEA3DLegacy.js",
"three.js-r77/examples/js/loaders/sea3d/SEA3DLoader.js",
"three.js-r77/examples/js/math/ColorConverter.js",
"three.js-r77/examples/js/math/Lut.js",
"three.js-r77/examples/js/modifiers/BufferSubdivisionModifier.js",
"three.js-r77/examples/js/modifiers/ExplodeModifier.js",
"three.js-r77/examples/js/modifiers/SubdivisionModifier.js",
"three.js-r77/examples/js/modifiers/TessellateModifier.js",
"three.js-r77/examples/js/nodes/BuilderNode.js",
"three.js-r77/examples/js/nodes/ConstNode.js",
"three.js-r77/examples/js/nodes/FunctionCallNode.js",
"three.js-r77/examples/js/nodes/FunctionNode.js",
"three.js-r77/examples/js/nodes/GLNode.js",
"three.js-r77/examples/js/nodes/InputNode.js",
"three.js-r77/examples/js/nodes/NodeLib.js",
"three.js-r77/examples/js/nodes/NodeMaterial.js",
"three.js-r77/examples/js/nodes/RawNode.js",
"three.js-r77/examples/js/nodes/TempNode.js",
"three.js-r77/examples/js/nodes/accessors/CameraNode.js",
"three.js-r77/examples/js/nodes/accessors/ColorsNode.js",
"three.js-r77/examples/js/nodes/accessors/LightNode.js",
"three.js-r77/examples/js/nodes/accessors/NormalNode.js",
"three.js-r77/examples/js/nodes/accessors/PositionNode.js",
"three.js-r77/examples/js/nodes/accessors/ReflectNode.js",
"three.js-r77/examples/js/nodes/accessors/ScreenUVNode.js",
"three.js-r77/examples/js/nodes/accessors/UVNode.js",
"three.js-r77/examples/js/nodes/inputs/ColorNode.js",
"three.js-r77/examples/js/nodes/inputs/CubeTextureNode.js",
"three.js-r77/examples/js/nodes/inputs/FloatNode.js",
"three.js-r77/examples/js/nodes/inputs/IntNode.js",
"three.js-r77/examples/js/nodes/inputs/Matrix4Node.js",
"three.js-r77/examples/js/nodes/inputs/MirrorNode.js",
"three.js-r77/examples/js/nodes/inputs/ScreenNode.js",
"three.js-r77/examples/js/nodes/inputs/TextureNode.js",
"three.js-r77/examples/js/nodes/inputs/Vector2Node.js",
"three.js-r77/examples/js/nodes/inputs/Vector3Node.js",
"three.js-r77/examples/js/nodes/inputs/Vector4Node.js",
"three.js-r77/examples/js/nodes/materials/PhongNode.js",
"three.js-r77/examples/js/nodes/materials/PhongNodeMaterial.js",
"three.js-r77/examples/js/nodes/materials/StandardNode.js",
"three.js-r77/examples/js/nodes/materials/StandardNodeMaterial.js",
"three.js-r77/examples/js/nodes/math/Math1Node.js",
"three.js-r77/examples/js/nodes/math/Math2Node.js",
"three.js-r77/examples/js/nodes/math/Math3Node.js",
"three.js-r77/examples/js/nodes/math/OperatorNode.js",
"three.js-r77/examples/js/nodes/postprocessing/NodePass.js",
"three.js-r77/examples/js/nodes/utils/ColorAdjustmentNode.js",
"three.js-r77/examples/js/nodes/utils/JoinNode.js",
"three.js-r77/examples/js/nodes/utils/LuminanceNode.js",
"three.js-r77/examples/js/nodes/utils/NoiseNode.js",
"three.js-r77/examples/js/nodes/utils/NormalMapNode.js",
"three.js-r77/examples/js/nodes/utils/ResolutionNode.js",
"three.js-r77/examples/js/nodes/utils/RoughnessToBlinnExponentNode.js",
"three.js-r77/examples/js/nodes/utils/SwitchNode.js",
"three.js-r77/examples/js/nodes/utils/TimerNode.js",
"three.js-r77/examples/js/nodes/utils/VelocityNode.js",
"three.js-r77/examples/js/objects/ShadowMesh.js",
"three.js-r77/examples/js/pmrem/PMREMCubeUVPacker.js",
"three.js-r77/examples/js/pmrem/PMREMGenerator.js",
"three.js-r77/examples/js/postprocessing/AdaptiveToneMappingPass.js",
"three.js-r77/examples/js/postprocessing/BloomPass.js",
"three.js-r77/examples/js/postprocessing/BokehPass.js",
"three.js-r77/examples/js/postprocessing/ClearPass.js",
"three.js-r77/examples/js/postprocessing/DotScreenPass.js",
"three.js-r77/examples/js/postprocessing/EffectComposer.js",
"three.js-r77/examples/js/postprocessing/FilmPass.js",
"three.js-r77/examples/js/postprocessing/GlitchPass.js",
"three.js-r77/examples/js/postprocessing/ManualMSAARenderPass.js",
"three.js-r77/examples/js/postprocessing/MaskPass.js",
"three.js-r77/examples/js/postprocessing/RenderPass.js",
"three.js-r77/examples/js/postprocessing/SMAAPass.js",
"three.js-r77/examples/js/postprocessing/SavePass.js",
"three.js-r77/examples/js/postprocessing/ShaderPass.js",
"three.js-r77/examples/js/postprocessing/TAARenderPass.js",
"three.js-r77/examples/js/postprocessing/TexturePass.js",
"three.js-r77/examples/js/renderers/CSS2DRenderer.js",
"three.js-r77/examples/js/renderers/CSS3DRenderer.js",
"three.js-r77/examples/js/renderers/CanvasRenderer.js",
"three.js-r77/examples/js/renderers/Projector.js",
"three.js-r77/examples/js/renderers/RaytracingRenderer.js",
"three.js-r77/examples/js/renderers/RaytracingWorker.js",
"three.js-r77/examples/js/renderers/SVGRenderer.js",
"three.js-r77/examples/js/renderers/SoftwareRenderer.js",
"three.js-r77/examples/js/shaders/BasicShader.js",
"three.js-r77/examples/js/shaders/BleachBypassShader.js",
"three.js-r77/examples/js/shaders/BlendShader.js",
"three.js-r77/examples/js/shaders/BokehShader.js",
"three.js-r77/examples/js/shaders/BokehShader2.js",
"three.js-r77/examples/js/shaders/BrightnessContrastShader.js",
"three.js-r77/examples/js/shaders/ColorCorrectionShader.js",
"three.js-r77/examples/js/shaders/ColorifyShader.js",
"three.js-r77/examples/js/shaders/ConvolutionShader.js",
"three.js-r77/examples/js/shaders/CopyShader.js",
"three.js-r77/examples/js/shaders/DOFMipMapShader.js",
"three.js-r77/examples/js/shaders/DigitalGlitch.js",
"three.js-r77/examples/js/shaders/DotScreenShader.js",
"three.js-r77/examples/js/shaders/EdgeShader.js",
"three.js-r77/examples/js/shaders/EdgeShader2.js",
"three.js-r77/examples/js/shaders/FXAAShader.js",
"three.js-r77/examples/js/shaders/FilmShader.js",
"three.js-r77/examples/js/shaders/FocusShader.js",
"three.js-r77/examples/js/shaders/FresnelShader.js",
"three.js-r77/examples/js/shaders/GammaCorrectionShader.js",
"three.js-r77/examples/js/shaders/HorizontalBlurShader.js",
"three.js-r77/examples/js/shaders/HorizontalTiltShiftShader.js",
"three.js-r77/examples/js/shaders/HueSaturationShader.js",
"three.js-r77/examples/js/shaders/KaleidoShader.js",
"three.js-r77/examples/js/shaders/LuminosityShader.js",
"three.js-r77/examples/js/shaders/MirrorShader.js",
"three.js-r77/examples/js/shaders/NormalMapShader.js",
"three.js-r77/examples/js/shaders/OceanShaders.js",
"three.js-r77/examples/js/shaders/ParallaxShader.js",
"three.js-r77/examples/js/shaders/RGBShiftShader.js",
"three.js-r77/examples/js/shaders/SMAAShader.js",
"three.js-r77/examples/js/shaders/SSAOShader.js",
"three.js-r77/examples/js/shaders/SepiaShader.js",
"three.js-r77/examples/js/shaders/TechnicolorShader.js",
"three.js-r77/examples/js/shaders/ToneMapShader.js",
"three.js-r77/examples/js/shaders/TriangleBlurShader.js",
"three.js-r77/examples/js/shaders/UnpackDepthRGBAShader.js",
"three.js-r77/examples/js/shaders/VerticalBlurShader.js",
"three.js-r77/examples/js/shaders/VerticalTiltShiftShader.js",
"three.js-r77/examples/js/shaders/VignetteShader.js",
"three.js-r77/examples/js/utils/GeometryUtils.js",
"three.js-r77/examples/js/utils/ImageUtils.js",
"three.js-r77/examples/js/utils/ShadowMapViewer.js",
"three.js-r77/examples/js/utils/UVsDebug.js",
],
)
filegroup(
name = "web_animations_js",
srcs = [
@ -608,3 +912,25 @@ filegroup(
"webcomponents-lite.min.js",
],
)
filegroup(
name = "weblas",
srcs = [
"benchmark.html",
"benchmark/sgemm.js",
"dist/weblas.js",
"index.js",
"lib/globals.js",
"lib/pipeline.js",
"lib/saxpycalculator.js",
"lib/sclmpcalculator.js",
"lib/sdwnscalculator.js",
"lib/sgemmcalculator.js",
"lib/sscalcalculator.js",
"lib/tensor.js",
"lib/test.js",
"lib/webgl.js",
"test.html",
"test/data/generate.js",
],
)

View File

@ -15,13 +15,49 @@ HEADERS = [
"gif_lib_private.h",
]
config_setting(
name = "windows",
values = {
"cpu": "x64_windows_msvc",
},
visibility = ["//visibility:public"],
)
prefix_dir = "giflib-5.1.4/lib"
prefix_dir_windows = "windows/giflib-5.1.4/lib"
genrule(
name = "srcs_without_unistd",
srcs = [prefix_dir + "/" + source for source in SOURCES],
outs = [prefix_dir_windows + "/" + source for source in SOURCES],
cmd = "for f in $(SRCS); do " +
" sed 's/#include <unistd.h>//g' $$f > $(@D)/%s/$$(basename $$f);" % prefix_dir_windows +
"done",
)
genrule(
name = "hdrs_without_unistd",
srcs = [prefix_dir + "/" + hdrs for hdrs in HEADERS],
outs = [prefix_dir_windows + "/" + hdrs for hdrs in HEADERS],
cmd = "for f in $(SRCS); do " +
" sed 's/#include <unistd.h>//g' $$f > $(@D)/%s/$$(basename $$f);" % prefix_dir_windows +
"done",
)
cc_library(
name = "gif",
srcs = [prefix_dir + "/" + source for source in SOURCES],
hdrs = [prefix_dir + "/" + hdrs for hdrs in HEADERS],
includes = [prefix_dir],
srcs = select({
"//conditions:default" : [prefix_dir + "/" + source for source in SOURCES],
":windows" : [":srcs_without_unistd"],
}),
hdrs = select({
"//conditions:default" : [prefix_dir + "/" + hdrs for hdrs in HEADERS],
":windows" : [":hdrs_without_unistd"],
}),
includes = select({
"//conditions:default" : [prefix_dir],
":windows" : [prefix_dir_windows],
}),
defines = [
"HAVE_CONFIG_H",
],
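A hypothetical sketch of how the select() above takes effect: building with the MSVC CPU value named in the config_setting routes srcs/hdrs/includes to the unistd-stripped copies. The target label here is an assumption, not taken from this diff:

```bash
# --cpu=x64_windows_msvc satisfies the ":windows" config_setting, so the
# select() branches pick the genrule outputs with <unistd.h> removed.
# "@gif_archive//:gif" is an assumed label for this BUILD file's package.
bazel build --cpu=x64_windows_msvc @gif_archive//:gif
```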

View File

@ -1224,6 +1224,7 @@ cc_library(
deps = [
":gpr",
"//external:nanopb",
"//external:zlib",
],
copts = [
"-std=gnu99",

View File

@ -177,7 +177,6 @@ setup(
include_package_data=True,
package_data={
'tensorflow': ['python/_pywrap_tensorflow.so',
'python/libpython_deps.so',
] + matches,
},
zip_safe=False,

View File

@ -12,11 +12,7 @@
# Resolve installed dependencies
########################################################
# 1. Resolve the installed version of SWIG.
FIND_PACKAGE(SWIG REQUIRED)
INCLUDE(${SWIG_USE_FILE})
# 2. Resolve the installed version of Python (for Python.h and python).
# 1. Resolve the installed version of Python (for Python.h and python).
# TODO(mrry): Parameterize the build script to enable Python 3 building.
include(FindPythonInterp)
if(NOT PYTHON_INCLUDE_DIR)
@ -32,7 +28,7 @@ if(NOT PYTHON_INCLUDE_DIR)
endif(NOT PYTHON_INCLUDE_DIR)
FIND_PACKAGE(PythonLibs)
# 3. Resolve the installed version of NumPy (for numpy/arrayobject.h).
# 2. Resolve the installed version of NumPy (for numpy/arrayobject.h).
if(NOT NUMPY_INCLUDE_DIR)
set(NUMPY_NOT_FOUND false)
exec_program("${PYTHON_EXECUTABLE}"
@ -45,7 +41,7 @@ if(NOT NUMPY_INCLUDE_DIR)
endif(${NUMPY_NOT_FOUND})
endif(NOT NUMPY_INCLUDE_DIR)
# 4. Resolve the installed version of zlib (for libz.so).
# 3. Resolve the installed version of zlib (for libz.so).
find_package(ZLIB REQUIRED)
@ -292,12 +288,30 @@ add_dependencies(tf_python_ops tf_python_op_gen_main)
# Build the SWIG-wrapped library for the TensorFlow runtime.
############################################################
# python_deps is a shared library containing all of the TensorFlow
find_package(SWIG REQUIRED)
# Generate the C++ and Python source code for the SWIG wrapper.
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/pywrap_tensorflow.py"
"${CMAKE_CURRENT_BINARY_DIR}/pywrap_tensorflow.cc"
DEPENDS tf_python_touchup_modules
COMMAND ${SWIG_EXECUTABLE}
ARGS -python -c++
-I${tensorflow_source_dir}
-I${CMAKE_CURRENT_BINARY_DIR}
-module pywrap_tensorflow
-outdir ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python
-o ${CMAKE_CURRENT_BINARY_DIR}/pywrap_tensorflow.cc
-globals ''
${tensorflow_source_dir}/tensorflow/python/tensorflow.i
COMMENT "Running SWIG to generate Python wrappers"
VERBATIM )
# pywrap_tensorflow is a shared library containing all of the TensorFlow
# runtime and the standard ops and kernels. These are installed into
# tf_python/tensorflow/python/.
# TODO(mrry): Refactor this to expose a framework library that
# facilitates `tf.load_op_library()`.
add_library(python_deps SHARED
add_library(pywrap_tensorflow SHARED
"${tensorflow_source_dir}/tensorflow/python/client/tf_session_helper.h"
"${tensorflow_source_dir}/tensorflow/python/client/tf_session_helper.cc"
"${tensorflow_source_dir}/tensorflow/python/framework/cpp_shape_inference.h"
@ -318,6 +332,7 @@ add_library(python_deps SHARED
"${tensorflow_source_dir}/tensorflow/c/checkpoint_reader.h"
"${tensorflow_source_dir}/tensorflow/c/tf_status_helper.cc"
"${tensorflow_source_dir}/tensorflow/c/tf_status_helper.h"
"${CMAKE_CURRENT_BINARY_DIR}/pywrap_tensorflow.cc"
$<TARGET_OBJECTS:tf_core_lib>
$<TARGET_OBJECTS:tf_core_cpu>
$<TARGET_OBJECTS:tf_core_framework>
@ -326,7 +341,7 @@ add_library(python_deps SHARED
$<TARGET_OBJECTS:tf_core_distributed_runtime>
$<TARGET_OBJECTS:tf_core_kernels>
)
target_link_libraries(python_deps
target_link_libraries(pywrap_tensorflow
${CMAKE_THREAD_LIBS_INIT}
tf_protos_cc
${GRPC_LIBRARIES}
@ -341,7 +356,7 @@ target_link_libraries(python_deps
${ZLIB_LIBRARIES}
${CMAKE_DL_LIBS}
)
target_include_directories(python_deps PUBLIC
target_include_directories(pywrap_tensorflow PUBLIC
${tensorflow_source_dir}
${CMAKE_CURRENT_BINARY_DIR}
${eigen_INCLUDE_DIRS}
@ -349,62 +364,32 @@ target_include_directories(python_deps PUBLIC
${NUMPY_INCLUDE_DIR}
)
# C++11
target_compile_features(python_deps PRIVATE
target_compile_features(pywrap_tensorflow PRIVATE
cxx_rvalue_references
)
set_target_properties(python_deps PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tf_python/tensorflow/python)
# _pywrap_tensorflow is the target that generates the SWIG bindings
# and compiles them as a shared library that depends on python_deps.
set(CMAKE_SWIG_FLAGS "")
set(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python)
SET_SOURCE_FILES_PROPERTIES("${tensorflow_source_dir}/tensorflow/python/tensorflow.i"
PROPERTIES CPLUSPLUS ON
)
SET_PROPERTY(SOURCE "${tensorflow_source_dir}/tensorflow/python/tensorflow.i"
PROPERTY SWIG_FLAGS "-I\"${tensorflow_source_dir}\"" "-module" "pywrap_tensorflow"
)
SWIG_ADD_MODULE(pywrap_tensorflow python
"${tensorflow_source_dir}/tensorflow/python/tensorflow.i"
)
SWIG_LINK_LIBRARIES(pywrap_tensorflow
python_deps
${PROTOBUF_LIBRARY}
${CMAKE_DL_LIBS}
)
target_include_directories(_pywrap_tensorflow PUBLIC
${tensorflow_source_dir}
${CMAKE_CURRENT_BINARY_DIR}
${eigen_INCLUDE_DIRS}
${PYTHON_INCLUDE_DIR}
${NUMPY_INCLUDE_DIR}
)
add_dependencies(_pywrap_tensorflow
eigen
tf_core_direct_session
tf_core_distributed_runtime
tf_core_framework
python_deps
tf_python_copy_scripts_to_destination
tf_python_ops
tf_python_touchup_modules
)
# C++11
target_compile_features(_pywrap_tensorflow PRIVATE
cxx_rvalue_references
)
set_target_properties(_pywrap_tensorflow PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tf_python/tensorflow/python)
############################################################
# Build a PIP package containing the TensorFlow runtime.
############################################################
add_custom_target(tf_python_copy_pip_files)
add_dependencies(tf_python_copy_pip_files _pywrap_tensorflow tf_python_copy_scripts_to_destination tf_python_touchup_modules)
add_dependencies(tf_python_copy_pip_files
pywrap_tensorflow
tf_python_copy_scripts_to_destination
tf_python_touchup_modules
tf_python_ops)
add_custom_command(TARGET tf_python_copy_pip_files POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/contrib/cmake/setup.py ${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/contrib/cmake/setup.py
${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
add_custom_command(TARGET tf_python_copy_pip_files POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/tools/pip_package/README ${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/libpywrap_tensorflow.so
${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/_pywrap_tensorflow.so)
add_custom_command(TARGET tf_python_copy_pip_files POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/tools/pip_package/MANIFEST.in ${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/tools/pip_package/README
${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
add_custom_command(TARGET tf_python_copy_pip_files POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/tools/pip_package/MANIFEST.in
${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
add_custom_command(TARGET tf_python_copy_pip_files POST_BUILD
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/tf_python/setup.py bdist_wheel
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/tf_python)
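A minimal sketch of driving the targets above, assuming an out-of-tree build directory; the target names are the ones defined in this file, the paths are placeholders:

```bash
# Configure the CMake build (directory layout assumed), then build the
# SWIG-wrapped runtime and package it into a wheel.
mkdir -p cmake_build && cd cmake_build
cmake /path/to/tensorflow/tensorflow/contrib/cmake
make pywrap_tensorflow          # runs SWIG and links libpywrap_tensorflow.so
make tf_python_copy_pip_files   # copies setup.py/README and runs bdist_wheel
```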

View File

@ -13,7 +13,17 @@ This folder contains examples of how to build applications for iOS devices using
- Download
[Inception v1](https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip),
and extract the label and graph files into the data folders inside both the
simple and camera examples.
simple and camera examples:
```bash
mkdir -p ~/graphs
curl -o ~/graphs/inception5h.zip \
https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip \
&& unzip ~/graphs/inception5h.zip -d ~/graphs/inception5h
cp ~/graphs/inception5h/* tensorflow/contrib/ios_examples/benchmark/data/
cp ~/graphs/inception5h/* tensorflow/contrib/ios_examples/camera/data/
cp ~/graphs/inception5h/* tensorflow/contrib/ios_examples/simple/data/
```
- Load the Xcode project inside the `simple` subfolder, and press Command-R to
build and run it on the simulator or your connected device.
@ -86,3 +96,46 @@ rundown:
flag will cause these duplicates to become link errors. If you were using
`-all_load` to avoid issues with Objective-C categories in static libraries,
you may be able to replace it with the `-ObjC` flag.
## Reducing the binary size
TensorFlow is a comparatively large library for a mobile device, so it will
increase the size of your app. Currently on iOS we see around an 11 MB binary
footprint per CPU architecture, though we're actively working on reducing that.
It can be tricky to set up the right configuration in your own app to keep the
size minimized, so if you do run into this issue we recommend you start by
looking at the simple example to examine its size. Here's how you do that:
- Open the Xcode project in tensorflow/contrib/ios_examples/simple.
- Make sure you've followed the steps above to get the data files.
- Choose "Generic iOS Device" as the build configuration.
- Select Product->Build.
- Once the build's complete, open the Report Navigator and select the logs.
- Near the bottom, you'll see a line saying "Touch tf_ios_makefile_example.app".
- Expand that line using the icon on the right, and copy the first argument to
the Touch command.
- Go to the terminal, type `ls -lah ` and then paste the path you copied.
- For example, it might look like `ls -lah /Users/petewarden/Library/Developer/Xcode/DerivedData/tf_ios_makefile_example-etdbksqytcnzeyfgdwiihzkqpxwr/Build/Products/Debug-iphoneos/tf_ios_makefile_example.app`
- Running this command will show the size of the executable as the
`tf_ios_makefile_example` line.
Right now you'll see a size of around 23 MB, since it includes two
architectures (armv7 and arm64). As a first step, you should make sure the size
increase you see in your own app is similar, and if it's larger, look at the
"Other Linker Flags" used in the Simple Xcode project settings to strip the
executable.
After that, you can manually modify the list of kernels
included in tensorflow/contrib/makefile/tf_op_files.txt to reduce the number of
implementations to the ones you're actually using in your own model. We're
hoping to automate this step in the future, but for now manually removing them
is the best approach.
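As a rough sketch of the measurement step (the .app path is a placeholder for the one copied out of the build log; `lipo` is Apple's standard tool for inspecting fat binaries):

```bash
# Size of the whole bundle, as in the ls example above.
ls -lah /path/to/DerivedData/.../Debug-iphoneos/tf_ios_makefile_example.app
# List the CPU slices packed into the executable (expect armv7 and arm64).
lipo -info /path/to/DerivedData/.../Debug-iphoneos/tf_ios_makefile_example.app/tf_ios_makefile_example
```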

View File

@ -17,6 +17,7 @@
#include <memory>
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/memmapped_file_system.h"
@interface CameraExampleViewController
: UIViewController<UIGestureRecognizerDelegate,
@ -35,6 +36,7 @@
NSMutableArray *labelLayers;
AVCaptureSession *session;
std::unique_ptr<tensorflow::Session> tf_session;
std::unique_ptr<tensorflow::MemmappedEnv> tf_memmapped_env;
std::vector<std::string> labels;
}
@property(retain, nonatomic) CATextLayer *predictionTextLayer;

View File

@ -22,6 +22,27 @@
#include "tensorflow_utils.h"
// If you have your own model, modify this to the file name, and make sure
// you've added the file to your app resources too.
static NSString* model_file_name = @"tensorflow_inception_graph";
static NSString* model_file_type = @"pb";
// This controls whether we'll be loading a plain GraphDef proto, or a
// file created by the convert_graphdef_memmapped_format utility that wraps a
// GraphDef and parameter file that can be mapped into memory from file to
// reduce overall memory usage.
const bool model_uses_memory_mapping = false;
// If you have your own model, point this to the labels file.
static NSString* labels_file_name = @"imagenet_comp_graph_label_strings";
static NSString* labels_file_type = @"txt";
// These dimensions need to match those the model was trained with.
const int wanted_input_width = 224;
const int wanted_input_height = 224;
const int wanted_input_channels = 3;
const float input_mean = 117.0f;
const float input_std = 1.0f;
const std::string input_layer_name = "input";
const std::string output_layer_name = "softmax1";
static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
@"AVCaptureStillImageIsCapturingStillImageContext";
@ -269,39 +290,32 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
}
const int image_channels = 4;
const int wanted_width = 224;
const int wanted_height = 224;
const int wanted_channels = 3;
const float input_mean = 117.0f;
const float input_std = 1.0f;
assert(image_channels >= wanted_channels);
assert(image_channels >= wanted_input_channels);
tensorflow::Tensor image_tensor(
tensorflow::DT_FLOAT,
tensorflow::TensorShape(
{1, wanted_height, wanted_width, wanted_channels}));
{1, wanted_input_height, wanted_input_width, wanted_input_channels}));
auto image_tensor_mapped = image_tensor.tensor<float, 4>();
tensorflow::uint8 *in = sourceStartAddr;
float *out = image_tensor_mapped.data();
for (int y = 0; y < wanted_height; ++y) {
float *out_row = out + (y * wanted_width * wanted_channels);
for (int x = 0; x < wanted_width; ++x) {
const int in_x = (y * image_width) / wanted_width;
const int in_y = (x * image_height) / wanted_height;
for (int y = 0; y < wanted_input_height; ++y) {
float *out_row = out + (y * wanted_input_width * wanted_input_channels);
for (int x = 0; x < wanted_input_width; ++x) {
const int in_x = (y * image_width) / wanted_input_width;
const int in_y = (x * image_height) / wanted_input_height;
tensorflow::uint8 *in_pixel =
in + (in_y * image_width * image_channels) + (in_x * image_channels);
float *out_pixel = out_row + (x * wanted_channels);
for (int c = 0; c < wanted_channels; ++c) {
float *out_pixel = out_row + (x * wanted_input_channels);
for (int c = 0; c < wanted_input_channels; ++c) {
out_pixel[c] = (in_pixel[c] - input_mean) / input_std;
}
}
}
if (tf_session.get()) {
std::string input_layer = "input";
std::string output_layer = "output";
std::vector<tensorflow::Tensor> outputs;
tensorflow::Status run_status = tf_session->Run(
{{input_layer, image_tensor}}, {output_layer}, {}, &outputs);
{{input_layer_name, image_tensor}}, {output_layer_name}, {}, &outputs);
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed:" << run_status;
} else {
@ -362,22 +376,28 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
- (void)viewDidLoad {
[super viewDidLoad];
[self setupAVCapture];
square = [[UIImage imageNamed:@"squarePNG"] retain];
synth = [[AVSpeechSynthesizer alloc] init];
labelLayers = [[NSMutableArray alloc] init];
oldPredictionValues = [[NSMutableDictionary alloc] init];
tensorflow::Status load_status =
LoadModel(@"tensorflow_inception_graph", @"pb", &tf_session);
tensorflow::Status load_status;
if (model_uses_memory_mapping) {
load_status = LoadMemoryMappedModel(
model_file_name, model_file_type, &tf_session, &tf_memmapped_env);
} else {
load_status = LoadModel(model_file_name, model_file_type, &tf_session);
}
if (!load_status.ok()) {
LOG(FATAL) << "Couldn't load model: " << load_status;
}
tensorflow::Status labels_status =
LoadLabels(@"imagenet_comp_graph_label_strings", @"txt", &labels);
LoadLabels(labels_file_name, labels_file_type, &labels);
if (!labels_status.ok()) {
LOG(FATAL) << "Couldn't load labels: " << labels_status;
}
[self setupAVCapture];
}
- (void)viewDidUnload {

View File

@ -304,12 +304,6 @@
OTHER_LDFLAGS = (
"-force_load",
"$(SRCROOT)/../../makefile/gen/lib/libtensorflow-core.a",
"-Xlinker",
"-S",
"-Xlinker",
"-x",
"-Xlinker",
"-dead_strip",
);
PRODUCT_BUNDLE_IDENTIFIER = com.google.CameraExample;
PRODUCT_NAME = "$(TARGET_NAME)";
@ -369,12 +363,6 @@
OTHER_LDFLAGS = (
"-force_load",
"$(SRCROOT)/../../makefile/gen/lib/libtensorflow-core.a",
"-Xlinker",
"-S",
"-Xlinker",
"-x",
"-Xlinker",
"-dead_strip",
);
PRODUCT_BUNDLE_IDENTIFIER = com.google.CameraExample;
PRODUCT_NAME = "$(TARGET_NAME)";

View File

@ -19,15 +19,34 @@
#include <vector>
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/memmapped_file_system.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// Reads a serialized GraphDef protobuf file from the bundle, typically
// created with the freeze_graph script. Populates the session argument with a
// Session object that has the model loaded.
tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
std::unique_ptr<tensorflow::Session>* session);
// Loads a model from a file that has been created using the
// convert_graphdef_memmapped_format tool. This bundles a GraphDef
// proto together with a file that can be memory-mapped, containing the weight
// parameters for the model. This is useful because it reduces the overall
// memory pressure, since the read-only parameter regions can be easily paged
// out and don't count toward memory limits on iOS.
tensorflow::Status LoadMemoryMappedModel(
NSString* file_name, NSString* file_type,
std::unique_ptr<tensorflow::Session>* session,
std::unique_ptr<tensorflow::MemmappedEnv>* memmapped_env);
// Takes a text file with a single label on each line, and returns a list.
tensorflow::Status LoadLabels(NSString* file_name, NSString* file_type,
std::vector<std::string>* label_strings);
// Sorts the results from a model execution, and returns the highest scoring.
void GetTopN(const Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>,
Eigen::Aligned>& prediction, const int num_results,
const float threshold,
Eigen::Aligned>& prediction,
const int num_results, const float threshold,
std::vector<std::pair<float, int> >* top_results);
#endif // TENSORFLOW_CONTRIB_IOS_EXAMPLES_CAMERA_TENSORFLOW_UTILS_H_
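A hedged sketch of producing a file that LoadMemoryMappedModel() can consume, using the convert_graphdef_memmapped_format tool named above; the build label and flag names are assumptions from memory, not taken from this diff:

```bash
# Build the converter, then rewrite a frozen GraphDef so its weight
# parameters live in a memory-mappable region.
bazel build tensorflow/contrib/util:convert_graphdef_memmapped_format
bazel-bin/tensorflow/contrib/util/convert_graphdef_memmapped_format \
  --in_graph=tensorflow_inception_graph.pb \
  --out_graph=memmapped_inception_graph.pb
```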

View File

@ -16,9 +16,9 @@
#include "tensorflow_utils.h"
#include <fstream>
#include <pthread.h>
#include <unistd.h>
#include <fstream>
#include <queue>
#include <sstream>
#include <string>
@ -35,56 +35,58 @@
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
namespace {
class IfstreamInputStream : public ::google::protobuf::io::CopyingInputStream {
public:
explicit IfstreamInputStream(const std::string& file_name)
: ifs_(file_name.c_str(), std::ios::in | std::ios::binary) {}
~IfstreamInputStream() { ifs_.close(); }
int Read(void* buffer, int size) {
if (!ifs_) {
return -1;
}
ifs_.read(static_cast<char*>(buffer), size);
return ifs_.gcount();
// Helper class used to load protobufs efficiently.
class IfstreamInputStream : public ::google::protobuf::io::CopyingInputStream {
public:
explicit IfstreamInputStream(const std::string& file_name)
: ifs_(file_name.c_str(), std::ios::in | std::ios::binary) {}
~IfstreamInputStream() { ifs_.close(); }
int Read(void* buffer, int size) {
if (!ifs_) {
return -1;
}
private:
std::ifstream ifs_;
};
ifs_.read(static_cast<char*>(buffer), size);
return ifs_.gcount();
}
private:
std::ifstream ifs_;
};
} // namespace
// Returns the top N confidence values over threshold in the provided vector,
// sorted by confidence in descending order.
void GetTopN(const Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>,
Eigen::Aligned>& prediction, const int num_results,
const float threshold,
Eigen::Aligned>& prediction,
const int num_results, const float threshold,
std::vector<std::pair<float, int> >* top_results) {
// Will contain top N results in ascending order.
std::priority_queue<std::pair<float, int>,
std::vector<std::pair<float, int> >,
std::greater<std::pair<float, int> > > top_result_pq;
std::vector<std::pair<float, int> >,
std::greater<std::pair<float, int> > >
top_result_pq;
const int count = prediction.size();
for (int i = 0; i < count; ++i) {
const float value = prediction(i);
// Only add it if it beats the threshold and has a chance at being in
// the top N.
if (value < threshold) {
continue;
}
top_result_pq.push(std::pair<float, int>(value, i));
// If at capacity, kick the smallest value out.
if (top_result_pq.size() > num_results) {
top_result_pq.pop();
}
}
// Copy to output vector and reverse into descending order.
while (!top_result_pq.empty()) {
top_results->push_back(top_result_pq.top());
@ -93,11 +95,10 @@ void GetTopN(const Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>,
std::reverse(top_results->begin(), top_results->end());
}
bool PortableReadFileToProto(const std::string& file_name,
::google::protobuf::MessageLite* proto) {
::google::protobuf::io::CopyingInputStreamAdaptor stream(
new IfstreamInputStream(file_name));
new IfstreamInputStream(file_name));
stream.SetOwnsCopyingStream(true);
::google::protobuf::io::CodedInputStream coded_stream(&stream);
// Total bytes hard limit / warning limit are set to 1GB and 512MB
@ -107,10 +108,11 @@ bool PortableReadFileToProto(const std::string& file_name,
}
NSString* FilePathForResourceName(NSString* name, NSString* extension) {
NSString* file_path = [[NSBundle mainBundle] pathForResource:name ofType:extension];
NSString* file_path =
[[NSBundle mainBundle] pathForResource:name ofType:extension];
if (file_path == NULL) {
LOG(FATAL) << "Couldn't find '" << [name UTF8String] << "."
<< [extension UTF8String] << "' in bundle.";
<< [extension UTF8String] << "' in bundle.";
return nullptr;
}
return file_path;
@ -119,19 +121,18 @@ NSString* FilePathForResourceName(NSString* name, NSString* extension) {
tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
std::unique_ptr<tensorflow::Session>* session) {
tensorflow::SessionOptions options;
tensorflow::Session* session_pointer = nullptr;
tensorflow::Status session_status = tensorflow::NewSession(options, &session_pointer);
tensorflow::Status session_status =
tensorflow::NewSession(options, &session_pointer);
if (!session_status.ok()) {
LOG(ERROR) << "Could not create TensorFlow Session: " << session_status;
return session_status;
}
session->reset(session_pointer);
LOG(INFO) << "Session created.";
tensorflow::GraphDef tensorflow_graph;
LOG(INFO) << "Graph created.";
NSString* model_path = FilePathForResourceName(file_name, file_type);
if (!model_path) {
LOG(ERROR) << "Failed to find model proto at" << [file_name UTF8String]
@ -139,37 +140,89 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
return tensorflow::errors::NotFound([file_name UTF8String],
[file_type UTF8String]);
}
const bool read_proto_succeeded = PortableReadFileToProto(
[model_path UTF8String], &tensorflow_graph);
const bool read_proto_succeeded =
PortableReadFileToProto([model_path UTF8String], &tensorflow_graph);
if (!read_proto_succeeded) {
LOG(ERROR) << "Failed to load model proto from" << [model_path UTF8String];
return tensorflow::errors::NotFound([model_path UTF8String]);
}
LOG(INFO) << "Creating session.";
tensorflow::Status create_status = (*session)->Create(tensorflow_graph);
if (!create_status.ok()) {
LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status;
return create_status;
}
return tensorflow::Status::OK();
}
tensorflow::Status LoadMemoryMappedModel(
NSString* file_name, NSString* file_type,
std::unique_ptr<tensorflow::Session>* session,
std::unique_ptr<tensorflow::MemmappedEnv>* memmapped_env) {
NSString* network_path = FilePathForResourceName(file_name, file_type);
memmapped_env->reset(
new tensorflow::MemmappedEnv(tensorflow::Env::Default()));
tensorflow::Status mmap_status =
(memmapped_env->get())->InitializeFromFile([network_path UTF8String]);
if (!mmap_status.ok()) {
LOG(ERROR) << "MMap failed with " << mmap_status.error_message();
return mmap_status;
}
tensorflow::GraphDef tensorflow_graph;
tensorflow::Status load_graph_status = ReadBinaryProto(
memmapped_env->get(),
tensorflow::MemmappedFileSystem::kMemmappedPackageDefaultGraphDef,
&tensorflow_graph);
if (!load_graph_status.ok()) {
LOG(ERROR) << "MMap load graph failed with "
<< load_graph_status.error_message();
return load_graph_status;
}
tensorflow::SessionOptions options;
// Disable optimizations on this graph so that constant folding doesn't
// increase the memory footprint by creating new constant copies of the weight
// parameters.
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(::tensorflow::OptimizerOptions::L0);
options.env = memmapped_env->get();
tensorflow::Session* session_pointer = nullptr;
tensorflow::Status session_status =
tensorflow::NewSession(options, &session_pointer);
if (!session_status.ok()) {
LOG(ERROR) << "Could not create TensorFlow Session: " << session_status;
return session_status;
}
tensorflow::Status create_status = session_pointer->Create(tensorflow_graph);
if (!create_status.ok()) {
LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status;
return create_status;
}
session->reset(session_pointer);
return tensorflow::Status::OK();
}
tensorflow::Status LoadLabels(NSString* file_name, NSString* file_type,
std::vector<std::string>* label_strings) {
std::vector<std::string>* label_strings) {
// Read the label list
NSString* labels_path = FilePathForResourceName(file_name, file_type);
if (!labels_path) {
LOG(ERROR) << "Failed to find model proto at" << [file_name UTF8String]
<< [file_type UTF8String];
<< [file_type UTF8String];
return tensorflow::errors::NotFound([file_name UTF8String],
[file_type UTF8String]);
}
std::ifstream t;
t.open([labels_path UTF8String]);
std::string line;
while(t){
while (t) {
std::getline(t, line);
label_strings->push_back(line);
}

View File

@ -239,7 +239,7 @@ NSString* RunInferenceOnImage() {
const float confidence = result.first;
const int index = result.second;
ss << index << " " << confidence << " ";
ss << index << " " << confidence << " ";
// Write out the result as a string
if (index < label_strings.size()) {

View File

@ -274,8 +274,11 @@
591157B31CF4011D00C31E3A /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
CLANG_DEBUG_INFORMATION_LEVEL = default;
CODE_SIGN_IDENTITY = "iPhone Developer";
ENABLE_BITCODE = NO;
GCC_ENABLE_CPP_EXCEPTIONS = YES;
GCC_ENABLE_CPP_RTTI = YES;
HEADER_SEARCH_PATHS = (
"$(SRCROOT)/../../../..",
"$(SRCROOT)/../../makefile/downloads/protobuf/src/",
@ -290,6 +293,7 @@
"$(SRCROOT)/../../makefile/gen/protobuf_ios/lib",
"$(SRCROOT)/../../makefile/gen/lib",
);
OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
OTHER_LDFLAGS = (
"-force_load",
"$(SRCROOT)/../../makefile/gen/lib/libtensorflow-core.a",
@ -302,14 +306,18 @@
);
PRODUCT_BUNDLE_IDENTIFIER = "com.google.TF-Test";
PRODUCT_NAME = "$(TARGET_NAME)";
SEPARATE_STRIP = NO;
};
name = Debug;
};
591157B41CF4011D00C31E3A /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
CLANG_DEBUG_INFORMATION_LEVEL = default;
CODE_SIGN_IDENTITY = "iPhone Developer";
ENABLE_BITCODE = NO;
GCC_ENABLE_CPP_EXCEPTIONS = YES;
GCC_ENABLE_CPP_RTTI = YES;
HEADER_SEARCH_PATHS = (
"$(SRCROOT)/../../../..",
"$(SRCROOT)/../../makefile/downloads/protobuf/src/",
@ -325,6 +333,7 @@
"$(SRCROOT)/../../makefile/gen/lib",
);
ONLY_ACTIVE_ARCH = YES;
OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
OTHER_LDFLAGS = (
"-force_load",
"$(SRCROOT)/../../makefile/gen/lib/libtensorflow-core.a",
@ -337,6 +346,7 @@
);
PRODUCT_BUNDLE_IDENTIFIER = "com.google.TF-Test";
PRODUCT_NAME = "$(TARGET_NAME)";
SEPARATE_STRIP = NO;
};
name = Release;
};

View File

@ -1244,7 +1244,7 @@ class _RealValuedColumn(_FeatureColumn, collections.namedtuple(
return variable_scope.get_variable(
name,
shape=[self.dimension, num_outputs],
initializer=array_ops.zeros_initializer,
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
if self.name:
@ -1834,7 +1834,7 @@ class DataFrameColumn(_FeatureColumn,
return variable_scope.get_variable(
name,
shape=[self.dimension, num_outputs],
initializer=array_ops.zeros_initializer,
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
if self.name:

View File

@ -375,7 +375,7 @@ def weighted_sum_from_feature_columns(columns_to_tensors,
variable = [contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=array_ops.zeros_initializer,
initializer=init_ops.zeros_initializer,
collections=weight_collections)]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:

View File

@ -112,7 +112,7 @@ def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
raise TypeError('Cannot create initializer for non-floating point type.')
# Estimating fan_in and fan_out is not possible to do perfectly, but we try.
# This is the right thing for matrix multiply and convolutions.
fan_in = float(shape[-2])
fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
fan_out = float(shape[-1])
for dim in shape[:-2]:
fan_in *= float(dim)

View File

@ -168,5 +168,32 @@ class VarianceScalingInitializerTest(tf.test.TestCase):
mode='FAN_AVG',
uniform=True)
def test_1d_shape_fan_in(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_1d_shape_fan_out(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_1d_shape_fan_avg(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100],
variance=4. / (100. + 100.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
if __name__ == '__main__':
tf.test.main()

View File

@ -135,7 +135,7 @@ def batch_norm(inputs,
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(update_ops)
updates = tf.group(*update_ops)
total_loss = control_flow_ops.with_dependencies([updates], total_loss)
One can set update_collections=None to force the updates in place, but that
@ -150,7 +150,8 @@ def batch_norm(inputs,
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
activation_fn: Optional activation function.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
updates_collections: collections to collect the update ops for computation.
The update_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
@ -275,7 +276,7 @@ def batch_norm(inputs,
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs_shape)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@ -298,7 +299,8 @@ def bias_add(inputs,
Args:
inputs: a tensor with at least rank 2 and a value for the last dimension,
e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Optional activation function.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
@ -328,7 +330,7 @@ def bias_add(inputs,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@ -378,10 +380,12 @@ def convolution2d(inputs,
rate: integer. If less than or equal to 1, a standard convolution is used.
If greater than 1, then atrous convolution is applied and `stride`
must be set to 1.
activation_fn: activation function.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
@ -427,7 +431,7 @@ def convolution2d(inputs,
else:
outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if normalizer_fn:
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
@ -442,7 +446,7 @@ def convolution2d(inputs,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@ -487,10 +491,12 @@ def convolution2d_in_plane(
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding type to use, either 'SAME' or 'VALID'.
activation_fn: activation function.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
@ -527,7 +533,7 @@ def convolution2d_in_plane(
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn:
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
@ -543,7 +549,7 @@ def convolution2d_in_plane(
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@ -583,10 +589,12 @@ def convolution2d_transpose(
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation_fn: activation function.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
@ -656,7 +664,7 @@ def convolution2d_transpose(
out_shape[2] = get_deconv_dim(out_shape[2], stride_w, kernel_w, padding)
outputs.set_shape(out_shape)
if normalizer_fn:
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
@ -671,7 +679,7 @@ def convolution2d_transpose(
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@ -777,10 +785,12 @@ def fully_connected(inputs,
inputs: A tensor with at least rank 2 and a known value for the last dimension,
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: activation function.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
@ -830,7 +840,7 @@ def fully_connected(inputs,
# Reshape inputs
inputs = array_ops.reshape(inputs, [-1, num_input_units])
outputs = standard_ops.matmul(inputs, weights)
if normalizer_fn:
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
@ -845,7 +855,7 @@ def fully_connected(inputs,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
if len(static_shape) > 2:
# Reshape back outputs
@ -880,7 +890,8 @@ def layer_norm(inputs,
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
activation_fn: Optional activation function.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
@ -935,7 +946,7 @@ def layer_norm(inputs,
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
@ -1014,7 +1025,7 @@ def one_hot_encoding(labels,
def _apply_activation(y, activation_fn, output_collections):
if activation_fn:
if activation_fn is not None:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
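`if activation_fn:` and `if activation_fn is not None:` only diverge for callables whose truth value is falsy, but the identity check states the intent precisely; a small sketch (illustrative only):

```python
class NoOpActivation(object):
    """A callable that is falsy but is still a real activation function."""
    def __call__(self, y):
        return y
    def __bool__(self):         # Python 3 truth value
        return False
    __nonzero__ = __bool__      # Python 2 truth value

fn = NoOpActivation()
assert not fn                   # `if fn:` would silently skip the activation
assert fn is not None           # `if fn is not None:` applies it as intended
```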
@ -1109,10 +1120,12 @@ def separable_convolution2d(
stride: a list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: one of 'VALID' or 'SAME'.
activation_fn: activation function.
activation_fn: activation function, set to None to skip it and maintain
a linear activation.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
Default is None for no normalizer function.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
@ -1171,7 +1184,7 @@ def separable_convolution2d(
outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding)
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn:
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
@ -1186,7 +1199,7 @@ def separable_convolution2d(
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@ -1362,8 +1375,8 @@ def legacy_fully_connected(x,
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: A function that requires a single Tensor that is applied as a
non-linearity. If None is used, do not apply any activation.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in

View File

@ -408,6 +408,7 @@ py_test(
size = "small",
srcs = ["python/learn/estimators/classifier_test.py"],
srcs_version = "PY2AND3",
tags = ["manual"], # http://b/31032996
deps = [
":learn",
"//tensorflow:tensorflow_py",

View File

@ -35,21 +35,6 @@ Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
@deprecated('2016-09-15', 'Please use load_csv_{with|without}_header instead.')
def load_csv(filename, target_dtype, target_column=-1, has_header=True):
"""Load dataset from CSV file."""
if has_header:
return load_csv_with_header(filename=filename,
target_dtype=target_dtype,
features_dtype=np.float64,
target_column=target_column)
else:
return load_csv_without_header(filename=filename,
target_dtype=target_dtype,
features_dtype=np.float64,
target_column=target_column)
def load_csv_with_header(filename,
target_dtype,
features_dtype,
@ -60,7 +45,7 @@ def load_csv_with_header(filename,
header = next(data_file)
n_samples = int(header[0])
n_features = int(header[1])
data = np.zeros((n_samples, n_features))
data = np.zeros((n_samples, n_features), dtype=features_dtype)
target = np.zeros((n_samples,), dtype=target_dtype)
for i, row in enumerate(data_file):
target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
@ -83,8 +68,7 @@ def load_csv_without_header(filename,
target = np.array(target, dtype=target_dtype)
data = np.array(data)
return Dataset(data=np.array(data),
target=np.array(target).astype(target_dtype))
return Dataset(data=data, target=target)
def shrink_csv(filename, ratio):
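The `dtype=features_dtype` fix above is easy to check outside TensorFlow; a sketch of the same header-CSV loading pattern (hypothetical mini-loader, numpy and stdlib only):

```python
import csv
import io

import numpy as np

def load_csv_with_header(fileobj, target_dtype, features_dtype,
                         target_column=-1):
    # Mini re-implementation of the pattern above, illustration only.
    reader = csv.reader(fileobj)
    n_samples, n_features = (int(v) for v in next(reader)[:2])
    data = np.zeros((n_samples, n_features), dtype=features_dtype)  # the fix
    target = np.zeros((n_samples,), dtype=target_dtype)
    for i, row in enumerate(reader):
        target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
        data[i] = np.asarray(row, dtype=features_dtype)
    return data, target

data, target = load_csv_with_header(
    io.StringIO("2,2\n1,2,0\n3,4,1\n"),
    target_dtype=np.int64, features_dtype=np.int64)
assert data.dtype == np.int64  # without dtype=..., np.zeros gives float64
```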

View File

@ -22,8 +22,6 @@ from __future__ import print_function
import functools
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.ops import autoencoder_ops
from tensorflow.contrib.learn.python.learn.ops import dnn_ops
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@ -177,31 +175,6 @@ def logistic_regression(x,
class_weight=class_weight)
def get_dnn_model(hidden_units, target_predictor_fn, dropout=None):
"""Returns a function that creates a DNN TensorFlow subgraph.
Args:
hidden_units: List of values of hidden units for layers.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes x, y and returns predictions and loss
tensors.
dropout: When not none, causes dropout regularization to be used,
with the specified probability of removing a given coordinate.
Returns:
A function that creates the subgraph.
"""
def dnn_estimator(x, y):
"""DNN estimator with target predictor function on top."""
layers = dnn_ops.dnn(x, hidden_units, dropout=dropout)
return target_predictor_fn(layers, y)
return dnn_estimator
## This will be in TensorFlow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):

View File

@ -21,8 +21,6 @@ from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.ops.array_ops import *
from tensorflow.contrib.learn.python.learn.ops.autoencoder_ops import *
from tensorflow.contrib.learn.python.learn.ops.dnn_ops import *
from tensorflow.contrib.learn.python.learn.ops.embeddings_ops import *
from tensorflow.contrib.learn.python.learn.ops.losses_ops import *
from tensorflow.contrib.learn.python.learn.ops.seq2seq_ops import *

View File

@ -1,58 +0,0 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow ops for autoencoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.ops import dnn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def dnn_autoencoder(
tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
scope=None):
"""Creates fully connected autoencoder subgraph.
Args:
tensor_in: tensor or placeholder for input features.
hidden_units: list of counts of hidden units in each layer.
activation: activation function used to map inner latent layer onto
reconstruction layer.
add_noise: a function that adds noise to tensor_in,
e.g. def add_noise(x):
return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
dropout: if not None, will add a dropout layer with given
probability.
scope: the variable scope for this op.
Returns:
Tensors for encoder and decoder.
"""
with vs.variable_scope(scope, "autoencoder", [tensor_in]):
if add_noise is not None:
tensor_in = add_noise(tensor_in)
with vs.variable_scope("encoder"):
# build DNN encoder
encoder = dnn_ops.dnn(
tensor_in, hidden_units, activation=activation, dropout=dropout)
with vs.variable_scope("decoder"):
# reverse hidden_units and built DNN decoder
decoder = dnn_ops.dnn(
encoder, hidden_units[::-1], activation=activation, dropout=dropout)
return encoder, decoder

View File

@ -140,7 +140,7 @@ $(shell mkdir -p $(DEPDIR) >/dev/null)
# Settings for the target compiler.
CXX := $(CC_PREFIX) gcc
OPTFLAGS := -O0
CXXFLAGS := --std=c++11 -DIS_SLIM_BUILD $(OPTFLAGS)
CXXFLAGS := --std=c++11 -DIS_SLIM_BUILD -fno-exceptions -DNDEBUG $(OPTFLAGS)
LDFLAGS := \
-L/usr/local/lib
DEPFLAGS = -MT $@ -MMD -MP -MF $(DEPDIR)/$*.Td
@ -188,7 +188,9 @@ ifeq ($(HAS_GEN_HOST_PROTOC),true)
LIBFLAGS += -L$(MAKEFILE_DIR)/gen/protobuf-host/lib
export LD_LIBRARY_PATH=$(MAKEFILE_DIR)/gen/protobuf-host/lib
endif
CXXFLAGS += -fPIC
LIBFLAGS += -Wl,--allow-multiple-definition -Wl,--whole-archive
LDFLAGS := -Wl,--no-whole-archive
endif
# If we're on Linux, also link in the dl library.
ifeq ($(TARGET),LINUX)
@ -282,7 +284,7 @@ ifeq ($(TARGET),IOS)
IPHONESIMULATOR_SYSROOT := $(shell xcrun --sdk iphonesimulator \
--show-sdk-path)
IOS_SDK_VERSION := $(shell xcrun --sdk iphoneos --show-sdk-version)
MIN_SDK_VERSION := 9.2
MIN_SDK_VERSION := 8.2
# Override IOS_ARCH with ARMV7, ARMV7S, ARM64, or I386.
IOS_ARCH := X86_64
ifeq ($(IOS_ARCH),ARMV7)

View File

@ -45,9 +45,9 @@ IPHONEOS_SYSROOT=`xcrun --sdk iphoneos --show-sdk-path`
IPHONESIMULATOR_PLATFORM=`xcrun --sdk iphonesimulator --show-sdk-platform-path`
IPHONESIMULATOR_SYSROOT=`xcrun --sdk iphonesimulator --show-sdk-path`
IOS_SDK_VERSION=`xcrun --sdk iphoneos --show-sdk-version`
MIN_SDK_VERSION=9.2
MIN_SDK_VERSION=8.2
CFLAGS="-DNDEBUG -g -O0 -pipe -fPIC -fcxx-exceptions"
CFLAGS="-DNDEBUG -Os -pipe -fPIC -fno-exceptions"
CXXFLAGS="${CFLAGS} -std=c++11 -stdlib=libc++"
LDFLAGS="-stdlib=libc++"
LIBS="-lc++ -lc++abi"

View File

@ -16,9 +16,21 @@
# Builds the TensorFlow core library with ARM and x86 architectures for iOS, and
# packs them into a fat file.
function less_than_required_version() {
echo $1 | (IFS=. read major minor micro
if [ $major -ne $2 ]; then
[ $major -lt $2 ]
elif [ $minor -ne $3 ]; then
[ $minor -lt $3 ]
else
[ ${micro:-0} -lt $4 ]
fi
)
}
ACTUAL_XCODE_VERSION=`xcodebuild -version | head -n 1 | sed 's/Xcode //'`
REQUIRED_XCODE_VERSION=7.3.0
if [ ${ACTUAL_XCODE_VERSION//.} -lt ${REQUIRED_XCODE_VERSION//.} ]
if less_than_required_version $ACTUAL_XCODE_VERSION 7 3 0
then
echo "error: Xcode ${REQUIRED_XCODE_VERSION} or later is required."
exit 1
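The new shell function replaces a digit-stripping comparison that breaks as soon as version strings have different component counts; a pure-Python sketch of the two approaches:

```python
def less_than_required(version, major, minor, micro):
    # Component-wise comparison, mirroring the shell function above.
    parts = [int(p) for p in version.split(".")] + [0, 0]
    return tuple(parts[:3]) < (major, minor, micro)

# The old check stripped the dots and compared the digits as one integer,
# which misfires once the component counts differ:
assert int("8.0".replace(".", "")) < int("7.3.0".replace(".", ""))  # 80 < 730!
assert not less_than_required("8.0", 7, 3, 0)  # component-wise: 8.0 >= 7.3.0
assert less_than_required("7.2.1", 7, 3, 0)    # and 7.2.1 is correctly too old
```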

View File

@ -30,6 +30,7 @@ from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
@ -152,7 +153,7 @@ def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
# Holds running total histogram of scores for records labeled True.
hist_true_acc = variable_scope.get_variable(
'hist_true_acc',
initializer=array_ops.zeros_initializer(
initializer=init_ops.zeros_initializer(
[nbins],
dtype=hist_true.dtype),
collections=collections,
@ -160,7 +161,7 @@ def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
# Holds running total histogram of scores for records labeled False.
hist_false_acc = variable_scope.get_variable(
'hist_false_acc',
initializer=array_ops.zeros_initializer(
initializer=init_ops.zeros_initializer(
[nbins],
dtype=hist_false.dtype),
collections=collections,

View File

@ -981,7 +981,7 @@ def main(unused_args):
return -1
tf_graph = tf.GraphDef()
with tf.gfile.Open(FLAGS.input, "r") as f:
with tf.gfile.Open(FLAGS.input, "rb") as f:
data = f.read()
tf_graph.ParseFromString(data)
@ -993,7 +993,7 @@ def main(unused_args):
output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))
f = tf.gfile.FastGFile(FLAGS.output, "w")
f = tf.gfile.FastGFile(FLAGS.output, "wb")
f.write(output_graph.SerializeToString())
return 0
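The `"r"` to `"rb"` and `"w"` to `"wb"` changes matter on Python 3, where text mode decodes bytes and can corrupt a serialized `GraphDef`. A plain-file sketch of the same round trip (stdlib `open` standing in for `tf.gfile`, whose modes mirror the built-in ones):

```python
import tempfile

payload = b"\x08\x96\x01"  # arbitrary binary bytes, like a serialized proto

with tempfile.NamedTemporaryFile(delete=False) as f:
    path = f.name
    f.write(payload)          # binary mode: bytes pass through untouched

with open(path, "rb") as f:   # matches the fixed gfile.Open(..., "rb")
    assert f.read() == payload

with open(path, "r") as f:    # text mode decodes; on Python 3 this can
    try:                      # raise or mangle non-UTF-8 proto bytes
        f.read()
    except UnicodeDecodeError:
        pass
```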

View File

@ -207,7 +207,7 @@ class CoupledInputForgetGateLSTMCell(rnn_cell.RNNCell):
b = vs.get_variable(
"B", shape=[3 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
initializer=init_ops.zeros_initializer, dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [inputs, m_prev])
@ -334,7 +334,7 @@ class TimeFreqLSTMCell(rnn_cell.RNNCell):
dtype, self._num_unit_shards)
b = vs.get_variable(
"B", shape=[4 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
initializer=init_ops.zeros_initializer, dtype=dtype)
# Diagonal connections
if self._use_peepholes:
@ -537,7 +537,7 @@ class GridLSTMCell(rnn_cell.RNNCell):
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f", shape=[num_gates * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
initializer=init_ops.zeros_initializer, dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable(
"W_t", [actual_input_size + 2 * self._num_units,
@ -545,7 +545,7 @@ class GridLSTMCell(rnn_cell.RNNCell):
dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t", shape=[num_gates * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
initializer=init_ops.zeros_initializer, dtype=dtype)
if self._use_peepholes:
# Diagonal connections

View File

@ -1099,6 +1099,7 @@ filegroup(
"lib/core/refcount.h",
"lib/core/status.h",
"lib/core/stringpiece.h",
"lib/core/threadpool.h",
"lib/gtl/array_slice.h",
"lib/gtl/array_slice_internal.h",
"lib/gtl/inlined_vector.h",
@ -1513,7 +1514,7 @@ tf_cc_tests(
"framework/op_kernel_test.cc",
"framework/op_registration_test.cc",
"framework/partial_tensor_shape_test.cc",
"framework/rendezvous_test.cc",
# "framework/rendezvous_test.cc", # flaky b/30476344
"framework/resource_mgr_test.cc",
"framework/shape_inference_test.cc",
"framework/shape_inference_testutil_test.cc",

View File

@ -262,14 +262,16 @@ class Im2ColConvFunctor {
errors::InvalidArgument("Im2Col patch too large for buffer"));
const size_t patches_per_chunk =
max_chunk_size / (filter_value_count * sizeof(T1));
const size_t chunk_value_count =
(max_chunk_size + (sizeof(T1) - 1)) / sizeof(T1);
// Because memory allocation is very expensive on mobile platforms, try to
// allocate a persistent buffer that will be kept around between calls. We
// use TensorFlow's resource management to ensure that the memory will be
// released when the session is over.
Im2ColBufferResource<T1, max_chunk_size>* im2col_buffer_resource;
std::function<Status(Im2ColBufferResource<T1, max_chunk_size>**)> creator =
[](Im2ColBufferResource<T1, max_chunk_size>** resource) {
*resource = new Im2ColBufferResource<T1, max_chunk_size>();
Im2ColBufferResource<T1, chunk_value_count>* im2col_buffer_resource;
std::function<Status(Im2ColBufferResource<T1, chunk_value_count>**)> creator =
[](Im2ColBufferResource<T1, chunk_value_count>** resource) {
*resource = new Im2ColBufferResource<T1, chunk_value_count>();
return Status::OK();
};
OP_REQUIRES_OK(context, context->resource_manager()->LookupOrCreate(

View File

@ -22,6 +22,8 @@ namespace functor {
template struct BinaryFunctor<GPUDevice, logical_and, 1>;
template struct BinaryFunctor<GPUDevice, logical_and, 2>;
template struct BinaryFunctor<GPUDevice, logical_and, 3>;
template struct BinaryFunctor<GPUDevice, logical_and, 4>;
template struct BinaryFunctor<GPUDevice, logical_and, 5>;
} // namespace functor
} // namespace tensorflow

View File

@ -22,6 +22,8 @@ namespace functor {
template struct BinaryFunctor<GPUDevice, logical_or, 1>;
template struct BinaryFunctor<GPUDevice, logical_or, 2>;
template struct BinaryFunctor<GPUDevice, logical_or, 3>;
template struct BinaryFunctor<GPUDevice, logical_or, 4>;
template struct BinaryFunctor<GPUDevice, logical_or, 5>;
} // namespace functor
} // namespace tensorflow

View File

@ -122,6 +122,20 @@ class BinaryOp : public BinaryOpShared {
BCast::ToIndexArray<3>(bcast->x_bcast()),
in1.shaped<Tin, 3>(bcast->y_reshape()),
BCast::ToIndexArray<3>(bcast->y_bcast()), error_ptr);
} else if (ndims == 4) {
functor::BinaryFunctor<Device, Functor, 4>().BCast(
eigen_device, out->shaped<Tout, 4>(bcast->result_shape()),
in0.shaped<Tin, 4>(bcast->x_reshape()),
BCast::ToIndexArray<4>(bcast->x_bcast()),
in1.shaped<Tin, 4>(bcast->y_reshape()),
BCast::ToIndexArray<4>(bcast->y_bcast()), error_ptr);
} else if (ndims == 5) {
functor::BinaryFunctor<Device, Functor, 5>().BCast(
eigen_device, out->shaped<Tout, 5>(bcast->result_shape()),
in0.shaped<Tin, 5>(bcast->x_reshape()),
BCast::ToIndexArray<5>(bcast->x_bcast()),
in1.shaped<Tin, 5>(bcast->y_reshape()),
BCast::ToIndexArray<5>(bcast->y_bcast()), error_ptr);
} else {
SetUnimplementedError(ctx);
}

View File

@ -128,7 +128,9 @@ struct BinaryFunctor<GPUDevice, Functor, NDIMS, has_errors> {
#define DEFINE_BINARY1(F, T) \
template struct BinaryFunctor<GPUDevice, F<T>, 1>; \
template struct BinaryFunctor<GPUDevice, F<T>, 2>; \
template struct BinaryFunctor<GPUDevice, F<T>, 3>
template struct BinaryFunctor<GPUDevice, F<T>, 3>; \
template struct BinaryFunctor<GPUDevice, F<T>, 4>; \
template struct BinaryFunctor<GPUDevice, F<T>, 5>
#define DEFINE_BINARY2(F, T0, T1) \
DEFINE_BINARY1(F, T0); \
DEFINE_BINARY1(F, T1)

View File

@ -578,7 +578,7 @@ bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
height of the underlying image.
For example, if an image is 100 x 200 pixels and the bounding box is
`[0.1, 0.5, 0.2, 0.9]`, the bottom-left and upper-right coordinates of the
`[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the
bounding box will be `(10, 40)` to `(50, 180)`.
Parts of the bounding box may fall outside the image.
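The corrected ordering is `[y_min, x_min, y_max, x_max]`; a quick sketch (hypothetical helper) verifying the numbers in the example above:

```python
def box_to_pixels(box, height, width):
    # box is [y_min, x_min, y_max, x_max] with floats in [0.0, 1.0].
    y_min, x_min, y_max, x_max = box
    to_px = lambda v, size: int(round(v * size))
    return ((to_px(y_min, height), to_px(x_min, width)),
            (to_px(y_max, height), to_px(x_max, width)))

# 100 x 200 image with box [0.1, 0.2, 0.5, 0.9], as in the text above:
assert box_to_pixels([0.1, 0.2, 0.5, 0.9], 100, 200) == ((10, 40), (50, 180))
```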

View File

@ -20,6 +20,7 @@ limitations under the License.
// IWYU pragma: friend third_party/tensorflow/core/platform/logging.h
#include <sstream>
#include <limits>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
@ -173,6 +174,8 @@ string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
// The (int, int) specialization works around the issue that the compiler
// will not instantiate the template version of the function on values of
// unnamed enum type - see comment below.
// The (size_t, int) and (int, size_t) specializations are to handle unsigned
// comparison errors while still being thorough with the comparison.
#define TF_DEFINE_CHECK_OP_IMPL(name, op) \
template <typename T1, typename T2> \
inline string* name##Impl(const T1& v1, const T2& v2, \
@ -184,6 +187,20 @@ string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
} \
inline string* name##Impl(int v1, int v2, const char* exprtext) { \
return name##Impl<int, int>(v1, v2, exprtext); \
} \
inline string* name##Impl(const size_t v1, const int v2, const char* exprtext) { \
if (TF_PREDICT_FALSE(v2 < 0)) { \
return ::tensorflow::internal::MakeCheckOpString(v1, v2, exprtext);\
} \
const size_t uval = (size_t)((unsigned)v1); \
return name##Impl<size_t, size_t>(uval, v2, exprtext); \
} \
inline string* name##Impl(const int v1, const size_t v2, const char* exprtext) { \
if (TF_PREDICT_FALSE(v2 >= std::numeric_limits<int>::max())) { \
return ::tensorflow::internal::MakeCheckOpString(v1, v2, exprtext);\
} \
const size_t uval = (size_t)((unsigned)v2); \
return name##Impl<size_t, size_t>(v1, uval, exprtext); \
}
// We use the full name Check_EQ, Check_NE, etc. in case the file including

View File

@ -123,7 +123,7 @@ message ThreadPoolOptionProto {
};
// Session configuration parameters.
// The system picks an appropriate values for fields that are not set.
// The system picks appropriate values for fields that are not set.
message ConfigProto {
// Map from device type name (e.g., "CPU" or "GPU" ) to maximum
// number of devices of that type to use. If a particular device

View File

@ -20,7 +20,7 @@ limitations under the License.
#define TF_MAJOR_VERSION 0
#define TF_MINOR_VERSION 10
#define TF_PATCH_VERSION 0rc0
#define TF_PATCH_VERSION 0
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")

View File

@ -31,7 +31,7 @@ android {
compileSdkVersion 24
buildToolsVersion "24.0.1"
lintOptions {t
lintOptions {
abortOnError false
}

View File

@ -34,7 +34,6 @@ import pandas
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.layers import conv2d
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
@ -55,7 +54,8 @@ def char_cnn_model(x, y):
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = conv2d(byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
conv1 = tf.contrib.layers.convolution2d(byte_list, N_FILTERS,
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
@ -65,7 +65,9 @@ def char_cnn_model(x, y):
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = conv2d(pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])

View File

@ -18,7 +18,7 @@ service.
In order to run the notebook locally, the following dependencies must be installed:
- Python 2.7
- Python 2.7 or 3.5
- TensorFlow (>=r0.7)
- NumPy
- Jupyter Notebook

View File

@ -278,7 +278,7 @@
" tensor = n.attr['value'].tensor\n",
" size = len(tensor.tensor_content)\n",
" if size > max_const_size:\n",
" tensor.tensor_content = \"<stripped %d bytes>\"%size\n",
" tensor.tensor_content = bytes(\"<stripped %d bytes>\"%size, 'utf-8')\n",
" return strip_def\n",
" \n",
"def rename_nodes(graph_def, rename_func):\n",

View File

@ -243,4 +243,4 @@ try:
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")

View File

@ -30,6 +30,12 @@ Static helper routines for ` TensorShape `. Includes a few common predicates on
#### `static bool tensorflow::TensorShapeUtils::IsSquareMatrix(const TensorShape &shape)` {#static_bool_tensorflow_TensorShapeUtils_IsSquareMatrix}
#### `static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher(const TensorShape &shape)` {#static_bool_tensorflow_TensorShapeUtils_IsMatrixOrHigher}

View File

@ -1,25 +0,0 @@
# `struct TF_Buffer`
###Member Details
#### `const void* TF_Buffer::data` {#const_void_TF_Buffer_data}
#### `size_t TF_Buffer::length` {#size_t_TF_Buffer_length}
#### `void(* TF_Buffer::data_deallocator)(void *data, size_t length))(void *data, size_t length)` {#void_TF_Buffer_data_deallocator_void_data_size_t_length_}

View File

@ -48,7 +48,6 @@ write the graph to a file.
* [tensorflow::TensorShapeUtils](ClassTensorShapeUtils.md)
* [tensorflow::PartialTensorShape](ClassPartialTensorShape.md)
* [tensorflow::PartialTensorShapeUtils](ClassPartialTensorShapeUtils.md)
* [TF_Buffer](StructTF_Buffer.md)
## Thread

View File

@ -134,7 +134,7 @@ Example:
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v a Python list with 2 numpy arrays: the numpy array [10, 20] and the
# v is a Python list with 2 numpy arrays: the numpy array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])

View File

@ -11,9 +11,9 @@ deconvolution.
* <b>`value`</b>: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]`.
`[batch, in_height, in_width, in_channels]`.
* <b>`filter`</b>: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`[filter_height, filter_width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
* <b>`output_shape`</b>: A 1-D `Tensor` representing the output shape of the
deconvolution op.

View File

@ -7,18 +7,19 @@ github source.
The TensorFlow Python API supports Python 2.7 and Python 3.3+.
The GPU version (Linux & Mac OS X only) works best with Cuda Toolkit 7.5 and
cuDNN v4. other versions are supported (Cuda toolkit >= 7.0 and
cuDNN 6.5(v2), 7.0(v3), v5) only when installing from sources.
Please see [Cuda installation](#optional-install-cuda-gpus-on-linux)
for details.
The GPU version works best with Cuda Toolkit 7.5 and
cuDNN v5. Other versions are supported (Cuda toolkit >= 7.0 and
cuDNN >= v3) only when installing from sources.
Please see [Cuda installation](#optional-install-cuda-gpus-on-linux) for
details. For Mac OS X, please see [Setup GPU for
Mac](#optional-setup-gpu-for-mac).
## Overview
We support different ways to install TensorFlow:
* [Pip install](#pip-installation): Install TensorFlow on your machine, possibly
upgrading previously installed Python packages. May impact existing
* [Pip install](#pip-installation): Install TensorFlow on your machine,
possibly upgrading previously installed Python packages. May impact existing
Python programs on your machine.
* [Virtualenv install](#virtualenv-installation): Install TensorFlow in its own
directory, not impacting any existing Python programs on your machine.
@ -30,9 +31,9 @@ We support different ways to install TensorFlow:
* [Installing from sources](#installing-from-sources): Install TensorFlow by
building a pip wheel that you then install using pip.
If you are familiar with Pip, Virtualenv, Anaconda, or Docker, please feel free to adapt
the instructions to your particular needs. The names of the pip and Docker
images are listed in the corresponding installation sections.
If you are familiar with Pip, Virtualenv, Anaconda, or Docker, please feel free
to adapt the instructions to your particular needs. The names of the pip and
Docker images are listed in the corresponding installation sections.
If you encounter installation errors, see
[common problems](#common-problems) for some solutions.
@ -43,8 +44,9 @@ If you encounter installation errors, see
management system used to install and manage software packages written in
Python.
The packages that will be installed or upgraded during the pip install are listed in the
[REQUIRED_PACKAGES section of setup.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/pip_package/setup.py).
The packages that will be installed or upgraded during the pip install are
listed in the [REQUIRED_PACKAGES section of
setup.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/pip_package/setup.py).
Install pip (or pip3 for python3) if it is not already installed:
@ -61,37 +63,37 @@ Then, select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py2-none-any.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py2-none-any.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py3-none-any.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py3-none-any.whl
$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py3-none-any.whl
```
Install TensorFlow:
@ -157,37 +159,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py2-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py2-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py3-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py3-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py3-none-any.whl
```
Finally install TensorFlow:
@ -211,7 +213,8 @@ When you are done using TensorFlow, deactivate the environment.
$ # Your prompt should change back
```
To use TensorFlow later you will have to activate the Virtualenv environment again:
To use TensorFlow later you will have to activate the Virtualenv environment
again:
```bash
$ source ~/tensorflow/bin/activate # If using bash.
@ -227,8 +230,9 @@ $ source ~/tensorflow/bin/activate.csh # If using csh.
[Anaconda](https://www.continuum.io/why-anaconda) is a Python distribution that
includes a large number of standard numeric and scientific computing packages.
Anaconda uses a package manager called ["conda"](http://conda.pydata.org) that has its own
[environment system](http://conda.pydata.org/docs/using/envs.html) similar to Virtualenv.
Anaconda uses a package manager called ["conda"](http://conda.pydata.org) that
has its own [environment system](http://conda.pydata.org/docs/using/envs.html)
similar to Virtualenv.
As with Virtualenv, conda environments keep the dependencies required by
different Python projects in separate places. The Anaconda environment
@ -244,7 +248,8 @@ packages needed by TensorFlow.
Install Anaconda:
Follow the instructions on the [Anaconda download site](https://www.continuum.io/downloads).
Follow the instructions on the [Anaconda download
site](https://www.continuum.io/downloads).
Create a conda environment called `tensorflow`:
@ -264,9 +269,11 @@ Activate the environment and use conda or pip to install TensorFlow inside it.
### Using conda
A community maintained conda package is available [from conda-forge](https://github.com/conda-forge/tensorflow-feedstock).
A community maintained conda package is available [from
conda-forge](https://github.com/conda-forge/tensorflow-feedstock).
Only the CPU version of TensorFlow is available at the moment and can be installed in the conda environment for Python 2 or Python 3.
Only the CPU version of TensorFlow is available at the moment and can be
installed in the conda environment for Python 2 or Python 3.
```bash
$ source activate tensorflow
@ -278,48 +285,50 @@ $ source activate tensorflow
### Using pip
If using pip make sure to use the `--ignore-installed` flag to prevent errors about `easy_install`.
If using pip make sure to use the `--ignore-installed` flag to prevent errors
about `easy_install`.
```bash
$ source activate tensorflow
(tensorflow)$ # Your prompt should change
```
Now, install TensorFlow just as you would for a regular Pip installation. First select the correct binary to install:
Now, install TensorFlow just as you would for a regular Pip installation. First
select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py2-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py2-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py3-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py3-none-any.whl
(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py3-none-any.whl
```
Finally install TensorFlow:
@ -358,15 +367,16 @@ $ source activate tensorflow
### Install IPython
To use tensorflow with IPython it may be necessary to install IPython into the tensorflow environment:
To use tensorflow with IPython it may be necessary to install IPython into the
tensorflow environment:
```bash
$ source activate tensorflow
(tensorflow)$ conda install ipython
```
Similarly, other Python packages like pandas may need to get installed into the tensorflow environment
before they can be used together with tensorflow.
Similarly, other Python packages like pandas may need to get installed into the
tensorflow environment before they can be used together with tensorflow.
## Docker installation
@ -385,7 +395,8 @@ code.
* `gcr.io/tensorflow/tensorflow:latest-devel-gpu`: GPU Binary image plus source
code.
We also have tags with `latest` replaced by a released version (e.g., `0.10.0rc0-gpu`).
We also have tags with `latest` replaced by a released version (e.g.,
`0.10.0-gpu`).
With Docker the installation is as follows:
@ -396,8 +407,8 @@ to allow launching containers without `sudo`.
* Launch a Docker container with the TensorFlow image. The image
gets downloaded automatically on first launch.
See [installing Docker](http://docs.docker.com/engine/installation/) for instructions
on installing Docker on your machine.
See [installing Docker](http://docs.docker.com/engine/installation/) for
instructions on installing Docker on your machine.
After Docker is installed, launch a Docker container with the TensorFlow binary
image as follows.
@ -406,21 +417,25 @@ image as follows.
$ docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow
```
The option `-p 8888:8888` is used to publish the Docker container's internal port to the host machine, in this case to ensure Jupyter notebook connection.
The option `-p 8888:8888` is used to publish the Docker container's internal
port to the host machine, in this case to ensure Jupyter notebook connection.
The format of the port mapping is `hostPort:containerPort`. You can specify any valid port number for the host port but have to use `8888` for the container port portion.
The format of the port mapping is `hostPort:containerPort`. You can specify any
valid port number for the host port but have to use `8888` for the container
port portion.
If you're using a container with GPU support, some additional flags must be passed to expose the GPU device to the container.
If you're using a container with GPU support, some additional flags must be
passed to expose the GPU device to the container.
For NVidia GPU support install latest NVidia drivers and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
Run with
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Run with
```bash
$ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
```
If you have a problem running `nvidia-docker`, then using the default config, we include a
If `nvidia-docker` does not work for you, we include a
[script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docker/docker_run_gpu.sh)
in the repo that passes these flags with the default config, so the command-line
would look like
@ -428,16 +443,19 @@ in the repo with these flags, so the command-line would look like
$ path/to/repo/tensorflow/tools/docker/docker_run_gpu.sh -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
```
For more details see [TensorFlow docker readme](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker).
For more details see [TensorFlow docker
readme](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker).
You can now [test your installation](#test-the-tensorflow-installation) within the Docker container.
You can now [test your installation](#test-the-tensorflow-installation) within
the Docker container.
## Test the TensorFlow installation
### (Optional, Linux) Enable GPU Support
If you installed the GPU version of TensorFlow, you must also install the Cuda
Toolkit 7.5 and cuDNN v4. Please see [Cuda installation](#optional-install-cuda-gpus-on-linux).
Toolkit 7.5 and cuDNN v5. Please see [Cuda
installation](#optional-install-cuda-gpus-on-linux).
You also need to set the `LD_LIBRARY_PATH` and `CUDA_HOME` environment
variables. Consider adding the commands below to your `~/.bash_profile`. These
@ -471,23 +489,27 @@ Hello, TensorFlow!
### Run a TensorFlow demo model
All TensorFlow packages, including the demo models, are installed in the Python library.
The exact location of the Python library depends on your system, but is usually one of:
All TensorFlow packages, including the demo models, are installed in the Python
library. The exact location of the Python library depends on your system, but
is usually one of:
```bash
/usr/local/lib/python2.7/dist-packages/tensorflow
/usr/local/lib/python2.7/site-packages/tensorflow
```
You can find out the directory with the following command (make sure to use the Python you installed TensorFlow to, for example, use `python3` instead of `python` if you installed for Python 3):
You can find out the directory with the following command (make sure to use the
Python you installed TensorFlow to, for example, use `python3` instead of
`python` if you installed for Python 3):
```bash
$ python -c 'import os; import inspect; import tensorflow; print(os.path.dirname(inspect.getfile(tensorflow)))'
```
The simple demo model for classifying handwritten digits from the MNIST dataset
is in the sub-directory `models/image/mnist/convolutional.py`. You can run it from the command
line as follows (make sure to use the Python you installed TensorFlow with):
is in the sub-directory `models/image/mnist/convolutional.py`. You can run it
from the command line as follows (make sure to use the Python you installed
TensorFlow with):
```bash
# Using 'python -m' to find the program in the python search path:
@ -517,8 +539,8 @@ using pip. You'll need pip for that, so install it as described
$ git clone https://github.com/tensorflow/tensorflow
```
Note that these instructions will install the latest master branch
of tensorflow. If you want to install a specific branch (such as a release branch),
Note that these instructions will install the latest master branch of
tensorflow. If you want to install a specific branch (such as a release branch),
pass `-b <branchname>` to the `git clone` command and `--recurse-submodules` for
r0.8 and earlier to fetch the protobuf library that TensorFlow depends on.
@ -553,11 +575,11 @@ $ sudo apt-get install python3-numpy swig python3-dev python3-wheel
#### Optional: Install CUDA (GPUs on Linux)
In order to build or run TensorFlow with GPU support, both NVIDIA's Cuda Toolkit (>= 7.0) and
cuDNN (>= v2) need to be installed.
In order to build or run TensorFlow with GPU support, both NVIDIA's Cuda Toolkit
(>= 7.0) and cuDNN (>= v3) need to be installed.
TensorFlow GPU support requires having a GPU card with NVidia Compute Capability >= 3.0.
Supported cards include but are not limited to:
TensorFlow GPU support requires having a GPU card with NVidia Compute Capability
>= 3.0. Supported cards include but are not limited to:
* NVidia Titan
* NVidia Titan X
@ -580,15 +602,14 @@ Install the toolkit into e.g. `/usr/local/cuda`
https://developer.nvidia.com/cudnn
Download cuDNN v4 (v5 is currently a release candidate and is only supported when
installing TensorFlow from sources).
Download cuDNN v5.
Uncompress and copy the cuDNN files into the toolkit directory. Assuming the
Uncompress and copy the cuDNN files into the toolkit directory. Assuming the
toolkit is installed in `/usr/local/cuda`, run the following commands (edited
to reflect the cuDNN version you downloaded):
``` bash
tar xvzf cudnn-7.5-linux-x64-v4.tgz
tar xvzf cudnn-7.5-linux-x64-v5.1-ga.tgz
sudo cp cuda/include/cudnn.h /usr/local/cuda/include
sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64
sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
@ -600,7 +621,8 @@ We recommend using [homebrew](http://brew.sh) to install the bazel and SWIG
dependencies, and installing python dependencies using easy_install or pip.
Of course you can also install Swig from source without using homebrew. In that
case, be sure to install its dependency [PCRE](http://www.pcre.org) and not PCRE2.
case, be sure to install its dependency [PCRE](http://www.pcre.org) and not
PCRE2.
#### Dependencies
@ -657,7 +679,7 @@ export PATH="$CUDA_HOME/bin:$PATH"
```
Finally, you will also want to install the [CUDA Deep Neural
Network](https://developer.nvidia.com/cudnn) (cuDNN) library which currently
Network](https://developer.nvidia.com/cudnn) (cuDNN v5) library which currently
requires an [Accelerated Computing Developer
Program](https://developer.nvidia.com/accelerated-computing-developer) account.
Once you have it downloaded locally, you can unzip and move the header and
@ -729,10 +751,10 @@ Setting up CUPTI lib64
Configuration finished
```
This creates a canonical set of symbolic links to the Cuda libraries on your system.
Every time you change the Cuda library paths you need to run this step again before
you invoke the bazel build command. For the cuDNN libraries, use '6.5' for R2, '7.0'
for R3, and '4.0.4' for R4-RC.
This creates a canonical set of symbolic links to the Cuda libraries on your
system. Every time you change the Cuda library paths you need to run this step
again before you invoke the bazel build command. For the cuDNN libraries, use
'7.0' for R3, and '4.0.7' for R4.
#### Known issues
@ -749,6 +771,10 @@ this more convenient by including the configure step in our build process.
When building from source, you will still build a pip package and install that.
Please note that building from sources takes a lot of memory by default; if
you want to limit RAM usage, you can add `--local_resources 2048,.5,1.0` when
invoking bazel.
```bash
$ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package
@ -758,7 +784,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.10.0rc0-py2-none-any.whl
$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.10.0-py2-none-any.whl
```
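For example, the memory-limited GPU build described in the note above might
look like this (bazel interprets the three values as available RAM in MB, CPU
cores, and I/O capability):

```bash
$ bazel build -c opt --config=cuda --local_resources 2048,.5,1.0 //tensorflow/tools/pip_package:build_pip_package
```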
## Setting up TensorFlow for Development
@ -826,22 +852,22 @@ If you encounter the following when trying to run a TensorFlow program:
ImportError: libcudart.so.7.0: cannot open shared object file: No such file or directory
```
Make sure you followed the GPU installation
[instructions](#optional-install-cuda-gpus-on-linux). If you built from source,
and you left the Cuda or cuDNN version empty, try specifying them explicitly.
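If you drive the build through `./configure`, one way to make the versions
explicit is via the environment variables the configure step reads (a sketch;
the interactive prompts achieve the same thing, and the exact variable names
are an assumption here):

```bash
$ TF_CUDA_VERSION=7.5 TF_CUDNN_VERSION=5 ./configure
```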
### Protobuf library related issues
The TensorFlow pip package depends on protobuf pip package version
3.0.0b2. Protobuf's pip package downloaded from [PyPI](https://pypi.python.org)
(when running `pip install protobuf`) is a Python-only library that has
Python implementations of proto serialization/deserialization which can be
10x-50x slower than the C++ implementation. Protobuf also supports a binary
extension for the Python package that contains fast C++ based proto parsing.
This extension is not available in the standard Python-only PIP package. We have
created a custom binary pip package for protobuf that contains the binary
extension. Follow these instructions to install the custom binary protobuf pip
package:
```bash
# Ubuntu/Linux 64-bit:
@ -851,7 +877,7 @@ $ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/prot
$ pip install --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/protobuf-3.0.0-cp27-cp27m-macosx_10_11_x86_64.whl
```
And for Python 3:
```bash
# Ubuntu/Linux 64-bit:
@ -1021,3 +1047,22 @@ installed, such as:
```bash
$ pip install --upgrade protobuf
```
### Mac OS X: Segmentation fault when importing tensorflow
On Mac OS X, you might get the following error when importing tensorflow in Python:
```
>>> import tensorflow
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcublas.dylib locally
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcudnn.dylib locally
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcufft.dylib locally
"import tensorflow" terminated by signal SIGSEGV (Address boundary error)
```
This is because, by default, CUDA creates libcuda.dylib, while TensorFlow tries
to load libcuda.1.dylib. This can be resolved by creating a symbolic link:
```bash
ln -sf /usr/local/cuda/lib/libcuda.dylib /usr/local/cuda/lib/libcuda.1.dylib
```
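After creating the link, importing tensorflow should no longer crash; a quick
check:

```bash
python -c "import tensorflow"
```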
@ -1000,6 +1000,11 @@ cuda_op_kernel.cu.o -I $TF_INC -fPIC -lcudart
`cuda_op_kernel.so` produced above can be loaded as usual in Python, using the
`tf.load_op_library` function.
Note that if your CUDA libraries are not installed in `/usr/local/lib64`,
you'll need to specify the path explicitly in the second (g++) command above.
For example, add `-L /usr/local/cuda-8.0/lib64/` if your CUDA is installed in
`/usr/local/cuda-8.0`.
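Concretely, the second (g++) command might then look like this (a sketch,
assuming `TF_INC` is set as earlier in this guide):

```bash
g++ -std=c++11 -shared -o cuda_op_kernel.so cuda_op_kernel.cc \
  cuda_op_kernel.cu.o -I $TF_INC -fPIC -lcudart -L /usr/local/cuda-8.0/lib64/
```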
## Implement the gradient in Python
Given a graph of ops, TensorFlow uses automatic differentiation
@ -172,7 +172,7 @@ You can then do any preprocessing of these examples you want. This would be any
processing that doesn't depend on trainable parameters. Examples include
normalization of your data, picking a random slice, adding noise or distortions,
etc. See
[`tensorflow/models/image/cifar10/cifar10.py`](https://www.tensorflow.org/code/tensorflow/models/image/cifar10/cifar10.py)
[`tensorflow/models/image/cifar10/cifar10_input.py`](https://www.tensorflow.org/code/tensorflow/models/image/cifar10/cifar10_input.py)
for an example.
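For instance, per-example distortions at this stage might look like the
following (a sketch with assumed tensor names, not the exact cifar10 code):

```python
# `image` is a decoded [height, width, 3] tensor produced by the reader.
distorted = tf.random_crop(image, [24, 24, 3])
distorted = tf.image.random_flip_left_right(distorted)
distorted = tf.image.random_brightness(distorted, max_delta=63)
```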
### Batching
@ -253,7 +253,7 @@ summary to the graph that indicates how full the example queue is. If you have
enough reading threads, that summary will stay above zero. You can
[view your summaries as training progresses using TensorBoard](../../how_tos/summaries_and_tensorboard/index.md).
### Creating threads to prefetch using `QueueRunner` objects
The short version: many of the `tf.train` functions listed above add
[`QueueRunner`](../../api_docs/python/train.md#QueueRunner) objects to your
@ -117,10 +117,9 @@ class PTBModel(object):
#
# The alternative version of the code below is:
#
# from tensorflow.models.rnn import rnn
# inputs = [tf.squeeze(input_, [1])
# for input_ in tf.split(1, num_steps, inputs)]
# outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
# outputs, state = tf.nn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
@ -73,7 +73,7 @@ def gunzip_file(gz_path, new_path):
def get_wmt_enfr_train_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
train_path = os.path.join(directory, "giga-fren.release2")
train_path = os.path.join(directory, "giga-fren.release2.fixed")
if not (gfile.Exists(train_path +".fr") and gfile.Exists(train_path +".en")):
corpus_file = maybe_download(directory, "training-giga-fren.tar",
_WMT_ENFR_TRAIN_URL)
@ -265,13 +265,6 @@ def rank_internal(input, name=None, optimize=True):
return gen_array_ops.rank(input, name=name)
# DEPRECATED use init_ops.zeros_initializer
# TODO(irving) Move it to init_ops.py
def zeros_initializer(shape, dtype=dtypes.float32, partition_info=None):
"""An adaptor for zeros() to match the Initializer spec."""
return zeros(shape, dtype)
def _SliceHelper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
@ -61,8 +61,9 @@ def _assert_float_dtype(dtype):
return dtype
# TODO(irving) Move array_ops.zeros_initializer here.
zeros_initializer = array_ops.zeros_initializer
def zeros_initializer(shape, dtype=dtypes.float32, partition_info=None):
"""An adaptor for zeros() to match the Initializer spec."""
return array_ops.zeros(shape, dtype)
def ones_initializer(shape, dtype=dtypes.float32, partition_info=None):
@ -138,6 +138,8 @@ common math computations that reduce various dimensions of a tensor.
@@accumulate_n
@@einsum
## Scan
TensorFlow provides several operations that you can use to perform scans
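For example, a running total can be written with `tf.scan` (a minimal sketch):

```python
import tensorflow as tf

elems = tf.constant([1, 2, 3, 4])
# The accumulator is carried across the first axis: [1, 3, 6, 10].
running_total = tf.scan(lambda acc, x: acc + x, elems)
```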
@ -495,7 +495,7 @@ class LSTMCell(RNNCell):
b = vs.get_variable(
"B", shape=[4 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
initializer=init_ops.zeros_initializer, dtype=dtype)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [inputs, m_prev])
@ -23,6 +23,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
@ -86,3 +88,75 @@ def lbeta(x, name='lbeta'):
return empty_lbeta()
else:
return control_flow_ops.cond(is_empty, empty_lbeta, nonempty_lbeta)
def einsum(axes, *inputs):
  """A generalized contraction between tensors of arbitrary dimension.

  Like numpy.einsum.
  """
  match = re.match('([a-z,]+)->([a-z]+)', axes)
  assert match, \
      "Indices have incorrect format: %s" % axes
  inputs = list(inputs)
  idx_in = match.group(1).split(',')
  idx_out = match.group(2)
  idx_all = set(''.join(idx_in))
  assert len(idx_in) == len(inputs), \
      "Expected %d inputs but only got %d" % (len(idx_in), len(inputs))
  # transpose inputs so axes are in alphabetical order
  for i, (input_, axes_) in enumerate(zip(inputs, idx_in)):
    assert input_.get_shape().ndims == len(axes_), \
        "Input %d with axes %s has incorrect" \
        " number of dimensions (expected %d, got %d)" % (
            i, axes_, len(axes_), input_.get_shape().ndims
        )
    sorted_idx = sorted(axes_)
    if list(axes_) != sorted_idx:
      permuted = [axes_.find(ax) for ax in sorted_idx]
      inputs[i] = array_ops.transpose(input_, permuted)
      idx_in[i] = sorted_idx
  missing_idx = set(idx_out).difference(idx_all)
  assert not missing_idx, \
      "Unknown output axes: %s" % missing_idx
  reduction_idx = []
  shapes = [[dim if dim else -1
             for dim in tensor.get_shape().as_list()]
            for tensor in inputs]
  # validate shapes for broadcasting
  for j, ax in enumerate(sorted(idx_all)):
    dims = []
    for i, idx in enumerate(idx_in):
      if ax not in idx:
        shapes[i].insert(j, 1)
      else:
        dim = shapes[i][j]
        if isinstance(dim, int) and dim > 1:
          dims.append(dim)
    assert len(set(dims)) <= 1, \
        "Dimension mismatch on axis: %s" % ax
    if ax not in idx_out:
      reduction_idx.append(j)
  # reshape, multiply
  expanded_inputs = [array_ops.reshape(input_, shape)
                     for input_, shape in zip(inputs, shapes)]
  expanded_output = 1
  for input_ in expanded_inputs:
    expanded_output *= input_
  # contract
  return math_ops.reduce_sum(expanded_output, reduction_idx)
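# Usage sketch (added for illustration; mirrors a `simple_cases` entry in the
# tests below):
#
#   a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#   b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
#   c = tf.einsum('ij,jk->ik', a, b)  # ordinary matrix product, like tf.matmul(a, b)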
@ -111,5 +111,112 @@ class LBetaTestGpu(LBetaTest):
_use_gpu = True
class EinsumTest(tf.test.TestCase):

  # standard cases
  simple_cases = [
      'ij,jk->ik',
      'ijk,jklm->il',
      'ij,jk,kl->il',
      'ijk->i',
  ]

  # where axes are not in order
  misordered_cases = [
      'ji,kj->ik',
      'ikl,kji->kl',
      'klj,lki->ij',
  ]

  # more than two arguments
  multiarg_cases = [
      'ijk,ijl,ikl->i',
      'i,ijk,j->k',
      'ij,ij,jk,kl->il',
  ]

  invalid_cases = [
      # bad formats
      'ijk ijk',
      'ij,jk,kl',
      'ij->',
      # axis in output that does not exist
      'ij,jk->im',
      # incorrect number of dimensions
      'ij,jkl->kl',
  ]

  dim_mismatch_cases = [
      ('ijk,jkl->il',
       [(2, 3, 4), (3, 5, 6)]),
  ]

  def test_simple(self):
    for case in self.simple_cases:
      self.run_test(case)

  def test_misordered(self):
    for case in self.misordered_cases:
      self.run_test(case)

  def test_multiarg(self):
    for case in self.multiarg_cases:
      self.run_test(case)

  def test_invalid(self):
    for axes in self.invalid_cases:
      result = None
      inputs = [
          tf.placeholder(tf.float32, shape=(3, 4)),
          tf.placeholder(tf.float32, shape=(3, 4)),
      ]
      try:
        result = tf.einsum(axes, *inputs)
      except AssertionError as e:
        print(e)
      assert result is None, \
          "An exception should have been thrown."

  def test_dim_mismatch(self):
    for axes, input_shapes in self.dim_mismatch_cases:
      inputs = [
          tf.placeholder(tf.float32, shape=shape)
          for shape in input_shapes
      ]
      result = None
      try:
        result = tf.einsum(axes, *inputs)
      except AssertionError:
        pass
      assert result is None, "An exception should have been thrown."

  def run_test(self, axes):
    all_axes = {ax: np.random.randint(4, 12)
                for ax in axes if ax.isalpha()}
    input_vals = []
    input_axes, _, _ = axes.partition('->')
    for idx in input_axes.split(','):
      shape = [all_axes[ax] for ax in idx]
      input_vals.append(np.random.random(shape))
    input_tensors = [tf.constant(val) for val in input_vals]
    output_tensor = tf.einsum(axes, *input_tensors)
    with self.test_session():
      output_value = output_tensor.eval()
    correct_value = np.einsum(axes, *input_vals)
    err = np.abs(correct_value - output_value).max()
    print(axes, err)
    assert err < 1e-8


if __name__ == '__main__':
  tf.test.main()
@ -335,7 +335,7 @@ class SyncReplicasOptimizer(optimizer.Optimizer):
local_step = array_ops.reshape(local_step, ())
is_stale = math_ops.less(local_step, global_step)
with ops.name_scope(None, self._name, inputs):
with ops.name_scope(name, self._name, inputs) as name:
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
@ -411,7 +411,8 @@ class SyncReplicasOptimizer(optimizer.Optimizer):
with ops.control_dependencies([final_train_ops]):
token = sync_token_queue.dequeue()
train_op = state_ops.scatter_update(self._local_steps,
self._replica_id, token)
self._replica_id,
token, name=name)
with ops.control_dependencies(clear_queue_ops):
# Sync_op needs to insert tokens to the token queue at the end of the
@ -0,0 +1,77 @@
<link rel="import" href="../polymer/polymer.html">
<link rel="import" href="../tf-dashboard-common/tensorboard-color.html">
<!--
tf-option-selector is a simple component that has buttons as content and
provides a "selectedId" property that is one of the IDs of the buttons inside it.
-->
<dom-module id="tf-option-selector">
<template>
<div id="wrap">
<h3>[[name]]</h3>
<div class="content-wrapper"><content></content></div>
</div>
<style>
.content-wrapper ::content > * {
width: 30%;
font-size: 13px;
background: none;
margin-top: 10px;
color: var(--tb-ui-dark-accent);
}
.content-wrapper ::content :first-of-type {
margin-left: 0;
}
.content-wrapper ::content .selected {
background-color: var(--tb-ui-dark-accent);
color: white!important;
}
h3 {
color: var(--paper-grey-800);
margin: 0;
font-weight: normal;
font-size: 14px;
margin-bottom: 5px;
display: block;
pointer-events: none;
}
</style>
</template>
<script>
Polymer({
is: "tf-option-selector",
properties: {
name: String,
selectedId: {
type: String,
notify: true,
observer: '_selectedIdChanged'
}
},
attached: function() {
this.async(function() {
this.getEffectiveChildren().forEach(function(node) {
this.listen(node, 'tap', '_selectTarget');
}.bind(this));
});
},
_selectTarget: function(e) {
this.selectedId = e.currentTarget.id;
},
_selectedIdChanged: function() {
var selected = this.queryEffectiveChildren('#' + this.selectedId);
if (!selected) {
return;
}
this.getEffectiveChildren().forEach(function(node) {
node.classList.remove("selected");
});
selected.classList.add("selected");
}
});
</script>
</dom-module>
@ -0,0 +1,144 @@
<!--
@license
Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<link rel="import" href="../polymer/polymer.html">
<link rel="import" href="../paper-dropdown-menu/paper-dropdown-menu.html">
<link rel="import" href="../paper-listbox/paper-listbox.html">
<link rel="import" href="../paper-item/paper-item.html">
<dom-module id='vz-projector-data-loader'>
<template>
<style>
:host {
}
input[type=file] {
display: none;
}
.file-name {
margin-right: 10px;
}
.dirs {
display: flex;
flex-direction: column;
margin-right: 10px;
line-height: 20px;
}
.dir {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
paper-item {
--paper-item-disabled: {
border-bottom: 1px solid black;
justify-content: center;
font-size: 12px;
line-height: normal;
min-height: 0px;
};
}
</style>
<!-- Server-mode UI -->
<div class="server-controls" style="display:none;">
<div class="dirs">
<div class="dir">Checkpoint: <span id="checkpoint-file"></span></div>
<div class="dir">Metadata: <span id="metadata-file"></span></div>
</div>
<!-- List of tensors in checkpoint -->
<paper-dropdown-menu noink no-animations label="[[getNumTensorsLabel(tensorNames)]] found">
<paper-listbox attr-for-selected="value" class="dropdown-content" selected="{{selectedTensor}}">
<template is="dom-repeat" items="[[tensorNames]]">
<paper-item style="justify-content: space-between;" value="[[item.name]]" label="[[item.name]]">
[[item.name]]
<span style="margin-left: 5px; color:gray; font-size: 12px;">[[item.shape.0]]x[[item.shape.1]]</span>
</paper-item>
</template>
</paper-listbox>
</paper-dropdown-menu>
</div>
<!-- Standalone-mode UI -->
<div class="standalone-controls" style="display:none;">
<!-- Upload buttons -->
<div style="display: flex; justify-content: space-between;">
<!-- Upload data -->
<div>
<button id="upload" title="Upload a TSV file" class="ink-button">Upload data</button>
<span id="file-name" class="file-name dir"></span>
<input type="file" id="file" name="file"/>
</div>
<!-- Upload metadata -->
<div>
<button id="upload-metadata" title="Upload a TSV metadata file" class="ink-button">Upload Metadata</button>
<span id="file-metadata-name" class="file-name dir"></span>
<input type="file" id="file-metadata" name="file-metadata"/>
</div>
</div>
<!-- Demo datasets -->
<paper-dropdown-menu style="width: 100%" noink no-animations label="Select a demo dataset">
<paper-listbox attr-for-selected="value" class="dropdown-content" selected="{{selectedDemo}}">
<paper-item value="smartreply_full">SmartReply All</paper-item>
<paper-item value="smartreply_5k">SmartReply 5K</paper-item>
<paper-item value="wiki_5k">Glove Wiki 5K</paper-item>
<paper-item value="wiki_10k">Glove Wiki 10K</paper-item>
<paper-item value="wiki_40k">Glove Wiki 40K</paper-item>
<paper-item value="mnist_10k">MNIST 10K</paper-item>
<paper-item value="iris">Iris</paper-item>
</paper-listbox>
</paper-dropdown-menu>
</div>
<!-- Label by -->
<template is="dom-if" if="[[labelOptions.length]]">
<paper-dropdown-menu style="width: 100%" noink no-animations label="Label by">
<paper-listbox attr-for-selected="value" class="dropdown-content" selected="{{labelOption}}">
<template is="dom-repeat" items="[[labelOptions]]">
<paper-item style="justify-content: space-between;" value="[[item]]" label="[[item]]">
[[item]]
</paper-item>
</template>
</paper-listbox>
</paper-dropdown-menu>
</template>
<!-- Color by -->
<template is="dom-if" if="[[colorOptions.length]]">
<paper-dropdown-menu id="colorby" style="width: 100%" noink no-animations label="Color by">
<paper-listbox attr-for-selected="value" class="dropdown-content" selected="{{colorOption}}">
<template is="dom-repeat" items="[[colorOptions]]">
<paper-item style="justify-content: space-between;" class$="[[getSeparatorClass(item.isSeparator)]]" value="[[item]]" label="[[item.name]]" disabled="[[item.isSeparator]]">
[[item.name]]
<span style="margin-left: 5px; color:gray; font-size: 12px;">[[item.desc]]</span>
</paper-item>
</template>
</paper-listbox>
</paper-dropdown-menu>
</template>
<!-- Closing global template -->
</template>
</dom-module>
@ -0,0 +1,500 @@
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
import {runAsyncTask, updateMessage} from './async';
import {DataPoint, DataSet, DatasetMetadata, DataSource} from './data';
import {PolymerElement} from './vz-projector-util';
/** Prefix added to the http requests when asking the server for data. */
const DATA_URL = 'data';
type DemoDataset = {
fpath: string; metadata_path?: string; metadata?: DatasetMetadata;
};
type Metadata = {
[key: string]: (number|string);
};
/** List of compiled demo datasets for showing the capabilities of the tool. */
const DEMO_DATASETS: {[name: string]: DemoDataset} = {
'wiki_5k': {
fpath: 'wiki_5000_50d_tensors.ssv',
metadata_path: 'wiki_5000_50d_labels.ssv'
},
'wiki_10k': {
fpath: 'wiki_10000_100d_tensors.ssv',
metadata_path: 'wiki_10000_100d_labels.ssv'
},
'wiki_40k': {
fpath: 'wiki_40000_100d_tensors.ssv',
metadata_path: 'wiki_40000_100d_labels.ssv'
},
'smartreply_5k': {
fpath: 'smartreply_5000_256d_tensors.tsv',
metadata_path: 'smartreply_5000_256d_labels.tsv'
},
'smartreply_full': {
fpath: 'smartreply_full_256d_tensors.tsv',
metadata_path: 'smartreply_full_256d_labels.tsv'
},
'mnist_10k': {
fpath: 'mnist_10k_784d_tensors.tsv',
metadata_path: 'mnist_10k_784d_labels.tsv',
metadata: {
image:
{sprite_fpath: 'mnist_10k_sprite.png', single_image_dim: [28, 28]}
},
},
'iris': {fpath: 'iris_tensors.tsv', metadata_path: 'iris_labels.tsv'}
};
/** Maximum number of colors supported in the color map. */
const NUM_COLORS_COLOR_MAP = 20;
interface ServerInfo {
tensors: {[name: string]: [number, number]};
tensors_file: string;
checkpoint_file: string;
checkpoint_dir: string;
metadata_file: string;
}
let DataLoaderPolymer = PolymerElement({
is: 'vz-projector-data-loader',
properties: {
dataSource: {
type: Object, // DataSource
notify: true
},
selectedDemo: {type: String, value: 'wiki_5k', notify: true},
selectedTensor: {type: String, notify: true},
labelOption: {type: String, notify: true},
colorOption: {type: Object, notify: true},
// Private.
tensorNames: Array
}
});
export type ColorOption = {
name: string; desc?: string; map?: (value: string | number) => string;
isSeparator?: boolean;
};
class DataLoader extends DataLoaderPolymer {
dataSource: DataSource;
selectedDemo: string;
labelOption: string;
labelOptions: string[];
colorOption: ColorOption;
colorOptions: ColorOption[];
selectedTensor: string;
tensorNames: {name: string, shape: number[]}[];
private dom: d3.Selection<any>;
ready() {
this.dom = d3.select(this);
if (this.dataSource) {
// There is data already.
return;
}
// Check to see if there is a server.
d3.json(`${DATA_URL}/info`, (err, serverInfo) => {
if (err) {
// No server was found, thus operate in standalone mode.
this.setupStandaloneMode();
return;
}
// Server was found, thus show the checkpoint dir and the tensors.
this.setupServerMode(serverInfo);
});
}
getSeparatorClass(isSeparator: boolean): string {
return isSeparator ? 'separator' : null;
}
private setupServerMode(info: ServerInfo) {
// Display the server-mode controls.
this.dom.select('.server-controls').style('display', null);
this.dom.select('#checkpoint-file')
.text(info.checkpoint_file)
.attr('title', info.checkpoint_file);
this.dom.select('#metadata-file')
.text(info.metadata_file)
.attr('title', info.metadata_file);
// Handle the list of checkpoint tensors.
this.dom.on('selected-tensor-changed', () => {
this.selectedTensorChanged(this.selectedTensor);
});
let names = Object.keys(info.tensors)
.filter(name => {
let shape = info.tensors[name];
return shape.length == 2 && shape[0] > 1 && shape[1] > 1;
})
.sort((a, b) => info.tensors[b][0] - info.tensors[a][0]);
this.tensorNames =
names.map(name => { return {name, shape: info.tensors[name]}; });
}
private updateMetadataUI(columnStats: ColumnStats[]) {
// Label by options.
let labelIndex = -1;
this.labelOptions = columnStats.length > 1 ? columnStats.map((stats, i) => {
// Make the default label by the first non-numeric column.
if (!stats.isNumeric && labelIndex == -1) {
labelIndex = i;
}
return stats.name;
}) :
['label'];
this.labelOption = this.labelOptions[Math.max(0, labelIndex)];
// Color by options.
let standardColorOption: ColorOption[] = [
{name: 'No color map'},
// TODO(smilkov): Implement this.
//{name: 'Distance of neighbors',
// desc: 'How far is each point from its neighbors'}
];
let metadataColorOption: ColorOption[] =
columnStats
.filter(stats => {
return !stats.tooManyUniqueValues || stats.isNumeric;
})
.map(stats => {
let map: (v: string|number) => string;
if (!stats.tooManyUniqueValues) {
let scale = d3.scale.category20();
let range = scale.range();
// Re-order the range.
let newRange = range.map((color, i) => {
let index = (i * 2) % (range.length - 1);
if (index == 0) {
index = range.length - 1;
}
return range[index];
});
scale.range(newRange).domain(stats.uniqueValues);
map = scale;
} else {
map = d3.scale.linear<string>()
.domain([stats.min, stats.max])
.range(['white', 'black']);
}
let desc = stats.tooManyUniqueValues ?
'gradient' :
stats.uniqueValues.length + ' colors';
return {name: stats.name, desc: desc, map: map};
});
if (metadataColorOption.length > 0) {
// Add a separator line between built-in color maps
// and those based on metadata columns.
standardColorOption.push({name: 'Metadata', isSeparator: true});
}
this.colorOptions = standardColorOption.concat(metadataColorOption);
this.colorOption = this.colorOptions[0];
}
private setupStandaloneMode() {
// Display the standalone UI controls.
this.dom.select('.standalone-controls').style('display', null);
// Demo dataset dropdown
let demoDatasetChanged = (demoDataSet: DemoDataset) => {
if (demoDataSet == null) {
return;
}
this.dom.selectAll('.file-name').style('display', 'none');
let separator = demoDataSet.fpath.substr(-3) == 'tsv' ? '\t' : ' ';
fetchDemoData(`${DATA_URL}/${demoDataSet.fpath}`, separator)
.then(points => {
let p1 = demoDataSet.metadata_path ?
new Promise<ColumnStats[]>((resolve, reject) => {
updateMessage('Fetching metadata...');
d3.text(
`${DATA_URL}/${demoDataSet.metadata_path}`,
(err: Error, rawMetadata: string) => {
if (err) {
console.error(err);
reject(err);
return;
}
resolve(parseAndMergeMetadata(rawMetadata, points));
});
}) :
null;
let p2 = demoDataSet.metadata && demoDataSet.metadata.image ?
fetchImage(
`${DATA_URL}/${demoDataSet.metadata.image.sprite_fpath}`) :
null;
Promise.all([p1, p2]).then(values => {
this.updateMetadataUI(values[0]);
let dataSource = new DataSource();
dataSource.originalDataSet = new DataSet(points);
dataSource.spriteImage = values[1];
dataSource.metadata = demoDataSet.metadata;
this.dataSource = dataSource;
});
});
};
this.dom.on('selected-demo-changed', () => {
demoDatasetChanged(DEMO_DATASETS[this.selectedDemo]);
});
demoDatasetChanged(DEMO_DATASETS[this.selectedDemo]);
// Show and setup the upload button.
let fileInput = this.dom.select('#file');
fileInput.on('change', () => {
let file: File = (<any>d3.event).target.files[0];
this.dom.select('#file-name')
.style('display', null)
.text(file.name)
.attr('title', file.name);
// Clear out the value of the file chooser. This ensures that if the user
// selects the same file, we'll re-read it.
(<any>d3.event).target.value = '';
// Clear the value of the datasets dropdown.
this.selectedDemo = null;
let fileReader = new FileReader();
fileReader.onload = evt => {
let str: string = (evt.target as any).result;
parseTensors(str).then(data => {
let dataSource = new DataSource();
dataSource.originalDataSet = new DataSet(data);
this.dataSource = dataSource;
});
};
fileReader.readAsText(file);
});
let uploadButton = this.dom.select('#upload');
uploadButton.on(
'click', () => { (<HTMLInputElement>fileInput.node()).click(); });
// Show and setup the upload metadata button.
let fileMetadataInput = this.dom.select('#file-metadata');
fileMetadataInput.on('change', () => {
let file: File = (<any>d3.event).target.files[0];
this.dom.select('#file-metadata-name')
.style('display', null)
.text(file.name)
.attr('title', file.name);
// Clear out the value of the file chooser. This ensures that if the user
// selects the same file, we'll re-read it.
(<any>d3.event).target.value = '';
// Clear the value of the datasets dropdown.
this.selectedDemo = null;
let fileReader = new FileReader();
fileReader.onload = evt => {
let str: string = (evt.target as any).result;
parseAndMergeMetadata(str, this.dataSource.originalDataSet.points)
.then(columnStats => {
this.updateMetadataUI(columnStats);
// Must make a shallow copy, otherwise polymer will not
// fire the 'data-changed' event, even if we explicitly
// call this.fire().
this.dataSource = this.dataSource.makeShallowCopy();
});
};
fileReader.readAsText(file);
});
let uploadMetadataButton = this.dom.select('#upload-metadata');
uploadMetadataButton.on('click', () => {
(<HTMLInputElement>fileMetadataInput.node()).click();
});
}
private selectedTensorChanged(name: string) {
// Get the tensor.
updateMessage('Fetching tensor values...');
d3.text(`${DATA_URL}/tensor?name=${name}`, (err: Error, tsv: string) => {
if (err) {
console.error(err);
return;
}
parseTensors(tsv).then(dataPoints => {
updateMessage('Fetching metadata...');
d3.text(`${DATA_URL}/metadata`, (err: Error, rawMetadata: string) => {
if (err) {
console.error(err);
return;
}
parseAndMergeMetadata(rawMetadata, dataPoints).then(columnStats => {
this.updateMetadataUI(columnStats);
let dataSource = new DataSource();
dataSource.originalDataSet = new DataSet(dataPoints);
this.dataSource = dataSource;
});
});
});
});
}
private getNumTensorsLabel(tensorNames: string[]) {
return tensorNames.length === 1 ? '1 tensor' :
tensorNames.length + ' tensors';
}
}
function fetchImage(url: string): Promise<HTMLImageElement> {
return new Promise<HTMLImageElement>((resolve, reject) => {
let image = new Image();
image.onload = () => resolve(image);
image.onerror = (err) => reject(err);
image.src = url;
});
}
/** Makes a network request for a delimited text file. */
function fetchDemoData(url: string, separator: string): Promise<DataPoint[]> {
return new Promise<DataPoint[]>((resolve, reject) => {
updateMessage('Fetching tensors...');
d3.text(url, (error: Error, dataString: string) => {
if (error) {
console.error(error);
updateMessage('Error loading data.');
reject(error);
} else {
parseTensors(dataString, separator).then(data => resolve(data));
}
});
});
}
/** Parses a tsv text file. */
function parseTensors(content: string, delim = '\t'): Promise<DataPoint[]> {
let data: DataPoint[] = [];
let numDim: number;
return runAsyncTask('Parsing tensors...', () => {
let lines = content.split('\n');
lines.forEach(line => {
line = line.trim();
if (line == '') {
return;
}
let row = line.split(delim);
let dataPoint: DataPoint = {
metadata: {},
vector: null,
dataSourceIndex: data.length,
projections: null,
projectedPoint: null
};
// If the first value is not a number, take it as the label.
if (isNaN(row[0] as any) || numDim == row.length - 1) {
dataPoint.metadata['label'] = row[0];
dataPoint.vector = row.slice(1).map(Number);
} else {
dataPoint.vector = row.map(Number);
}
data.push(dataPoint);
if (numDim == null) {
numDim = dataPoint.vector.length;
}
if (numDim != dataPoint.vector.length) {
updateMessage('Parsing failed. Vector dimensions do not match');
throw Error('Parsing failed');
}
if (numDim <= 1) {
updateMessage(
'Parsing failed. Found a vector with only one dimension?');
throw Error('Parsing failed');
}
});
return data;
});
}
/** Statistics for a metadata column. */
type ColumnStats = {
name: string; isNumeric: boolean; tooManyUniqueValues: boolean;
uniqueValues?: string[];
min: number;
max: number;
};
function parseAndMergeMetadata(
content: string, data: DataPoint[]): Promise<ColumnStats[]> {
return runAsyncTask('Parsing metadata...', () => {
let lines = content.split('\n').filter(line => line.trim().length > 0);
let hasHeader = (lines.length - 1 == data.length);
// Dimension mismatch.
if (lines.length != data.length && !hasHeader) {
throw Error('Dimensions do not match');
}
// If the first row doesn't contain metadata keys, we assume that the values
// are labels.
let columnNames: string[] = ['label'];
if (hasHeader) {
columnNames = lines[0].split('\t');
lines = lines.slice(1);
}
let columnStats: ColumnStats[] = columnNames.map(name => {
return {
name: name,
isNumeric: true,
tooManyUniqueValues: false,
min: Number.POSITIVE_INFINITY,
max: Number.NEGATIVE_INFINITY
};
});
let setOfValues = columnNames.map(() => d3.set());
lines.forEach((line: string, i: number) => {
let rowValues = line.split('\t');
data[i].metadata = {};
columnNames.forEach((name: string, colIndex: number) => {
let value = rowValues[colIndex];
let set = setOfValues[colIndex];
let stats = columnStats[colIndex];
data[i].metadata[name] = value;
// Update stats.
if (!stats.tooManyUniqueValues) {
set.add(value);
if (set.size() > NUM_COLORS_COLOR_MAP) {
stats.tooManyUniqueValues = true;
}
}
if (isNaN(value as any)) {
stats.isNumeric = false;
} else {
stats.min = Math.min(stats.min, +value);
stats.max = Math.max(stats.max, +value);
}
});
});
columnStats.forEach((stats, colIndex) => {
let set = setOfValues[colIndex];
if (!stats.tooManyUniqueValues) {
stats.uniqueValues = set.values();
}
});
return columnStats;
});
}
document.registerElement(DataLoader.prototype.is, DataLoader);
@ -20,7 +20,7 @@ RUN /var/gcloud/google-cloud-sdk/bin/gcloud components install kubectl
# Install nightly TensorFlow pip
# TODO(cais): Should we build it locally instead?
RUN pip install \
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Copy test files
COPY scripts /var/tf-dist-test/scripts
@ -36,6 +36,6 @@ RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
# Install TensorFlow CPU version from nightly build.
RUN pip --no-cache-dir install \
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
ADD . /var/tf_dist_test
@ -133,7 +133,7 @@ timeout ${TIMEOUT} python "${MNIST_REPLICA}" \
# Get N_PS by PS_HOSTS
N_PS=$(echo ${PS_HOSTS} | awk -F "," '{printf NF}')
# Replace the delimiter with " "
PS_ARRAY=$(echo ${PS_HOSTS} | awk -F "," '{for(i=1;i<=NF;i++){printf $i" "}}')
PS_ARRAY=($(echo ${PS_HOSTS} | awk -F "," '{for(i=1;i<=NF;i++){printf $i" "}}'))
# Run a number of ps in parallel. In general, we only set 1 ps.
echo "${N_PS} ps process(es) running in parallel..."
@ -166,7 +166,7 @@ fi
# Get N_WORKERS by WORKER_HOSTS
N_WORKERS=$(echo ${WORKER_HOSTS} | awk -F "," '{printf NF}')
# Replace the delimiter with " "
WORKER_ARRAY=$(echo ${WORKER_HOSTS} | awk -F "," '{for(i=1;i<=NF;i++){printf $i" "}}')
WORKER_ARRAY=($(echo ${WORKER_HOSTS} | awk -F "," '{for(i=1;i<=NF;i++){printf $i" "}}'))
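# Illustration (not part of the original script): the added parentheses matter.
# Without them the command substitution yields a single string, not a bash array:
#   AS_STRING=$(echo "a,b" | awk -F "," '{for(i=1;i<=NF;i++){printf $i" "}}')   # one string
#   AS_ARRAY=($(echo "a,b" | awk -F "," '{for(i=1;i<=NF;i++){printf $i" "}}'))  # ${#AS_ARRAY[@]} == 2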
# Run a number of workers in parallel
echo "${N_WORKERS} worker process(es) running in parallel..."
@ -36,7 +36,7 @@ RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
# Install TensorFlow CPU version from nightly build
RUN pip --no-cache-dir install \
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
@ -42,7 +42,7 @@ RUN pip install --upgrade pandas==0.18.1
# Install TensorFlow CPU version.
RUN pip --no-cache-dir install \
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
ENV TENSORFLOW_VERSION 0.10.0rc0
ENV TENSORFLOW_VERSION 0.10.0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
@ -65,7 +65,7 @@ RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/root/.bazelrc
ENV BAZELRC /root/.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.3.0
ENV BAZEL_VERSION 0.3.1
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@ -66,7 +66,7 @@ RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/root/.bazelrc
ENV BAZELRC /root/.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.3.0
ENV BAZEL_VERSION 0.3.1
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
ENV TENSORFLOW_VERSION 0.10.0rc0
ENV TENSORFLOW_VERSION 0.10.0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -85,7 +85,6 @@ write the graph to a file.
@@TensorShapeUtils
@@PartialTensorShape
@@PartialTensorShapeUtils
@@TF_Buffer
## Thread
@ -16,7 +16,7 @@ RUN ./install_google_cloud_sdk.bash --disable-prompts --install-dir=/var/gcloud
# Install nightly TensorFlow pip
RUN pip install \
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
# Copy test files
RUN mkdir -p /gcs-smoke/python
@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -30,6 +30,7 @@ from __future__ import print_function
import argparse
import json
import os
import subprocess
import shutil
@ -111,7 +112,10 @@ def configure(src_base_path, debug=False):
if src is None:
open(os.path.join(gen_path, target), "w").write("")
else:
os.symlink(src, os.path.join(gen_path, target))
if hasattr(os, 'symlink'):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
if debug:
@ -157,9 +161,8 @@ def generate(arglist):
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
strs["tf_git_version"] = os.popen(
"git -C \"%s\" describe --long --dirty --tags" %
(data["path"],)).read().strip()
strs["tf_git_version"] = subprocess.check_output(
["git", "-C", data["path"], "describe", "--long", "--dirty", "--tags"]).strip()
# TODO(aselle): Check for escaping
cpp_file = "\n".join("const char* %s() {return \"%s\";}" % (x, y)
for x, y in strs.items())
@ -177,7 +180,7 @@ def raw_generate(output_file):
"""
strs = {"tf_compiler_version": "__VERSION__"}
version = os.popen("git describe --long --dirty --tags").read().strip()
version = subprocess.check_output(["git", "describe", "--long", "--dirty", "--tags"]).strip()
version = version if version else "unknown"
strs["tf_git_version"] = version
cpp_file = "\n".join("const char* %s() {return \"%s\";}" % (x, y)
@ -27,7 +27,7 @@ from setuptools import find_packages, setup, Command, Extension
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
_VERSION = '0.10.0rc0'
_VERSION = '0.10.0'
numpy_version = "1.8.2"
if platform.system() == "Darwin":
@ -26,7 +26,7 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
native.git_repository(
name = "com_googlesource_code_re2",
remote = "https://github.com/google/re2.git",
commit = "fc6337a382bfd4f7c861abea08f872d3c85b31da",
commit = "7bab3dc83df6a838cc004cc7a7f51d5fe1a427d5",
)
native.git_repository(
@ -204,3 +204,8 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
sha256 = "36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d",
build_file = str(Label("//:zlib.BUILD")),
)
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
@ -120,7 +120,7 @@ toolchain {
# linker_flag: "-Wl,--detect-odr-violations"
# Include directory for cuda headers.
cxx_builtin_include_directory: "/usr/local/cuda%{cuda_version}/include"
cxx_builtin_include_directory: "%{cuda_include_path}"
compilation_mode_flags {
mode: DBG
@ -219,7 +219,7 @@ toolchain {
linker_flag: "-no-canonical-prefixes"
# Include directory for cuda headers.
cxx_builtin_include_directory: "/usr/local/cuda%{cuda_version}/include"
cxx_builtin_include_directory: "%{cuda_include_path}"
compilation_mode_flags {
mode: DBG
@ -95,6 +95,7 @@ def GetHostCompilerOptions(argv):
parser.add_argument('-iquote', nargs='*', action='append')
parser.add_argument('--sysroot', nargs=1)
parser.add_argument('-g', nargs='*', action='append')
parser.add_argument('-fno-canonical-system-headers', action='store_true')
args, _ = parser.parse_known_args(argv)
@ -106,6 +107,8 @@ def GetHostCompilerOptions(argv):
opts += ' -iquote ' + ' -iquote '.join(sum(args.iquote, []))
if args.g:
opts += ' -g' + ' -g'.join(sum(args.g, []))
if args.fno_canonical_system_headers:
opts += ' -fno-canonical-system-headers'
if args.sysroot:
opts += ' --sysroot ' + args.sysroot[0]
@ -120,13 +120,31 @@ def _enable_cuda(repository_ctx):
return False
def _cuda_toolkit_path(repository_ctx):
"""Finds the cuda toolkit directory."""
def _cuda_toolkit_path(repository_ctx, cuda_version):
"""Finds the cuda toolkit directory.
Args:
repository_ctx: The repository context.
cuda_version: The cuda toolkit version.
Returns:
A speculative real path of the cuda toolkit install directory.
"""
cuda_toolkit_path = _DEFAULT_CUDA_TOOLKIT_PATH
if _CUDA_TOOLKIT_PATH in repository_ctx.os.environ:
cuda_toolkit_path = repository_ctx.os.environ[_CUDA_TOOLKIT_PATH].strip()
if not repository_ctx.path(cuda_toolkit_path).exists:
auto_configure_fail("Cannot find cuda toolkit path.")
if cuda_version:
# Handle typical configuration where the real path is
# <basedir>/cuda-<version> and the provided path is <basedir>/cuda.
version_suffixed = "%s-%s" % (cuda_toolkit_path, cuda_version)
if repository_ctx.path(version_suffixed).exists:
return version_suffixed
# Returns the non-versioned path if cuda version is not provided or if the
# installation does not use a cuda- directory, such as on ArchLinux where
# CUDA installs directly to /opt/cuda.
return cuda_toolkit_path
@ -173,6 +191,11 @@ def _compute_capabilities(repository_ctx):
def _cpu_value(repository_ctx):
os_name = repository_ctx.os.name.lower()
if os_name.startswith("mac os"):
return "Darwin"
if os_name.find("windows") != -1:
return "Windows"
result = repository_ctx.execute(["uname", "-s"])
return result.stdout.strip()
@ -209,6 +232,17 @@ def _cuda_symlink_files(cpu_value, cuda_version, cudnn_version):
cuda_rand_lib = "lib/libcurand%s.dylib" % cuda_ext,
cuda_fft_lib = "lib/libcufft%s.dylib" % cuda_ext,
cuda_cupti_lib = "extras/CUPTI/lib/libcupti%s.dylib" % cuda_ext)
elif cpu_value == "Windows":
return struct(
cuda_lib_path = "lib",
cuda_rt_lib = "lib/cudart%s.dll" % cuda_ext,
cuda_rt_lib_static = "lib/cudart_static.lib",
cuda_blas_lib = "lib/cublas%s.dll" % cuda_ext,
cuda_dnn_lib = "lib/cudnn%s.dll" % cudnn_ext,
cuda_dnn_lib_alt = "cudnn%s.dll" % cudnn_ext,
cuda_rand_lib = "lib/curand%s.dll" % cuda_ext,
cuda_fft_lib = "lib/cufft%s.dll" % cuda_ext,
cuda_cupti_lib = "extras/CUPTI/lib/cupti%s.dll" % cuda_ext)
else:
auto_configure_fail("Not supported CPU value %s" % cpu_value)
@ -353,8 +387,8 @@ def _symlink_dir(repository_ctx, src_dir, dest_dir):
def _create_cuda_repository(repository_ctx):
"""Creates the repository containing files set up to build with CUDA."""
cuda_toolkit_path = _cuda_toolkit_path(repository_ctx)
cuda_version = _cuda_version(repository_ctx)
cuda_toolkit_path = _cuda_toolkit_path(repository_ctx, cuda_version)
cudnn_install_basedir = _cudnn_install_basedir(repository_ctx)
cudnn_version = _cudnn_version(repository_ctx)
compute_capabilities = _compute_capabilities(repository_ctx)
@ -408,7 +442,7 @@ def _create_cuda_repository(repository_ctx):
gcc_host_compiler_includes = _gcc_host_compiler_includes(repository_ctx, cc)
_tpl(repository_ctx, "crosstool:CROSSTOOL",
{
"%{cuda_version}": ("-%s" % cuda_version) if cuda_version else "",
"%{cuda_include_path}": cuda_toolkit_path + '/include',
"%{gcc_host_compiler_includes}": gcc_host_compiler_includes,
})
_tpl(repository_ctx,
@ -142,10 +142,13 @@ function setup_python {
for x in $EXPECTED_PATHS; do
if [ -e "$x" ]; then
rm "$x"
# This makes ./configure slow on Windows, but it works.
rm -rf "$x"
fi
done
# ln -sf is actually implemented as copying in msys, since creating symbolic links is privileged on Windows,
# so we need -rf to remove them above.
ln -sf "${python_include}" util/python/python_include
ln -sf "${python_lib}" util/python/python_lib
ln -sf "${numpy_include}" third_party/py/numpy/numpy_include
@ -159,13 +162,24 @@ function setup_python {
echo "export PYTHON_BIN_PATH=$PYTHON_BIN_PATH" > tools/python_bin_path.sh
}
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
function is_windows() {
# On windows, the shell script is actually running in msys
if [[ "${PLATFORM}" =~ msys_nt* ]]; then
true
else
false
fi
}
function check_python {
for x in $EXPECTED_PATHS; do
if [ ! -e "$x" ]; then
echo -e "\n\nERROR: Cannot find '${x}'. Did you run configure?\n\n" 1>&2
exit 1
fi
if [ ! -L "${x}" ]; then
# Don't check symbolic link on Windows
if ! is_windows && [ ! -L "${x}" ]; then
echo -e "\n\nERROR: '${x}' is not a symbolic link. Internal error.\n\n" 1>&2
exit 1
fi