diff --git a/WORKSPACE b/WORKSPACE
index 86830a09476..74ea14d0fd7 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -104,7 +104,6 @@ http_archive(
build_file = "//:models.BUILD",
sha256 = "7efe12a8363f09bc24d7b7a450304a15655a57a7751929b2c1593a71183bb105",
urls = [
- "http://storage.googleapis.com/download.tensorflow.org/models/inception_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v1.zip",
],
)
@@ -114,7 +113,6 @@ http_archive(
build_file = "//:models.BUILD",
sha256 = "bddd81ea5c80a97adfac1c9f770e6f55cbafd7cce4d3bbe15fbeb041e6b8f3e8",
urls = [
- "http://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_android_export.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_android_export.zip",
],
)
@@ -124,7 +122,6 @@ http_archive(
build_file = "//:models.BUILD",
sha256 = "859edcddf84dddb974c36c36cfc1f74555148e9c9213dedacf1d6b613ad52b96",
urls = [
- "http://storage.googleapis.com/download.tensorflow.org/models/mobile_multibox_v1a.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/mobile_multibox_v1a.zip",
],
)
@@ -134,7 +131,6 @@ http_archive(
build_file = "//:models.BUILD",
sha256 = "3d374a730aef330424a356a8d4f04d8a54277c425e274ecb7d9c83aa912c6bfa",
urls = [
- "http://storage.googleapis.com/download.tensorflow.org/models/stylize_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/stylize_v1.zip",
],
)
@@ -144,7 +140,6 @@ http_archive(
build_file = "//:models.BUILD",
sha256 = "c3ec4fea3158eb111f1d932336351edfe8bd515bb6e87aad4f25dbad0a600d0c",
urls = [
- "http://storage.googleapis.com/download.tensorflow.org/models/speech_commands_v0.01.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/speech_commands_v0.01.zip",
],
)
diff --git a/tensorflow/examples/tutorials/deepdream/README.md b/tensorflow/examples/tutorials/deepdream/README.md
index 403e4b34f9b..e16b366488a 100644
--- a/tensorflow/examples/tutorials/deepdream/README.md
+++ b/tensorflow/examples/tutorials/deepdream/README.md
@@ -5,11 +5,18 @@ by [Alexander Mordvintsev](mailto:moralex@google.com)
This directory contains Jupyter notebook that demonstrates a number of Convolutional Neural Network
image generation techniques implemented with TensorFlow:
-- visualizing individual feature channels and their combinations to explore the space of patterns learned by the neural network (see [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) galleries)
-- embedding TensorBoard graph visualizations into Jupyter notebooks
-- producing high-resolution images with tiled computation ([example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg))
-- using Laplacian Pyramid Gradient Normalization to produce smooth and colorful visuals at low cost
-- generating DeepDream-like images with TensorFlow
+- visualizing individual feature channels and their combinations to explore
+ the space of patterns learned by the neural network (see
+ [GoogLeNet](https://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html)
+ and
+ [VGG16](https://storage.googleapis.com/deepdream/visualz/vgg16/index.html)
+ galleries)
+- embedding TensorBoard graph visualizations into Jupyter notebooks
+- producing high-resolution images with tiled computation
+ ([example](https://storage.googleapis.com/deepdream/pilatus_flowers.jpg))
+- using Laplacian Pyramid Gradient Normalization to produce smooth and
+ colorful visuals at low cost
+- generating DeepDream-like images with TensorFlow
You can view "deepdream.ipynb" directly on GitHub. Note that GitHub Jupyter notebook preview removes
embedded graph visualizations. You can still see them online
diff --git a/tensorflow/examples/tutorials/deepdream/deepdream.ipynb b/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
index 15112aafb34..448f3f6f438 100644
--- a/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
+++ b/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
@@ -40,14 +40,14 @@
"source": [
"This notebook demonstrates a number of Convolutional Neural Network image generation techniques implemented with TensorFlow for fun and science:\n",
"\n",
- "- visualize individual feature channels and their combinations to explore the space of patterns learned by the neural network (see [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) galleries)\n",
+ "- visualize individual feature channels and their combinations to explore the space of patterns learned by the neural network (see [GoogLeNet](https://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](https://storage.googleapis.com/deepdream/visualz/vgg16/index.html) galleries)\n",
"- embed TensorBoard graph visualizations into Jupyter notebooks\n",
- "- produce high-resolution images with tiled computation ([example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg))\n",
+ "- produce high-resolution images with tiled computation ([example](https://storage.googleapis.com/deepdream/pilatus_flowers.jpg))\n",
"- use Laplacian Pyramid Gradient Normalization to produce smooth and colorful visuals at low cost\n",
"- generate DeepDream-like images with TensorFlow (DogSlugs included)\n",
"\n",
"\n",
- "The network under examination is the [GoogLeNet architecture](http://arxiv.org/abs/1409.4842), trained to classify images into one of 1000 categories of the [ImageNet](http://image-net.org/) dataset. It consists of a set of layers that apply a sequence of transformations to the input image. The parameters of these transformations were determined during the training process by a variant of gradient descent algorithm. The internal image representations may seem obscure, but it is possible to visualize and interpret them. In this notebook we are going to present a few tricks that allow to make these visualizations both efficient to generate and even beautiful. Impatient readers can start with exploring the full galleries of images generated by the method described here for [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) architectures."
+ "The network under examination is the [GoogLeNet architecture](http://arxiv.org/abs/1409.4842), trained to classify images into one of 1000 categories of the [ImageNet](http://image-net.org/) dataset. It consists of a set of layers that apply a sequence of transformations to the input image. The parameters of these transformations were determined during the training process by a variant of gradient descent algorithm. The internal image representations may seem obscure, but it is possible to visualize and interpret them. In this notebook we are going to present a few tricks that allow to make these visualizations both efficient to generate and even beautiful. Impatient readers can start with exploring the full galleries of images generated by the method described here for [GoogLeNet](https://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](https://storage.googleapis.com/deepdream/visualz/vgg16/index.html) architectures."
]
},
{
@@ -1117,7 +1117,7 @@
"id": "mYsY6_Ngpfwl"
},
"source": [
- "Don't hesitate to use higher resolution inputs (also increase the number of octaves)! Here is an [example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg) of running the flower dream over the bigger image."
+ "Don't hesitate to use higher resolution inputs (also increase the number of octaves)! Here is an [example](https://storage.googleapis.com/deepdream/pilatus_flowers.jpg) of running the flower dream over the bigger image."
]
},
{
diff --git a/tensorflow/lite/g3doc/guide/hosted_models.md b/tensorflow/lite/g3doc/guide/hosted_models.md
index ba26ff80065..560a0261861 100644
--- a/tensorflow/lite/g3doc/guide/hosted_models.md
+++ b/tensorflow/lite/g3doc/guide/hosted_models.md
@@ -113,7 +113,7 @@ For more information about object detection, see
The object detection model we currently host is
**coco_ssd_mobilenet_v1_1.0_quant_2018_06_29**.
-<a class="button button-primary" href="http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">Download
+<a class="button button-primary" href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">Download
model and labels</a>
## Pose estimation
diff --git a/tensorflow/lite/g3doc/models/object_detection/overview.md b/tensorflow/lite/g3doc/models/object_detection/overview.md
index 94df4aac0d9..f9da6398846 100644
--- a/tensorflow/lite/g3doc/models/object_detection/overview.md
+++ b/tensorflow/lite/g3doc/models/object_detection/overview.md
@@ -20,7 +20,7 @@ If you are using a platform other than Android or iOS, or you are already
familiar with the TensorFlow Lite APIs, you can
download our starter object detection model and the accompanying labels.
-<a class="button button-primary" href="http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">Download
+<a class="button button-primary" href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">Download
starter model and labels</a>
For more information about the starter model, see
@@ -185,7 +185,7 @@ Note: Object detection models accept input images of a specific size. This is li
We recommend starting with this pre-trained quantized COCO SSD MobileNet v1
model.
-<a class="button button-primary" href="http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">Download
+<a class="button button-primary" href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">Download
starter model and labels</a>
### Uses and limitations
@@ -193,7 +193,7 @@ starter model and labels
The object detection model we provide can identify and locate up to 10 objects
in an image. It is trained to recognize 80 classes of object. For a full list of
classes, see the labels file in the
-<a href="http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">model
+<a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">model
zip</a>.
If you want to train a model to recognize new classes, see
@@ -256,7 +256,7 @@ each object. There will always be 10 objects detected.
The pre-trained models we provide are trained to detect 80 classes of object.
For a full list of classes, see the labels file in the
-<a href="http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">model
+<a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip">model
zip</a>.
You can use a technique known as transfer learning to re-train a model to
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index a22708b4016..36c2d7e2723 100755
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -829,7 +829,7 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
- "http://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
+ "https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)