Fix / standardize references in Keras Applications docstrings.

PiperOrigin-RevId: 308674354
Change-Id: Id0d186974e7dcebc8a9b2d80ca940e88f20acf47
This commit is contained in:
Francois Chollet 2020-04-27 12:25:52 -07:00 committed by TensorFlower Gardener
parent acf04f1fc8
commit 71964116c5
10 changed files with 28 additions and 23 deletions

View File

@@ -138,9 +138,9 @@ def DenseNet(
classifier_activation='softmax'):
"""Instantiates the DenseNet architecture.
Reference paper:
- [Densely Connected Convolutional Networks]
(https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is

View File

@@ -145,7 +145,7 @@ layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference paper:
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)

View File

@@ -53,7 +53,7 @@ def InceptionResNetV2(include_top=True,
**kwargs):
"""Instantiates the Inception-ResNet v2 architecture.
Reference paper:
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)

View File

@@ -56,7 +56,7 @@ def InceptionV3(
classifier_activation='softmax'):
"""Instantiates the Inception v3 architecture.
Reference paper:
Reference:
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)

View File

@@ -95,9 +95,10 @@ def MobileNet(input_shape=None,
**kwargs):
"""Instantiates the MobileNet architecture.
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications](https://arxiv.org/abs/1704.04861)
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is

View File

@@ -106,9 +106,9 @@ def MobileNetV2(input_shape=None,
**kwargs):
"""Instantiates the MobileNetV2 architecture.
Reference paper:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks]
(https://arxiv.org/abs/1801.04381) (CVPR 2018)
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.

View File

@@ -79,9 +79,9 @@ def NASNet(
classifier_activation='softmax'):
"""Instantiates a NASNet model.
Reference paper:
- [Learning Transferable Architectures for Scalable Image Recognition]
(https://arxiv.org/abs/1707.07012) (CVPR 2018)
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is

View File

@@ -15,9 +15,9 @@
# pylint: disable=invalid-name
"""ResNet models for Keras.
Reference paper:
- [Deep Residual Learning for Image Recognition]
(https://arxiv.org/abs/1512.03385) (CVPR 2015)
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
"""
from __future__ import absolute_import
from __future__ import division
@@ -72,9 +72,9 @@ def ResNet(stack_fn,
**kwargs):
"""Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
Reference paper:
- [Deep Residual Learning for Image Recognition]
(https://arxiv.org/abs/1512.03385) (CVPR 2015)
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is

View File

@@ -138,7 +138,7 @@ decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference paper:
Reference:
- [Identity Mappings in Deep Residual Networks]
(https://arxiv.org/abs/1603.05027) (CVPR 2016)

View File

@@ -18,7 +18,7 @@
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Reference paper:
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
@@ -60,6 +60,10 @@ def Xception(
classifier_activation='softmax'):
"""Instantiates the Xception architecture.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.