Skip to content

Commit f6eda66

Browse files
committed
Formatting fixes.
1 parent b197ba9 commit f6eda66

37 files changed

+130
-53
lines changed

keras/activations.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""Built-in activation functions.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function
@@ -38,11 +40,14 @@ def elu(x, alpha=1.0):
3840

3941

4042
def selu(x):
41-
"""Scaled Exponential Linear Unit. (Klambauer et al., 2017)
43+
"""Scaled Exponential Linear Unit. (Klambauer et al., 2017).
4244
4345
# Arguments
4446
x: A tensor or variable to compute the activation function for.
4547
48+
# Returns
49+
Tensor with the same shape and dtype as `x`.
50+
4651
# Note
4752
- To be used together with the initialization "lecun_normal".
4853
- To be used together with the dropout variant "AlphaDropout".
@@ -102,12 +107,12 @@ def get(identifier):
102107
return deserialize(identifier)
103108
elif callable(identifier):
104109
if isinstance(identifier, Layer):
105-
warnings.warn((
110+
warnings.warn(
106111
'Do not pass a layer instance (such as {identifier}) as the '
107112
'activation argument of another layer. Instead, advanced '
108113
'activation layers should be used just like any other '
109-
'layer in a model.'
110-
).format(identifier=identifier.__class__.__name__))
114+
'layer in a model.'.format(
115+
identifier=identifier.__class__.__name__))
111116
return identifier
112117
else:
113118
raise ValueError('Could not interpret '

keras/callbacks.py

Lines changed: 40 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""Callbacks: utilities called at certain points during model training.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function
@@ -815,11 +817,12 @@ class ReduceLROnPlateau(Callback):
815817
of epochs, the learning rate is reduced.
816818
817819
# Example
818-
```python
819-
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
820-
patience=5, min_lr=0.001)
821-
model.fit(X_train, Y_train, callbacks=[reduce_lr])
822-
```
820+
821+
```python
822+
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
823+
patience=5, min_lr=0.001)
824+
model.fit(X_train, Y_train, callbacks=[reduce_lr])
825+
```
823826
824827
# Arguments
825828
monitor: quantity to be monitored.
@@ -928,10 +931,11 @@ class CSVLogger(Callback):
928931
including 1D iterables such as np.ndarray.
929932
930933
# Example
931-
```python
932-
csv_logger = CSVLogger('training.log')
933-
model.fit(X_train, Y_train, callbacks=[csv_logger])
934-
```
934+
935+
```python
936+
csv_logger = CSVLogger('training.log')
937+
model.fit(X_train, Y_train, callbacks=[csv_logger])
938+
```
935939
936940
# Arguments
937941
filename: filename of the csv file, e.g. 'run/log.csv'.
@@ -1020,32 +1024,33 @@ class LambdaCallback(Callback):
10201024
on_train_end: called at the end of model training.
10211025
10221026
# Example
1023-
```python
1024-
# Print the batch number at the beginning of every batch.
1025-
batch_print_callback = LambdaCallback(
1026-
on_batch_begin=lambda batch,logs: print(batch))
1027-
1028-
# Stream the epoch loss to a file in JSON format. The file content
1029-
# is not well-formed JSON but rather has a JSON object per line.
1030-
import json
1031-
json_log = open('loss_log.json', mode='wt', buffering=1)
1032-
json_logging_callback = LambdaCallback(
1033-
on_epoch_end=lambda epoch, logs: json_log.write(
1034-
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
1035-
on_train_end=lambda logs: json_log.close()
1036-
)
1037-
1038-
# Terminate some processes after having finished model training.
1039-
processes = ...
1040-
cleanup_callback = LambdaCallback(
1041-
on_train_end=lambda logs: [
1042-
p.terminate() for p in processes if p.is_alive()])
1043-
1044-
model.fit(...,
1045-
callbacks=[batch_print_callback,
1046-
json_logging_callback,
1047-
cleanup_callback])
1048-
```
1027+
1028+
```python
1029+
# Print the batch number at the beginning of every batch.
1030+
batch_print_callback = LambdaCallback(
1031+
on_batch_begin=lambda batch,logs: print(batch))
1032+
1033+
# Stream the epoch loss to a file in JSON format. The file content
1034+
# is not well-formed JSON but rather has a JSON object per line.
1035+
import json
1036+
json_log = open('loss_log.json', mode='wt', buffering=1)
1037+
json_logging_callback = LambdaCallback(
1038+
on_epoch_end=lambda epoch, logs: json_log.write(
1039+
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
1040+
on_train_end=lambda logs: json_log.close()
1041+
)
1042+
1043+
# Terminate some processes after having finished model training.
1044+
processes = ...
1045+
cleanup_callback = LambdaCallback(
1046+
on_train_end=lambda logs: [
1047+
p.terminate() for p in processes if p.is_alive()])
1048+
1049+
model.fit(...,
1050+
callbacks=[batch_print_callback,
1051+
json_logging_callback,
1052+
cleanup_callback])
1053+
```
10491054
"""
10501055

10511056
def __init__(self,

keras/constraints.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""Constraints: functions that impose constraints on weight values.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

keras/datasets/boston_housing.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""Boston housing price regression dataset.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

keras/datasets/cifar.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
# -*- coding: utf-8 -*-
2+
"""Utilities common to CIFAR10 and CIFAR100 datasets.
3+
"""
24
from __future__ import absolute_import
35
from __future__ import division
46
from __future__ import print_function

keras/datasets/cifar10.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""CIFAR10 small images classification dataset.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

keras/datasets/cifar100.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""CIFAR100 small images classification dataset.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

keras/datasets/fashion_mnist.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""Fashion-MNIST dataset.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

keras/datasets/imdb.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""IMDB sentiment classification dataset.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

keras/datasets/mnist.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
"""MNIST handwritten digits dataset.
2+
"""
13
from __future__ import absolute_import
24
from __future__ import division
35
from __future__ import print_function

0 commit comments

Comments (0)