Skip to content

Commit 1b22915

Browse files
committed
Documentation improvements
1 parent 5824f2e commit 1b22915

File tree

5 files changed

+145
-145
lines changed

5 files changed

+145
-145
lines changed

docs/autogen.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -117,8 +117,8 @@ def code_snippet(snippet):
117117

118118

119119
def process_class_docstring(docstring):
120-
docstring = re.sub(r' # (.*)\n',
121-
r' __\1__\n\n',
120+
docstring = re.sub(r'\n # (.*)\n',
121+
r'\n __\1__\n\n',
122122
docstring)
123123

124124
docstring = re.sub(r' ([^\s\\]+):(.*)\n',

keras/layers/advanced_activations.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
66

77
class LeakyReLU(MaskedLayer):
88
'''Special version of a Rectified Linear Unit
9-
that allows a small gradient when the unit is not active
10-
(`f(x) = alpha*x for x < 0`).
9+
that allows a small gradient when the unit is not active:
10+
`f(x) = alpha*x for x < 0`.
1111
1212
# Input shape
1313
Arbitrary. Use the keyword argument `input_shape`

keras/layers/core.py

Lines changed: 117 additions & 116 deletions
Original file line numberDiff line numberDiff line change
@@ -345,53 +345,6 @@ def get_config(self):
345345
return dict(list(base_config.items()) + list(config.items()))
346346

347347

348-
class TimeDistributedMerge(Layer):
349-
'''Sum/multiply/average over the outputs of a TimeDistributed layer.
350-
351-
# Input shape
352-
3D tensor with shape: `(samples, steps, features)`.
353-
354-
# Output shape
355-
2D tensor with shape: `(samples, features)`.
356-
357-
# Arguments
358-
mode: one of {'sum', 'mul', 'ave'}
359-
'''
360-
input_ndim = 3
361-
362-
def __init__(self, mode='sum', **kwargs):
363-
super(TimeDistributedMerge, self).__init__(**kwargs)
364-
self.mode = mode
365-
self.trainable_weights = []
366-
self.regularizers = []
367-
self.constraints = []
368-
self.updates = []
369-
370-
@property
371-
def output_shape(self):
372-
return (None, self.input_shape[2])
373-
374-
def get_output(self, train=False):
375-
X = self.get_input(train)
376-
if self.mode == 'ave':
377-
s = K.mean(X, axis=1)
378-
return s
379-
if self.mode == 'sum':
380-
s = K.sum(X, axis=1)
381-
return s
382-
elif self.mode == 'mul':
383-
s = K.prod(X, axis=1)
384-
return s
385-
else:
386-
raise Exception('Unknown merge mode')
387-
388-
def get_config(self):
389-
config = {'name': self.__class__.__name__,
390-
'mode': self.mode}
391-
base_config = super(TimeDistributedMerge, self).get_config()
392-
return dict(list(base_config.items()) + list(config.items()))
393-
394-
395348
class Merge(Layer):
396349
'''Merge the output of a list of layers or containers into a single tensor.
397350
@@ -411,24 +364,24 @@ class Merge(Layer):
411364
# Examples
412365
413366
```python
414-
left = Sequential()
415-
left.add(Dense(50, input_shape=(784,)))
416-
left.add(Activation('relu'))
367+
left = Sequential()
368+
left.add(Dense(50, input_shape=(784,)))
369+
left.add(Activation('relu'))
417370
418-
right = Sequential()
419-
right.add(Dense(50, input_shape=(784,)))
420-
right.add(Activation('relu'))
371+
right = Sequential()
372+
right.add(Dense(50, input_shape=(784,)))
373+
right.add(Activation('relu'))
421374
422-
model = Sequential()
423-
model.add(Merge([left, right], mode='sum'))
375+
model = Sequential()
376+
model.add(Merge([left, right], mode='sum'))
424377
425-
model.add(Dense(10))
426-
model.add(Activation('softmax'))
378+
model.add(Dense(10))
379+
model.add(Activation('softmax'))
427380
428-
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
381+
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
429382
430-
model.fit([X_train, X_train], Y_train, batch_size=128, nb_epoch=20,
431-
validation_data=([X_test, X_test], Y_test))
383+
model.fit([X_train, X_train], Y_train, batch_size=128, nb_epoch=20,
384+
validation_data=([X_test, X_test], Y_test))
432385
```
433386
'''
434387
def __init__(self, layers, mode='sum', concat_axis=-1, dot_axes=-1):
@@ -626,6 +579,53 @@ def get_config(self):
626579
return dict(list(base_config.items()) + list(config.items()))
627580

628581

582+
class TimeDistributedMerge(Layer):
583+
'''Sum/multiply/average over the outputs of a TimeDistributed layer.
584+
585+
# Input shape
586+
3D tensor with shape: `(samples, steps, features)`.
587+
588+
# Output shape
589+
2D tensor with shape: `(samples, features)`.
590+
591+
# Arguments
592+
mode: one of {'sum', 'mul', 'ave'}
593+
'''
594+
input_ndim = 3
595+
596+
def __init__(self, mode='sum', **kwargs):
597+
super(TimeDistributedMerge, self).__init__(**kwargs)
598+
self.mode = mode
599+
self.trainable_weights = []
600+
self.regularizers = []
601+
self.constraints = []
602+
self.updates = []
603+
604+
@property
605+
def output_shape(self):
606+
return (None, self.input_shape[2])
607+
608+
def get_output(self, train=False):
609+
X = self.get_input(train)
610+
if self.mode == 'ave':
611+
s = K.mean(X, axis=1)
612+
return s
613+
if self.mode == 'sum':
614+
s = K.sum(X, axis=1)
615+
return s
616+
elif self.mode == 'mul':
617+
s = K.prod(X, axis=1)
618+
return s
619+
else:
620+
raise Exception('Unknown merge mode')
621+
622+
def get_config(self):
623+
config = {'name': self.__class__.__name__,
624+
'mode': self.mode}
625+
base_config = super(TimeDistributedMerge, self).get_config()
626+
return dict(list(base_config.items()) + list(config.items()))
627+
628+
629629
class Dropout(MaskedLayer):
630630
'''Apply Dropout to the input. Dropout consists in randomly setting
631631
a fraction `p` of input units to 0 at each update during training time,
@@ -986,42 +986,6 @@ def get_config(self):
986986
return dict(list(base_config.items()) + list(config.items()))
987987

988988

989-
class ActivityRegularization(Layer):
990-
'''Layer that passes through its input unchanged, but applies an update
991-
to the cost function based on the activity.
992-
993-
# Input shape
994-
Arbitrary. Use the keyword argument `input_shape`
995-
(tuple of integers, does not include the samples axis)
996-
when using this layer as the first layer in a model.
997-
998-
# Output shape
999-
Same shape as input.
1000-
1001-
# Arguments
1002-
l1: L1 regularization factor.
1003-
l2: L2 regularization factor.
1004-
'''
1005-
def __init__(self, l1=0., l2=0., **kwargs):
1006-
super(ActivityRegularization, self).__init__(**kwargs)
1007-
self.l1 = l1
1008-
self.l2 = l2
1009-
1010-
activity_regularizer = ActivityRegularizer(l1=l1, l2=l2)
1011-
activity_regularizer.set_layer(self)
1012-
self.regularizers = [activity_regularizer]
1013-
1014-
def get_output(self, train=False):
1015-
return self.get_input(train)
1016-
1017-
def get_config(self):
1018-
config = {'name': self.__class__.__name__,
1019-
'l1': self.l1,
1020-
'l2': self.l2}
1021-
base_config = super(ActivityRegularization, self).get_config()
1022-
return dict(list(base_config.items()) + list(config.items()))
1023-
1024-
1025989
class TimeDistributedDense(MaskedLayer):
1026990
'''Apply a same Dense layer for each dimension[1] (time_dimension) input.
1027991
Especially useful after a recurrent network with 'return_sequence=True'.
@@ -1147,6 +1111,42 @@ def get_config(self):
11471111
return dict(list(base_config.items()) + list(config.items()))
11481112

11491113

1114+
class ActivityRegularization(Layer):
1115+
'''Layer that passes through its input unchanged, but applies an update
1116+
to the cost function based on the activity.
1117+
1118+
# Input shape
1119+
Arbitrary. Use the keyword argument `input_shape`
1120+
(tuple of integers, does not include the samples axis)
1121+
when using this layer as the first layer in a model.
1122+
1123+
# Output shape
1124+
Same shape as input.
1125+
1126+
# Arguments
1127+
l1: L1 regularization factor.
1128+
l2: L2 regularization factor.
1129+
'''
1130+
def __init__(self, l1=0., l2=0., **kwargs):
1131+
super(ActivityRegularization, self).__init__(**kwargs)
1132+
self.l1 = l1
1133+
self.l2 = l2
1134+
1135+
activity_regularizer = ActivityRegularizer(l1=l1, l2=l2)
1136+
activity_regularizer.set_layer(self)
1137+
self.regularizers = [activity_regularizer]
1138+
1139+
def get_output(self, train=False):
1140+
return self.get_input(train)
1141+
1142+
def get_config(self):
1143+
config = {'name': self.__class__.__name__,
1144+
'l1': self.l1,
1145+
'l2': self.l2}
1146+
base_config = super(ActivityRegularization, self).get_config()
1147+
return dict(list(base_config.items()) + list(config.items()))
1148+
1149+
11501150
class AutoEncoder(Layer):
11511151
'''A customizable autoencoder model.
11521152
@@ -1168,32 +1168,33 @@ class AutoEncoder(Layer):
11681168
11691169
# Examples
11701170
```python
1171-
from keras.layers import containers
1171+
from keras.layers import containers, AutoEncoder, Dense
1172+
from keras import models
11721173
1173-
# input shape: (nb_samples, 32)
1174-
encoder = containers.Sequential([Dense(16, input_dim=32), Dense(8)])
1175-
decoder = containers.Sequential([Dense(16, input_dim=8), Dense(32)])
1174+
# input shape: (nb_samples, 32)
1175+
encoder = containers.Sequential([Dense(16, input_dim=32), Dense(8)])
1176+
decoder = containers.Sequential([Dense(16, input_dim=8), Dense(32)])
11761177
1177-
autoencoder = AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=True)
1178-
model = Sequential()
1179-
model.add(autoencoder)
1178+
autoencoder = AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=True)
1179+
model = models.Sequential()
1180+
model.add(autoencoder)
11801181
1181-
# training the autoencoder:
1182-
model.compile(optimizer='sgd', loss='mse')
1183-
model.fit(X_train, X_train, nb_epoch=10)
1182+
# training the autoencoder:
1183+
model.compile(optimizer='sgd', loss='mse')
1184+
model.fit(X_train, X_train, nb_epoch=10)
11841185
1185-
# predicting compressed representations of inputs:
1186-
autoencoder.output_reconstruction = False # the model has to be recompiled after modifying this property
1187-
model.compile(optimizer='sgd', loss='mse')
1188-
representations = model.predict(X_test)
1186+
# predicting compressed representations of inputs:
1187+
autoencoder.output_reconstruction = False # the model has to be recompiled after modifying this property
1188+
model.compile(optimizer='sgd', loss='mse')
1189+
representations = model.predict(X_test)
11891190
1190-
# the model is still trainable, although it now expects compressed representations as targets:
1191-
model.fit(X_test, representations, nb_epoch=1) # in this case the loss will be 0, so it's useless
1191+
# the model is still trainable, although it now expects compressed representations as targets:
1192+
model.fit(X_test, representations, nb_epoch=1) # in this case the loss will be 0, so it's useless
11921193
1193-
# to keep training against the original inputs, just switch back output_reconstruction to True:
1194-
autoencoder.output_reconstruction = True
1195-
model.compile(optimizer='sgd', loss='mse')
1196-
model.fit(X_train, X_train, nb_epoch=10)
1194+
# to keep training against the original inputs, just switch back output_reconstruction to True:
1195+
autoencoder.output_reconstruction = True
1196+
model.compile(optimizer='sgd', loss='mse')
1197+
model.fit(X_train, X_train, nb_epoch=10)
11971198
```
11981199
'''
11991200
def __init__(self, encoder, decoder, output_reconstruction=True,

keras/layers/recurrent.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,6 @@ class Recurrent(MaskedLayer):
5252
of timesteps. To introduce masks to your data,
5353
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
5454
set to `True`.
55-
**Note:** for the time being, masking is only supported with Theano.
5655
5756
# TensorFlow warning
5857
For the time being, when using the TensorFlow backend,

keras/models.py

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -904,18 +904,18 @@ def fit_generator(self, generator, samples_per_epoch, nb_epoch,
904904
# Examples
905905
906906
```python
907-
def generate_arrays_from_file(path):
908-
while 1:
909-
f = open(path)
910-
for line in f:
911-
# create numpy arrays of input data
912-
# and labels, from each line in the file
913-
x, y = process_line(line)
914-
yield x, y
915-
f.close()
916-
917-
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
918-
samples_per_epoch=10000, nb_epoch=10)
907+
def generate_arrays_from_file(path):
908+
while 1:
909+
f = open(path)
910+
for line in f:
911+
# create numpy arrays of input data
912+
# and labels, from each line in the file
913+
x, y = process_line(line)
914+
yield x, y
915+
f.close()
916+
917+
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
918+
samples_per_epoch=10000, nb_epoch=10)
919919
```
920920
'''
921921
max_queue_size = 10 # maximum number of batches in queue
@@ -1369,18 +1369,18 @@ def fit_generator(self, generator, samples_per_epoch, nb_epoch,
13691369
# Examples
13701370
13711371
```python
1372-
def generate_arrays_from_file(path):
1373-
while 1:
1374-
f = open(path)
1375-
for line in f:
1376-
# create numpy arrays of input data
1377-
# and labels, from each line in the file
1378-
x1, x2, y = process_line(line)
1379-
yield {'input_1': x1, 'input_2': x2, 'output': y}
1380-
f.close()
1381-
1382-
graph.fit_generator(generate_arrays_from_file('/my_file.txt'),
1383-
samples_per_epoch=10000, nb_epoch=10)
1372+
def generate_arrays_from_file(path):
1373+
while 1:
1374+
f = open(path)
1375+
for line in f:
1376+
# create numpy arrays of input data
1377+
# and labels, from each line in the file
1378+
x1, x2, y = process_line(line)
1379+
yield {'input_1': x1, 'input_2': x2, 'output': y}
1380+
f.close()
1381+
1382+
graph.fit_generator(generate_arrays_from_file('/my_file.txt'),
1383+
samples_per_epoch=10000, nb_epoch=10)
13841384
```
13851385
'''
13861386
max_queue_size = 10 # maximum number of batches in queue

0 commit comments

Comments (0)