@@ -345,53 +345,6 @@ def get_config(self):
         return dict(list(base_config.items()) + list(config.items()))


-class TimeDistributedMerge(Layer):
-    '''Sum/multiply/average over the outputs of a TimeDistributed layer.
-
-    # Input shape
-        3D tensor with shape: `(samples, steps, features)`.
-
-    # Output shape
-        2D tensor with shape: `(samples, features)`.
-
-    # Arguments
-        mode: one of {'sum', 'mul', 'ave'}
-    '''
-    input_ndim = 3
-
-    def __init__(self, mode='sum', **kwargs):
-        super(TimeDistributedMerge, self).__init__(**kwargs)
-        self.mode = mode
-        self.trainable_weights = []
-        self.regularizers = []
-        self.constraints = []
-        self.updates = []
-
-    @property
-    def output_shape(self):
-        return (None, self.input_shape[2])
-
-    def get_output(self, train=False):
-        X = self.get_input(train)
-        if self.mode == 'ave':
-            s = K.mean(X, axis=1)
-            return s
-        if self.mode == 'sum':
-            s = K.sum(X, axis=1)
-            return s
-        elif self.mode == 'mul':
-            s = K.prod(X, axis=1)
-            return s
-        else:
-            raise Exception('Unknown merge mode')
-
-    def get_config(self):
-        config = {'name': self.__class__.__name__,
-                  'mode': self.mode}
-        base_config = super(TimeDistributedMerge, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
 class Merge(Layer):
     '''Merge the output of a list of layers or containers into a single tensor.
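Note: `TimeDistributedMerge`, removed here (and re-added further down), reduces a `(samples, steps, features)` tensor over its time axis. A minimal usage sketch against the 0.x `Sequential` API (the `LSTM` import path and constructor signature are assumptions for illustration, not part of this diff):

```python
from keras.models import Sequential
from keras.layers.core import Dense, TimeDistributedMerge
from keras.layers.recurrent import LSTM  # assumed 0.x import path

model = Sequential()
# one output vector per timestep: (samples, 10, 16) -> (samples, 10, 32)
model.add(LSTM(32, return_sequences=True, input_shape=(10, 16)))
# average over the time axis: (samples, 10, 32) -> (samples, 32)
model.add(TimeDistributedMerge(mode='ave'))
model.add(Dense(1))
model.compile(optimizer='sgd', loss='mse')
```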
@@ -411,24 +364,24 @@ class Merge(Layer):
     # Examples

     ```python
-        left = Sequential()
-        left.add(Dense(50, input_shape=(784,)))
-        left.add(Activation('relu'))
+    left = Sequential()
+    left.add(Dense(50, input_shape=(784,)))
+    left.add(Activation('relu'))

-        right = Sequential()
-        right.add(Dense(50, input_shape=(784,)))
-        right.add(Activation('relu'))
+    right = Sequential()
+    right.add(Dense(50, input_shape=(784,)))
+    right.add(Activation('relu'))

-        model = Sequential()
-        model.add(Merge([left, right], mode='sum'))
+    model = Sequential()
+    model.add(Merge([left, right], mode='sum'))

-        model.add(Dense(10))
-        model.add(Activation('softmax'))
+    model.add(Dense(10))
+    model.add(Activation('softmax'))

-        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
+    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

-        model.fit([X_train, X_train], Y_train, batch_size=128, nb_epoch=20,
-                  validation_data=([X_test, X_test], Y_test))
+    model.fit([X_train, X_train], Y_train, batch_size=128, nb_epoch=20,
+              validation_data=([X_test, X_test], Y_test))
     ```
     '''
     def __init__(self, layers, mode='sum', concat_axis=-1, dot_axes=-1):
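The docstring example only shows `mode='sum'`. A sketch of the concatenation mode, using the `concat_axis` parameter visible in the constructor above (layer sizes are illustrative, and the import paths are assumed for the 0.x API):

```python
from keras.models import Sequential
from keras.layers.core import Dense, Merge

left = Sequential()
left.add(Dense(32, input_shape=(784,)))

right = Sequential()
right.add(Dense(32, input_shape=(784,)))

model = Sequential()
# join along the last axis: two 32-dim outputs become one 64-dim tensor
model.add(Merge([left, right], mode='concat', concat_axis=-1))
model.add(Dense(10))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```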
@@ -626,6 +579,53 @@ def get_config(self):
         return dict(list(base_config.items()) + list(config.items()))


+class TimeDistributedMerge(Layer):
+    '''Sum/multiply/average over the outputs of a TimeDistributed layer.
+
+    # Input shape
+        3D tensor with shape: `(samples, steps, features)`.
+
+    # Output shape
+        2D tensor with shape: `(samples, features)`.
+
+    # Arguments
+        mode: one of {'sum', 'mul', 'ave'}
+    '''
+    input_ndim = 3
+
+    def __init__(self, mode='sum', **kwargs):
+        super(TimeDistributedMerge, self).__init__(**kwargs)
+        self.mode = mode
+        self.trainable_weights = []
+        self.regularizers = []
+        self.constraints = []
+        self.updates = []
+
+    @property
+    def output_shape(self):
+        return (None, self.input_shape[2])
+
+    def get_output(self, train=False):
+        X = self.get_input(train)
+        if self.mode == 'ave':
+            s = K.mean(X, axis=1)
+            return s
+        if self.mode == 'sum':
+            s = K.sum(X, axis=1)
+            return s
+        elif self.mode == 'mul':
+            s = K.prod(X, axis=1)
+            return s
+        else:
+            raise Exception('Unknown merge mode')
+
+    def get_config(self):
+        config = {'name': self.__class__.__name__,
+                  'mode': self.mode}
+        base_config = super(TimeDistributedMerge, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+
 class Dropout(MaskedLayer):
     '''Apply Dropout to the input. Dropout consists in randomly setting
     a fraction `p` of input units to 0 at each update during training time,
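As background for the `Dropout` docstring in the trailing context above, an illustrative NumPy sketch of the train-time behavior it describes (a standalone illustration of inverted dropout, not the layer's actual backend code):

```python
import numpy as np

def dropout(x, p, rng=np.random):
    # zero each unit with probability p, then rescale the survivors by
    # 1/(1 - p) so the expected activation matches test time, when
    # nothing is dropped
    mask = rng.binomial(n=1, p=1.0 - p, size=x.shape)
    return x * mask / (1.0 - p)

x = np.ones((2, 4))
print(dropout(x, p=0.5))  # roughly half the entries become zero
```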
@@ -986,42 +986,6 @@ def get_config(self):
         return dict(list(base_config.items()) + list(config.items()))


-class ActivityRegularization(Layer):
-    '''Layer that passes through its input unchanged, but applies an update
-    to the cost function based on the activity.
-
-    # Input shape
-        Arbitrary. Use the keyword argument `input_shape`
-        (tuple of integers, does not include the samples axis)
-        when using this layer as the first layer in a model.
-
-    # Output shape
-        Same shape as input.
-
-    # Arguments
-        l1: L1 regularization factor.
-        l2: L2 regularization factor.
-    '''
-    def __init__(self, l1=0., l2=0., **kwargs):
-        super(ActivityRegularization, self).__init__(**kwargs)
-        self.l1 = l1
-        self.l2 = l2
-
-        activity_regularizer = ActivityRegularizer(l1=l1, l2=l2)
-        activity_regularizer.set_layer(self)
-        self.regularizers = [activity_regularizer]
-
-    def get_output(self, train=False):
-        return self.get_input(train)
-
-    def get_config(self):
-        config = {'name': self.__class__.__name__,
-                  'l1': self.l1,
-                  'l2': self.l2}
-        base_config = super(ActivityRegularization, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
 class TimeDistributedDense(MaskedLayer):
     '''Apply a same Dense layer for each dimension[1] (time_dimension) input.
     Especially useful after a recurrent network with 'return_sequence=True'.
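The trailing context introduces `TimeDistributedDense` (the flag the docstring means is `return_sequences=True`; 'return_sequence' is missing an 's'). A sketch of the pattern it describes, with assumed 0.x import paths:

```python
from keras.models import Sequential
from keras.layers.recurrent import LSTM  # assumed 0.x import path
from keras.layers.core import TimeDistributedDense

model = Sequential()
# a 64-dim output at every timestep: (samples, 20, 8) -> (samples, 20, 64)
model.add(LSTM(64, return_sequences=True, input_shape=(20, 8)))
# the same Dense weights applied independently at each timestep
# -> (samples, 20, 1)
model.add(TimeDistributedDense(1))
model.compile(optimizer='rmsprop', loss='mse')
```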
@@ -1147,6 +1111,42 @@ def get_config(self):
         return dict(list(base_config.items()) + list(config.items()))


+class ActivityRegularization(Layer):
+    '''Layer that passes through its input unchanged, but applies an update
+    to the cost function based on the activity.
+
+    # Input shape
+        Arbitrary. Use the keyword argument `input_shape`
+        (tuple of integers, does not include the samples axis)
+        when using this layer as the first layer in a model.
+
+    # Output shape
+        Same shape as input.
+
+    # Arguments
+        l1: L1 regularization factor.
+        l2: L2 regularization factor.
+    '''
+    def __init__(self, l1=0., l2=0., **kwargs):
+        super(ActivityRegularization, self).__init__(**kwargs)
+        self.l1 = l1
+        self.l2 = l2
+
+        activity_regularizer = ActivityRegularizer(l1=l1, l2=l2)
+        activity_regularizer.set_layer(self)
+        self.regularizers = [activity_regularizer]
+
+    def get_output(self, train=False):
+        return self.get_input(train)
+
+    def get_config(self):
+        config = {'name': self.__class__.__name__,
+                  'l1': self.l1,
+                  'l2': self.l2}
+        base_config = super(ActivityRegularization, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+
 class AutoEncoder(Layer):
     '''A customizable autoencoder model.
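For the `ActivityRegularization` layer re-added above: it is an identity on the forward pass and contributes an L1/L2 penalty on its activations to the training loss via the attached `ActivityRegularizer`. A minimal, assumed usage sketch against the 0.x API:

```python
from keras.models import Sequential
from keras.layers.core import Dense, ActivityRegularization

model = Sequential()
model.add(Dense(64, input_shape=(32,)))
# passes activations through unchanged, but adds an L2 penalty on them
# to the cost during training
model.add(ActivityRegularization(l2=1e-4))
model.add(Dense(10))
model.compile(optimizer='sgd', loss='mse')
```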
@@ -1168,32 +1168,33 @@ class AutoEncoder(Layer):

     # Examples
     ```python
-        from keras.layers import containers
+    from keras.layers import containers, AutoEncoder, Dense
+    from keras import models

-        # input shape: (nb_samples, 32)
-        encoder = containers.Sequential([Dense(16, input_dim=32), Dense(8)])
-        decoder = containers.Sequential([Dense(16, input_dim=8), Dense(32)])
+    # input shape: (nb_samples, 32)
+    encoder = containers.Sequential([Dense(16, input_dim=32), Dense(8)])
+    decoder = containers.Sequential([Dense(16, input_dim=8), Dense(32)])

-        autoencoder = AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=True)
-        model = Sequential()
-        model.add(autoencoder)
+    autoencoder = AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=True)
+    model = models.Sequential()
+    model.add(autoencoder)

-        # training the autoencoder:
-        model.compile(optimizer='sgd', loss='mse')
-        model.fit(X_train, X_train, nb_epoch=10)
+    # training the autoencoder:
+    model.compile(optimizer='sgd', loss='mse')
+    model.fit(X_train, X_train, nb_epoch=10)

-        # predicting compressed representations of inputs:
-        autoencoder.output_reconstruction = False  # the model has to be recompiled after modifying this property
-        model.compile(optimizer='sgd', loss='mse')
-        representations = model.predict(X_test)
+    # predicting compressed representations of inputs:
+    autoencoder.output_reconstruction = False  # the model has to be recompiled after modifying this property
+    model.compile(optimizer='sgd', loss='mse')
+    representations = model.predict(X_test)

-        # the model is still trainable, although it now expects compressed representations as targets:
-        model.fit(X_test, representations, nb_epoch=1)  # in this case the loss will be 0, so it's useless
+    # the model is still trainable, although it now expects compressed representations as targets:
+    model.fit(X_test, representations, nb_epoch=1)  # in this case the loss will be 0, so it's useless

-        # to keep training against the original inputs, just switch back output_reconstruction to True:
-        autoencoder.output_reconstruction = True
-        model.compile(optimizer='sgd', loss='mse')
-        model.fit(X_train, X_train, nb_epoch=10)
+    # to keep training against the original inputs, just switch back output_reconstruction to True:
+    autoencoder.output_reconstruction = True
+    model.compile(optimizer='sgd', loss='mse')
+    model.fit(X_train, X_train, nb_epoch=10)
     ```
     '''
     def __init__(self, encoder, decoder, output_reconstruction=True,