
Commit 136f966

Author: l2k2
Commit message: translation in lstm
1 parent 5d2dfc0 commit 136f966

File tree

16 files changed: +271 -41 lines changed

keras-cnn/convolution.ipynb

Lines changed: 17 additions & 7 deletions
Large diffs are not rendered by default.

keras-cnn/maxpool.ipynb

Lines changed: 5 additions & 5 deletions
Large diffs are not rendered by default.

keras-mlp/dropout.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 config = run.config
 config.optimizer = "adam"
 config.epochs = 50
-config.dropout = 10
+config.dropout = 0.4
 config.hidden_nodes = 100
 
 # load data
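Dropout in Keras takes the fraction of units to drop as a float in [0, 1), so the old value of 10 was not a meaningful rate; 0.4 drops 40% of activations during training. A minimal sketch (layer sizes and dataset shape are assumptions, since only the config lines appear in this hunk) of how such a config value is typically consumed:

# Hypothetical sketch of how config.dropout feeds into a model; the rest of
# dropout.py is not visible in this hunk.
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout

dropout_rate = 0.4                        # fraction of units zeroed each update, must be in [0, 1)
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(100, activation='relu'))
model.add(Dropout(dropout_rate))          # active only during training, a no-op at inference
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])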

keras-mlp/mlp.py

Lines changed: 2 additions & 0 deletions
@@ -37,7 +37,9 @@
 # create model
 model=Sequential()
 model.add(Flatten(input_shape=(img_width,img_height)))
+model.add(Dropout(0.4))
 model.add(Dense(config.hidden_nodes, activation='relu'))
+model.add(Dropout(0.4))
 model.add(Dense(num_classes, activation='softmax'))
 model.compile(loss='categorical_crossentropy', optimizer=config.optimizer,
               metrics=['accuracy'])
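The two added Dropout(0.4) layers randomly zero 40% of the incoming activations on each training step and are disabled automatically at evaluation and prediction time. They also rely on Dropout being imported; the import line sits outside this hunk, so the following is an assumption about the rest of the file:

# Assumed import list for the added layers (not visible in the hunk above):
from keras.layers import Flatten, Dense, Dropout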

keras-perceptron/perceptron-linear.py

Lines changed: 8 additions & 3 deletions
@@ -13,6 +13,10 @@
 # load data
 (X_train, y_train), (X_test, y_test) = mnist.load_data()
 
+# normalize data
+X_train = X_train.astype('float32') / 255.
+X_test = X_test.astype('float32') / 255.
+
 img_width = X_train.shape[1]
 img_height = X_train.shape[2]
 
@@ -26,12 +30,13 @@
 # create model
 model=Sequential()
 model.add(Flatten(input_shape=(img_width,img_height)))
-model.add(Dense(num_classes))
-model.compile(loss='mse', optimizer='adam',
+model.add(Dense(num_classes, activation="softmax"))
+model.compile(loss='categorical_crossentropy', optimizer='adam',
               metrics=['accuracy'])
 
 # Fit the model
 model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test),
           callbacks=[WandbCallback(data_type="image", labels=labels, save_model=False)])
-
+print(model.predict(X_test[:10]))
+model.save('model.h5')
 
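Scaling the pixels to [0, 1] and replacing the linear output trained with mse by a softmax layer trained with categorical_crossentropy turns the script into a proper multi-class classifier. categorical_crossentropy expects one-hot label vectors; that conversion is not visible in this hunk, so the snippet below is a sketch of the usual preparation rather than the file's actual code. The added model.save('model.h5') writes architecture and weights to a single HDF5 file that keras.models.load_model can restore.

# Hypothetical label preparation assumed to exist elsewhere in the file:
from keras.utils import to_categorical

num_classes = 10
y_train = to_categorical(y_train, num_classes)   # e.g. 3 -> [0,0,0,1,0,0,0,0,0,0]
y_test = to_categorical(y_test, num_classes)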

keras-sign/perceptron.py

Lines changed: 1 addition & 2 deletions
@@ -37,5 +37,4 @@
               metrics=['accuracy'])
 
 # Fit the model
-model.fit(X_train, y_train, epochs=config.epochs, validation_data=(X_test, y_test),
-          callbacks=[WandbCallback(data_type="image", labels=signdata.letters)])
+model.fit(X_train, y_train, epochs=config.epochs, validation_data=(X_test, y_test), callbacks=[WandbCallback(data_type="image", labels=signdata.letters)])

keras-transfer/inception-inspect.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 
 model = InceptionV3(weights='imagenet')
 
-img_path = 'elephant.jpg'
+img_path = 'crane.jpeg'
 img = image.load_img(img_path, target_size=(299, 299))
 x = image.img_to_array(img)
 x = np.expand_dims(x, axis=0)
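For context, a self-contained sketch of this kind of inspection script ('crane.jpeg' is simply whatever image sits next to the script). Each keras.applications backbone ships its own preprocess_input and expected input size (299×299 for InceptionV3, 224×224 for ResNet50), and decode_predictions maps the 1000-way ImageNet output back to readable labels:

# Minimal sketch of the full inspect pipeline, assuming 'crane.jpeg' exists locally.
import numpy as np
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
from keras.preprocessing import image

model = InceptionV3(weights='imagenet')
img = image.load_img('crane.jpeg', target_size=(299, 299))  # InceptionV3 expects 299x299 inputs
x = np.expand_dims(image.img_to_array(img), axis=0)         # add a batch dimension
x = preprocess_input(x)                                     # scales pixels to [-1, 1] for Inception
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])                  # [(wnid, label, probability), ...]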

keras-transfer/resnet50-inspect.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 
 model = ResNet50(weights='imagenet')
 
-img_path = 'elephant.jpg'
+img_path = 'crane.jpeg'
 img = image.load_img(img_path, target_size=(224, 224))
 x = image.img_to_array(img)
 x = np.expand_dims(x, axis=0)

keras-transfer/vgg-inspect.py

Lines changed: 6 additions & 6 deletions
@@ -65,12 +65,12 @@ def VGG_16(weights_path=None):
 model.summary()
 # Test pretrained model if weights exist
 if os.path.exists("vgg16_weights.h5"):
-    #im = cv2.resize(cv2.imread('elephant.jpg'), (224, 224)).astype(np.float32)
-    #im[:,:,0] -= 103.939
-    #im[:,:,1] -= 116.779
-    #im[:,:,2] -= 123.68
-    #im = im.transpose((2,0,1))
-    #im = np.expand_dims(im, axis=0)
+    im = cv2.resize(cv2.imread('elephant.jpg'), (224, 224)).astype(np.float32)
+    im[:,:,0] -= 103.939
+    im[:,:,1] -= 116.779
+    im[:,:,2] -= 123.68
+    im = im.transpose((2,0,1))
+    im = np.expand_dims(im, axis=0)
     model = VGG_16('vgg16_weights.h5')
     sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
     model.compile(optimizer=sgd, loss='categorical_crossentropy')
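Re-enabling these lines restores the Caffe-style preprocessing the original VGG16 weights were trained with: cv2 reads the image in BGR order, the per-channel ImageNet means (B 103.939, G 116.779, R 123.68) are subtracted, and the array is moved to channels-first layout with a batch dimension. A small helper equivalent, just to make the steps explicit (the function name and placement are assumptions, not part of the file):

# Hypothetical helper equivalent to the re-enabled lines above.
import cv2
import numpy as np

def preprocess_vgg_bgr(path):
    im = cv2.resize(cv2.imread(path), (224, 224)).astype(np.float32)  # BGR, HxWxC
    im[:, :, 0] -= 103.939              # subtract ImageNet blue-channel mean
    im[:, :, 1] -= 116.779              # green
    im[:, :, 2] -= 123.68               # red
    im = im.transpose((2, 0, 1))        # channels-first, as this VGG definition expects
    return np.expand_dims(im, axis=0)   # add batch dimension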

lstm/imdb-classifier/imdb-bow.py

Lines changed: 14 additions & 8 deletions
@@ -1,7 +1,10 @@
 import imdb
 import numpy as np
 from keras.preprocessing import text
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten
 import wandb
+from wandb.keras import WandbCallback
 from sklearn.linear_model import LogisticRegression
 
 wandb.init()
@@ -10,18 +13,21 @@
 
 (X_train, y_train), (X_test, y_test) = imdb.load_imdb()
 
-
 tokenizer = text.Tokenizer(num_words=config.vocab_size)
 tokenizer.fit_on_texts(X_train)
 X_train = tokenizer.texts_to_matrix(X_train)
 X_test = tokenizer.texts_to_matrix(X_test)
 
-bow_model = LogisticRegression()
-bow_model.fit(X_train, y_train)
+# one hot encode outputs
+y_train = np_utils.to_categorical(y_train)
+y_test = np_utils.to_categorical(y_test)
 
-pred_train = bow_model.predict(X_train)
-acc = np.sum(pred_train==y_train)/len(pred_train)
+# create model
+model=Sequential()
+model.add(Dense(2, activation="softmax", input_shape=(1000,)))
+model.compile(loss='binary_crossentropy', optimizer='adam',
+              metrics=['accuracy'])
 
-pred_test = bow_model.predict(X_test)
-val_acc = np.sum(pred_test==y_test)/len(pred_test)
-wandb.log({"val_acc": val_acc, "acc": acc})
+# Fit the model
+model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test),
+          callbacks=[WandbCallback(save_model=False)])
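The scikit-learn logistic-regression baseline is replaced with an equivalent Keras model: texts_to_matrix produces a (num_samples, vocab_size) bag-of-words matrix, and a single Dense softmax layer on top of it is multinomial logistic regression. Two details sit outside the hunk and are therefore assumptions here: np_utils has to come from keras.utils, and the hard-coded input_shape of (1000,) has to match config.vocab_size. A self-contained sketch with synthetic data standing in for imdb.load_imdb():

# Self-contained sketch of the new bag-of-words classifier; random data replaces
# imdb.load_imdb(), which is a local helper not shown in this commit.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils

vocab_size = 1000
X_train = np.random.randint(0, 2, size=(200, vocab_size)).astype('float32')  # fake BoW vectors
y_train = np_utils.to_categorical(np.random.randint(0, 2, size=(200,)), 2)   # one-hot labels

model = Sequential()
model.add(Dense(2, activation="softmax", input_shape=(vocab_size,)))  # softmax regression
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=2, batch_size=32)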

lstm/imdb-classifier/imdb-cnn.py

Lines changed: 4 additions & 0 deletions
@@ -24,6 +24,8 @@
 config.epochs = 10
 
 (X_train, y_train), (X_test, y_test) = imdb.load_imdb()
+print("Review", X_train[0])
+print("Label", y_train[0])
 
 tokenizer = text.Tokenizer(num_words=config.vocab_size)
 tokenizer.fit_on_texts(X_train)
@@ -32,6 +34,8 @@
 
 X_train = sequence.pad_sequences(X_train, maxlen=config.maxlen)
 X_test = sequence.pad_sequences(X_test, maxlen=config.maxlen)
+print(X_train.shape)
+print("After pre-processing", X_train[0])
 
 model = Sequential()
 model.add(Embedding(config.vocab_size,
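The added prints make the effect of the preprocessing visible: each tokenized review is a variable-length list of word indices, and pad_sequences left-pads or truncates every sequence to exactly config.maxlen entries, so the batch becomes a fixed-shape integer matrix the Embedding layer can consume. A tiny illustration with made-up index sequences:

# Illustration of pad_sequences with made-up sequences and maxlen=5.
from keras.preprocessing import sequence

seqs = [[3, 7, 12], [5, 1, 9, 4, 8, 2, 6]]
padded = sequence.pad_sequences(seqs, maxlen=5)
print(padded)
# [[ 0  0  3  7 12]   <- short sequence left-padded with zeros
#  [ 9  4  8  2  6]]  <- long sequence truncated, keeping the last 5 indices
print(padded.shape)   # (2, 5)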

lstm/text-gen/char-gen.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@
     y[i, char_indices[next_chars[i]]] = 1
 
 model = Sequential()
-model.add(SimpleRNN(128, input_shape=(config.maxlen, len(chars))))
+model.add(GRU(128, input_shape=(config.maxlen, len(chars))))
 model.add(Dense(len(chars), activation='softmax'))
 model.compile(loss='categorical_crossentropy', optimizer="rmsprop")
 
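Swapping SimpleRNN for GRU keeps the interface identical (same 128 units, same input_shape) but adds gating, which generally helps the character model carry information across longer windows. A standalone sketch of the changed model with the imports the new line relies on (40 and 57 are placeholder values for config.maxlen and len(chars)):

# Standalone sketch of the model after this change; sizes are placeholders.
from keras.models import Sequential
from keras.layers import GRU, Dense

maxlen, n_chars = 40, 57
model = Sequential()
model.add(GRU(128, input_shape=(maxlen, n_chars)))   # gated recurrent layer over one-hot chars
model.add(Dense(n_chars, activation='softmax'))      # next-character distribution
model.compile(loss='categorical_crossentropy', optimizer="rmsprop")
model.summary()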

lstm/time-series/plotutil.py

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ def on_epoch_end(self, epoch, logs):
         if self.repeat_predictions:
             preds = repeated_predictions(self.model, self.trainX[-1,:,0], self.look_back, self.testX.shape[0])
         else:
-            preds = model.predict(testX)
+            preds = self.model.predict(self.testX)
 
         # Generate a figure with matplotlib</font>
         figure = matplotlib.pyplot.figure( figsize=(10,10) )

lstm/time-series/rnn.py

Lines changed: 5 additions & 5 deletions
@@ -15,8 +15,8 @@
 wandb.init()
 config = wandb.config
 
-config.repeated_predictions = False
-config.look_back = 20
+config.repeated_predictions = True
+config.look_back = 4
 
 def load_data(data_type="airline"):
     if data_type == "flu":
@@ -39,7 +39,7 @@ def create_dataset(dataset):
         dataY.append(dataset[i + config.look_back])
     return np.array(dataX), np.array(dataY)
 
-data = load_data()
+data = load_data("sin")
 
 # normalize data to between 0 and 1
 max_val = max(data)
@@ -60,8 +60,8 @@ def create_dataset(dataset):
 # create and fit the RNN
 model = Sequential()
 model.add(SimpleRNN(1, input_shape=(config.look_back,1 )))
-model.compile(loss='mse', optimizer='adam')
-model.fit(trainX, trainY, epochs=1000, batch_size=1, validation_data=(testX, testY), callbacks=[WandbCallback(), PlotCallback(trainX, trainY, testX, testY, config.look_back)])
+model.compile(loss='mae', optimizer='rmsprop')
+model.fit(trainX, trainY, epochs=1000, batch_size=20, validation_data=(testX, testY), callbacks=[WandbCallback(), PlotCallback(trainX, trainY, testX, testY, config.look_back)])
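config.look_back sets the window create_dataset slides over the series: each input sample is look_back consecutive values and the target is the value immediately after the window, so shrinking it from 20 to 4 gives the SimpleRNN much shorter sequences (and more of them). A small sketch of that windowing under the new setting; only the dataY line is visible in the hunk above, so the rest of the helper is an approximation:

# Approximate windowing for look_back = 4; a short sine series stands in for load_data("sin").
import numpy as np

look_back = 4
data = np.sin(np.linspace(0, 10, 20))

def create_dataset(dataset):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i:i + look_back])   # window of 4 consecutive values
        dataY.append(dataset[i + look_back])     # the value right after the window
    return np.array(dataX), np.array(dataY)

trainX, trainY = create_dataset(data)
print(trainX.shape, trainY.shape)   # (16, 4) (16,)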