Skip to content

Commit 05fd10c

Browse files
author
l2k2
committed
colorizer
1 parent d309808 commit 05fd10c

File tree

8 files changed: +69 additions, −67 deletions

keras-autoencoder/autoencoder.py

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
1-
from keras.layers import Input, Dense, Flatten, Reshape
1+
from keras.layers import Input, Dense, Flatten, Reshape, UpSampling2D, Conv2D, MaxPooling2D
22
from keras.models import Model, Sequential
33

44
from keras.datasets import mnist
5+
from keras.datasets import fashion_mnist
6+
57
from keras.callbacks import Callback
68
import numpy as np
79
import wandb
@@ -13,16 +15,28 @@
1315
config.encoding_dim = 32
1416
config.epochs = 1000
1517

16-
(x_train, _), (x_test, _) = mnist.load_data()
18+
(x_train, _), (x_test, _) = fashion_mnist.load_data()
1719

1820
x_train = x_train.astype('float32') / 255.
1921
x_test = x_test.astype('float32') / 255.
2022

2123
model = Sequential()
22-
model.add(Flatten(input_shape=(28,28)))
23-
model.add(Dense(config.encoding_dim, activation='relu'))
24-
model.add(Dense(28*28, activation='sigmoid'))
24+
model.add(Reshape((28,28,1),input_shape=(28,28)))
25+
model.add(Conv2D(4, (3,3), padding="same", activation="relu"))
26+
model.add(MaxPooling2D(pool_size=(2,2)))
27+
model.add(Conv2D(8, (3,3), padding="same", activation="relu"))
28+
model.add(MaxPooling2D(pool_size=(2,2)))
29+
model.add(Flatten())
30+
model.add(Dense(10, activation="relu"))
31+
model.add(Dense(7*7, activation="relu"))
32+
model.add(Reshape((7,7,1)))
33+
model.add(Conv2D(8, (3,3), padding="same", activation="relu"))
34+
model.add(UpSampling2D())
35+
model.add(Conv2D(4, (3,3), padding="same", activation="relu"))
36+
model.add(UpSampling2D())
37+
model.add(Conv2D(1, (3,3), padding="same", activation="sigmoid"))
2538
model.add(Reshape((28,28)))
39+
2640
model.compile(optimizer='adam', loss='mse')
2741

2842
model.summary()
@@ -41,7 +55,7 @@ def on_epoch_end(self, epoch, logs):
4155
model.fit(x_train, x_train,
4256
epochs=config.epochs,
4357
validation_data=(x_test, x_test),
44-
callbacks=[Images(), WandbCallback()])
58+
callbacks=[Images(), WandbCallback()])
4559

4660

4761
model.save('auto-small.h5')

keras-autoencoder/denoising_autoencoder.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,9 @@
77
from wandb.keras import WandbCallback
88

99
def add_noise(x_train, x_test):
10-
noise_factor = 0.5
11-
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
12-
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
10+
noise_factor = 1.0
11+
x_train_noisy = x_train + np.random.normal(loc=0.0, scale=noise_factor, size=x_train.shape)
12+
x_test_noisy = x_test + np.random.normal(loc=0.0, scale=noise_factor, size=x_test.shape)
1313

1414
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
1515
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
@@ -34,7 +34,7 @@ def add_noise(x_train, x_test):
3434
model.add(Dense(config.encoding_dim, activation='relu'))
3535
model.add(Dense(784, activation='sigmoid'))
3636
model.add(Reshape((28,28)))
37-
model.compile(optimizer='adam', loss='mse')
37+
model.compile(optimizer='adam', loss='binary_crossentropy')
3838

3939
class Images(Callback):
4040
def on_epoch_end(self, epoch, logs):

keras-color/wandb/settings

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
[default]
2-
entity: qualcomm
3-
project: color-jul27
2+
entity: mlclass
3+
project: color-image
44
base_url: https://api.wandb.ai

keras-fashion/perceptron-linear.py

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import numpy
22
from keras.datasets import fashion_mnist
33
from keras.models import Sequential
4-
from keras.layers import Dense, Flatten, Dropout
4+
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Reshape
55
from keras.utils import np_utils
66
import wandb
77
from wandb.keras import WandbCallback
@@ -14,6 +14,10 @@
1414
# load data
1515
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
1616

17+
X_train = X_train.astype('float32') / 255.
18+
X_test = X_test.astype('float32') / 255.
19+
20+
1721
img_width = X_train.shape[1]
1822
img_height = X_train.shape[2]
1923
labels =["T-shirt/top","Trouser","Pullover","Dress",
@@ -27,14 +31,20 @@
2731

2832
# create model
2933
model=Sequential()
30-
model.add(Flatten(input_shape=(img_width,img_height)))
31-
model.add(Dense(num_classes))
32-
model.compile(loss='mse', optimizer='adam',
34+
model.add(Reshape((28,28,1), input_shape=(28,28)))
35+
model.add(Dropout(0.5))
36+
model.add(Conv2D(32, (3,3), activation='relu'))
37+
model.add(Dropout(0.5))
38+
model.add(MaxPooling2D(pool_size=(2,2)))
39+
model.add(Flatten())
40+
model.add(Dense(100, activation='relu'))
41+
model.add(Dropout(0.5))
42+
model.add(Dense(num_classes, activation='softmax'))
43+
model.compile(loss='categorical_crossentropy', optimizer='adam',
3344
metrics=['accuracy'])
3445

3546
# Fit the model
36-
model.fit(X_train, y_train, epochs=config.epochs, validation_data=(X_test, y_test),
47+
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test),
3748
callbacks=[WandbCallback(data_type="image", labels=labels)])
3849

3950

40-

keras-imdb/imdb-cnn.py

Lines changed: 21 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,51 +1,57 @@
11
from keras.preprocessing import sequence
22
from keras.models import Sequential
33
from keras.layers import Dense, Dropout, Activation
4-
from keras.layers import Embedding
4+
from keras.layers import Embedding, LSTM
55
from keras.layers import Conv1D, Flatten
66
from keras.datasets import imdb
77
import wandb
88
from wandb.keras import WandbCallback
9+
import imdb
10+
import numpy as np
11+
from keras.preprocessing import text
912

1013
wandb.init()
1114
config = wandb.config
1215

1316
# set parameters:
14-
config.max_features = 5000
15-
config.maxlen = 400
17+
config.vocab_size = 1000
18+
config.maxlen = 1000
1619
config.batch_size = 32
1720
config.embedding_dims = 50
1821
config.filters = 250
1922
config.kernel_size = 3
2023
config.hidden_dims = 250
21-
config.epochs = 2
24+
config.epochs = 10
2225

23-
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=config.max_features)
26+
(X_train, y_train), (X_test, y_test) = imdb.load_imdb()
2427

25-
x_train = sequence.pad_sequences(x_train, maxlen=config.maxlen)
26-
x_test = sequence.pad_sequences(x_test, maxlen=config.maxlen)
27-
print('x_train shape:', x_train.shape)
28-
print('x_test shape:', x_test.shape)
28+
tokenizer = text.Tokenizer(num_words=config.vocab_size)
29+
tokenizer.fit_on_texts(X_train)
30+
X_train = tokenizer.texts_to_matrix(X_train)
31+
X_test = tokenizer.texts_to_matrix(X_test)
32+
33+
X_train = sequence.pad_sequences(X_train, maxlen=config.maxlen)
34+
X_test = sequence.pad_sequences(X_test, maxlen=config.maxlen)
2935

30-
print('Build model...')
3136
model = Sequential()
32-
model.add(Embedding(config.max_features,
37+
model.add(Embedding(config.vocab_size,
3338
config.embedding_dims,
3439
input_length=config.maxlen))
35-
model.add(Dropout(0.2))
40+
model.add(Dropout(0.5))
3641
model.add(Conv1D(config.filters,
3742
config.kernel_size,
3843
padding='valid',
3944
activation='relu'))
4045
model.add(Flatten())
4146
model.add(Dense(config.hidden_dims, activation='relu'))
42-
model.add(Dropout(0.2))
47+
model.add(Dropout(0.5))
4348
model.add(Dense(1, activation='sigmoid'))
4449

4550
model.compile(loss='binary_crossentropy',
4651
optimizer='adam',
4752
metrics=['accuracy'])
48-
model.fit(x_train, y_train,
53+
54+
model.fit(X_train, y_train,
4955
batch_size=config.batch_size,
5056
epochs=config.epochs,
51-
validation_data=(x_test, y_test))
57+
validation_data=(X_test, y_test), callbacks=[WandbCallback()])

keras-imdb/imdb-lstm.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515

1616
# set parameters:
1717
config.vocab_size = 1000
18-
config.maxlen = 1000
18+
config.maxlen = 300
1919
config.batch_size = 32
2020
config.embedding_dims = 50
2121
config.filters = 250
@@ -37,9 +37,8 @@
3737
model.add(Embedding(config.vocab_size,
3838
config.embedding_dims,
3939
input_length=config.maxlen))
40-
model.add(LSTM(config.hidden_dims, dropout=0.2, recurrent_dropout=0.2, activation="sigmoid", internal_activation="sigmoid"))
40+
model.add(LSTM(config.hidden_dims, activation="sigmoid"))
4141
model.add(Dense(1, activation='sigmoid'))
42-
4342
model.compile(loss='binary_crossentropy',
4443
optimizer='rmsprop',
4544
metrics=['accuracy'])

keras-seq2seq/train.py

Lines changed: 3 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1,37 +1,12 @@
11

2-
# -*- coding: utf-8 -*-
3-
'''An implementation of sequence to sequence learning for performing addition
4-
Input: "535+61"
5-
Output: "596"
6-
Padding is handled by using a repeated sentinel character (space)
7-
Input may optionally be reversed, shown to increase performance in many tasks in:
8-
"Learning to Execute"
9-
http://arxiv.org/abs/1410.4615
10-
and
11-
"Sequence to Sequence Learning with Neural Networks"
12-
http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf
13-
Theoretically it introduces shorter term dependencies between source and target.
14-
Two digits reversed:
15-
+ One layer LSTM (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
16-
Three digits reversed:
17-
+ One layer LSTM (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
18-
Four digits reversed:
19-
+ One layer LSTM (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
20-
Five digits reversed:
21-
+ One layer LSTM (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
22-
''' # noqa
23-
24-
from __future__ import print_function
252
from keras.models import Sequential
263
from keras import layers
274
import numpy as np
28-
from six.moves import range
295
import wandb
306
from wandb.keras import WandbCallback
317

328
wandb.init()
339

34-
3510
class CharacterTable(object):
3611
"""Given a set of characters:
3712
+ Encode them to a one hot integer representation
@@ -64,10 +39,7 @@ def decode(self, x, calc_argmax=True):
6439
return ''.join(self.indices_char[x] for x in x)
6540

6641

67-
class colors:
68-
ok = '\033[92m'
69-
fail = '\033[91m'
70-
close = '\033[0m'
42+
7143

7244
# Parameters for the model and dataset.
7345
TRAINING_SIZE = 50000
@@ -193,7 +165,7 @@ class colors:
193165
print('Q', q[::-1] if REVERSE else q, end=' ')
194166
print('T', correct, end=' ')
195167
if correct == guess:
196-
print(colors.ok + '☑' + colors.close, end=' ')
168+
print('☑', end=' ')
197169
else:
198-
print(colors.fail + '☒' + colors.close, end=' ')
170+
print('☒', end=' ')
199171
print(guess)

keras-sign/perceptron.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
# create model
3737
model=Sequential()
3838
model.add(Flatten(input_shape=(img_width, img_height)))
39+
model.add(Dense(100, activation='relu'))
3940
model.add(Dense(num_classes, activation='softmax'))
4041
model.compile(loss=config.loss, optimizer=config.optimizer,
4142
metrics=['accuracy'])

0 commit comments

Comments (0)