Skip to content

Commit 4b50c2c

Browse files
committed
Updates for class 2
1 parent 109907e commit 4b50c2c

File tree

8 files changed

+343
-184
lines changed

8 files changed

+343
-184
lines changed

keras-autoencoder/autoencoder.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -43,9 +43,7 @@ def on_epoch_end(self, epoch, logs):
4343
decoder.add(Dense(28*28, activation="sigmoid"))
4444
decoder.add(Reshape((28,28)))
4545

46-
model = Sequential()
47-
model.add(encoder)
48-
model.add(decoder)
46+
model = Model(encoder, decoder)
4947

5048
model.compile(optimizer='adam', loss='mse')
5149

keras-autoencoder/wandb/settings

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
[default]
22
entity: qualcomm
3-
project: encoding-july27
3+
project: encoding-aug22
44
base_url: https://api.wandb.ai

keras-cifar/cifar-cnn.py

Lines changed: 21 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -8,63 +8,58 @@
88

99
import os
1010
import wandb
11-
from wandb.wandb_keras import WandbKerasCallback
11+
from wandb.keras import WandbCallback
1212

1313
run = wandb.init()
1414
config = run.config
15+
config.dropout = 0.25
16+
config.dense_layer_nodes = 100
17+
config.learn_rate = 0.01
18+
config.batch_size = 32
19+
config.epochs = 50
20+
1521
class_names = ['airplane','automobile','bird','cat','deer',
1622
'dog','frog','horse','ship','truck']
17-
num_classes = 10
18-
23+
num_classes = len(class_names)
1924

20-
save_dir = os.path.join(os.getcwd(), 'saved_models')
21-
model_name = 'keras_cifar10_trained_model.h5'
22-
23-
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
25+
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
2426

2527
# Convert class vectors to binary class matrices.
2628
y_train = keras.utils.to_categorical(y_train, num_classes)
2729
y_test = keras.utils.to_categorical(y_test, num_classes)
2830

2931
model = Sequential()
3032
model.add(Conv2D(32, (3, 3), padding='same',
31-
input_shape=x_train.shape[1:], activation='relu'))
33+
input_shape=X_train.shape[1:], activation='relu'))
3234
model.add(MaxPooling2D(pool_size=(2, 2)))
33-
model.add(Dropout(0.25))
35+
model.add(Dropout(config.dropout))
3436

3537
model.add(Flatten())
3638
model.add(Dense(config.dense_layer_nodes, activation='relu'))
37-
model.add(Dropout(0.5))
39+
model.add(Dropout(config.dropout))
3840
model.add(Dense(num_classes, activation='softmax'))
3941

40-
4142
opt = keras.optimizers.SGD(lr=config.learn_rate)
4243

4344
# Let's train the model using RMSprop
4445
model.compile(loss='categorical_crossentropy',
4546
optimizer=opt,
4647
metrics=['accuracy'])
4748

48-
x_train = x_train.astype('float32')
49-
x_test = x_test.astype('float32')
50-
x_train /= 255
51-
x_test /= 255
52-
53-
54-
datagen = ImageDataGenerator(
55-
width_shift_range=0.1)
56-
49+
X_train = X_train.astype('float32') / 255.
50+
X_test = X_test.astype('float32') / 255.
5751

58-
datagen.fit(x_train)
52+
datagen = ImageDataGenerator(width_shift_range=0.1)
53+
datagen.fit(X_train)
5954

60-
# Fit the model on the batches generated by datagen.flow().
61-
model.fit_generator(datagen.flow(x_train, y_train,
55+
# Fit the model on the batches generated by datagen.flow().
56+
model.fit_generator(datagen.flow(X_train, y_train,
6257
batch_size=config.batch_size),
63-
steps_per_epoch=x_train.shape[0] // config.batch_size,
58+
steps_per_epoch=X_train.shape[0] // config.batch_size,
6459
epochs=config.epochs,
65-
validation_data=(x_test, y_test),
60+
validation_data=(X_test, y_test),
6661
workers=4,
6762
callbacks=[WandbKerasCallback(data_type="image", labels=class_names)]
68-
)
63+
)
6964

7065

keras-fashion/nn.py

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -36,23 +36,14 @@
3636
# create model
3737
model=Sequential()
3838
#model.add(Reshape((img_width, img_height, 1), input_shape=(img_width,img_height)))
39-
#model.add(Dropout(0.4))
40-
#model.add(Conv2D(32, (3,3), activation='relu'))
41-
#model.add(MaxPooling2D(2,2))
42-
#model.add(Dropout(0.4))
43-
#model.add(Conv2D(32, (3,3), activation='relu'))
44-
#model.add(MaxPooling2D(2,2))
4539
model.add(Flatten(input_shape=(img_width,img_height)))
4640
model.add(Dropout(0.4))
4741
model.add(Dense(100, activation='relu'))
4842
model.add(Dropout(0.4))
4943
model.add(Dense(num_classes, activation='softmax'))
5044
model.compile(loss='categorical_crossentropy', optimizer='adam',
5145
metrics=['accuracy'])
52-
model.summary()
5346
# Fit the model
5447
model.fit(X_train, y_train, epochs=config.epochs, validation_data=(X_test, y_test),
55-
callbacks=[WandbCallback(validation_data=X_test, labels=labels)])
48+
callbacks=[WandbCallback(data_type="image", labels=labels)])
5649

57-
#print("Predictions", model.predict(X_train[:50]))
58-
#print("Truth", y_train[:50])

keras-fashion/wandb/settings

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
[default]
22
entity: qualcomm
3-
project: fashion-july27
3+
project: fashion-aug22
44
base_url: https://api.wandb.ai

keras-gan/gan-simple.py

Lines changed: 152 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,152 @@
1+
import os
import numpy as np

from keras.layers import Input
from keras.models import Model, Sequential
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.datasets import mnist
from keras.optimizers import Adam
from keras import backend as K
from keras import initializers
from PIL import Image
from keras.callbacks import LambdaCallback
import wandb

# More GAN training tricks: https://github.com/soumith/ganhacks

run = wandb.init()
config = wandb.config

# Dimensionality of the generator's input noise vector.  Results are a little
# better at 10; it is often left at 100 for consistency with other GAN code.
randomDim = 10

# Load MNIST, rescale pixels to [-1, 1] (matches the generator's tanh output),
# and flatten each 28x28 image into a 784-vector.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = X_train.reshape(60000, 784)

# Hyperparameters tracked by wandb.
config.lr = 0.0002
config.beta_1 = 0.5
config.batch_size = 128
config.epochs = 10

# Single Adam optimizer shared by all three compiled models.
adam = Adam(config.lr, beta_1=config.beta_1)

# Generator: noise vector (randomDim) -> fake flattened image (784, tanh).
generator = Sequential([
    Dense(256, input_dim=randomDim,
          kernel_initializer=initializers.RandomNormal(stddev=0.02)),
    LeakyReLU(0.2),
    Dense(512),
    LeakyReLU(0.2),
    Dense(1024),
    LeakyReLU(0.2),
    Dense(784, activation='tanh'),
])
generator.compile(loss='binary_crossentropy', optimizer=adam, metrics=['acc'])

# Discriminator: flattened image (784) -> probability that the image is real.
discriminator = Sequential([
    Dense(1024, input_dim=784,
          kernel_initializer=initializers.RandomNormal(stddev=0.02)),
    LeakyReLU(0.2),
    Dropout(0.3),
    Dense(512),
    LeakyReLU(0.2),
    Dropout(0.3),
    Dense(256),
    LeakyReLU(0.2),
    Dropout(0.3),
    Dense(1, activation='sigmoid'),
])
discriminator.compile(loss='binary_crossentropy', optimizer=adam,
                      metrics=['binary_accuracy'])

# Combined network: freeze the discriminator so training `gan` only
# updates the generator's weights.
discriminator.trainable = False
ganInput = Input(shape=(randomDim,))
ganOutput = discriminator(generator(ganInput))
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam,
            metrics=['binary_accuracy'])

# Global batch counter shared by the wandb logging callbacks below.
iter = 0
72+
# Write out generated MNIST images
def writeGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)):
    """Sample `examples` noise vectors, run the generator, and save the first
    ten generated digits as 0.jpg .. 9.jpg in the working directory.

    NOTE(review): `epoch`, `dim` and `figsize` are currently unused — they
    look like leftovers from a matplotlib grid-plotting version; confirm
    before removing.
    """
    noise = np.random.normal(0, 1, size=[examples, randomDim])
    generatedImages = generator.predict(noise)
    generatedImages = generatedImages.reshape(examples, 28, 28)

    for i in range(10):
        # BUG FIX: index with `i` — the original saved generatedImages[0]
        # ten times, writing ten copies of the same digit.
        # Map tanh output [-1, 1] back to the pixel range [0, 255].
        img = Image.fromarray((generatedImages[i] + 1.) * (255 / 2.))
        img = img.convert('RGB')
        img.save(str(i) + ".jpg")
82+
83+
84+
# Save the generator and discriminator networks (and weights) for later use
def saveModels(epoch):
    """Snapshot both networks to models/gan_*_epoch_<epoch>.h5."""
    # Create the target directory on first use; the original raised an
    # IOError if ./models did not already exist.
    os.makedirs('models', exist_ok=True)
    generator.save('models/gan_generator_epoch_%d.h5' % epoch)
    discriminator.save('models/gan_discriminator_epoch_%d.h5' % epoch)
88+
89+
90+
def log_generator(epoch, logs):
    """LambdaCallback hook: send generator metrics to wandb every 500th batch.

    The discriminator fields are placeholders (0.0 and 1 - accuracy) so both
    metric series receive a value on the same logging step.  This is the only
    callback that advances the shared `iter` counter.
    """
    global iter
    iter += 1
    if iter % 500 != 0:
        return
    payload = {
        'generator_loss': logs['loss'],
        'generator_acc': logs['binary_accuracy'],
        'discriminator_loss': 0.0,
        'discriminator_acc': (1 - logs['binary_accuracy']),
    }
    wandb.log(payload)
98+
99+
def log_discriminator(epoch, logs):
    """LambdaCallback hook: send discriminator metrics to wandb.

    Fires when the shared `iter` counter is offset by 250 from a multiple of
    500, so it never logs on the same step as log_generator (which is the
    callback that increments `iter`).
    """
    global iter
    if iter % 500 != 250:
        return
    payload = {
        'generator_loss': 0.0,
        'generator_acc': logs['binary_accuracy'],
        'discriminator_loss': logs['loss'],
        'discriminator_acc': logs['binary_accuracy'],
    }
    wandb.log(payload)
107+
108+
def train(epochs=config.epochs, batchSize=config.batch_size):
    """Alternately train the discriminator and the generator on MNIST.

    Args:
        epochs: number of passes over the training set.
        batchSize: number of real images per batch (the discriminator
            actually sees 2*batchSize samples: real stacked on fake).
    """
    # BUG FIX: derive the batch count from the `batchSize` argument; the
    # original read config.batch_size, so calling train() with a different
    # batch size produced a wrong number of batches per epoch.
    batchCount = int(X_train.shape[0] / batchSize)
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)

    wandb_logging_callback_d = LambdaCallback(on_epoch_end=log_discriminator)
    wandb_logging_callback_g = LambdaCallback(on_epoch_end=log_generator)

    for e in range(1, epochs + 1):
        print("Epoch {}:".format(e))
        for i in range(batchCount):
            # Random noise plus a random minibatch of real images.
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            imageBatch = X_train[np.random.randint(0, X_train.shape[0],
                                                   size=batchSize)]

            # Generate fake MNIST images and stack them under the real ones.
            generatedImages = generator.predict(noise)
            X = np.concatenate([imageBatch, generatedImages])

            # Labels: fake = 0; real = 0.9 (one-sided label smoothing).
            yDis = np.zeros(2 * batchSize)
            yDis[:batchSize] = 0.9

            # Train the discriminator on the mixed batch.
            discriminator.trainable = True
            dloss = discriminator.fit(X, yDis, verbose=0,
                                      callbacks=[wandb_logging_callback_d])

            # Train the generator through the frozen-discriminator GAN;
            # the all-ones targets mean "fool the discriminator".
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.fit(noise, yGen, verbose=0,
                            callbacks=[wandb_logging_callback_g])

        # NOTE(review): the pasted source's indentation is ambiguous here;
        # these are placed at epoch level (once per epoch, reporting the
        # last batch's losses).  writeGeneratedImages ignores its argument,
        # so passing the epoch instead of the batch index is behaviorally
        # identical but clearer.
        writeGeneratedImages(e)
        print("Discriminator loss: {}, acc: {}".format(
            dloss.history["loss"][-1],
            dloss.history["binary_accuracy"][-1]))
        print("Generator loss: {}, acc: {}".format(
            gloss.history["loss"][-1],
            1 - gloss.history["binary_accuracy"][-1]))
149+
150+
151+
if __name__ == '__main__':
    # NOTE(review): these literals override config.epochs (10) and
    # config.batch_size (128) assigned at module load — confirm the
    # 200-epoch run is intended rather than train() with the config values.
    train(200, 128)

0 commit comments

Comments
 (0)