
Commit e3de7b7

Refactoring

1 parent 24396c9 commit e3de7b7
3 files changed (+41, -8 lines)
code/deep_autoencoder.py

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 """
-autoencoder
-~~~~~~~~~~~
+deep_autoencoder
+~~~~~~~~~~~~~~~~
 
 A module which implements deep autoencoders.
 """

code/mnist_autoencoder.py

Lines changed: 38 additions & 5 deletions
@@ -2,8 +2,11 @@
 mnist_autoencoder
 ~~~~~~~~~~~~~~~~~
 
-Implements an autoencoder for the MNIST data, and plots the
-autoencoder's output for the first ten digits in the MNIST test set.
+Implements an autoencoder for the MNIST data.  The program can do two
+things: (1) plot the autoencoder's output for the first ten images in
+the MNIST test set; and (2) use the autoencoder to build a classifier.
+The program is a quick-and-dirty hack --- we'll do things in a more
+systematic way in the module ``deep_autoencoder``.
 """
 
 # My Libraries
@@ -15,14 +18,28 @@
 import matplotlib.pyplot as plt
 import numpy as np
 
-def mnist_autoencoder(hidden_units):
-    # Do the training
+def autoencoder_results(hidden_units):
+    """
+    Train an autoencoder using the MNIST training data and plot the
+    results when the first ten MNIST test images are passed through
+    the autoencoder.
+    """
     training_data, test_inputs, actual_test_results = \
         mnist_loader.load_data_nn()
+    net = train_autoencoder(hidden_units, training_data)
+    plot_test_results(net, test_inputs, actual_test_results)
+
+def train_autoencoder(hidden_units, training_data):
+    "Return a trained autoencoder."
     autoencoder_training_data = [(x, x) for x, _ in training_data]
     net = Network([784, hidden_units, 784])
     net.SGD(autoencoder_training_data, 3, 10, 0.01, 0.05)
-    # Plot the first ten test outputs
+    return net
+
+def plot_test_results(net, test_inputs, actual_test_results):
+    """
+    Plot the results after passing the first ten test MNIST digits through
+    the autoencoder ``net``."""
     fig = plt.figure()
     ax = fig.add_subplot(111)
     images_in = [test_inputs[j].reshape(-1, 28) for j in range(10)]
@@ -35,3 +52,19 @@ def mnist_autoencoder(hidden_units):
     plt.xticks(np.array([]))
     plt.yticks(np.array([]))
     plt.show()
+
+def classifier(hidden_units):
+    """
+    Train an autoencoder using the MNIST training data, and then use
+    the autoencoder to create a classifier with a single hidden layer.
+    """
+    training_data, test_inputs, actual_test_results = \
+        mnist_loader.load_data_nn()
+    net_ae = train_autoencoder(hidden_units, training_data)
+    net_c = Network([784, hidden_units, 10])
+    net_c.biases = net_ae.biases[:2]+[np.random.randn(10, 1)/np.sqrt(10)]
+    net_c.weights = net_ae.weights[:2]+\
+        [np.random.randn(10, hidden_units)/np.sqrt(10)]
+    net_c.SGD(training_data, 3, 10, 0.01, 0.05)
+    print net_c.evaluate(test_inputs, actual_test_results)
+    return net_c
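
As a quick orientation for the refactored module, a minimal usage sketch might look like the lines below. This is illustrative only and not part of the commit: it assumes the repo's code/ directory is on the Python path, that mnist_loader and Network behave as shown in the diff above, and that 30 hidden units is just an arbitrary example value.

# Illustrative usage sketch (not part of the commit).  Assumes the repo's
# code/ directory is on sys.path; 30 hidden units is an arbitrary choice.
import mnist_autoencoder

# Plot reconstructions of the first ten MNIST test digits.
mnist_autoencoder.autoencoder_results(30)

# Train an autoencoder, then reuse its learned weights to seed a
# Network([784, 30, 10]) classifier, as in classifier() above.
net = mnist_autoencoder.classifier(30)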

code/mnist_pca.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 pca = PCA(n_components=30)
 nn_images = [x for (x, y) in training_data]
 pca_images = np.concatenate(nn_images, axis=1).transpose()
-pca_r = pca.fit(pca_images[:1000])
+pca_r = pca.fit(pca_images)
 
 # Try PCA on first ten test images
 test_images = np.array(test_inputs[:10]).reshape((10,784))
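
For reference, the changed line fits PCA on the full set of training images rather than only the first 1000 rows. A self-contained sketch of the same fit-and-reconstruct pattern with scikit-learn, using random data in place of MNIST purely for illustration:

# Illustrative sketch (not part of the commit); random data stands in for
# the (n_images, 784) MNIST matrix built in mnist_pca.py.
import numpy as np
from sklearn.decomposition import PCA

images = np.random.rand(5000, 784)

pca = PCA(n_components=30)
pca.fit(images)                    # fit on every image, as in the new line

# Project the first ten images onto 30 components and reconstruct them.
codes = pca.transform(images[:10])
reconstructions = pca.inverse_transform(codes)   # shape (10, 784)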
