mnist_autoencoder
~~~~~~~~~~~~~~~~~

Implements an autoencoder for the MNIST data.  The program can do two
things: (1) plot the autoencoder's output for the first ten images in
the MNIST test set; and (2) use the autoencoder to build a classifier.
The program is a quick-and-dirty hack --- we'll do things in a more
systematic way in the module ``deep_autoencoder``.
"""
811
912# My Libraries
1518import matplotlib .pyplot as plt
1619import numpy as np
1720
def autoencoder_results(hidden_units):
    """
    Train an autoencoder with ``hidden_units`` hidden neurons on the
    MNIST training data, then plot what the trained network produces
    for the first ten images in the MNIST test set.
    """
    # Load the data once and hand the pieces to the two helpers.
    training_data, test_inputs, actual_test_results = \
        mnist_loader.load_data_nn()
    net = train_autoencoder(hidden_units, training_data)
    plot_test_results(net, test_inputs, actual_test_results)
31+
def train_autoencoder(hidden_units, training_data):
    """Return an autoencoder network trained on ``training_data``.

    The network has a single hidden layer of ``hidden_units`` neurons
    and is trained by SGD to reproduce its 784-dimensional input.
    """
    # The training target for each example is the input itself.
    paired_data = [(x, x) for x, _ in training_data]
    net = Network([784, hidden_units, 784])
    # 3 epochs, mini-batches of 10; the 0.01 / 0.05 arguments are
    # presumably the learning rate and regularization parameter ---
    # confirm against Network.SGD's signature.
    net.SGD(paired_data, 3, 10, 0.01, 0.05)
    return net
38+
39+ def plot_test_results (net , test_inputs , actual_test_results ):
40+ """
41+ Plot the results after passing the first ten test MNIST digits through
42+ the autoencoder ``net``."""
2643 fig = plt .figure ()
2744 ax = fig .add_subplot (111 )
2845 images_in = [test_inputs [j ].reshape (- 1 , 28 ) for j in range (10 )]
@@ -35,3 +52,19 @@ def mnist_autoencoder(hidden_units):
3552 plt .xticks (np .array ([]))
3653 plt .yticks (np .array ([]))
3754 plt .show ()
55+
def classifier(hidden_units):
    """
    Train an autoencoder on the MNIST training data, then reuse its
    input->hidden layer to initialize a ``[784, hidden_units, 10]``
    classifier.  The classifier is trained on the MNIST training data,
    its score on the test set is printed, and the trained classifier
    network is returned.
    """
    training_data, test_inputs, actual_test_results = \
        mnist_loader.load_data_nn()
    net_ae = train_autoencoder(hidden_units, training_data)
    net_c = Network([784, hidden_units, 10])
    # Reuse ONLY the autoencoder's first (input->hidden) parameter set.
    # The autoencoder is [784, hidden_units, 784], so its biases/weights
    # lists each have two entries; taking [:2] (as the original did)
    # would give the two-layer classifier three parameter sets with
    # mismatched shapes.  [:1] keeps the hidden layer and pairs it with
    # a freshly initialized output layer.
    net_c.biases = net_ae.biases[:1] + [np.random.randn(10, 1)/np.sqrt(10)]
    net_c.weights = net_ae.weights[:1] + \
        [np.random.randn(10, hidden_units)/np.sqrt(10)]
    # Same training schedule as the autoencoder.
    net_c.SGD(training_data, 3, 10, 0.01, 0.05)
    # Parenthesized single-argument print: identical in Python 2,
    # forward-compatible with Python 3.
    print(net_c.evaluate(test_inputs, actual_test_results))
    return net_c