Commit f09302f

merged upstream/master
Parent: c7489f5

55 files changed: +95 -2664 lines. Only a subset of the changed files is shown below.

.travis.yml

Lines changed: 8 additions & 0 deletions

@@ -0,0 +1,8 @@
+before_script:
+  - sudo apt-get install octave
+
+script:
+  - sh -c "octave tests/runalltests.m"
+
+notifications:
+  email: false

CAE/caeexamples.m

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 %% mnist data
 clear all; close all; clc;
-load ../data/mnist_uint8;
+load mnist_uint8;
 x = cell(100, 1);
 N = 600;
 for i = 1 : 100
@@ -29,4 +29,4 @@
 mm = cae.ok{1}{i}(1,:,:);
 ff(i,:) = mm(:);
 end;
-figure;visualize(ff',1)
+figure;visualize(ff')
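
Both edits track upstream API changes: the MNIST .mat file is now expected on the load path rather than at a fixed relative location (an inference, not shown in this commit), and visualize now takes only the data matrix, the old second argument being gone. A hedged usage sketch; the random matrix is a stand-in for the ff built above:

ff = rand(100, 784);      % stand-in: 100 flattened feature maps, one per row
figure; visualize(ff');   % new signature: matrix only, no scale argument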

CNN/cnnsetup.m

Lines changed: 1 addition & 0 deletions

@@ -1,4 +1,5 @@
 function net = cnnsetup(net, x, y)
+assert(exist('OCTAVE_VERSION')==0, 'CNNs do not work with Octave because of a bug in the Octave implementation of convolution. See: http://savannah.gnu.org/bugs/?39314');
 inputmaps = 1;
 mapsize = size(squeeze(x(:, :, 1)));
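
The guard works because exist('OCTAVE_VERSION') is nonzero only under Octave (where it names a built-in) and 0 in MATLAB, so the assert trips exactly where the convn bug applies. The same detection idiom as a minimal sketch (the warning text is illustrative):

isOctave = exist('OCTAVE_VERSION') ~= 0;   % nonzero only under Octave
if isOctave
    warning('CNN code is unsupported under Octave (convn bug #39314).');
end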

CONTRIBUTING.md

Lines changed: 20 additions & 0 deletions

@@ -0,0 +1,20 @@
+Thank you so much for wanting to give back to the toolbox. Here's some info on how to contribute:
+
+# General
+
+Don't bunch up changes: if you have bug fixes, new features, and style changes, make three separate pull requests.
+
+Ensure that you introduce tests/examples for any new functionality.
+
+# Guide
+1. Fork the repository
+2. Create a new branch, e.g. `git checkout -b my-stuff`
+3. Commit and push your changes to that branch
+4. Make sure the tests pass (!) (see known errors)
+5. Create a pull request
+6. I accept your pull request
+
+
+
+
+
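
For step 4 above, the toolbox's test entry point is the same one the new .travis.yml calls. A sketch of running it locally from MATLAB or Octave; the cd target assumes a clone named DeepLearnToolbox:

cd DeepLearnToolbox            % repository root (path is an assumption)
addpath(genpath(pwd));
run('tests/runalltests.m');    % same script Travis runs via octave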

DBN/rbmtrain.m

Lines changed: 2 additions & 1 deletion

@@ -1,5 +1,6 @@
 function rbm = rbmtrain(rbm, x, opts)
 assert(isfloat(x), 'x must be a float');
+assert(all(x(:)>=0) && all(x(:)<=1), 'all data in x must be in [0, 1]');
 m = size(x, 1);
 numbatches = m / opts.batchsize;

@@ -14,7 +15,7 @@
 v1 = batch;
 h1 = sigmrnd(repmat(rbm.c', opts.batchsize, 1) + v1 * rbm.W');
 v2 = sigmrnd(repmat(rbm.b', opts.batchsize, 1) + h1 * rbm.W);
-h2 = sigmrnd(repmat(rbm.c', opts.batchsize, 1) + v2 * rbm.W');
+h2 = sigm(repmat(rbm.c', opts.batchsize, 1) + v2 * rbm.W');

 c1 = h1' * v1;
 c2 = h2' * v2;
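
Two fixes here: the new assert catches non-normalized input early, and the last CD-1 step now uses sigm (mean-field probabilities) instead of sigmrnd (binary samples). Leaving the final hidden units as probabilities is the usual contrastive-divergence recommendation, since it removes needless sampling noise from the negative statistics. A self-contained toy sketch of the resulting CD-1 update; the shapes and the momentum line are assumptions patterned on rbmtrain, and sigm/sigmrnd are redefined locally:

rand('state',0)
nv = 6; nh = 4; bs = 10;                            % visible, hidden, batch
rbm.W = 0.1 * randn(nh, nv); rbm.b = zeros(nv, 1); rbm.c = zeros(nh, 1);
rbm.vW = zeros(nh, nv); rbm.alpha = 1; rbm.momentum = 0.5;
sigm    = @(z) 1 ./ (1 + exp(-z));
sigmrnd = @(z) double(1 ./ (1 + exp(-z)) > rand(size(z)));
v1 = double(rand(bs, nv) > 0.5);                    % toy binary batch

h1 = sigmrnd(repmat(rbm.c', bs, 1) + v1 * rbm.W');  % sampled hidden
v2 = sigmrnd(repmat(rbm.b', bs, 1) + h1 * rbm.W);   % sampled reconstruction
h2 = sigm(repmat(rbm.c', bs, 1) + v2 * rbm.W');     % mean-field, no sampling

c1 = h1' * v1;  c2 = h2' * v2;                      % positive/negative stats
rbm.vW = rbm.momentum * rbm.vW + rbm.alpha * (c1 - c2) / bs;
rbm.W  = rbm.W + rbm.vW;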

NN/nnapplygrads.m

Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@

 for i = 1 : (nn.n - 1)
     if(nn.weightPenaltyL2>0)
-        dW = nn.dW{i} + nn.weightPenaltyL2 * nn.W{i};
+        dW = nn.dW{i} + nn.weightPenaltyL2 * [zeros(size(nn.W{i},1),1) nn.W{i}(:,2:end)];
     else
         dW = nn.dW{i};
     end
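
The L2 term now zeroes the first column of nn.W{i}, which holds the bias weights (compare the W{1}(:,2:end) indexing in the README examples), so weight decay no longer shrinks biases toward zero. A toy, runnable restatement of the same line:

W  = [0.5 1.0 -2.0; 0.3 0.4 0.6];   % 2 units; column 1 is the bias
dW = zeros(size(W));                % stand-in gradient
weightPenaltyL2 = 1e-4;
decayed = dW + weightPenaltyL2 * [zeros(size(W,1),1) W(:,2:end)];
assert(isequal(decayed(:,1), dW(:,1)))   % biases escape the penalty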

NN/nnchecknumgrad.m

Lines changed: 2 additions & 2 deletions

@@ -8,9 +8,9 @@ function nnchecknumgrad(nn, x, y)
 nn_m = nn; nn_p = nn;
 nn_m.W{l}(i, j) = nn.W{l}(i, j) - epsilon;
 nn_p.W{l}(i, j) = nn.W{l}(i, j) + epsilon;
-rng(0);
+rand('state',0)
 nn_m = nnff(nn_m, x, y);
-rng(0);
+rand('state',0)
 nn_p = nnff(nn_p, x, y);
 dW = (nn_p.L - nn_m.L) / (2 * epsilon);
 e = abs(dW - nn.dW{l}(i, j));
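
rng(0) becomes the older rand('state',0) so the check also runs under Octave, which lacked rng at the time. Reseeding before each forward pass also matters for correctness: both perturbed networks then draw identical random masks (e.g. for dropout), so the central difference (nn_p.L - nn_m.L) / (2 * epsilon) isolates the effect of the perturbed weight alone. The same check on a scalar toy loss:

epsilon  = 1e-6;
f        = @(w) 0.5 * (w - 3)^2;   % analytic gradient: w - 3
w        = 1.0;
num_grad = (f(w + epsilon) - f(w - epsilon)) / (2 * epsilon);
assert(abs(num_grad - (w - 3)) < 1e-6)   % numerical matches analytic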

NN/nntrain.m

Lines changed: 9 additions & 7 deletions

@@ -58,17 +58,19 @@
         end

         t = toc;
-
+
+        if opts.validation == 1
+            loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
+            str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
+        else
+            loss = nneval(nn, loss, train_x, train_y);
+            str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
+        end
         if ishandle(fhandle)
-            if opts.validation == 1
-                loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
-            else
-                loss = nneval(nn, loss, train_x, train_y);
-            end
             nnupdatefigures(nn, fhandle, loss, opts, i);
         end

-        disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1))))]);
+        disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
         nn.learningRate = nn.learningRate * nn.scaling_learningRate;
     end
 end
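
nneval is hoisted out of the plotting branch, so a full-batch evaluation now runs every epoch even when no figure handle is open, and the epoch line distinguishes the running mini-batch mse from the full-batch figures in str_perf. A usage sketch patterned on the README's ex6 below (the MNIST preprocessing is copied from there):

load mnist_uint8;
train_x = double(train_x) / 255;  train_y = double(train_y);
vx = train_x(1:10000,:);   tx = train_x(10001:end,:);
vy = train_y(1:10000,:);   ty = train_y(10001:end,:);
nn = nnsetup([784 20 10]);
opts.numepochs = 5;  opts.batchsize = 100;
nn = nntrain(nn, tx, ty, opts, vx, vy);   % prints train and val mse per epoch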

NN/nnupdatefigures.m

Lines changed: 2 additions & 21 deletions

@@ -43,40 +43,21 @@ function nnupdatefigures(nn,fhandle,L,opts,i)
     legend(p1, M,'Location','NorthEast');
     set(p1, 'Xlim',[0,opts.numepochs + 1])

-    if i ==2 % speeds up plotting by factor of ~2
-        set(gca,'LegendColorbarListeners',[]);
-        setappdata(gca,'LegendColorbarManualSpace',1);
-        setappdata(gca,'LegendColorbarReclaimSpace',1);
-    end
-
     p2 = subplot(1,2,2);
     plot(plot_x,plot_yfrac);
     xlabel('Number of epochs'); ylabel('Misclassification rate');
     title('Misclassification rate')
     legend(p2, M,'Location','NorthEast');
     set(p2, 'Xlim',[0,opts.numepochs + 1])

-    if i ==2 % speeds up plotting by factor of ~2
-        set(gca,'LegendColorbarListeners',[]);
-        setappdata(gca,'LegendColorbarManualSpace',1);
-        setappdata(gca,'LegendColorbarReclaimSpace',1);
-    end
-
 else

     p = plot(plot_x,plot_ye);
     xlabel('Number of epochs'); ylabel('Error');title('Error');
     legend(p, M,'Location','NorthEast');
     set(gca, 'Xlim',[0,opts.numepochs + 1])
-
-    if i ==2 % speeds up plotting by factor of ~2
-        set(gca,'LegendColorbarListeners',[]);
-        setappdata(gca,'LegendColorbarManualSpace',1);
-        setappdata(gca,'LegendColorbarReclaimSpace',1);
-
-    end
-
+
 end
 drawnow;
 end
-end
+end

README.md

Lines changed: 16 additions & 52 deletions

@@ -1,3 +1,4 @@
+
 DeepLearnToolbox
 ================

@@ -53,9 +54,12 @@ Setup
 1. Download.
 2. addpath(genpath('DeepLearnToolbox'));

-Everything is work in progress
+Known errors
 ------------------------------

+`test_cnn_gradients_are_numerically_correct` fails on Octave because of a bug in Octave's convn implementation. See http://savannah.gnu.org/bugs/?39314
+
+`test_example_CNN` fails in Octave for the same reason.
 Example: Deep Belief Network
 ---------------------
 ```matlab
@@ -69,7 +73,7 @@ train_y = double(train_y);
 test_y = double(test_y);

 %% ex1 train a 100 hidden unit RBM and visualize its weights
-rng(0);
+rand('state',0)
 dbn.sizes = [100];
 opts.numepochs = 1;
 opts.batchsize = 100;
@@ -80,7 +84,7 @@ dbn = dbntrain(dbn, train_x, opts);
 figure; visualize(dbn.rbm{1}.W'); % Visualize the RBM weights

 %% ex2 train a 100-100 hidden unit DBN and use its weights to initialize a NN
-rng(0);
+rand('state',0)
 %train dbn
 dbn.sizes = [100 100];
 opts.numepochs = 1;
@@ -119,7 +123,7 @@ test_y = double(test_y);

 %% ex1 train a 100 hidden unit SDAE and use it to initialize a FFNN
 % Setup and train a stacked denoising autoencoder (SDAE)
-rng(0);
+rand('state',0)
 sae = saesetup([784 100]);
 sae.ae{1}.activation_function = 'sigm';
 sae.ae{1}.learningRate = 1;
@@ -142,39 +146,6 @@ nn = nntrain(nn, train_x, train_y, opts);
 [er, bad] = nntest(nn, test_x, test_y);
 assert(er < 0.16, 'Too big error');

-%% ex2 train a 100-100 hidden unit SDAE and use it to initialize a FFNN
-% Setup and train a stacked denoising autoencoder (SDAE)
-rng(0);
-sae = saesetup([784 100 100]);
-sae.ae{1}.activation_function = 'sigm';
-sae.ae{1}.learningRate = 1;
-sae.ae{1}.inputZeroMaskedFraction = 0.5;
-
-sae.ae{2}.activation_function = 'sigm';
-sae.ae{2}.learningRate = 1;
-sae.ae{2}.inputZeroMaskedFraction = 0.5;
-
-opts.numepochs = 1;
-opts.batchsize = 100;
-sae = saetrain(sae, train_x, opts);
-visualize(sae.ae{1}.W{1}(:,2:end)')
-
-% Use the SDAE to initialize a FFNN
-nn = nnsetup([784 100 100 10]);
-nn.activation_function = 'sigm';
-nn.learningRate = 1;
-
-%add pretrained weights
-nn.W{1} = sae.ae{1}.W{1};
-nn.W{2} = sae.ae{2}.W{1};
-
-% Train the FFNN
-opts.numepochs = 1;
-opts.batchsize = 100;
-nn = nntrain(nn, train_x, train_y, opts);
-[er, bad] = nntest(nn, test_x, test_y);
-assert(er < 0.1, 'Too big error');
-
 ```


@@ -193,7 +164,7 @@ test_y = double(test_y');
 %% ex1 Train a 6c-2s-12c-2s Convolutional neural network
 %will run 1 epoch in about 200 seconds and get around 11% error.
 %With 100 epochs you'll get around 1.2% error
-rng(0)
+rand('state',0)
 cnn.layers = {
     struct('type', 'i') %input layer
     struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5) %convolution layer
@@ -236,7 +207,7 @@ test_y = double(test_y);
 test_x = normalize(test_x, mu, sigma);

 %% ex1 vanilla neural net
-rng(0);
+rand('state',0)
 nn = nnsetup([784 100 10]);
 opts.numepochs = 1;   % Number of full sweeps through data
 opts.batchsize = 100; % Take a mean gradient step over this many samples
@@ -246,16 +217,8 @@ opts.batchsize = 100; % Take a mean gradient step over this many samples

 assert(er < 0.08, 'Too big error');

-% Make an artificial one and verify that we can predict it
-x = zeros(1,28,28);
-x(:, 14:15, 6:22) = 1;
-x = reshape(x,1,28^2);
-figure; visualize(x');
-predicted = nnpredict(nn,x)-1;
-
-assert(predicted == 1);
 %% ex2 neural net with L2 weight decay
-rng(0);
+rand('state',0)
 nn = nnsetup([784 100 10]);

 nn.weightPenaltyL2 = 1e-4; % L2 weight decay
@@ -269,7 +232,7 @@ assert(er < 0.1, 'Too big error');


 %% ex3 neural net with dropout
-rng(0);
+rand('state',0)
 nn = nnsetup([784 100 10]);

 nn.dropoutFraction = 0.5; % Dropout fraction
@@ -282,7 +245,7 @@ nn = nntrain(nn, train_x, train_y, opts);
 assert(er < 0.1, 'Too big error');

 %% ex4 neural net with sigmoid activation function
-rng(0);
+rand('state',0)
 nn = nnsetup([784 100 10]);

 nn.activation_function = 'sigm'; % Sigmoid activation function
@@ -296,7 +259,7 @@ nn = nntrain(nn, train_x, train_y, opts);
 assert(er < 0.1, 'Too big error');

 %% ex5 plotting functionality
-rng(0);
+rand('state',0)
 nn = nnsetup([784 20 10]);
 opts.numepochs = 5; % Number of full sweeps through data
 nn.output = 'softmax'; % use softmax output
@@ -315,7 +278,7 @@ tx = train_x(10001:end,:);
 vy = train_y(1:10000,:);
 ty = train_y(10001:end,:);

-rng(0);
+rand('state',0)
 nn = nnsetup([784 20 10]);
 nn.output = 'softmax'; % use softmax output
 opts.numepochs = 5; % Number of full sweeps through data
@@ -325,6 +288,7 @@ nn = nntrain(nn, tx, ty, opts, vx, vy); % nntrain takes validati

 [er, bad] = nntest(nn, test_x, test_y);
 assert(er < 0.1, 'Too big error');
+
 ```


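
Every rng(0) in the examples becomes rand('state',0), which both MATLAB and Octave accept; rng is MATLAB-only (R2011a+). A hypothetical portable shim, not part of this commit, for code that prefers the modern generator API where available:

if exist('OCTAVE_VERSION') ~= 0      % nonzero only under Octave
    rand('state', 0);
else
    rng(0);                          % modern MATLAB generator API
end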
