
Commit 82652e5

Merged code from Ole Winther. Thanks! Adds linear and softmax output neurons to NN
1 parent d244c95 commit 82652e5

File tree

5 files changed (+44 additions, -9 deletions)

NN/nnbp.m

Lines changed: 6 additions & 2 deletions
@@ -5,8 +5,12 @@
 
     n = nn.n;
     sparsityError = 0;
-
-    d{n} = - nn.e .* (nn.a{n} .* (1 - nn.a{n}));
+    switch nn.output
+        case 'sigm'
+            d{n} = - nn.e .* (nn.a{n} .* (1 - nn.a{n}));
+        case {'softmax','linear'}
+            d{n} = - nn.e;
+    end
     for i = (n - 1) : -1 : 2
         if(nn.nonSparsityPenalty>0)
             pi = repmat(nn.p{i}, size(nn.a{i}, 1), 1);
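Why the new cases use d{n} = - nn.e: for a softmax output trained with cross-entropy (and for a linear output with squared error), the derivative of the loss with respect to the output pre-activation reduces to a - y, i.e. -e, so the sigmoid factor a .* (1 - a) drops out. A minimal standalone sketch, not part of the commit (all variable names are illustrative), that checks this numerically for the softmax case:

% Sketch (illustrative, not toolbox code): verify that for a softmax output
% with cross-entropy loss, dL/dz = a - y, i.e. the delta is simply -(y - a).
z = randn(1, 4);                        % pre-activations for one sample
y = [0 0 1 0];                          % one-hot target
a = exp(z - max(z)); a = a / sum(a);    % softmax
analytic = a - y;                       % claimed gradient
numeric  = zeros(size(z));
eps_ = 1e-6;
for k = 1:numel(z)
    zp = z; zp(k) = zp(k) + eps_;  zm = z; zm(k) = zm(k) - eps_;
    ap = exp(zp - max(zp)); ap = ap / sum(ap);
    am = exp(zm - max(zm)); am = am / sum(am);
    numeric(k) = (-sum(y .* log(ap)) + sum(y .* log(am))) / (2 * eps_);
end
max(abs(analytic - numeric))            % should be on the order of 1e-9 or smaller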

NN/nnchecknumgrad.m

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 function nnchecknumgrad(nn, x, y)
     epsilon = 1e-6;
-    er = 1e-10;
+    er = 1e-8;
     n = nn.n;
     for l = 1 : (n - 1)
         for i = 1 : size(nn.W{l}, 1)
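The check tolerance er is loosened from 1e-10 to 1e-8, presumably to leave headroom for the slightly larger finite-difference error of the new softmax/cross-entropy path. For context, a numerical gradient check compares an analytic gradient against a central difference; a generic sketch, not the toolbox code (lossfun and w are illustrative names):

% Generic central-difference gradient check (illustrative only).
lossfun = @(w) 0.5 * sum((tanh(w) - 1).^2);     % any scalar loss of a vector w
w = randn(5, 1);
epsilon = 1e-6;
num_grad = zeros(size(w));
for k = 1:numel(w)
    e_k = zeros(size(w)); e_k(k) = epsilon;
    num_grad(k) = (lossfun(w + e_k) - lossfun(w - e_k)) / (2 * epsilon);
end
ana_grad = (tanh(w) - 1) .* (1 - tanh(w).^2);   % analytic gradient of the same loss
assert(max(abs(num_grad - ana_grad)) < 1e-8);   % same tolerance as the updated er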

NN/nnff.m

Lines changed: 18 additions & 4 deletions
@@ -9,23 +9,37 @@
     nn.a{1} = x;
 
     %feedforward pass
-    for i = 2 : n
+    for i = 2 : n-1
         nn.a{i} = sigm(repmat(nn.b{i - 1}', m, 1) + nn.a{i - 1} * nn.W{i - 1}');
-        if(nn.dropoutFraction > 0 && i<n)
+        if(nn.dropoutFraction > 0)
             if(nn.testing)
                 nn.a{i} = nn.a{i}.*(1 - nn.dropoutFraction);
             else
                 nn.a{i} = nn.a{i}.*(rand(size(nn.a{i}))>nn.dropoutFraction);
             end
         end
-
         %calculate running exponential activations for use with sparsity
         if(nn.nonSparsityPenalty>0)
             nn.p{i} = 0.99 * nn.p{i} + 0.01 * mean(nn.a{i}, 1);
         end
     end
+    switch nn.output
+        case 'sigm'
+            nn.a{n} = sigm(repmat(nn.b{n - 1}', m, 1) + nn.a{n - 1} * nn.W{n - 1}');
+        case 'linear'
+            nn.a{n} = repmat(nn.b{n - 1}', m, 1) + nn.a{n - 1} * nn.W{n - 1}';
+        case 'softmax'
+            nn.a{n} = repmat(nn.b{n - 1}', m, 1) + nn.a{n - 1} * nn.W{n - 1}';
+            nn.a{n} = exp(bsxfun(@minus, nn.a{n}, max(nn.a{n},[],2)));
+            nn.a{n} = bsxfun(@rdivide, nn.a{n}, sum(nn.a{n}, 2));
+    end
 
     %error and loss
     nn.e = y - nn.a{n};
-    nn.L = 1/2 * sum(sum(nn.e .^ 2)) / m;
+    switch nn.output
+        case {'sigm','linear'}
+            nn.L = 1/2 * sum(sum(nn.e .^ 2)) / m;
+        case 'softmax'
+            nn.L = -sum(sum(y .* log(nn.a{n}))) / m;
+    end
 end
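The softmax branch subtracts the row-wise maximum before exponentiating (bsxfun(@minus, ...)) and then normalizes each row; the subtraction leaves the result unchanged but prevents exp from overflowing for large pre-activations. A small standalone illustration (values are illustrative only):

% Stable vs. naive softmax on large pre-activations (illustrative only).
z = [1000 1001 1002];                       % large values overflow exp()
naive  = exp(z) ./ sum(exp(z));             % exp(1000) = Inf, so this is NaN
stable = exp(z - max(z)); stable = stable ./ sum(stable);
disp(naive);                                % NaN NaN NaN
disp(stable);                               % ~[0.0900 0.2447 0.6652]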

NN/nnsetup.m

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@
     nn.inputZeroMaskedFraction = 0;   % Used for Denoising AutoEncoders
     nn.dropoutFraction = 0;           % Dropout level (http://www.cs.toronto.edu/~hinton/absps/dropout.pdf)
     nn.testing = 0;                   % Internal variable. nntest sets this to one.
+    nn.output = 'sigm';               % output unit 'sigm' (=logistic), 'softmax' and 'linear'
 
     for i = 2 : nn.n
         % biases and bias momentum
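With the new field, choosing the output unit is a one-line change after nnsetup. A small sketch using only the functions touched by this commit; the random data is purely illustrative:

% Build a 5-3-2 network with a softmax output and run one forward/backward pass.
x = rand(20, 5);
y = zeros(20, 2); y(sub2ind(size(y), (1:20)', randi(2, 20, 1))) = 1;   % random one-hot targets
nn = nnsetup([5 3 2]);
nn.output = 'softmax';   % default stays 'sigm'; 'linear' is also accepted
nn = nnff(nn, x, y);     % forward pass; for 'softmax', nn.L is the cross-entropy loss
nn = nnbp(nn);           % backprop; output delta is d{n} = - nn.e
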
tests/test_nn_gradients_are_numerically_correct.m

Lines changed: 18 additions & 2 deletions
@@ -1,13 +1,29 @@
 function test_nn_gradients_are_numerically_correct
-nn = nnsetup([5 3 2]);
 batch_x = rand(20, 5);
 batch_y = rand(20, 2);
+
+nn = nnsetup([5 3 2]);
+nn.output='sigm';
+nn = nnff(nn, batch_x, batch_y);
+nn = nnbp(nn);
+nnchecknumgrad(nn, batch_x, batch_y);
+
+nn = nnsetup([5 3 2]);
+nn.output='linear';
 nn = nnff(nn, batch_x, batch_y);
 nn = nnbp(nn);
 nnchecknumgrad(nn, batch_x, batch_y);
 
+nn = nnsetup([5 3 2]);
+batch_y=batch_y==repmat(max(batch_y,[],2),1,size(batch_y,2));
+nn.output='softmax';
+nn = nnff(nn, batch_x, batch_y);
+nn = nnbp(nn);
+nnchecknumgrad(nn, batch_x, batch_y);
+
+nn = nnsetup([5 3 2]);
 nn.dropoutFraction=0.5;
 rng(0);
 nn = nnff(nn, batch_x, batch_y);
 nn = nnbp(nn);
-nnchecknumgrad(nn, batch_x, batch_y);
+nnchecknumgrad(nn, batch_x, batch_y);
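The softmax test first converts the random batch_y into one-hot targets by flagging the per-row maximum. A tiny worked example of that line (values are illustrative):

% What the one-hot conversion in the softmax test does:
batch_y = [0.2 0.7; 0.9 0.1];
onehot  = batch_y == repmat(max(batch_y, [], 2), 1, size(batch_y, 2));
% onehot is the logical matrix [0 1; 1 0]: each row marks the column of its maximum.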
