Commit cc307c7

added LinearLearner, LogisticLearner with tests and fixed NeuralNetLearner and PerceptronLearner
1 parent 184a109

File tree

5 files changed: +375 -209 lines changed

deep_learning4e.py

Lines changed: 114 additions & 41 deletions
@@ -8,8 +8,9 @@
 from keras.layers import Embedding, SimpleRNN, Dense
 from keras.preprocessing import sequence
 
+from learning4e import Learner
 from utils4e import (Sigmoid, softmax1D, conv1D, gaussian_kernel, element_wise_product, vector_add, random_weights,
-                     scalar_vector_product, matrix_multiplication, map_vector, mean_squared_error_loss)
+                     scalar_vector_product, map_vector, mean_squared_error_loss)
 
 
 class Node:
@@ -190,7 +191,7 @@ def stochastic_gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, ba
             for j in range(len(weights[i])):
                 net[i].nodes[j].weights = weights[i][j]
 
-        if verbose and (e + 1) % verbose == 0:
+        if verbose:
             print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
 
     return net
@@ -247,7 +248,7 @@ def adam(dataset, net, loss, epochs=1000, rho=(0.9, 0.999), delta=1 / 10 ** 8,
             for j in range(len(weights[i])):
                 net[i].nodes[j].weights = weights[i][j]
 
-        if verbose and (e + 1) % verbose == 0:
+        if verbose:
             print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
 
     return net
@@ -288,16 +289,16 @@ def BackPropagation(inputs, targets, theta, net, loss):
     # initialize delta
     delta = [[] for _ in range(n_layers)]
 
-    previous = [layer_out[i] - t_val[i] for i in range(o_units)]
+    previous = np.array([layer_out[i] - t_val[i] for i in range(o_units)])
     h_layers = n_layers - 1
 
     # backward pass
     for i in range(h_layers, 0, -1):
         layer = net[i]
-        derivative = [layer.activation.derivative(node.value) for node in layer.nodes]
-        delta[i] = element_wise_product(previous, derivative)
+        derivative = np.array([layer.activation.derivative(node.value) for node in layer.nodes])
+        delta[i] = previous * derivative
         # pass to layer i-1 in the next iteration
-        previous = matrix_multiplication([delta[i]], theta[i])[0]
+        previous = np.matmul([delta[i]], theta[i])[0]
         # compute gradient of layer i
         gradients[i] = [scalar_vector_product(d, net[i].inputs) for d in delta[i]]
@@ -338,64 +339,136 @@ def get_batch(examples, batch_size=1):
         yield examples[i: i + batch_size]
 
 
-def NeuralNetLearner(dataset, hidden_layer_sizes, l_rate=0.01, epochs=1000, batch_size=1,
-                     optimizer=stochastic_gradient_descent, verbose=None):
+class NeuralNetLearner(Learner):
     """
     Simple dense multilayer neural network.
     :param hidden_layer_sizes: size of hidden layers in the form of a list
     """
-    input_size = len(dataset.inputs)
-    output_size = len(dataset.values[dataset.target])
 
-    # initialize the network
-    raw_net = [InputLayer(input_size)]
-    # add hidden layers
-    hidden_input_size = input_size
-    for h_size in hidden_layer_sizes:
-        raw_net.append(DenseLayer(hidden_input_size, h_size))
-        hidden_input_size = h_size
-    raw_net.append(DenseLayer(hidden_input_size, output_size))
-
-    # update parameters of the network
-    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=l_rate,
-                            batch_size=batch_size, verbose=verbose)
-
-    def predict(example):
-        n_layers = len(learned_net)
+    def __init__(self, hidden_layer_sizes, l_rate=0.01, epochs=1000, batch_size=1,
+                 optimizer=stochastic_gradient_descent, verbose=False):
+        self.hidden_layer_sizes = hidden_layer_sizes
+        self.l_rate = l_rate
+        self.epochs = epochs
+        self.optimizer = optimizer
+        self.batch_size = batch_size
+        self.verbose = verbose
+
+    def fit(self, dataset):
+        input_size = len(dataset.inputs)
+        output_size = len(dataset.values[dataset.target])
+
+        # initialize the network
+        raw_net = [InputLayer(input_size)]
+        # add hidden layers
+        hidden_input_size = input_size
+        for h_size in self.hidden_layer_sizes:
+            raw_net.append(DenseLayer(hidden_input_size, h_size))
+            hidden_input_size = h_size
+        raw_net.append(DenseLayer(hidden_input_size, output_size))
+
+        # update parameters of the network
+        self.learned_net = self.optimizer(dataset, raw_net, mean_squared_error_loss, epochs=self.epochs,
+                                          l_rate=self.l_rate, batch_size=self.batch_size, verbose=self.verbose)
+        return self
+
+    def predict(self, example):
+        n_layers = len(self.learned_net)
 
         layer_input = example
         layer_out = example
 
         # get the output of each layer by forward passing
         for i in range(1, n_layers):
-            layer_out = learned_net[i].forward(layer_input)
+            layer_out = self.learned_net[i].forward(layer_input)
             layer_input = layer_out
 
         return layer_out.index(max(layer_out))
 
-    return predict
-
 
-def PerceptronLearner(dataset, l_rate=0.01, epochs=1000, batch_size=1,
-                      optimizer=stochastic_gradient_descent, verbose=None):
+class PerceptronLearner(Learner):
     """
     Simple perceptron neural network.
     """
-    input_size = len(dataset.inputs)
-    output_size = len(dataset.values[dataset.target])
 
-    # initialize the network, add dense layer
-    raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]
+    def __init__(self, l_rate=0.01, epochs=1000, batch_size=1, optimizer=stochastic_gradient_descent, verbose=None):
+        self.l_rate = l_rate
+        self.epochs = epochs
+        self.optimizer = optimizer
+        self.batch_size = batch_size
+        self.verbose = verbose
 
-    # update the network
-    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=l_rate,
-                            batch_size=batch_size, verbose=verbose)
+    def fit(self, dataset):
+        input_size = len(dataset.inputs)
+        output_size = len(dataset.values[dataset.target])
 
-    def predict(example):
-        layer_out = learned_net[1].forward(example)
+        # initialize the network, add dense layer
+        raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]
+
+        # update the network
+        self.learned_net = self.optimizer(dataset, raw_net, mean_squared_error_loss, epochs=self.epochs,
+                                          l_rate=self.l_rate, batch_size=self.batch_size, verbose=self.verbose)
+        return self
+
+    def predict(self, example):
+        layer_out = self.learned_net[1].forward(example)
         return layer_out.index(max(layer_out))
 
-    return predict
+
+if __name__ == "__main__":
+    from learning4e import DataSet, grade_learner, err_ratio, \
+        LinearRegressionLearner, MeanSquaredError, MultiLogisticRegressionLearner
+
+    # iris_tests = [([5.0, 3.1, 0.9, 0.1], 0),
+    #               ([5.1, 3.5, 1.0, 0.0], 0),
+    #               ([4.9, 3.3, 1.1, 0.1], 0),
+    #               ([6.0, 3.0, 4.0, 1.1], 1),
+    #               ([6.1, 2.2, 3.5, 1.0], 1),
+    #               ([5.9, 2.5, 3.3, 1.1], 1),
+    #               ([7.5, 4.1, 6.2, 2.3], 2),
+    #               ([7.3, 4.0, 6.1, 2.4], 2),
+    #               ([7.0, 3.3, 6.1, 2.5], 2)]
+    #
+    # iris = DataSet(name='iris')
+    # classes = ['setosa', 'versicolor', 'virginica']
+    # iris.classes_to_numbers(classes)
+    # nnl_gd = NeuralNetLearner([4], l_rate=0.15, epochs=100, optimizer=stochastic_gradient_descent).fit(iris)
+    # nnl_adam = NeuralNetLearner([4], l_rate=0.001, epochs=200, optimizer=adam).fit(iris)
+    # assert grade_learner(nnl_gd, iris_tests) == 1
+    # assert err_ratio(nnl_gd, iris) < 0.08
+    # assert grade_learner(nnl_adam, iris_tests) == 1
+    # assert err_ratio(nnl_adam, iris) < 0.08
+    #
+    # iris = DataSet(name='iris')
+    # classes = ['setosa', 'versicolor', 'virginica']
+    # iris.classes_to_numbers(classes)
+    # pl_gd = PerceptronLearner(l_rate=0.01, epochs=100, optimizer=stochastic_gradient_descent).fit(iris)
+    # pl_adam = PerceptronLearner(l_rate=0.01, epochs=100, optimizer=adam).fit(iris)
+    # assert grade_learner(pl_gd, iris_tests) == 1
+    # assert err_ratio(pl_gd, iris) < 0.08
+    # assert grade_learner(pl_adam, iris_tests) == 1
+    # assert err_ratio(pl_adam, iris) < 0.08
+
+    iris_tests = [([[5.0, 3.1, 0.9, 0.1]], 0),
+                  ([[5.1, 3.5, 1.0, 0.0]], 0),
+                  ([[4.9, 3.3, 1.1, 0.1]], 0),
+                  ([[6.0, 3.0, 4.0, 1.1]], 1),
+                  ([[6.1, 2.2, 3.5, 1.0]], 1),
+                  ([[5.9, 2.5, 3.3, 1.1]], 1),
+                  ([[7.5, 4.1, 6.2, 2.3]], 2),
+                  ([[7.3, 4.0, 6.1, 2.4]], 2),
+                  ([[7.0, 3.3, 6.1, 2.5]], 2)]
+
+    iris = DataSet(name='iris')
+    classes = ['setosa', 'versicolor', 'virginica']
+    iris.classes_to_numbers(classes)
+    n_samples, n_features = len(iris.examples), iris.target
+    X, y = np.array([x[:n_features] for x in iris.examples]), \
+           np.array([x[n_features] for x in iris.examples])
+    ll = MultiLogisticRegressionLearner().fit(X, y)
+    assert grade_learner(ll, iris_tests) == 1
+    assert np.allclose(err_ratio(ll, iris), 0.04)
 
 
 def SimpleRNNLearner(train_data, val_data, epochs=2):
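With `NeuralNetLearner` and `PerceptronLearner` rewritten as `Learner` subclasses, training follows the fit/predict pattern exercised by the commented-out tests in the hunk above. A usage sketch (hyperparameters taken from those tests, not tuned recommendations):

from learning4e import DataSet
from deep_learning4e import NeuralNetLearner, PerceptronLearner, adam

iris = DataSet(name='iris')
iris.classes_to_numbers(['setosa', 'versicolor', 'virginica'])

# fit() returns self, so construction and training chain in one expression
nnl = NeuralNetLearner([4], l_rate=0.15, epochs=100).fit(iris)
pl = PerceptronLearner(l_rate=0.01, epochs=100, optimizer=adam).fit(iris)
print(nnl.predict([5.1, 3.5, 1.0, 0.0]), pl.predict([5.1, 3.5, 1.0, 0.0]))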
