 
 
 class Node:
-    """
-    A node in a computational graph contains the pointer to all its parents.
-    :param val: value of current node
-    :param parents: a container of all parents of current node
-    """
-
-    def __init__(self, val=None, parents=None):
-        if parents is None:
-            parents = []
-        self.val = val
-        self.parents = parents
-
-    def __repr__(self):
-        return "<Node {}>".format(self.val)
-
-
-class NNUnit(Node):
     """
     A single unit of a layer in a neural network
     :param weights: weights between parent nodes and current node
     :param value: value of current node
     """

     def __init__(self, weights=None, value=None):
-        super().__init__(value)
+        self.value = value
         self.weights = weights or []


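This hunk folds NNUnit into Node: a node now just stores a unit's activation value and its incoming weights. A minimal self-contained sketch of the class that results:

    class Node:
        """A single unit of a layer: an activation value plus incoming weights."""

        def __init__(self, weights=None, value=None):
            self.value = value
            self.weights = weights or []

    unit = Node(weights=[0.1, -0.2], value=0.5)
    print(unit.value, unit.weights)  # 0.5 [0.1, -0.2]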
@@ -47,8 +30,8 @@ class Layer:
     :param size: number of units in the current layer
     """

-    def __init__(self, size=3):
-        self.nodes = [NNUnit() for _ in range(size)]
+    def __init__(self, size):
+        self.nodes = [Node() for _ in range(size)]

     def forward(self, inputs):
         """Define the operation to get the output of this layer"""
@@ -65,7 +48,7 @@ def forward(self, inputs):
         """Take each value of the inputs to each unit in the layer."""
         assert len(self.nodes) == len(inputs)
         for node, inp in zip(self.nodes, inputs):
-            node.val = inp
+            node.value = inp
         return inputs


@@ -79,7 +62,7 @@ def forward(self, inputs):
         assert len(self.nodes) == len(inputs)
         res = softmax1D(inputs)
         for node, val in zip(self.nodes, res):
-            node.val = val
+            node.value = val
         return res


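softmax1D itself is defined elsewhere in the module. A minimal sketch of what it is assumed to compute (exponentiate, then normalize so the outputs sum to 1); softmax1d below is a stand-in name, not the module's implementation:

    import math

    def softmax1d(xs):  # assumed behavior of the module's softmax1D
        exps = [math.exp(x) for x in xs]
        total = sum(exps)
        return [e / total for e in exps]

    print(softmax1d([1.0, 2.0, 3.0]))  # ~[0.090, 0.245, 0.665]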
@@ -91,11 +74,11 @@ class DenseLayer(Layer):
     :param activation: (Activation object) activation function
     """

-    def __init__(self, in_size=3, out_size=3, activation=None):
+    def __init__(self, in_size=3, out_size=3, activation=Sigmoid):
         super().__init__(out_size)
         self.out_size = out_size
         self.inputs = None
-        self.activation = Sigmoid() if not activation else activation
+        self.activation = activation()
         # initialize weights
         for node in self.nodes:
             node.weights = random_weights(-0.5, 0.5, in_size)
@@ -105,8 +88,8 @@ def forward(self, inputs):
         res = []
         # get the output value of each unit
         for unit in self.nodes:
-            val = self.activation.f(dot_product(unit.weights, inputs))
-            unit.val = val
+            val = self.activation.function(dot_product(unit.weights, inputs))
+            unit.value = val
             res.append(val)
         return res

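Each unit's output is activation.function(dot_product(unit.weights, inputs)). A self-contained sketch of one unit's forward step, with stand-ins for dot_product and for a sigmoid assumed equivalent to the module's Sigmoid:

    import math

    def dot_product(xs, ys):  # stand-in for the utils helper
        return sum(x * y for x, y in zip(xs, ys))

    def sigmoid(x):  # assumed equivalent of Sigmoid().function
        return 1 / (1 + math.exp(-x))

    weights, inputs = [0.4, -0.6], [1.0, 2.0]
    print(sigmoid(dot_product(weights, inputs)))  # ~0.31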
@@ -131,7 +114,7 @@ def forward(self, features):
         for node, feature in zip(self.nodes, features):
             out = conv1D(feature, node.weights)
             res.append(out)
-            node.val = out
+            node.value = out
         return res


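conv1D is defined outside this diff. The stand-in below is one plausible valid-mode 1-D convolution consistent with how it is called above; it is an assumption, not necessarily the module's implementation:

    def conv1d(feature, kernel):  # hypothetical stand-in for conv1D
        k = len(kernel)
        return [sum(feature[i + j] * kernel[j] for j in range(k))
                for i in range(len(feature) - k + 1)]

    print(conv1d([1, 2, 3, 4], [1, 0, -1]))  # [-2, -2]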
@@ -157,7 +140,7 @@ def forward(self, features):
             out = [max(feature[i:i + self.kernel_size])
                    for i in range(len(feature) - self.kernel_size + 1)]
             res.append(out)
-            self.nodes[i].val = out
+            self.nodes[i].value = out
         return res


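The loop above slides a window of kernel_size over each feature with stride 1 and keeps the maximum. The same computation extracted as a standalone helper:

    def max_pool_1d(feature, kernel_size):
        return [max(feature[i:i + kernel_size])
                for i in range(len(feature) - kernel_size + 1)]

    print(max_pool_1d([1, 4, 2, 5, 3], 2))  # [4, 4, 5, 5]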
@@ -181,7 +164,7 @@ def init_examples(examples, idx_i, idx_t, o_units):
     return inputs, targets


-def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1, verbose=None):
+def stochastic_gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1, verbose=None):
     """
     Gradient descent algorithm to update the learnable parameters of a network.
     :return: the updated network
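The body of init_examples sits outside this hunk; its targets are assumed to be one-hot encodings of the class index over o_units outputs. A sketch of that encoding:

    def one_hot(t, o_units):  # assumed target encoding used by init_examples
        return [1 if i == t else 0 for i in range(o_units)]

    print(one_hot(2, 4))  # [0, 0, 1, 0]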
@@ -200,6 +183,7 @@ def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1,
             # update weights with gradient descent
             weights = vector_add(weights, scalar_vector_product(-l_rate, gs))
             total_loss += batch_loss
+
         # update the weights of network each batch
         for i in range(len(net)):
             if weights[i]:
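The update above is the plain rule w <- w - l_rate * g, expressed with the module's list-based vector helpers. A sketch with stand-ins for vector_add and scalar_vector_product:

    def scalar_vector_product(s, v):  # stand-in for the utils helper
        return [s * x for x in v]

    def vector_add(a, b):  # stand-in for the utils helper
        return [x + y for x, y in zip(a, b)]

    w, g, l_rate = [0.5, -0.3], [0.1, 0.2], 0.01
    w = vector_add(w, scalar_vector_product(-l_rate, g))
    print(w)  # ~[0.499, -0.302]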
@@ -310,7 +294,7 @@ def BackPropagation(inputs, targets, theta, net, loss):
     # backward pass
     for i in range(h_layers, 0, -1):
         layer = net[i]
-        derivative = [layer.activation.derivative(node.val) for node in layer.nodes]
+        derivative = [layer.activation.derivative(node.value) for node in layer.nodes]
         delta[i] = element_wise_product(previous, derivative)
         # pass to layer i-1 in the next iteration
         previous = matrix_multiplication([delta[i]], theta[i])[0]
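Each layer's delta is the upstream error scaled element-wise by the activation derivative at the stored node values; for a sigmoid output v that derivative is v * (1 - v). One step of the computation, with a stand-in for element_wise_product:

    def element_wise_product(a, b):  # stand-in for the utils helper
        return [x * y for x, y in zip(a, b)]

    previous = [0.2, -0.1]                      # error from layer i + 1
    values = [0.7, 0.4]                         # activated outputs of layer i
    derivative = [v * (1 - v) for v in values]  # sigmoid derivative
    print(element_wise_product(previous, derivative))  # ~[0.042, -0.024]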
@@ -344,7 +328,7 @@ def forward(self, inputs):
         for i in range(len(self.nodes)):
             val = [(inputs[i] - mu) * self.weights[0] / np.sqrt(self.eps + stderr ** 2) + self.weights[1]]
             res.append(val)
-            self.nodes[i].val = val
+            self.nodes[i].value = val
         return res


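The expression above centers each input on the batch mean, scales by 1 / sqrt(eps + variance), and applies the two learnable weights (scale, shift). A standalone sketch of that formula, assuming stderr holds the batch standard deviation:

    import math
    import statistics

    def batch_norm_1d(inputs, scale=1.0, shift=0.0, eps=0.001):
        mu = statistics.mean(inputs)
        sigma = statistics.stdev(inputs)  # assumed meaning of stderr above
        return [(x - mu) * scale / math.sqrt(eps + sigma ** 2) + shift
                for x in inputs]

    print(batch_norm_1d([1.0, 2.0, 3.0]))  # ~[-1.0, 0.0, 1.0]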
@@ -354,15 +338,12 @@ def get_batch(examples, batch_size=1):
         yield examples[i: i + batch_size]


-def NeuralNetLearner(dataset, hidden_layer_sizes=None, learning_rate=0.01, epochs=100,
-                     optimizer=gradient_descent, batch_size=1, verbose=None):
+def NeuralNetLearner(dataset, hidden_layer_sizes, l_rate=0.01, epochs=1000, batch_size=1,
+                     optimizer=stochastic_gradient_descent, verbose=None):
     """
     Simple dense multilayer neural network.
     :param hidden_layer_sizes: size of hidden layers in the form of a list
     """
-
-    if hidden_layer_sizes is None:
-        hidden_layer_sizes = [4]
     input_size = len(dataset.inputs)
     output_size = len(dataset.values[dataset.target])

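get_batch, at the top of this hunk, yields consecutive batch_size slices of the examples. A standalone sketch of the assumed generator:

    def get_batch(examples, batch_size=1):
        for i in range(0, len(examples), batch_size):
            yield examples[i:i + batch_size]

    print(list(get_batch([1, 2, 3, 4, 5], batch_size=2)))  # [[1, 2], [3, 4], [5]]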
@@ -376,7 +357,7 @@ def NeuralNetLearner(dataset, hidden_layer_sizes=None, learning_rate=0.01, epoch
     raw_net.append(DenseLayer(hidden_input_size, output_size))

     # update parameters of the network
-    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=learning_rate,
+    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=l_rate,
                             batch_size=batch_size, verbose=verbose)

     def predict(example):
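A usage sketch of the new signature, kept entirely in comments because it assumes this file is aima-python's deep_learning4e.py and that DataSet comes from its companion learning module (both assumptions):

    # from learning4e import DataSet               # assumed companion module
    # from deep_learning4e import NeuralNetLearner
    #
    # iris = DataSet(name='iris')                  # hypothetical example dataset
    # nn = NeuralNetLearner(iris, hidden_layer_sizes=[4], l_rate=0.15, epochs=100)
    # print(nn([5.1, 3.5, 1.4, 0.2]))              # predicted class label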
@@ -395,7 +376,8 @@ def predict(example):
     return predict


-def PerceptronLearner(dataset, learning_rate=0.01, epochs=100, optimizer=gradient_descent, batch_size=1, verbose=None):
+def PerceptronLearner(dataset, l_rate=0.01, epochs=1000, batch_size=1,
+                      optimizer=stochastic_gradient_descent, verbose=None):
     """
     Simple perceptron neural network.
     """
@@ -406,7 +388,7 @@ def PerceptronLearner(dataset, learning_rate=0.01, epochs=100, optimizer=gradien
     raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]

     # update the network
-    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=learning_rate,
+    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=l_rate,
                             batch_size=batch_size, verbose=verbose)

     def predict(example):