1
1
import numpy as np
2
2
import z_helper as h
3
+ import time
4
+
3
5
4
6
class NeuralNetwork :
5
7
@@ -8,49 +10,44 @@ def __init__(self, layer_sizes, layer_activations, learning_rate=0.1, low=-2, hi
8
10
assert len (layer_sizes )- 1 == len (layer_activations )
9
11
10
12
# Initialize weights between every neuron in all adjacent layers.
11
- self .weights = [h . random_np (low , high , (layer_sizes [i - 1 ], layer_sizes [i ])) for i in range (1 , len (layer_sizes ))]
13
+ self .weights = [np . random . uniform (low , high , (layer_sizes [i - 1 ], layer_sizes [i ])) for i in range (1 , len (layer_sizes ))]
12
14
# Initialize biases for every neuron in all layers
13
- self .biases = np .array ([h . random_np (low , high , layer_sizes [i ]). reshape ( - 1 , 1 ) for i in range (1 , len (layer_sizes ))])
15
+ self .biases = np .array ([np . random . uniform (low , high , ( layer_sizes [i ], 1 ) ) for i in range (1 , len (layer_sizes ))])
14
16
# Initialize empty list of output of every neuron in all layers.
15
- self .layer_outputs = [np .zeros (layer_sizes [i ]). reshape ( - 1 , 1 ) for i in range (len (layer_sizes ))]
17
+ self .layer_outputs = [np .zeros (( layer_sizes [i ], 1 ) ) for i in range (len (layer_sizes ))]
16
18
17
19
self .layer_activations = layer_activations
18
20
self .layer_sizes = layer_sizes
19
21
self .learning_rate = learning_rate
20
22
21
23
def calculate_output(self, input_data):
    """Feed *input_data* forward through the network and return the final output.

    Every layer's activation is cached in ``self.layer_outputs`` so a
    later backward pass (``train``) can reuse them.

    NOTE(review): assumes *input_data* is already a column vector whose
    length matches ``layer_sizes[0]`` — confirm with callers.
    """
    assert len(input_data) == self.layer_sizes[0]

    activation = input_data
    self.layer_outputs[0] = activation

    # Layer by layer: affine transform, then the layer's activation
    # function. The second argument False selects the plain function
    # (True would request its derivative).
    for layer, (weight, bias) in enumerate(zip(self.weights, self.biases)):
        activation = self.layer_activations[layer](np.dot(weight.T, activation) + bias, False)
        self.layer_outputs[layer + 1] = activation

    return activation
def train(self, input_data, desired_output_data):
    """Run one stochastic-gradient step for a single training example.

    Performs a forward pass (which caches per-layer outputs), then
    backpropagates the error and nudges each weight matrix and bias
    vector by ``learning_rate`` times its gradient contribution.
    """
    assert len(input_data) == self.layer_sizes[0]
    assert len(desired_output_data) == self.layer_sizes[-1]

    # Forward pass fills self.layer_outputs for use below.
    self.calculate_output(input_data)

    # Output layer: delta = (target - output) * f'(output).
    # NOTE(review): the activation is invoked with True to request its
    # derivative, evaluated on the already-activated output — confirm
    # that convention in the activation helpers.
    delta = (desired_output_data - self.layer_outputs[-1]) * self.layer_activations[-1](self.layer_outputs[-1], True)
    self.weights[-1] += self.learning_rate * self.layer_outputs[-2] * delta.T
    self.biases[-1] += self.learning_rate * delta

    # Hidden layers, walking backwards. Note the delta for layer `layer`
    # is propagated through weights[layer + 1] *after* that matrix has
    # already been updated above — preserved from the original ordering.
    for layer in range(len(self.weights) - 2, -1, -1):
        delta = np.dot(self.weights[layer + 1], delta) * self.layer_activations[layer](self.layer_outputs[layer + 1], True)
        self.weights[layer] += self.learning_rate * self.layer_outputs[layer] * delta.T
        self.biases[layer] += self.learning_rate * delta
def calculate_SSE(self, input_data, desired_output_data):
    """Return the sum of squared errors of the network on one example.

    Runs a forward pass on *input_data* and accumulates the squared
    element-wise difference from *desired_output_data*.
    """
    assert len(input_data) == self.layer_sizes[0]
    assert len(desired_output_data) == self.layer_sizes[-1]
    residual = desired_output_data - self.calculate_output(input_data)
    return np.sum(residual ** 2)
np.set_printoptions(linewidth=200)

# Load the dataset once, up front; targets become one-hot column vectors.
data_input = h.import_from_csv("data/features.txt", float)
data_output = h.import_from_csv("data/targets.txt", int)
data_output = np.array([h.class_to_array(np.amax(data_output), x) for x in data_output])

# Reshape every sample into a column vector: (n_samples, features, 1).
data_input = data_input.reshape((len(data_input), -1, 1))
data_output = data_output.reshape((len(data_input), -1, 1))

# Repeat the whole experiment a few times with fresh random seeds.
for run in range(4):
    random_seed = np.random.randint(10, 1010)
    np.random.seed(random_seed)

    train_input, validate_input, test_input = h.kfold(4, data_input, random_seed)
    train_output, validate_output, test_output = h.kfold(4, data_output, random_seed)

    nn = NeuralNetwork(layer_sizes=[10, 15, 7], layer_activations=[h.sigmoid, h.sigmoid])

    # Train until the validation MSE stops improving (early stopping).
    previous_mse = 1
    current_mse = 0
    epochs = 0
    begin_time = time.time_ns()
    while current_mse < previous_mse:
        epochs += 1
        previous_mse = h.calculate_MSE(nn, validate_input, validate_output)
        for sample in range(len(train_input)):
            nn.train(train_input[sample], train_output[sample])
        current_mse = h.calculate_MSE(nn, validate_input, validate_output)
    end_time = time.time_ns()

    train_mse = h.calculate_MSE(nn, train_input, train_output)
    test_mse = h.calculate_MSE(nn, test_input, test_output)
    print("Seed:", random_seed, " Epochs:", epochs, "Time:", (end_time - begin_time) / 1e9, " Tr:", train_mse, "V:", current_mse, "T:", test_mse)
0 commit comments