@@ -1,45 +1,49 @@
-#Credit: https://github.com/karpathy/micrograd/blob/master/demo.ipynb
-#cell
+# Credit: https://github.com/karpathy/micrograd/blob/master/demo.ipynb
+# cell
+import datetime
 import random
-import numpy as np
+
 import matplotlib.pyplot as plt
-import datetime
+import numpy as np
 
-#cell
+# cell
 from micrograd.engine import Value
-from micrograd.nn import Neuron, Layer, MLP
+from micrograd.nn import MLP, Layer, Neuron
 
 print_statements = []
 
-def run_all_micrograd_demo(*args,**kwargs):
+
+def run_all_micrograd_demo(*args, **kwargs):
     result = micrograd_demo()
-    pyscript.write('micrograd-run-all-fig2-div', result)
+    pyscript.write("micrograd-run-all-fig2-div", result)
+
 
 def print_div(o):
     o = str(o)
-    print_statements.append(o + '\n<br>')
-    pyscript.write('micrograd-run-all-print-div', ''.join(print_statements))
+    print_statements.append(o + "\n<br>")
+    pyscript.write("micrograd-run-all-print-div", "".join(print_statements))
 
-#All code is wrapped in this run_all function so it optionally executed (called)
-#from pyscript when a button is pressed.
-def micrograd_demo(*args,**kwargs):
+
+# All code is wrapped in this run_all function so it is optionally executed (called)
+# from pyscript when a button is pressed.
+def micrograd_demo(*args, **kwargs):
     """
     Runs the micrograd demo.
 
     *args and **kwargs do nothing and are only there to capture any parameters passed
     from pyscript when this function is called when a button is clicked.
     """
-
-    #cell
+
+    # cell
     start = datetime.datetime.now()
-    print_div('Starting...')
+    print_div("Starting...")
 
-    #cell
+    # cell
     np.random.seed(1337)
     random.seed(1337)
 
-    # cell
-    # An adaptation of sklearn's make_moons function https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
+    # cell
+    # An adaptation of sklearn's make_moons function https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
     def make_moons(n_samples=100, noise=None):
         n_samples_out, n_samples_in = n_samples, n_samples
 
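The hunk below reflows make_moons, the inline adaptation of sklearn's dataset generator, into multi-line calls. As a reminder of what it returns (the shapes follow directly from the code; this snippet is an illustrative check, not part of the commit):

    X, y = make_moons(n_samples=100, noise=0.1)
    assert X.shape == (200, 2)  # 100 outer-arc + 100 inner-arc points, (x, y) each
    assert set(y) == {0, 1}     # binary labels; remapped to -1/+1 further down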
@@ -48,26 +52,38 @@ def make_moons(n_samples=100, noise=None):
         inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
         inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5
 
-        X = np.vstack([np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]).T
-        y = np.hstack([np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)])
-        if noise is not None: X += np.random.normal(loc=0.0, scale=noise, size=X.shape)
+        X = np.vstack(
+            [
+                np.append(outer_circ_x, inner_circ_x),
+                np.append(outer_circ_y, inner_circ_y),
+            ]
+        ).T
+        y = np.hstack(
+            [
+                np.zeros(n_samples_out, dtype=np.intp),
+                np.ones(n_samples_in, dtype=np.intp),
+            ]
+        )
+        if noise is not None:
+            X += np.random.normal(loc=0.0, scale=noise, size=X.shape)
         return X, y
+
     X, y = make_moons(n_samples=100, noise=0.1)
 
-    #cell
-    y = y*2 - 1 # make y be -1 or 1
+    # cell
+    y = y * 2 - 1  # make y be -1 or 1
     # visualize in 2D
-    plt.figure(figsize=(5,5))
-    plt.scatter(X[:,0], X[:,1], c=y, s=20, cmap='jet')
+    plt.figure(figsize=(5, 5))
+    plt.scatter(X[:, 0], X[:, 1], c=y, s=20, cmap="jet")
     plt
-    pyscript.write('micrograd-run-all-fig1-div', plt)
+    pyscript.write("micrograd-run-all-fig1-div", plt)
 
-    #cell
-    model = MLP(2, [16, 16, 1]) # 2-layer neural network
+    # cell
+    model = MLP(2, [16, 16, 1])  # 2-layer neural network
     print_div(model)
     print_div(("number of parameters", len(model.parameters())))
 
-    #cell
+    # cell
     # loss function
     def loss(batch_size=None):
         # inline DataLoader :)
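The next hunk only reformats the loss function, but the code it touches is the heart of the demo: an SVM "max-margin" (hinge) loss, max(0, 1 - y*score), averaged over the batch, plus an L2 penalty alpha * sum(p*p) on the parameters. A minimal NumPy sketch of the same arithmetic, with illustrative values (not part of the commit):

    import numpy as np

    scores = np.array([2.0, 0.5])  # raw model outputs
    ys = np.array([+1, -1])        # targets in {-1, +1}
    # hinge loss is zero once a point is on the correct side with margin >= 1
    losses = np.maximum(0.0, 1.0 - ys * scores)  # -> [0.0, 1.5]
    data_loss = losses.mean()                    # -> 0.75
    # accuracy compares the signs of target and score, as in the list
    # comprehension below: (yi > 0) == (scorei.data > 0)
    acc = np.mean((ys > 0) == (scores > 0))      # -> 0.5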
@@ -77,51 +93,53 @@ def loss(batch_size=None):
         ri = np.random.permutation(X.shape[0])[:batch_size]
         Xb, yb = X[ri], y[ri]
         inputs = [list(map(Value, xrow)) for xrow in Xb]
-
+
         # forward the model to get scores
         scores = list(map(model, inputs))
-
+
         # svm "max-margin" loss
-        losses = [(1 + -yi*scorei).relu() for yi, scorei in zip(yb, scores)]
+        losses = [(1 + -yi * scorei).relu() for yi, scorei in zip(yb, scores)]
         data_loss = sum(losses) * (1.0 / len(losses))
         # L2 regularization
         alpha = 1e-4
-        reg_loss = alpha * sum((p*p for p in model.parameters()))
+        reg_loss = alpha * sum((p * p for p in model.parameters()))
         total_loss = data_loss + reg_loss
-
+
         # also get accuracy
-        accuracy = [((yi).__gt__(0)) == ((scorei.data).__gt__(0)) for yi, scorei in zip(yb, scores)]
+        accuracy = [
+            ((yi).__gt__(0)) == ((scorei.data).__gt__(0))
+            for yi, scorei in zip(yb, scores)
+        ]
         return total_loss, sum(accuracy) / len(accuracy)
 
     total_loss, acc = loss()
     print((total_loss, acc))
 
-    #cell
+    # cell
     # optimization
-    for k in range(20): # was 100
-
+    for k in range(20):  # was 100
+
         # forward
         total_loss, acc = loss()
-
+
         # backward
         model.zero_grad()
         total_loss.backward()
-
+
         # update (sgd)
-        learning_rate = 1.0 - 0.9*k/100
+        learning_rate = 1.0 - 0.9 * k / 100
         for p in model.parameters():
             p.data -= learning_rate * p.grad
-
+
         if k % 1 == 0:
             # print(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")
             print_div(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")
 
-    #cell
+    # cell
     h = 0.25
     x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
     y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
-    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
-                         np.arange(y_min, y_max, h))
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
     Xmesh = np.c_[xx.ravel(), yy.ravel()]
     inputs = [list(map(Value, xrow)) for xrow in Xmesh]
     scores = list(map(model, inputs))
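The diff is truncated here, partway through the decision-boundary cell. For context, Karpathy's original demo.ipynb (credited at the top of this file) finishes this section roughly as follows; in this port the resulting figure is presumably what run_all_micrograd_demo writes to 'micrograd-run-all-fig2-div':

    # threshold each Value's score to classify every point on the mesh grid
    Z = np.array([s.data > 0 for s in scores])
    Z = Z.reshape(xx.shape)

    fig = plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())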