Use black profile in isort to prevent conflicts (#244) · wolfmib/pyscript_tutorial@88a0dd3 · GitHub

Commit 88a0dd3

Use black profile in isort to prevent conflicts (pyscript#244)
1 parent b5b150a · commit 88a0dd3

File tree: 2 files changed (+65, −46 lines)

.pre-commit-config.yaml (1 addition, 0 deletions)

@@ -9,3 +9,4 @@ repos:
   hooks:
   - id: isort
     name: isort (python)
+    args: [--profile, black]
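Context for this change: isort's default multi-line import style differs from black's, so running both as hooks can leave each tool rewriting the other's output on every commit. The black profile makes isort emit black-compatible formatting (vertical hanging indent, trailing commas, 88-character lines), and `pre-commit run --all-files` then settles the repo in one pass. A minimal sketch of the difference, using an illustrative long import:

# Default isort wraps long from-imports in "grid" style:
#     from collections.abc import (AsyncGenerator, AsyncIterable, Awaitable,
#                                  Coroutine, Generator, Iterable)
# With --profile black, isort uses the vertical hanging indent black expects:
from collections.abc import (
    AsyncGenerator,
    AsyncIterable,
    Awaitable,
    Coroutine,
    Generator,
    Iterable,
)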

pyscriptjs/examples/micrograd_ai.py (64 additions, 46 deletions)

@@ -1,45 +1,49 @@
-#Credit: https://github.com/karpathy/micrograd/blob/master/demo.ipynb
-#cell
+# Credit: https://github.com/karpathy/micrograd/blob/master/demo.ipynb
+# cell
+import datetime
 import random
-import numpy as np
+
 import matplotlib.pyplot as plt
-import datetime
+import numpy as np
 
-#cell
+# cell
 from micrograd.engine import Value
-from micrograd.nn import Neuron, Layer, MLP
+from micrograd.nn import MLP, Layer, Neuron
 
 print_statements = []
 
-def run_all_micrograd_demo(*args,**kwargs):
+
+def run_all_micrograd_demo(*args, **kwargs):
     result = micrograd_demo()
-    pyscript.write('micrograd-run-all-fig2-div', result)
+    pyscript.write("micrograd-run-all-fig2-div", result)
+
 
 def print_div(o):
     o = str(o)
-    print_statements.append(o + ' \n<br>')
-    pyscript.write('micrograd-run-all-print-div', ''.join(print_statements))
+    print_statements.append(o + " \n<br>")
+    pyscript.write("micrograd-run-all-print-div", "".join(print_statements))
 
-#All code is wrapped in this run_all function so it optionally executed (called)
-#from pyscript when a button is pressed.
-def micrograd_demo(*args,**kwargs):
+
+# All code is wrapped in this run_all function so it optionally executed (called)
+# from pyscript when a button is pressed.
+def micrograd_demo(*args, **kwargs):
     """
     Runs the micrograd demo.
 
     *args and **kwargs do nothing and are only there to capture any parameters passed
     from pyscript when this function is called when a button is clicked.
     """
-
-    #cell
+
+    # cell
     start = datetime.datetime.now()
-    print_div('Starting...')
+    print_div("Starting...")
 
-    #cell
+    # cell
     np.random.seed(1337)
     random.seed(1337)
 
-    #cell
-    #An adaptation of sklearn's make_moons function https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
+    # cell
+    # An adaptation of sklearn's make_moons function https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
     def make_moons(n_samples=100, noise=None):
         n_samples_out, n_samples_in = n_samples, n_samples
 
@@ -48,26 +52,38 @@ def make_moons(n_samples=100, noise=None):
         inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
         inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5
 
-        X = np.vstack([np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]).T
-        y = np.hstack([np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)])
-        if noise is not None: X += np.random.normal(loc=0.0, scale=noise, size=X.shape)
+        X = np.vstack(
+            [
+                np.append(outer_circ_x, inner_circ_x),
+                np.append(outer_circ_y, inner_circ_y),
+            ]
+        ).T
+        y = np.hstack(
+            [
+                np.zeros(n_samples_out, dtype=np.intp),
+                np.ones(n_samples_in, dtype=np.intp),
+            ]
+        )
+        if noise is not None:
+            X += np.random.normal(loc=0.0, scale=noise, size=X.shape)
         return X, y
+
     X, y = make_moons(n_samples=100, noise=0.1)
 
-    #cell
-    y = y*2 - 1 # make y be -1 or 1
+    # cell
+    y = y * 2 - 1  # make y be -1 or 1
     # visualize in 2D
-    plt.figure(figsize=(5,5))
-    plt.scatter(X[:,0], X[:,1], c=y, s=20, cmap='jet')
+    plt.figure(figsize=(5, 5))
+    plt.scatter(X[:, 0], X[:, 1], c=y, s=20, cmap="jet")
     plt
-    pyscript.write('micrograd-run-all-fig1-div', plt)
+    pyscript.write("micrograd-run-all-fig1-div", plt)
 
-    #cell
-    model = MLP(2, [16, 16, 1]) # 2-layer neural network
+    # cell
+    model = MLP(2, [16, 16, 1])  # 2-layer neural network
     print_div(model)
    print_div(("number of parameters", len(model.parameters())))
 
-    #cell
+    # cell
     # loss function
     def loss(batch_size=None):
         # inline DataLoader :)
@@ -77,51 +93,53 @@ def loss(batch_size=None):
             ri = np.random.permutation(X.shape[0])[:batch_size]
             Xb, yb = X[ri], y[ri]
         inputs = [list(map(Value, xrow)) for xrow in Xb]
-
+
         # forward the model to get scores
         scores = list(map(model, inputs))
-
+
         # svm "max-margin" loss
-        losses = [(1 + -yi*scorei).relu() for yi, scorei in zip(yb, scores)]
+        losses = [(1 + -yi * scorei).relu() for yi, scorei in zip(yb, scores)]
         data_loss = sum(losses) * (1.0 / len(losses))
         # L2 regularization
         alpha = 1e-4
-        reg_loss = alpha * sum((p*p for p in model.parameters()))
+        reg_loss = alpha * sum((p * p for p in model.parameters()))
         total_loss = data_loss + reg_loss
-
+
         # also get accuracy
-        accuracy = [((yi).__gt__(0)) == ((scorei.data).__gt__(0)) for yi, scorei in zip(yb, scores)]
+        accuracy = [
+            ((yi).__gt__(0)) == ((scorei.data).__gt__(0))
+            for yi, scorei in zip(yb, scores)
+        ]
         return total_loss, sum(accuracy) / len(accuracy)
 
     total_loss, acc = loss()
     print((total_loss, acc))
 
-    #cell
+    # cell
     # optimization
-    for k in range(20): #was 100
-
+    for k in range(20):  # was 100
+
         # forward
         total_loss, acc = loss()
-
+
         # backward
         model.zero_grad()
         total_loss.backward()
-
+
         # update (sgd)
-        learning_rate = 1.0 - 0.9*k/100
+        learning_rate = 1.0 - 0.9 * k / 100
         for p in model.parameters():
             p.data -= learning_rate * p.grad
-
+
         if k % 1 == 0:
            # print(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")
            print_div(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")
 
-    #cell
+    # cell
     h = 0.25
     x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
     y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
-    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
-                         np.arange(y_min, y_max, h))
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
     Xmesh = np.c_[xx.ravel(), yy.ravel()]
     inputs = [list(map(Value, xrow)) for xrow in Xmesh]
     scores = list(map(model, inputs))
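A note on the reformatted loss hunk: `(1 + -yi * scorei).relu()` is the SVM max-margin (hinge) loss, relu(1 - y * score), which is zero for any sample classified correctly with a margin of at least 1. A minimal sketch on plain floats (no micrograd Value objects), with made-up scores:

# Hinge loss per sample; ys are -1/+1 labels, scores are raw model outputs.
def hinge_losses(ys, scores):
    return [max(0.0, 1.0 - y * s) for y, s in zip(ys, scores)]

ys = [1, -1, 1]
scores = [2.3, -0.4, -0.1]  # hypothetical raw outputs
print(hinge_losses(ys, scores))  # [0.0, 0.6, 1.1]: only under-margin samples contribute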

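Similarly, the `learning_rate = 1.0 - 0.9 * k / 100` line in the optimization hunk is a linear decay from 1.0 at k=0 toward 0.1 at k=100; since the loop was shortened to 20 steps for the browser, the rate only falls to 0.829 here. A quick check:

# Linear decay: 1.0 at k=0, 0.829 at k=19 (last step of range(20)), 0.1 at k=100.
for k in (0, 19, 100):
    print(k, round(1.0 - 0.9 * k / 100, 3))  # 1.0, 0.829, 0.1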