add file · sunxvming/python-learn@d332ce7 · GitHub

Commit d332ce7

add file
1 parent 1cee8e0 commit d332ce7

File tree

110 files changed: +107786 −0 lines changed


README.md

Lines changed: 10 additions & 0 deletions
@@ -6,3 +6,13 @@ Sample programs for various small Python utilities
 * Mail.py: sends email
 * spider: a crawler that scrapes Douban
 * markdown2blog: converts Markdown files to HTML
+* opencv
+  * cv_test.py: reads a video stream and plays it back
+  * video_cap_to_img.py: reads a video stream frame by frame and saves each frame as an image
+* net
+  * wireshark
+    * wireshark_extract.py: reads packets captured by Wireshark and extracts information from them
+* make-your-own-neural-network: companion code for "Make Your Own Neural Network", including a simple neural-network framework
+* deep-learning-from-scratch: companion code for "Deep Learning from Scratch: Theory and Implementation in Python"
+* gan: companion code for "Make Your First GAN with PyTorch"
+* md-to-toc.py: a tool that generates a table of contents for Markdown files
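
As context for the opencv entries above: a minimal sketch of the kind of frame-extraction loop that video_cap_to_img.py describes, assuming a local input file and output directory (both names are hypothetical, not taken from this commit):

import os
import cv2  # OpenCV, installable as opencv-python

video_path = "input.mp4"   # hypothetical input video
out_dir = "frames"         # hypothetical output directory
os.makedirs(out_dir, exist_ok=True)

cap = cv2.VideoCapture(video_path)
frame_idx = 0
while cap.isOpened():
    ret, frame = cap.read()  # ret becomes False at the end of the stream
    if not ret:
        break
    cv2.imwrite(os.path.join(out_dir, "frame_%05d.jpg" % frame_idx), frame)
    frame_idx += 1
cap.release()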

deep-learning-from-scratch/.gitignore

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+.DS_Store
+*~
+*.gz
+*.pyc
+__pycache__/
Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
+# coding: utf-8
+import sys, os
+sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
+import numpy as np
+import matplotlib.pyplot as plt
+from dataset.mnist import load_mnist
+from two_layer_net import TwoLayerNet
+
+# load the data
+(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
+
+network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
+
+iters_num = 1000  # set a suitable number of iterations
+train_size = x_train.shape[0]
+batch_size = 100
+learning_rate = 0.1
+
+train_loss_list = []
+train_acc_list = []
+test_acc_list = []
+
+iter_per_epoch = max(train_size / batch_size, 1)
+
+for i in range(iters_num):
+    batch_mask = np.random.choice(train_size, batch_size)
+    x_batch = x_train[batch_mask]
+    t_batch = t_train[batch_mask]
+
+    # compute the gradient
+    #grad = network.numerical_gradient(x_batch, t_batch)
+    grad = network.gradient(x_batch, t_batch)
+
+    # update the parameters
+    for key in ('W1', 'b1', 'W2', 'b2'):
+        network.params[key] -= learning_rate * grad[key]
+
+    loss = network.loss(x_batch, t_batch)
+    train_loss_list.append(loss)
+
+    if i % iter_per_epoch == 0:
+        train_acc = network.accuracy(x_train, t_train)
+        test_acc = network.accuracy(x_test, t_test)
+        train_acc_list.append(train_acc)
+        test_acc_list.append(test_acc)
+        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
+
+# plot the accuracy curves
+markers = {'train': 'o', 'test': 's'}
+x = np.arange(len(train_acc_list))
+plt.plot(x, train_acc_list, label='train acc')
+plt.plot(x, test_acc_list, label='test acc', linestyle='--')
+plt.xlabel("epochs")
+plt.ylabel("accuracy")
+plt.ylim(0, 1.0)
+plt.legend(loc='lower right')
+plt.show()
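
The update loop above is plain stochastic gradient descent: each parameter is nudged against its mini-batch gradient,

W \leftarrow W - \eta \frac{\partial L}{\partial W}, \qquad \eta = \text{learning\_rate} = 0.1,

where L is the cross-entropy loss on the current 100-sample batch.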
Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+# coding: utf-8
+import sys, os
+sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
+from common.functions import *
+from common.gradient import numerical_gradient
+
+
+class TwoLayerNet:
+
+    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
+        # initialize the weights
+        self.params = {}
+        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
+        self.params['b1'] = np.zeros(hidden_size)
+        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
+        self.params['b2'] = np.zeros(output_size)
+
+    def predict(self, x):
+        W1, W2 = self.params['W1'], self.params['W2']
+        b1, b2 = self.params['b1'], self.params['b2']
+
+        a1 = np.dot(x, W1) + b1
+        z1 = sigmoid(a1)
+        a2 = np.dot(z1, W2) + b2
+        y = softmax(a2)
+
+        return y
+
+    # x: input data, t: teacher labels
+    def loss(self, x, t):
+        y = self.predict(x)
+
+        return cross_entropy_error(y, t)
+
+    def accuracy(self, x, t):
+        y = self.predict(x)
+        y = np.argmax(y, axis=1)
+        t = np.argmax(t, axis=1)
+
+        accuracy = np.sum(y == t) / float(x.shape[0])
+        return accuracy
+
+    # x: input data, t: teacher labels
+    def numerical_gradient(self, x, t):
+        loss_W = lambda W: self.loss(x, t)
+
+        grads = {}
+        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
+        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
+        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
+        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
+
+        return grads
+
+    def gradient(self, x, t):
+        W1, W2 = self.params['W1'], self.params['W2']
+        b1, b2 = self.params['b1'], self.params['b2']
+        grads = {}
+
+        batch_num = x.shape[0]
+
+        # forward
+        a1 = np.dot(x, W1) + b1
+        z1 = sigmoid(a1)
+        a2 = np.dot(z1, W2) + b2
+        y = softmax(a2)
+
+        # backward
+        dy = (y - t) / batch_num
+        grads['W2'] = np.dot(z1.T, dy)
+        grads['b2'] = np.sum(dy, axis=0)
+
+        da1 = np.dot(dy, W2.T)
+        dz1 = sigmoid_grad(a1) * da1
+        grads['W1'] = np.dot(x.T, dz1)
+        grads['b1'] = np.sum(dz1, axis=0)
+
+        return grads
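
The first line of the backward pass, dy = (y - t) / batch_num, is the standard softmax-plus-cross-entropy derivative: for a batch of N samples with softmax outputs y and one-hot labels t, the batch-averaged loss satisfies

\frac{\partial L}{\partial a_2} = \frac{y - t}{N}.

The remaining lines apply the chain rule back through the second affine layer, the sigmoid (whose derivative sigmoid_grad(a1) equals z1 * (1 - z1)), and the first affine layer.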
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+# coding: utf-8
+import sys, os
+sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
+import numpy as np
+from dataset.mnist import load_mnist
+from two_layer_net import TwoLayerNet
+
+# load the data
+(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
+
+network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
+
+x_batch = x_train[:3]
+t_batch = t_train[:3]
+
+grad_numerical = network.numerical_gradient(x_batch, t_batch)
+grad_backprop = network.gradient(x_batch, t_batch)
+
+for key in grad_numerical.keys():
+    diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
+    print(key + ":" + str(diff))
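
The diff printed for each parameter is a mean absolute difference between the backprop and numerical gradients,

\text{diff} = \frac{1}{n} \sum_{i=1}^{n} \left| g^{\mathrm{bp}}_i - g^{\mathrm{num}}_i \right|.

When the backprop implementation is correct this is typically tiny (on the order of 1e-10 with float64); values many orders of magnitude larger usually indicate a bug.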
Lines changed: 46 additions & 0 deletions
@@ -0,0 +1,46 @@
+# coding: utf-8
+import sys, os
+sys.path.append(os.pardir)
+
+import numpy as np
+from dataset.mnist import load_mnist
+from two_layer_net import TwoLayerNet
+
+# load the data
+(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
+
+network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
+
+iters_num = 10000
+train_size = x_train.shape[0]
+batch_size = 100
+learning_rate = 0.1
+
+train_loss_list = []
+train_acc_list = []
+test_acc_list = []
+
+iter_per_epoch = max(train_size / batch_size, 1)
+
+for i in range(iters_num):
+    batch_mask = np.random.choice(train_size, batch_size)
+    x_batch = x_train[batch_mask]
+    t_batch = t_train[batch_mask]
+
+    # gradient
+    #grad = network.numerical_gradient(x_batch, t_batch)
+    grad = network.gradient(x_batch, t_batch)
+
+    # update
+    for key in ('W1', 'b1', 'W2', 'b2'):
+        network.params[key] -= learning_rate * grad[key]
+
+    loss = network.loss(x_batch, t_batch)
+    train_loss_list.append(loss)
+
+    if i % iter_per_epoch == 0:
+        train_acc = network.accuracy(x_train, t_train)
+        test_acc = network.accuracy(x_test, t_test)
+        train_acc_list.append(train_acc)
+        test_acc_list.append(test_acc)
+        print(train_acc, test_acc)
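
A quick sanity check on the schedule: MNIST has 60,000 training images, so iter_per_epoch = 60000 / 100 = 600, meaning accuracy is evaluated roughly once per epoch and the 10,000 iterations cover about 10000 / 600 ≈ 16.7 epochs.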
Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
+# coding: utf-8
+import sys, os
+sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
+import numpy as np
+from common.layers import *
+from common.gradient import numerical_gradient
+from collections import OrderedDict
+
+
+class TwoLayerNet:
+
+    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
+        # initialize the weights
+        self.params = {}
+        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
+        self.params['b1'] = np.zeros(hidden_size)
+        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
+        self.params['b2'] = np.zeros(output_size)
+
+        # create the layers
+        self.layers = OrderedDict()  # OrderedDict remembers the order in which entries were added
+        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
+        self.layers['Relu1'] = Relu()
+        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
+
+        self.lastLayer = SoftmaxWithLoss()
+
+    def predict(self, x):
+        for layer in self.layers.values():
+            x = layer.forward(x)
+
+        return x
+
+    # x: input data, t: teacher labels
+    def loss(self, x, t):
+        y = self.predict(x)
+        return self.lastLayer.forward(y, t)
+
+    def accuracy(self, x, t):
+        y = self.predict(x)
+        y = np.argmax(y, axis=1)
+        if t.ndim != 1: t = np.argmax(t, axis=1)
+
+        accuracy = np.sum(y == t) / float(x.shape[0])
+        return accuracy
+
+    # x: input data, t: teacher labels
+    def numerical_gradient(self, x, t):
+        loss_W = lambda W: self.loss(x, t)
+
+        grads = {}
+        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
+        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
+        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
+        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
+
+        return grads
+
+    def gradient(self, x, t):
+        # forward
+        self.loss(x, t)
+
+        # backward
+        dout = 1
+        dout = self.lastLayer.backward(dout)
+
+        layers = list(self.layers.values())
+        layers.reverse()
+        for layer in layers:
+            dout = layer.backward(dout)
+
+        # collect the gradients stored by each layer
+        grads = {}
+        grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
+        grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
+
+        return grads
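
gradient() works because each layer caches its parameter gradients (dW, db) during backward(). For reference, a minimal sketch of what an Affine layer of this style typically looks like; this is an assumption about common.layers, not code from this commit:

import numpy as np

class Affine:
    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None   # cached input from the forward pass
        self.dW = None  # gradient w.r.t. W, read by TwoLayerNet.gradient()
        self.db = None  # gradient w.r.t. b

    def forward(self, x):
        self.x = x
        return np.dot(x, self.W) + self.b

    def backward(self, dout):
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        return np.dot(dout, self.W.T)  # upstream gradient for the previous layer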
Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+# coding: utf-8
+import sys, os
+sys.path.append(os.pardir)  # setting to allow importing files from the parent directory
+import numpy as np
+from dataset.mnist import load_mnist
+from common.multi_layer_net_extend import MultiLayerNetExtend
+
+# load the data
+(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
+
+network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100], output_size=10,
+                              use_batchnorm=True)
+
+x_batch = x_train[:1]
+t_batch = t_train[:1]
+
+grad_backprop = network.gradient(x_batch, t_batch)
+grad_numerical = network.numerical_gradient(x_batch, t_batch)
+
+
+for key in grad_numerical.keys():
+    diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
+    print(key + ":" + str(diff))
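
Note the tiny batch (x_train[:1]): central-difference numerical gradients cost two loss evaluations per scalar parameter, and this [100, 100] network has roughly 90,000 weights and biases (784*100 + 100 + 100*100 + 100 + 100*10 + 10 = 89,610, plus the batch-norm scale/shift parameters), so even a single sample already means on the order of 180,000 forward passes.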
