L03: Perceptrons

An implementation of Frank Rosenblatt's classic Perceptron for binary classification (here: 0/1 class labels) in PyTorch.
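For reference, the prediction and update rule that the train() method below implements is the classic perceptron learning rule (with labels y in {0, 1}):

y_hat = 1 if w^T x + b > 0, else 0
w := w + (y - y_hat) * x
b := b + (y - y_hat)

Misclassified points nudge the weights toward (or away from) the input; correctly classified points leave the parameters unchanged.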

Imports
import numpy as np
import matplotlib.pyplot as plt
import torch
%matplotlib inline

Preparing a toy dataset


##########################
### DATASET
##########################

data = np.genfromtxt('perceptron_toydata.txt', delimiter='\t')

X, y = data[:, :2], data[:, 2]
y = y.astype(int)  # np.int was removed in NumPy 1.24; use the builtin int

print('Class label counts:', np.bincount(y))
print('X.shape:', X.shape)
print('y.shape:', y.shape)
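The toy data file is loaded from disk above. If perceptron_toydata.txt is not at hand, a comparable two-feature, two-class set can be sketched as follows; the Gaussian blob centers are assumptions for illustration, not the original file's values:

# Hypothetical stand-in for perceptron_toydata.txt: two Gaussian blobs,
# 50 samples per class, matching the shapes printed in this notebook
rng = np.random.RandomState(123)
X = np.vstack([rng.randn(50, 2) + [-1.0, -1.0],   # class 0 blob
               rng.randn(50, 2) + [1.0, 1.0]])    # class 1 blob
y = np.concatenate([np.zeros(50, dtype=int), np.ones(50, dtype=int)])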

# Shuffling & train/test split
shuffle_idx = np.arange(y.shape[0])
shuffle_rng = np.random.RandomState(123)
shuffle_rng.shuffle(shuffle_idx)
X, y = X[shuffle_idx], y[shuffle_idx]

# X and y are already shuffled, so slice them directly
# (indexing with shuffle_idx again would scramble the rows a second time)
X_train, X_test = X[:70], X[70:]
y_train, y_test = y[:70], y[70:]

# Normalize (mean zero, unit variance)
mu, sigma = X_train.mean(axis=0), X_train.std(axis=0)
X_train = (X_train - mu) / sigma
X_test = (X_test - mu) / sigma
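As an optional sanity check, the standardized training features should have roughly zero mean and unit standard deviation per column (the test set only approximately so, since it reuses the training-set mu and sigma):

print('train means:', X_train.mean(axis=0))  # close to [0., 0.]
print('train stds: ', X_train.std(axis=0))   # close to [1., 1.]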

Class label counts: [50 50]
X.shape: (100, 2)
y.shape: (100,)

plt.scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], label='class 0', marker='o')
plt.scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], label='class 1', marker='s')
plt.title('Training set')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.legend()
plt.show()

plt.scatter(X_test[y_test==0, 0], X_test[y_test==0, 1], label='class 0', marker='o')
plt.scatter(X_test[y_test==1, 0], X_test[y_test==1, 1], label='class 1', marker='s')
plt.title('Test set')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.legend()
plt.show()

Defining the Perceptron model


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Perceptron():
    def __init__(self, num_features):
        self.num_features = num_features
        self.weights = torch.zeros(num_features, 1,
                                   dtype=torch.float32, device=device)
        self.bias = torch.zeros(1, dtype=torch.float32, device=device)

        # placeholder vectors so they don't
        # need to be recreated each time;
        # keep them on the same device as the weights
        self.ones = torch.ones(1, device=device)
        self.zeros = torch.zeros(1, device=device)

    def forward(self, x):
        linear = torch.mm(x, self.weights) + self.bias
        predictions = torch.where(linear > 0., self.ones, self.zeros)
        return predictions

    def backward(self, x, y):
        # not a gradient: returns the prediction error (y - y_hat)
        # used by the perceptron update rule
        predictions = self.forward(x)
        errors = y - predictions
        return errors

    def train(self, x, y, epochs):
        for e in range(epochs):
            for i in range(y.shape[0]):
                # use reshape because backward expects a matrix (i.e., 2D tensor)
                errors = self.backward(x[i].reshape(1, self.num_features), y[i]).reshape(-1)
                self.weights += (errors * x[i]).reshape(self.num_features, 1)
                self.bias += errors

    def evaluate(self, x, y):
        predictions = self.forward(x).reshape(-1)
        accuracy = torch.sum(predictions == y).float() / y.shape[0]
        return accuracy
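As a minimal smoke test (the input values below are arbitrary), a freshly constructed model should predict class 0 for any input, since with all-zero weights and bias the condition linear > 0. is never met:

demo = Perceptron(num_features=2)
x_demo = torch.tensor([[0.5, -1.0]], dtype=torch.float32, device=device)
print(demo.forward(x_demo))  # a single 0. prediction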

Training the Perceptron


ppn = Perceptron(num_features=2)

X_train_tensor = torch.tensor(X_train, dtype=torch.float32, device=device)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32, device=device)

ppn.train(X_train_tensor, y_train_tensor, epochs=5)

print('Model parameters:')
print(' Weights: %s' % ppn.weights)
print(' Bias: %s' % ppn.bias)

Model parameters:
Weights: tensor([[1.2734],
[1.3464]])
Bias: tensor([-1.])

Evaluating the model

X_test_tensor = torch.tensor(X_test, dtype=torch.float32, device=device)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32, device=device)

test_acc = ppn.evaluate(X_test_tensor, y_test_tensor)
print('Test set accuracy: %.2f%%' % (test_acc*100))

Test set accuracy: 93.33%
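The plotted boundary follows directly from the trained parameters: the decision boundary is the set of points where the linear score is zero,

w[0]*x0 + w[1]*x1 + b = 0,

so solving for the second feature gives x1 = (-(w[0]*x0) - b) / w[1], which the next cell evaluates at x0 = -2 and x0 = 2 to draw the line.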

##########################
### 2D Decision Boundary
##########################

w, b = ppn.weights, ppn.bias

x0_min = -2
x1_min = (-(w[0] * x0_min) - b[0]) / w[1]

x0_max = 2
x1_max = (-(w[0] * x0_max) - b[0]) / w[1]

fig, ax = plt.subplots(1, 2, sharex=True, figsize=(7, 3))

ax[0].plot([x0_min, x0_max], [x1_min, x1_max])
ax[1].plot([x0_min, x0_max], [x1_min, x1_max])

ax[0].scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], label='class 0', marker='o')
ax[0].scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], label='class 1', marker='s')

ax[1].scatter(X_test[y_test==0, 0], X_test[y_test==0, 1], label='class 0', marker='o')
ax[1].scatter(X_test[y_test==1, 0], X_test[y_test==1, 1], label='class 1', marker='s')

ax[1].legend(loc='upper left')
plt.show()
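Once trained, the same object can classify new points. A short usage sketch (the raw coordinates are made up; new inputs must be standardized with the training-set mu and sigma first):

x_new = np.array([[1.5, 2.0], [-1.0, -2.5]])  # hypothetical raw inputs
x_new = (x_new - mu) / sigma                  # reuse training statistics
x_new_tensor = torch.tensor(x_new, dtype=torch.float32, device=device)
print(ppn.forward(x_new_tensor).reshape(-1))  # 1. = class 1, 0. = class 0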
