ML Lab 9 ANN
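This lab implements a small feedforward neural network (2 inputs, a hidden layer of 4 sigmoid units, 1 sigmoid output) from scratch in NumPy and trains it with backpropagation to learn the XOR function.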

import numpy as np

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Initialize weights
        self.weights_input_hidden = np.random.randn(self.input_size, self.hidden_size)
        self.weights_hidden_output = np.random.randn(self.hidden_size, self.output_size)

        # Initialize the biases
        self.bias_hidden = np.zeros((1, self.hidden_size))
        self.bias_output = np.zeros((1, self.output_size))

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is expected to already be a sigmoid output, so x * (1 - x)
        # equals sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z))
        return x * (1 - x)

    def feedforward(self, X):
        # Input to hidden
        self.hidden_activation = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = self.sigmoid(self.hidden_activation)

        # Hidden to output
        self.output_activation = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.predicted_output = self.sigmoid(self.output_activation)

        return self.predicted_output

    def backward(self, X, y, learning_rate):
        # Compute the output layer error and its delta
        # (the error scaled by the sigmoid slope at the output)
        output_error = y - self.predicted_output
        output_delta = output_error * self.sigmoid_derivative(self.predicted_output)

        # Propagate the error back to the hidden layer
        hidden_error = np.dot(output_delta, self.weights_hidden_output.T)
        hidden_delta = hidden_error * self.sigmoid_derivative(self.hidden_output)

        # Gradient-descent updates for weights and biases
        # (see the update equations after the class definition)
        self.weights_hidden_output += np.dot(self.hidden_output.T, output_delta) * learning_rate
        self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * learning_rate
        self.weights_input_hidden += np.dot(X.T, hidden_delta) * learning_rate
        self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        for epoch in range(epochs):
            output = self.feedforward(X)
            self.backward(X, y, learning_rate)
            if epoch % 4000 == 0:
                # Report the mean-squared-error loss every 4000 epochs
                loss = np.mean(np.square(y - output))
                print(f"Epoch {epoch}, Loss:{loss}")

# XOR inputs and targets
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

nn = NeuralNetwork(input_size=2, hidden_size=4, output_size=1)
nn.train(X, y, epochs=10000, learning_rate=0.1)

Epoch 0, Loss:0.25282086847551766
Epoch 4000, Loss:0.028877220331387954
Epoch 8000, Loss:0.0032639954570880822
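Note: the weights are initialized with np.random.randn, so the exact loss values above will vary from run to run; calling np.random.seed() with a fixed value before constructing the network makes the run reproducible.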

output = nn.feedforward(X)
print("Predictions after training:")
print(output)

Predictions after training:
[[0.0441004 ]
 [0.95480128]
 [0.95475774]
 [0.04908783]]
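These values are close to the XOR targets 0, 1, 1, 0. To read them as hard class labels, one can threshold the network output at 0.5; a minimal sketch (the variable name binary_output is illustrative, not part of the original lab):

binary_output = (output > 0.5).astype(int)
print(binary_output)  # recovers the XOR targets 0, 1, 1, 0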
