#1. Implementation of Perceptron Algorithm for AND Logic Gate with 2-bit Binary Input
import numpy as np
# Input and target output for AND gate
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 0, 0, 1])
# Initialize weights, bias, and learning rate
weights = np.zeros(2)
bias = 0
learning_rate = 0.1
epochs = 10
# Training the perceptron
for _ in range(epochs):
    for i in range(len(X)):
        y_pred = 1 if np.dot(X[i], weights) + bias > 0 else 0
        error = y[i] - y_pred
        weights += learning_rate * error * X[i]
        bias += learning_rate * error
# Testing the perceptron
for x in X:
    output = 1 if np.dot(x, weights) + bias > 0 else 0
    print(f"Input: {x}, Predicted Output: {output}")
#2. Design and implement a Madaline network to solve the XOR problem.
Simulate the network for non-linearly separable data and demonstrate
the results.
import numpy as np
# XOR input and target output
X = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
y = np.array([0, 1, 1, 0])
# Manually set pre-trained weights and biases (values chosen by hand, no training loop)
weights_hidden = np.array([[1, -1], [-1, 1]])  # Pre-trained weights for hidden layer
bias_hidden = np.array([-0.5, -0.5])  # Pre-trained biases for hidden layer
weights_output = np.array([1, 1])  # Pre-trained weights for output layer
bias_output = -0.5  # Pre-trained bias for output layer
# Activation function
def activation(x):
    return np.where(x > 0, 1, 0)
# Testing the XOR logic
print("Testing XOR logic:")
for x in X:
    hidden = activation(np.dot(x, weights_hidden.T) + bias_hidden)  # Hidden layer output
    output = activation(np.dot(hidden, weights_output) + bias_output)  # Final output
    print(f"Input: {x}, Predicted Output: {output}")
#3. Write a program to implement a Multi-Layer Perceptron using the
backpropagation algorithm. Train the MLP on a classification problem,
using gradient descent for weight updates.
import numpy as np
# Sigmoid and its derivative
sigmoid = lambda x: 1 / (1 + np.exp(-x))
sigmoid_derivative = lambda x: x * (1 - x)
# XOR inputs and outputs
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
# Initialize weights and biases
np.random.seed(0)
w1, w2 = np.random.rand(2, 2), np.random.rand(2, 1)
b1, b2 = np.random.rand(1, 2), np.random.rand(1, 1)
# Train MLP
for _ in range(10000):
    h = sigmoid(np.dot(X, w1) + b1)  # Hidden layer
    o = sigmoid(np.dot(h, w2) + b2)  # Output layer
    d2 = (y - o) * sigmoid_derivative(o)  # Output delta
    d1 = np.dot(d2, w2.T) * sigmoid_derivative(h)  # Hidden delta
    w2 += np.dot(h.T, d2) * 0.1  # Update weights (learning rate 0.1)
    b2 += np.sum(d2, axis=0, keepdims=True) * 0.1
    w1 += np.dot(X.T, d1) * 0.1
    b1 += np.sum(d1, axis=0, keepdims=True) * 0.1
# Test MLP
for x in X:
    h = sigmoid(np.dot(x, w1) + b1)
    o = sigmoid(np.dot(h, w2) + b2)
    print(f"Input: {x}, Output: {o.round()}")
#4. Write a program to implement a Radial Basis Function Network (RBFN)
for pattern classification. Use Gaussian radial basis functions, and
train the network using supervised learning techniques.
import numpy as np
# Gaussian RBF
def rbf(x, center, sigma):
    return np.exp(-np.linalg.norm(x - center) ** 2 / (2 * sigma ** 2))
# Train the RBF Network
def train_rbfn(X, y, centers, sigma):
    G = np.zeros((X.shape[0], len(centers)))
    for i, x in enumerate(X):
        for j, c in enumerate(centers):
            G[i, j] = rbf(x, c, sigma)
    weights = np.linalg.pinv(G).dot(y)  # Calculate weights using the pseudoinverse
    return weights
# Predict with RBF Network
def predict_rbfn(X, centers, weights, sigma):
    G = np.zeros((X.shape[0], len(centers)))
    for i, x in enumerate(X):
        for j, c in enumerate(centers):
            G[i, j] = rbf(x, c, sigma)
    return np.round(G.dot(weights))
# XOR problem
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
# Set RBF centers (using training points as centers) and sigma
centers = X
sigma = 1.0
# Train and test RBF Network
weights = train_rbfn(X, y, centers, sigma)
predictions = predict_rbfn(X, centers, weights, sigma)
# Output results
print("Input:\n", X)
print("Predicted Output:\n", predictions)
#5. Implement an autoassociative memory network to store and recall
patterns. Write a program that uses Hebbian learning to train the
network and test its performance with noisy inputs.
import numpy as np
# Hebbian learning
def train_hebbian(patterns):
    return np.dot(patterns.T, patterns)
# Recall a pattern
def recall(pattern, weights):
    return np.sign(np.dot(pattern, weights))
# Patterns to store
patterns = np.array([
    [1, -1, 1, -1],
    [-1, 1, -1, 1]
])
# Train the network
weights = train_hebbian(patterns)
# Test with noisy input
noisy_input = np.array([1, -1, 1, 1])  # Slightly noisy version of the first pattern
recalled = recall(noisy_input, weights)
print("Noisy Input:", noisy_input)
print("Recalled Pattern:", recalled)
#6. Implement a heteroassociative memory network that maps input
patterns to output patterns. Train the network using Hebbian learning
and test its performance by introducing noise in the input.
import numpy as np
# Hebbian learning for heteroassociative memory
def train_heteroassociative(input_patterns, output_patterns):
    return np.dot(input_patterns.T, output_patterns)
# Recall a pattern (thresholded to bipolar values)
def recall(input_pattern, weights):
    return np.sign(np.dot(input_pattern, weights))
# Input and output patterns
input_patterns = np.array([
    [1, -1, 1],
    [-1, 1, -1]
])
output_patterns = np.array([
    [1, 1],
    [-1, -1]
])
# Train the network
weights = train_heteroassociative(input_patterns, output_patterns)
# Test with noisy input
noisy_input = np.array([1, -1, -1])  # Noisy version of the first pattern
recalled_output = recall(noisy_input, weights)
print("Noisy Input:", noisy_input)
print("Recalled Output:", recalled_output)
#7. Write a program to implement a Bidirectional Associative Memory
(BAM). Train the network to recall patterns in both directions, i.e.,
input-to-output and output-to-input, and test its recall accuracy.
import numpy as np
# Hebbian learning for BAM
def train_bam(input_patterns, output_patterns):
    # Compute weights for input-to-output and output-to-input
    w_in_out = np.dot(input_patterns.T, output_patterns)
    w_out_in = np.dot(output_patterns.T, input_patterns)
    return w_in_out, w_out_in
# Recall from input to output
def recall_input_to_output(input_pattern, w_in_out):
    return np.sign(np.dot(input_pattern, w_in_out))
# Recall from output to input
def recall_output_to_input(output_pattern, w_out_in):
    return np.sign(np.dot(output_pattern, w_out_in))
# Input and output patterns
input_patterns = np.array([
    [1, -1, 1],
    [-1, 1, -1]
])
output_patterns = np.array([
    [1, 1],
    [-1, -1]
])
# Train the BAM network
w_in_out, w_out_in = train_bam(input_patterns, output_patterns)
# Test recall accuracy
input_test = np.array([1, -1, 1]) # Test input pattern
output_test = np.array([1, 1]) # Test output pattern
recalled_output = recall_input_to_output(input_test, w_in_out)
recalled_input = recall_output_to_input(output_test, w_out_in)
print("Input Test:", input_test)
print("Recalled Output:", recalled_output)
print("Output Test:", output_test)
print("Recalled Input:", recalled_input)
#8. Implement a Hopfield network for pattern storage and recall. Write
a program that demonstrates the energy minimization process and the
ability to recall patterns even in the presence of noise.
import numpy as np
# Hopfield Network class
class HopfieldNetwork:
    def __init__(self, num_neurons):
        self.num_neurons = num_neurons
        self.weights = np.zeros((num_neurons, num_neurons))

    # Train the network using Hebbian learning
    def train(self, patterns):
        for pattern in patterns:
            self.weights += np.outer(pattern, pattern)
        np.fill_diagonal(self.weights, 0)  # No self-connections

    # Recall a pattern with noise and energy minimization
    def recall(self, noisy_pattern, iterations=5):
        pattern = noisy_pattern.copy()  # Work on a copy so the noisy input is preserved
        for _ in range(iterations):
            for i in range(self.num_neurons):
                # Update each neuron based on the weighted sum of the others
                sum_input = np.dot(self.weights[i], pattern)
                pattern[i] = 1 if sum_input >= 0 else -1
        return pattern
# Define stored patterns
patterns = np.array([
    [1, -1, 1],
    [-1, 1, -1]
])
# Create the Hopfield network and train it
hopfield = HopfieldNetwork(num_neurons=3)
hopfield.train(patterns)
# Test with noisy input (introducing noise)
noisy_input = np.array([1, -1, -1])  # Noisy version of the first pattern
# Recall the pattern from the noisy input
recalled_pattern = hopfield.recall(noisy_input)
print("Noisy Input:", noisy_input)
print("Recalled Pattern:", recalled_pattern)
#9. Implement a fixed-weight competitive network where neurons compete
to be activated. Simulate the network for clustering input patterns and
demonstrate the winner-takes-all strategy.
import numpy as np
# Competitive Network
class CompetitiveNetwork:
    def __init__(self, num_neurons, input_dim):
        self.num_neurons = num_neurons
        self.weights = np.random.rand(num_neurons, input_dim)

    # Find the winner neuron based on the minimum distance
    def winner_takes_all(self, input_pattern):
        distances = np.linalg.norm(self.weights - input_pattern, axis=1)
        winner = np.argmin(distances)
        return winner
# Input patterns for clustering
inputs = np.array([
    [1, 1],
    [1, -1],
    [-1, 1],
    [-1, -1]
])
# Create the competitive network (weights stay fixed at their random initial values)
network = CompetitiveNetwork(num_neurons=2, input_dim=2)
# Simulate the network for clustering
for input_pattern in inputs:
    winner = network.winner_takes_all(input_pattern)
    print(f"Input: {input_pattern}, Winner Neuron: {winner}")
#10. Implement a Recurrent Neural Network (RNN) for predicting the
next character in the string "HELLO". Evaluate the model's performance
based on accuracy and loss metrics after training.
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.utils import to_categorical
# Prepare data
data = "HELLO"
chars = sorted(set(data))
char_to_int = {c: i for i, c in enumerate(chars)}
dataX = [char_to_int[data[i]] for i in range(len(data)-1)]
dataY = [char_to_int[data[i+1]] for i in range(len(data)-1)]
X = np.reshape(dataX, (len(dataX), 1, 1)) / len(chars)
y = to_categorical(dataY)
# Build the model
model = Sequential([
    LSTM(256, input_shape=(X.shape[1], X.shape[2])),
    Dropout(0.2),
    Dense(len(chars), activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train and evaluate the model
model.fit(X, y, epochs=1000, batch_size=1, verbose=0)
loss, accuracy = model.evaluate(X, y, verbose=0)
print(f"Accuracy: {accuracy*100:.2f}%, Loss: {loss:.2f}")
# Predict the next character after the last input character
prediction_input = np.reshape(dataX[-1], (1, 1, 1)) / len(chars)
predicted_prob = model.predict(prediction_input, verbose=0)
predicted_idx = np.argmax(predicted_prob)
int_to_char = {i: c for c, i in char_to_int.items()}  # Reverse mapping for decoding
predicted_char = int_to_char[predicted_idx]
print(f"Predicted next character: {predicted_char}")