LIST OF PROGRAMS
1. Write a Python program to implement a Perceptron to understand a single-layer neural network.
2. Write a program to visualize activation functions (Sigmoid, ReLU, Tanh).
3. Write a program to build a simple feedforward neural network.
4. Write a program for MNIST digit classification using Keras.
5. Write a program to create and visualize CNN layers.
6. Write a program for CIFAR-10 image classification to understand multiclass image classification using a CNN.
7. Write a program to implement image augmentation techniques for preprocessing and transformation.
8. Write a program that uses Dropout for regularization to improve model generalization.
9. Write a program to build a model using PyTorch.
10. Write a program to compare training with and without Batch Normalization to analyze performance and convergence.
PROGRAM 1: Write a Python program to implement a Perceptron to understand a single-layer neural network.
Objective: The Perceptron is the simplest form of a neural network. It is a type of single-layer neural
network used for binary classification. The Perceptron works by learning weights that represent the
strength of connections between the input features and the output prediction. It then classifies the
input into one of the two classes (usually 0 or 1) based on a threshold.
The goal of implementing a perceptron is to understand the concept of linear separability, where we
can separate the two classes by a straight line (in the case of 2D input).
import numpy as np
# Define the step activation function
def step_function(x):
return 1 if x >= 0 else 0
# Perceptron class to train the model
class Perceptron:
def __init__(self, input_size, learning_rate=0.1, epochs=100):
self.weights = np.zeros(input_size)
self.bias = 0
self.learning_rate = learning_rate
self.epochs = epochs
def fit(self, X, y):
"""
Train the Perceptron model.
X - input data (2D numpy array)
y - target labels (1D numpy array)
"""
for epoch in range(self.epochs):
for i in range(len(X)):
# Calculate the net input
net_input = np.dot(X[i], self.weights) + self.bias
# Apply step function
output = step_function(net_input)
# Compute the error (actual - predicted)
error = y[i] - output
# Update weights and bias
self.weights += self.learning_rate * error * X[i]
self.bias += self.learning_rate * error
def predict(self, X):
"""
Predict output for given input.
"""
net_input = np.dot(X, self.weights) + self.bias
return np.array([step_function(i) for i in net_input])
# Dataset for AND operation
# Input data: 2 features (x1, x2)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# Target labels (AND operation: 1 if both inputs are 1, else 0)
y = np.array([0, 0, 0, 1])
# Initialize and train the Perceptron
perceptron = Perceptron(input_size=2, learning_rate=0.1, epochs=10)
perceptron.fit(X, y)
# Test the Perceptron
test_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
predictions = perceptron.predict(test_data)
print("Predictions for AND operation:")
for i, prediction in enumerate(predictions):
print(f"Input: {test_data[i]} => Predicted Output: {prediction}")
PROGRAM 2: Write a program to visualize activation functions (Sigmoid, ReLU, Tanh).
Objective: To graphically visualize the behavior of three common neural network activation
functions—Sigmoid, ReLU, and Tanh—using Python and Matplotlib. This helps in understanding how
these functions transform inputs in machine learning models, especially during forward propagation.
import numpy as np
import matplotlib.pyplot as plt
# Define the input range
x = np.linspace(-10, 10, 1000)
# Define activation functions
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(0, x)
def tanh(x):
return np.tanh(x)
# Compute outputs
y_sigmoid = sigmoid(x)
y_relu = relu(x)
y_tanh = tanh(x)
# Plotting
plt.figure(figsize=(12, 8))
# Sigmoid
plt.subplot(3, 1, 1)
plt.plot(x, y_sigmoid, color='blue')
plt.title("Sigmoid Activation Function")
plt.grid(True)
plt.xlabel("Input")
plt.ylabel("Output")
# ReLU
plt.subplot(3, 1, 2)
plt.plot(x, y_relu, color='green')
plt.title("ReLU Activation Function")
plt.grid(True)
plt.xlabel("Input")
plt.ylabel("Output")
# Tanh
plt.subplot(3, 1, 3)
plt.plot(x, y_tanh, color='red')
plt.title("Tanh Activation Function")
plt.grid(True)
plt.xlabel("Input")
plt.ylabel("Output")
plt.tight_layout()
plt.show()
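As an optional follow-up, the three curves can also be overlaid on a single axis to make their output ranges easy to compare. A minimal sketch, assuming x, y_sigmoid, y_relu, and y_tanh from the program above.

# Overlay all three activations on one axis to compare their output ranges
plt.figure(figsize=(8, 5))
plt.plot(x, y_sigmoid, label='Sigmoid (0 to 1)')
plt.plot(x, y_tanh, label='Tanh (-1 to 1)')
plt.plot(x, y_relu, label='ReLU (0 to x)')
plt.axhline(0, color='gray', linewidth=0.5)
plt.title('Activation Functions Compared')
plt.xlabel('Input')
plt.ylabel('Output')
plt.legend()
plt.grid(True)
plt.show()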
PROGRAM 3: Write a program to build a simple feedforward neural network.
Objective: To build a basic Feedforward Neural Network (FNN) from scratch using NumPy to
understand the core concepts of forward propagation, error computation, and weight updates via
backpropagation. The network will be trained on a simple dataset (e.g., XOR) to demonstrate its
learning ability.
import numpy as np
# Sigmoid activation and its derivative
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
return x * (1 - x)
# XOR input and output
X = np.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]])
y = np.array([[0],
[1],
[1],
[0]])
# Set random seed for reproducibility
np.random.seed(42)
# Initialize weights and biases
input_layer_size = 2
hidden_layer_size = 4
output_layer_size = 1
# Weights
W1 = np.random.uniform(-1, 1, (input_layer_size, hidden_layer_size))
W2 = np.random.uniform(-1, 1, (hidden_layer_size, output_layer_size))
# Biases
b1 = np.zeros((1, hidden_layer_size))
b2 = np.zeros((1, output_layer_size))
# Training the network
epochs = 10000
learning_rate = 0.1
for epoch in range(epochs):
# Forward propagation
z1 = np.dot(X, W1) + b1
a1 = sigmoid(z1)
z2 = np.dot(a1, W2) + b2
a2 = sigmoid(z2)
# Error computation
error = y - a2
# Backpropagation
d_a2 = error * sigmoid_derivative(a2)
d_W2 = np.dot(a1.T, d_a2)
d_a1 = np.dot(d_a2, W2.T) * sigmoid_derivative(a1)
d_W1 = np.dot(X.T, d_a1)
# Update weights and biases
W2 += learning_rate * d_W2
b2 += learning_rate * np.sum(d_a2, axis=0, keepdims=True)
W1 += learning_rate * d_W1
b1 += learning_rate * np.sum(d_a1, axis=0, keepdims=True)
# Print error every 1000 epochs
if epoch % 1000 == 0:
loss = np.mean(np.square(error))
print(f"Epoch {epoch} - Loss: {loss:.6f}")
# Final Output
print("\nFinal predictions after training:")
print(a2.round())
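For reuse after training, the forward pass can be wrapped in a small helper that maps new inputs to 0/1 predictions. A minimal sketch, assuming the trained W1, b1, W2, b2 and the sigmoid function defined above.

def predict(X_new):
    """Forward pass with the trained weights, thresholded at 0.5."""
    hidden = sigmoid(np.dot(X_new, W1) + b1)
    output = sigmoid(np.dot(hidden, W2) + b2)
    return (output >= 0.5).astype(int)

# XOR targets for these two inputs are 1 and 0
print(predict(np.array([[0, 1], [1, 1]])))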
PROGRAM 4: Write a program for MNIST digit classification using Keras.
Objective: To build a Convolutional Neural Network (CNN) using Keras (with TensorFlow backend)
that can classify handwritten digits (0–9) from the MNIST dataset with high accuracy. This
demonstrates how deep learning models can be applied to real-world image classification tasks
using high-level frameworks.
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Preprocess the data
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0
# Convert labels to one-hot encoding
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Build the CNN model
model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Flatten(),
Dense(128, activation='relu'),
Dense(10, activation='softmax') # 10 classes for digits 0–9
])
# Compile the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train_cat, epochs=5, batch_size=64, validation_split=0.1)
# Evaluate on test set
test_loss, test_acc = model.evaluate(x_test, y_test_cat, verbose=2)
print(f"\nTest accuracy: {test_acc:.4f}")
# Plot training history
plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.show()
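Once trained, the model can also be used for inference on an individual image. A minimal sketch, assuming the trained model, x_test, and y_test from the program above.

import numpy as np

# Predict the class of the first test image and display it
probs = model.predict(x_test[:1])
predicted_digit = np.argmax(probs[0])

plt.imshow(x_test[0].reshape(28, 28), cmap='gray')
plt.title(f"Predicted: {predicted_digit}, Actual: {y_test[0]}")
plt.axis('off')
plt.show()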
PROGRAM 5: Write a program to create and visualize CNN layers.
Objective: To build a simple Convolutional Neural Network (CNN) using Keras and visualize the
feature maps (activations) of different CNN layers for a given image. This helps in understanding how
CNNs learn hierarchical features (edges, shapes, textures) as input data passes through layers.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Input
from tensorflow.keras.datasets import mnist
# Load and preprocess MNIST image
(x_train, _), (_, _) = mnist.load_data()
x = x_train[0] # Take the first image
x_input = x.reshape(1, 28, 28, 1).astype("float32") / 255.0
# Plot original image
plt.imshow(x, cmap='gray')
plt.title("Original Input Image")
plt.axis('off')
plt.show()
# Build a simple CNN model
input_layer = Input(shape=(28, 28, 1))
x = Conv2D(16, (3, 3), activation='relu', name='conv1')(input_layer)
x = MaxPooling2D((2, 2), name='pool1')(x)
x = Conv2D(32, (3, 3), activation='relu', name='conv2')(x)
x = MaxPooling2D((2, 2), name='pool2')(x)
x = Flatten()(x)
output = Dense(10, activation='softmax')(x)
model = Model(inputs=input_layer, outputs=output)
model.summary()
# Create intermediate models to fetch feature maps
layer_outputs = [layer.output for layer in model.layers if 'conv' in layer.name or 'pool' in layer.name]
activation_model = Model(inputs=model.input, outputs=layer_outputs)
# Get feature maps for the input image
activations = activation_model.predict(x_input)
# Function to plot feature maps
def plot_feature_maps(activations, layer_names):
for layer_name, activation in zip(layer_names, activations):
num_filters = activation.shape[-1]
size = activation.shape[1]
display_grid = np.zeros((size, size * num_filters))
for i in range(num_filters):
feature_map = activation[0, :, :, i]
feature_map -= feature_map.mean()
feature_map /= feature_map.std() + 1e-5 # Normalize
feature_map *= 64
feature_map += 128
feature_map = np.clip(feature_map, 0, 255).astype('uint8')
display_grid[:, i * size : (i + 1) * size] = feature_map
plt.figure(figsize=(20, 4))
plt.title(f"Feature maps - {layer_name}")
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis')
plt.axis('off')
plt.show()
# Layer names for display
layer_names = [layer.name for layer in model.layers if 'conv' in layer.name or 'pool' in layer.name]
# Plot feature maps
plot_feature_maps(activations, layer_names)
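Besides the activations, the convolution kernels themselves can be inspected (note that the model above is never trained, so these are the randomly initialized kernels). A minimal sketch, assuming the model defined above.

# Inspect the kernels of the first convolutional layer
filters, biases = model.get_layer('conv1').get_weights()
print("conv1 kernel shape:", filters.shape)  # (3, 3, 1, 16)

# Normalize kernel values to [0, 1] for display
f_min, f_max = filters.min(), filters.max()
filters_norm = (filters - f_min) / (f_max - f_min + 1e-8)

plt.figure(figsize=(12, 3))
for i in range(filters_norm.shape[-1]):
    plt.subplot(2, 8, i + 1)
    plt.imshow(filters_norm[:, :, 0, i], cmap='gray')
    plt.axis('off')
plt.suptitle("conv1 kernels")
plt.show()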
PROGRAM 6: Write a program for CIFAR-10 image classification to understand multiclass image classification using a CNN.
Objective: To build, train, and evaluate a Convolutional Neural Network (CNN) for multiclass image
classification using the CIFAR-10 dataset with Keras. The program demonstrates how deep learning
models can classify images into 10 different classes, showcasing the power of CNNs for solving real-
world vision problems.
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize pixel values to [0, 1]
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# One-hot encode labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Class names for later use
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# Build CNN model
model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax') # 10 output classes
])
# Compile the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train_cat,
epochs=10,
batch_size=64,
validation_split=0.1)
# Evaluate on test data
test_loss, test_acc = model.evaluate(x_test, y_test_cat, verbose=2)
print(f"\nTest accuracy: {test_acc:.4f}")
# Plot training vs validation accuracy
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training vs Validation Accuracy')
plt.legend()
plt.grid(True)
plt.show()
# Predict and visualize some test results
import numpy as np
predictions = model.predict(x_test[:16])
plt.figure(figsize=(12, 8))
for i in range(16):
plt.subplot(4, 4, i+1)
plt.imshow(x_test[i])
pred_label = class_names[np.argmax(predictions[i])]
true_label = class_names[y_test[i][0]]
color = 'green' if pred_label == true_label else 'red'
plt.title(f"P: {pred_label}\nT: {true_label}", color=color, fontsize=9)
plt.axis('off')
plt.tight_layout()
plt.show()
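To see where the model struggles, per-class accuracy can be computed over the full test set. A minimal sketch, assuming the trained model, x_test, y_test, and class_names from the program above.

# Per-class accuracy on the full test set
all_preds = np.argmax(model.predict(x_test), axis=1)
true_labels = y_test.flatten()

for cls in range(10):
    mask = true_labels == cls
    acc = (all_preds[mask] == cls).mean()
    print(f"{class_names[cls]:>10s}: {acc:.2%}")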
PROGRAM 7: Write a program to implement image augmentation techniques for preprocessing and transformation.
Objective: To demonstrate the use of image augmentation techniques (such as flipping, rotating,
shifting, zooming, etc.) for preprocessing and transformation using Keras’ ImageDataGenerator. This
is critical in training deep learning models to improve generalization by artificially increasing dataset
diversity.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.datasets import cifar10
# Load a sample image from CIFAR-10
(x_train, y_train), (_, _) = cifar10.load_data()
sample_image = x_train[1] # Pick one image
sample_image = sample_image.reshape((1, 32, 32, 3)) # Reshape to 4D for generator
# Define image augmentation configuration
datagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
brightness_range=[0.7, 1.3],
    fill_mode='nearest'
)

# Create an iterator
aug_iter = datagen.flow(sample_image, batch_size=1)
# Generate and plot augmented images
plt.figure(figsize=(10, 10))
for i in range(9):
batch = next(aug_iter)
image = batch[0].astype('uint8')
plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title("Augmented")
plt.axis('off')
plt.suptitle("Image Augmentation Examples", fontsize=16)
plt.tight_layout()
plt.show()
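In practice, the same generator is usually fed directly into model training so that every batch is augmented on the fly. The following is an illustrative sketch only (not part of the original program): it assumes the datagen, x_train, and y_train from above, trains a small throwaway model on a subset for one epoch, and skips input normalization (a rescale=1./255 argument is often added to the generator in real use).

from tensorflow.keras import layers, models
from tensorflow.keras.utils import to_categorical

# Small illustrative model; labels one-hot encoded
y_train_cat = to_categorical(y_train, 10)
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Feed augmented batches directly into training
model.fit(datagen.flow(x_train[:5000], y_train_cat[:5000], batch_size=64),
          epochs=1)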
PROGRAM 8: Write a program that uses Dropout for regularization to improve model generalization.
Objective: To implement a neural network model with Dropout layers using Keras in order to
regularize the model and prevent overfitting. The program trains a model on the MNIST handwritten
digit dataset and compares training/validation accuracy, demonstrating how Dropout helps improve
generalization.
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load and preprocess MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0
# One-hot encode labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Build model with Dropout
model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
layers.MaxPooling2D((2, 2)),
layers.Dropout(0.25), # Dropout after conv+pool
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.5), # Dropout before output layer
layers.Dense(10, activation='softmax')
])
# Compile model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train_cat,
epochs=10,
batch_size=64,
validation_split=0.1)
# Evaluate on test set
test_loss, test_acc = model.evaluate(x_test, y_test_cat, verbose=2)
print(f"\nTest accuracy: {test_acc:.4f}")
# Plot training and validation accuracy
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training vs Validation Accuracy with Dropout')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.show()
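To actually see the regularization effect described in the objective, the same architecture can be retrained with the Dropout layers removed and the two validation curves overlaid. A sketch, assuming the preprocessed data, the trained model's history, and the imports from the program above.

# Baseline: identical architecture without Dropout, for comparison
baseline = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
baseline.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
base_history = baseline.fit(x_train, y_train_cat, epochs=10,
                            batch_size=64, validation_split=0.1, verbose=0)

# Overlay validation accuracy with and without Dropout
plt.plot(history.history['val_accuracy'], label='With Dropout')
plt.plot(base_history.history['val_accuracy'], label='Without Dropout')
plt.xlabel('Epoch')
plt.ylabel('Validation Accuracy')
plt.legend()
plt.grid(True)
plt.show()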
PROGRAM 9: Write a program to build a model using PyTorch.
Objective: To demonstrate how to build, train, and evaluate a simple neural network for image
classification using PyTorch. The model will be trained on the MNIST dataset (handwritten digits 0–
9). This helps understand core PyTorch concepts like defining models, loss functions, optimizers, and
training loops.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Transform for MNIST data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
# Load MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='./data', train=True,
transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False,
transform=transform, download=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=False)
# Define simple Feedforward Neural Network
class SimpleNN(nn.Module):
def __init__(self):
super(SimpleNN, self).__init__()
self.net = nn.Sequential(
nn.Flatten(),
nn.Linear(28*28, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        return self.net(x)
# Instantiate model, loss, optimizer
model = SimpleNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training loop
epochs = 5
train_loss_list = []
for epoch in range(epochs):
running_loss = 0.0
model.train()
for images, labels in train_loader:
images, labels = images.to(device), labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
avg_loss = running_loss / len(train_loader)
train_loss_list.append(avg_loss)
print(f"Epoch [{epoch+1}/{epochs}], Loss: {avg_loss:.4f}")
# Evaluate on test set
model.eval()
correct = 0
total = 0
with torch.no_grad():
for images, labels in test_loader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"\nTest Accuracy: {100 * correct / total:.2f}%")
# Plot training loss
plt.plot(train_loss_list)
plt.title('Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.grid(True)
plt.show()
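A common final step is to persist the trained parameters so the model can be reused without retraining. A minimal sketch, assuming the model and SimpleNN class defined above; the file name is hypothetical.

# Save only the learned parameters (state dict), not the whole model object
torch.save(model.state_dict(), "simple_nn_mnist.pt")  # hypothetical file name

# Later: rebuild the architecture and load the weights back
restored = SimpleNN().to(device)
restored.load_state_dict(torch.load("simple_nn_mnist.pt", map_location=device))
restored.eval()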
PROGRAM 10: Write a program to compare training with and without Batch Normalization to analyze performance and convergence.
Objective: To analyze the impact of Batch Normalization on the training performance and
convergence speed of a neural network. We will compare two identical models trained on the
MNIST dataset: one with BatchNorm layers and one without, and visualize the training loss and
accuracy.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Data preprocessing and loading
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, download=True,
transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
# Define two models: one with BatchNorm, one without
class MLP(nn.Module):
def __init__(self, use_batchnorm=False):
super(MLP, self).__init__()
        layers = [nn.Flatten(), nn.Linear(28 * 28, 256)]
        if use_batchnorm:
            layers.append(nn.BatchNorm1d(256))
        layers.append(nn.ReLU())
        layers.append(nn.Linear(256, 128))
        if use_batchnorm:
            layers.append(nn.BatchNorm1d(128))
        layers.append(nn.ReLU())
        layers.append(nn.Linear(128, 10))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
# Training function
def train(model, optimizer, criterion, train_loader, epochs=5):
model.to(device)
loss_list = []
acc_list = []
for epoch in range(epochs):
model.train()
running_loss = 0.0
correct = 0
total = 0
for images, labels in train_loader:
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
epoch_loss = running_loss / len(train_loader)
epoch_acc = 100 * correct / total
loss_list.append(epoch_loss)
acc_list.append(epoch_acc)
print(f"Epoch {epoch+1}: Loss = {epoch_loss:.4f}, Accuracy = {epoch_acc:.2f}%")
return loss_list, acc_list
# Initialize models
model_no_bn = MLP(use_batchnorm=False)
model_with_bn = MLP(use_batchnorm=True)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer_no_bn = optim.Adam(model_no_bn.parameters(), lr=0.001)
optimizer_with_bn = optim.Adam(model_with_bn.parameters(), lr=0.001)
# Train both models
print("Training model WITHOUT Batch Normalization...")
loss_no_bn, acc_no_bn = train(model_no_bn, optimizer_no_bn, criterion, train_loader)
print("\nTraining model WITH Batch Normalization...")
loss_with_bn, acc_with_bn = train(model_with_bn, optimizer_with_bn, criterion, train_loader)
# Plotting comparison
epochs = range(1, 6)
plt.figure(figsize=(12, 5))
# Loss plot
plt.subplot(1, 2, 1)
plt.plot(epochs, loss_no_bn, label='Without BatchNorm')
plt.plot(epochs, loss_with_bn, label='With BatchNorm')
plt.title("Training Loss Comparison")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.grid(True)
# Accuracy plot
plt.subplot(1, 2, 2)
plt.plot(epochs, acc_no_bn, label='Without BatchNorm')
plt.plot(epochs, acc_with_bn, label='With BatchNorm')
plt.title("Training Accuracy Comparison")
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
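The comparison above only looks at training metrics. As an optional check, both trained models can also be evaluated on the MNIST test split. A sketch, assuming the trained models, transform, and device from the program above.

# Optional: evaluate both trained models on the held-out test set
test_dataset = torchvision.datasets.MNIST(root='./data', train=False,
                                          download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=False)

def evaluate(model):
    model.to(device)
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return 100 * correct / total

print(f"Test accuracy without BatchNorm: {evaluate(model_no_bn):.2f}%")
print(f"Test accuracy with BatchNorm:    {evaluate(model_with_bn):.2f}%")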