PROGRAM:
#1.a Feed Forward Neural Network
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy

# Load MNIST and scale pixel intensities from [0, 255] down to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)

# Sanity-check: display the last training image.
plt.imshow(x_train[-1], cmap="gray")
plt.show()

# Feed-forward classifier: flatten 28x28 -> 128 ReLU units -> 10-way softmax.
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax'),
])
model.compile(optimizer=Adam(),
              loss=SparseCategoricalCrossentropy(),
              metrics=[SparseCategoricalAccuracy()])
model.fit(x_train, y_train, epochs=5)

test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'\nTest accuracy: {test_acc}')

# BUG FIX: the original predicted on np.array([[0.2], [0.5], [0.8]]), a (3, 1)
# array that does not match the (28, 28) images the model expects and raises a
# shape error at predict time. Predict on real held-out test images instead.
X_sample = x_test[:3]
y_pred_test = model.predict(X_sample)
print(f'Predictions for test data:\n{y_pred_test}')
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 2s 0us/step
(60000, 28, 28)
(10000, 28, 28)
(60000,)
(10000,)
Epoch 1/5
1875/1875 ━━━━━━━━━ 4s 2ms/step - loss: 0.4416 - sparse_categorical_accuracy: 0.8764
Epoch 2/5
1875/1875 ━━━━━━━━━ 3s 2ms/step - loss: 0.1241 - sparse_categorical_accuracy: 0.9643
Epoch 3/5
1875/1875 ━━━━━━━━━ 3s 2ms/step - loss: 0.0801 - sparse_categorical_accuracy: 0.9761
Epoch 4/5
1875/1875 ━━━━━━━━━ 4s 2ms/step - loss: 0.0561 - sparse_categorical_accuracy: 0.9837
Epoch 5/5
1875/1875 ━━━━━━━━━ 4s 2ms/step - loss: 0.0434 - sparse_categorical_accuracy: 0.9864
313/313 ━━━━━━━━━ 1s 2ms/step - loss: 0.0829 - sparse_categorical_accuracy: 0.9735
Test accuracy: 0.9776999950408936
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#1.b Back Propagation Using IRIS Dataset
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Load IRIS (150 samples, 4 features, 3 species) and hold out 20% for testing.
iris = datasets.load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(f"X_train shape: {X_train.shape}")
print(f"X_test shape: {X_test.shape}")
print(f"y_train shape: {y_train.shape}")
print(f"y_test shape: {y_test.shape}")

# One hidden layer of 10 ReLU units, softmax over the 3 classes.
hidden_layer_size = 10
model = tf.keras.Sequential([
    tf.keras.layers.Dense(hidden_layer_size, activation='relu', input_shape=(X_train.shape[1],)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
model.summary()

# Manual back-propagation loop: plain SGD on sparse cross-entropy,
# mini-batches of 16 shuffled each epoch.
learning_rate = 0.01
epochs = 1000
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(100).batch(16)
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(16)

for epoch in range(epochs):
    for step, (batch_X, batch_y) in enumerate(train_dataset):
        # Record the forward pass so gradients can flow back through it.
        with tf.GradientTape() as tape:
            logits = model(batch_X, training=True)
            loss_value = loss_fn(batch_y, logits)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    if (epoch + 1) % 100 == 0:
        # NOTE: this reports the loss of the epoch's final batch, not an average.
        print(f"Epoch {epoch + 1}/{epochs}, Loss: {loss_value.numpy():.4f}")

test_loss = loss_fn(y_test, model(X_test, training=False)).numpy()
print(f"\nTest Loss: {test_loss:.4f}")

# Classify one unseen flower measurement.
new_samples = np.array([[6.7, 3.3, 5.7, 2.5]])
predictions = model(new_samples)
predicted_class = np.argmax(predictions.numpy(), axis=1)
print(f"Predicted Class: {predicted_class}")
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
X_train shape: (120, 4)
X_test shape: (30, 4)
y_train shape: (120,)
y_test shape: (30,)
Model: "sequential"
Total params: 83 (332.00 B)
Trainable params: 83 (332.00 B)
Non-trainable params: 0 (0.00 B)
Epoch 100/1000, Loss: 0.0633
Epoch 200/1000, Loss: 0.1987
Epoch 300/1000, Loss: 0.0340
Epoch 400/1000, Loss: 0.0880
Epoch 500/1000, Loss: 0.0422
Epoch 600/1000, Loss: 0.0134
Epoch 700/1000, Loss: 0.1528
Epoch 800/1000, Loss: 0.1556
Epoch 900/1000, Loss: 0.0049
Epoch 1000/1000, Loss: 0.0123
Test Loss: 0.0809
Predicted Class: [2]
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#2.Naive Bayes Classifier
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn import datasets, model_selection

# Keep only the first two features (sepal length/width) so the model
# stays two-dimensional and easy to visualise.
iris = datasets.load_iris()
data = iris.data[:, :2]
targets = iris.target
x_train, x_test, y_train, y_test = model_selection.train_test_split(data, targets, test_size=0.2)

labels = {0: 'Setosa', 1: 'Versicolour', 2: 'Virginica'}
label_colours = ['blue', 'red', 'green']


def plot_data(x, y, labels, colours):
    """Scatter-plot each class in its own colour on the current figure."""
    for y_class in np.unique(y):
        index = np.where(y == y_class)
        plt.scatter(x[index, 0], x[index, 1], label=labels[y_class], c=colours[y_class])
    plt.title("Training set")
    plt.xlabel("Sepal length (cm)")
    plt.ylabel("Sepal width (cm)")
    plt.legend()


plt.figure(figsize=(8, 5))
plot_data(x_train, y_train, labels, label_colours)
plt.show()
def learn_parameters(x, y, mus, scales, optimiser, epochs):
    """Fit one diagonal Gaussian per class by minimising the masked
    negative log-likelihood with gradient descent.

    Returns (nll_loss, mu_values, scales_values, dist): the per-epoch loss
    and parameter trajectories plus the final fitted distribution batch.
    """
    # BUG FIX: the original referenced `tfd`, which was never defined anywhere
    # in the file (only `tensorflow_probability as tfp` is imported) and
    # raised a NameError on first use.
    tfd = tfp.distributions

    @tf.function
    def nll(dist, x_train, y_train):
        # Mask each sample's per-class log-probabilities with a one-hot of
        # its label so only the matching class-conditional contributes.
        log_probs = dist.log_prob(x_train)
        L = len(tf.unique(y_train)[0])
        y_train = tf.one_hot(indices=y_train, depth=L)
        return -tf.reduce_mean(log_probs * y_train)

    @tf.function
    def get_loss_and_grads(dist, x_train, y_train):
        with tf.GradientTape() as tape:
            tape.watch(dist.trainable_variables)
            loss = nll(dist, x_train, y_train)
        grads = tape.gradient(loss, dist.trainable_variables)
        return loss, grads

    nll_loss = []
    mu_values = []
    scales_values = []
    # Add a broadcast axis so every sample is scored against all class batches.
    x = tf.cast(np.expand_dims(x, axis=1), tf.float32)
    dist = tfd.MultivariateNormalDiag(loc=mus, scale_diag=scales)
    for epoch in range(epochs):
        loss, grads = get_loss_and_grads(dist, x, y)
        optimiser.apply_gradients(zip(grads, dist.trainable_variables))
        nll_loss.append(loss)
        mu_values.append(mus.numpy())
        scales_values.append(scales.numpy())
    nll_loss, mu_values, scales_values = np.array(nll_loss), np.array(mu_values), np.array(scales_values)
    return (nll_loss, mu_values, scales_values, dist)


# One (mean, scale) pair per class, trained with Adam for 10000 steps.
mus = tf.Variable([[1., 1.], [1., 1.], [1., 1.]])
scales = tf.Variable([[1., 1.], [1., 1.], [1., 1.]])
opt = tf.keras.optimizers.Adam(learning_rate=0.005)
epochs = 10000
nlls, mu_arr, scales_arr, class_conditionals = learn_parameters(x_train, y_train, mus, scales, opt, epochs)
# Three diagnostic panels: training loss, mean estimates, scale estimates.
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
ax[0].plot(nlls)
ax[0].set_title("Loss vs. epoch")
ax[0].set_xlabel("Epoch")
ax[0].set_ylabel("Negative log-likelihood")
# Both coordinates of each class mean, one curve per coordinate.
for k in [0, 1, 2]:
    ax[1].plot(mu_arr[:, k, 0])
    ax[1].plot(mu_arr[:, k, 1])
ax[1].set_title("ML estimates for model's\nmeans vs. epoch")
ax[1].set_xlabel("Epoch")
ax[1].set_ylabel("Means")
# Same for the diagonal scales.
for k in [0, 1, 2]:
    ax[2].plot(scales_arr[:, k, 0])
    ax[2].plot(scales_arr[:, k, 1])
ax[2].set_title("ML estimates for model's\nscales vs. epoch")
ax[2].set_xlabel("Epoch")
ax[2].set_ylabel("Scales")
plt.show()

print("Class conditional means:")
print(class_conditionals.loc.numpy())
print("\nClass conditional standard deviations:")
print(class_conditionals.stddev().numpy())
def get_prior(y):
    """Categorical prior over classes from the empirical label frequencies."""
    counts = np.bincount(y)
    # BUG FIX: the original called `tfd.Categorical`, but `tfd` is never
    # defined in this file (only `tensorflow_probability as tfp` is imported),
    # which raises a NameError. Use the full module path instead.
    dist = tfp.distributions.Categorical(probs=counts / len(y))
    return dist


prior = get_prior(y_train)


def predict_class(prior, class_conditionals, x):
    """Bayes-rule class prediction for each row of `x`.

    posterior ∝ likelihood (class-conditional Gaussian) × prior; returns the
    argmax class per sample as a float tensor.
    """
    def predict_fn(myx):
        # Likelihood of the sample under each class-conditional.
        class_probs = class_conditionals.prob(tf.cast(myx, dtype=tf.float32))
        prior_probs = tf.cast(prior.probs, dtype=tf.float32)
        # Unnormalised posterior, normalise, then pick the argmax class.
        class_times_prior_probs = class_probs * prior_probs
        Q = tf.reduce_sum(class_times_prior_probs)
        P = tf.math.divide(class_times_prior_probs, Q)
        Y = tf.cast(tf.argmax(P), dtype=tf.float64)
        return Y
    y = tf.map_fn(predict_fn, x)
    return y


predictions = predict_class(prior, class_conditionals, x_test)
accuracy = accuracy_score(y_test, predictions)
print("Test accuracy: {:.4f}".format(accuracy))
def contour_plot(x0_range, x1_range, prob_fn, batch_shape, levels=None, n_points=100):
X0, X1 = get_meshgrid(x0_range, x1_range, n_points=n_points)
X_values = np.expand_dims(np.array([X0.ravel(), X1.ravel()]).T, axis=1)
Z = prob_fn(X_values)
Z = np.array(Z).T.reshape(batch_shape, *X0.shape)
for batch in np.arange(batch_shape):
plt.contourf(X0, X1, Z[batch], alpha=0.3, levels=levels)
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
Class conditional means: [[5.0550003 3.4325001]
[5.992683 2.795122 ]
[6.602564 2.9384618]]
Class conditional standard deviations: [[0.35351804 0.39584568]
[0.51199764 0.32606354]
[0.5980683 0.2966611 ]]
Test accuracy: 0.8667
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#3.Skip Gram Model Using NLP
import tensorflow as tf
import numpy as np

# Tiny toy corpus for learning word vectors.
corpus = ["I like playing football with my friends",
          "I enjoy playing tennis",
          "I hate swimming",
          "I love basketball"]

# Hyperparameters.
window_size = 3
embedding_dim = 50
batch_size = 16
epochs = 100
learning_rate = 0.01

# Integer-encode the corpus; Keras reserves index 0, hence the +1.
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(corpus)
vocab_size = len(tokenizer.word_index) + 1
sequences = tokenizer.texts_to_sequences(corpus)

# Build (target, context) skip-gram pairs within the window of each word.
data = []
for seq in sequences:
    for i, target in enumerate(seq):
        lo = max(0, i - window_size)
        hi = min(len(seq), i + window_size + 1)
        for j in range(lo, hi):
            if j != i:
                data.append([target, seq[j]])
data = np.array(data)
x_train = data[:, 0]
y_train = data[:, 1]
# Skip-gram network: embed the target word, predict its context word.
inputs = tf.keras.layers.Input(shape=(1,))
embeddings = tf.keras.layers.Embedding(vocab_size, embedding_dim)(inputs)
flatten = tf.keras.layers.Flatten()(embeddings)
output = tf.keras.layers.Dense(vocab_size, activation='softmax')(flatten)
model = tf.keras.models.Model(inputs=inputs, outputs=output)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate))
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)

# The trained embedding matrix: one row per vocabulary index.
word_embeddings = model.get_layer(index=1).get_weights()[0]


def get_vector(word):
    """Return the embedding row for `word`, or None if out of vocabulary."""
    idx = tokenizer.word_index.get(word)
    return word_embeddings[idx] if idx is not None else None


word = "football"
vector = get_vector(word)
print(f"Vector representation of '{word}': {vector}")
def get_context_words(word):
    """Return the distinct corpus words observed within `window_size` of
    `word`, read off the (target, context) skip-gram pairs in `data`.

    BUG FIX: the original treated the word's *vocabulary index* as a position
    in a sentence and returned whichever words happened to have numerically
    nearby vocabulary indices — its printed output even contained the focus
    word itself. The actual neighbours are already enumerated in `data`.
    """
    idx = tokenizer.word_index[word]
    index_to_word = {index: w for w, index in tokenizer.word_index.items()}
    context_indices = {context for target, context in data if target == idx}
    return [index_to_word[i] for i in sorted(context_indices)]


focus_word = "playing"
context_words = get_context_words(focus_word)
print(f"Context words for '{focus_word}': {context_words}")
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
Epoch 1/100
4/4 [==============================] - 1s 5ms/step - loss: 2.6431
Epoch 2/100
4/4 [==============================] - 0s 4ms/step - loss: 2.5489
Epoch 3/100
4/4 [==============================] - 0s 3ms/step - loss: 2.4655
Epoch 4/100
4/4 [==============================] - 0s 4ms/step - loss: 2.3813
Epoch 5/100
4/4 [==============================] - 0s 4ms/step - loss: 2.2917
Vector representation of 'football': [ 0.17141712 0.30853176 0.22219557 0.17984286 -0.24223702 0.3068344
0.03928155 -0.6306034 -0.08273036 0.24466318 -0.08293969 -0.17579114
0.14115772 0.00276356 -0.11821245 -0.09002695 -0.34314537 0.45394325
0.16790457 0.04163431 0.07040803 -0.29762593 -0.1315126 0.717115
0.17201905 -0.08250948 0.23961464 0.09098674 0.3917652 -0.20037314
0.47015923 -0.26363885 -0.36365074 -0.23005496 -0.572872 -0.11484142
0.25438645 -0.20655242 0.00347094 -0.20453684 -0.19304553 -0.22932066
0.32481378 -0.15313971 0.24397568 -0.6262668 0.09316769 -0.5698216
0.06133474 0.37273604]
Context words for 'playing': ['i', 'playing', 'like', 'football', 'with']
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#4.a Object Detection
import cv2
import numpy as np
import os

# Paths to the YOLOv3 config, pre-trained weights, and COCO class names.
yolov3_config = r'C:\Users\ygoku\Desktop\object detection\yolov3.cfg'
yolov3_weights = r'C:\Users\ygoku\Desktop\object detection\yolov3.weights'
coco_names_path = r'C:\Users\ygoku\Desktop\object detection\coco.names'
net = cv2.dnn.readNet(yolov3_weights, yolov3_config)
classes = []
with open(coco_names_path, "r") as f:
    classes = f.read().strip().split('\n')

image_filename = "signs-landing.jpg"
desktop_folder = os.path.expanduser("~/Desktop")
image_path = os.path.join(desktop_folder, image_filename)
if not os.path.isfile(image_path):
    print(f"Image file '{image_path}' not found.")
else:
    image = cv2.imread(image_path)
    height, width, _ = image.shape
    # Normalise to [0,1], resize to the 416x416 network input, swap BGR->RGB.
    blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    layer_names = net.getUnconnectedOutLayersNames()
    outs = net.forward(layer_names)

    class_ids = []
    confidences = []
    boxes = []
    conf_threshold = 0.5
    nms_threshold = 0.4
    for out in outs:
        for detection in out:
            # detection = [cx, cy, w, h, objectness, per-class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > conf_threshold:
                # Network outputs are relative; scale back to pixel coords.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Non-maximum suppression drops overlapping duplicate boxes.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    # BUG FIX: NMSBoxes returns an empty tuple when nothing survives and, on
    # older OpenCV builds, an (N, 1) array; guard + flatten handles both and
    # matches the video version of this script (program 4.b).
    if len(indices) > 0:
        for i in np.array(indices).flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = confidences[i]
            color = (0, 255, 0)
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            cv2.putText(image, f"{label} {confidence:.2f}", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    cv2.imshow("Object Detection", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#4.b Object Detection(Video)
import cv2
import numpy as np

# Load the YOLOv4 network and resolve its output layer names
# (getUnconnectedOutLayers is 1-based, hence the -1).
net = cv2.dnn.readNet("yolov4.weights", "yolov4.cfg")
layer_names = net.getLayerNames()
output_layers = [layer_names[idx - 1] for idx in net.getUnconnectedOutLayers()]

# COCO class labels, one per line.
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]

cap = cv2.VideoCapture("traffic_video.mp4")
while True:
    ret, frame = cap.read()
    if not ret:
        break  # end of stream

    # 1/255 pixel scaling, 416x416 network input, BGR->RGB swap.
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    class_ids, confidences, boxes = [], [], []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Relative coords -> pixel coords, centre -> top-left corner.
                center_x = int(detection[0] * frame.shape[1])
                center_y = int(detection[1] * frame.shape[0])
                w = int(detection[2] * frame.shape[1])
                h = int(detection[3] * frame.shape[0])
                boxes.append([center_x - w // 2, center_y - h // 2, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Suppress overlapping detections before drawing.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    if len(indexes) > 0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            color = (0, 255, 0)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)

    cv2.imshow("Traffic Analysis", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#5. Sentiment Analysis via Facial Emotion Recognition (DeepFace)
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from deepface import DeepFace

# Work around duplicate-OpenMP-runtime crashes on some Windows setups.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

image_path = "D:\\DL\\images.jpg"
img = cv2.imread(image_path)
if img is None:
    raise ValueError(f"Error loading image at path: {image_path}")
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Run DeepFace emotion analysis; enforce_detection=False keeps it from
# raising when no face is confidently detected in the image.
try:
    result = DeepFace.analyze(img_path=image_path, actions=['emotion'], enforce_detection=False)
    dominant_emotion = result[0]['dominant_emotion']
except Exception as e:
    raise ValueError(f"Error in DeepFace analysis: {e}")

# Collapse the seven emotion labels into a three-way sentiment.
emotion_to_sentiment = {
    "happy": "Positive",
    "surprise": "Positive",
    "neutral": "Neutral",
    "sad": "Negative",
    "angry": "Negative",
    "fear": "Negative",
    "disgust": "Negative",
}
final_sentiment = emotion_to_sentiment.get(dominant_emotion, "Neutral")

# Overlay the labels on the image and display it in RGB with matplotlib.
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, f"Emotion: {dominant_emotion}", (10, 50), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
cv2.putText(img, f"Sentiment: {final_sentiment}", (10, 90), font, 1, (255, 0, 0), 2, cv2.LINE_AA)
img_rgb_overlay = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb_overlay)
plt.title(f"Emotion: {dominant_emotion}, Sentiment: {final_sentiment}")
plt.axis("off")
plt.show()
REG NO: 211422243XXX NAME: XXXXXXX
# Also echo the full DeepFace analysis and the derived sentiment to stdout.
print("Emotion Analysis Result:", result)
print("Predicted Sentiment:", final_sentiment)
OUTPUT:
0%| | 0.00/5.98M [00:00<?, ?
B/s]
9%|8 | 524k/5.98M [00:00<00:01, 3.62MB/s]
26%|##6 | 1.57M/5.98M [00:00<00:00,
5.71MB/s]
53%|#####2 | 3.15M/5.98M [00:00<00:00,
8.77MB/s]
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#6.a Mnist Dataset Autoencoders
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Labels are unused for an autoencoder; scale pixels to [0, 1] and flatten
# every 28x28 image into a 784-vector.
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

# 784 -> 32 -> 784 dense autoencoder with a sigmoid reconstruction layer.
input_dim = x_train.shape[1]
encoding_dim = 32
input_img = tf.keras.Input(shape=(input_dim,))
encoded = tf.keras.layers.Dense(encoding_dim, activation='relu')(input_img)
decoded = tf.keras.layers.Dense(input_dim, activation='sigmoid')(encoded)
autoencoder = tf.keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Train to reproduce the input; the test split doubles as validation data.
autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test))
reconstructed_images = autoencoder.predict(x_test)

# Originals on the top row, reconstructions directly below.
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(reconstructed_images[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
Epoch 1/50
235/235 [==============================] - 1s 4ms/step - loss: 0.2802 - val_loss: 0.1958
Epoch 2/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1749 - val_loss: 0.1560
Epoch 3/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1461 - val_loss: 0.1348
Epoch 4/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1296 - val_loss: 0.1221
Epoch 5/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1190 - val_loss: 0.1136
Epoch 6/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1117 - val_loss: 0.1075
Epoch 7/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1066 - val_loss: 0.1033
Epoch 8/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1028 - val_loss: 0.1000
Epoch 9/50
235/235 [==============================] - 1s 4ms/step - loss: 0.1000 - val_loss: 0.0975
Epoch 10/50
235/235 [==============================] - 1s 4ms/step - loss: 0.0979 - val_loss: 0.0959
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#6.b Medical X-ray Image
# NOTE: a stray page-header line ("REG NO ...") from the lab record had been
# pasted into the middle of the `test` flow_from_directory(...) call below,
# making the original text syntactically invalid; it is removed here.
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_path = "D:\\DL\\chest_xray\\train"
test_path = "D:\\DL\\chest_xray\\test"
valid_path = "D:\\DL\\chest_xray\\val"
batch_size = 16

# Augmented generator for training; plain rescaling for test/validation.
image_gen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
test_data_gen = ImageDataGenerator(rescale=1./255)

train = image_gen.flow_from_directory(
    train_path,
    target_size=(99, 128),
    color_mode='grayscale',
    class_mode='binary',
    batch_size=batch_size
)
test = test_data_gen.flow_from_directory(
    test_path,
    target_size=(99, 128),
    color_mode='grayscale',
    shuffle=False,  # keep ordering deterministic for evaluation
    class_mode='binary',
    batch_size=batch_size
)
valid = test_data_gen.flow_from_directory(
    valid_path,
    target_size=(99, 128),
    color_mode='grayscale',
    class_mode='binary',
    batch_size=batch_size
)

# Preview ten augmented training images with their class labels.
plt.figure(figsize=(12, 12))
for i in range(10):
    plt.subplot(2, 5, i + 1)
    for X_batch, Y_batch in train:
        image = X_batch[0]
        dic = {0: 'NORMAL', 1: 'PNEUMONIA'}
        plt.title(dic.get(Y_batch[0]))
        plt.axis('off')
        plt.imshow(np.squeeze(image), cmap='gray', interpolation='nearest')
        break  # one batch per subplot
plt.tight_layout()
plt.show()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#7. Continuous Bag Of Words (CBOW)
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Lambda
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

corpus = [
    "The quick brown fox jumps over the lazy dog.",
    "A fast brown dog jumps over the lazy cat.",
    "The speedy black cat jumps over the lazy dog."
]

# Integer-encode the corpus; index 0 is reserved by Keras, hence the +1.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
sequences = tokenizer.texts_to_sequences(corpus)
vocab_size = len(tokenizer.word_index) + 1
embedding_size = 10
window_size = 2

# CBOW training pairs: 2*window_size surrounding words predict the centre word.
contexts, targets = [], []
for sequence in sequences:
    for i in range(window_size, len(sequence) - window_size):
        context = sequence[i - window_size:i] + sequence[i + 1:i + window_size + 1]
        target = sequence[i]
        contexts.append(context)
        targets.append(target)
X = np.array(contexts)
y = np.array(targets)

# Average the context embeddings, then predict the centre word via softmax.
model = Sequential()
model.add(Embedding(input_dim=vocab_size, output_dim=embedding_size, input_length=2 * window_size))
model.add(Lambda(lambda x: tf.reduce_mean(x, axis=1)))
model.add(Dense(units=vocab_size, activation='softmax'))

# BUG FIX: the original saved and reloaded the weights without ever compiling
# or training the model, so the "embeddings" it plotted were just the random
# initial values. Train before persisting.
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
model.fit(X, y, epochs=100, verbose=0)

model.save_weights('cbow_weights.h5')
model.load_weights('cbow_weights.h5')
embeddings = model.get_weights()[0]

# Project the learned embeddings to 2-D and label each point with its word.
pca = PCA(n_components=2)
reduced_embeddings = pca.fit_transform(embeddings)
plt.figure(figsize=(5, 5))
for word, idx in tokenizer.word_index.items():
    # BUG FIX: row 0 of the embedding matrix is the reserved padding index;
    # each word's vector lives at its tokenizer index, not at enumerate order
    # (the original was off by one for every word).
    px, py = reduced_embeddings[idx]
    plt.scatter(px, py)
    plt.annotate(word, xy=(px, py), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.show()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
After converting our words in the corpus into a vector of integers:
[[1, 8, 6, 9, 2, 3, 1, 4, 5],[10, 11, 6, 5, 2, 3, 1, 4, 7], [1, 12, 13, 7, 2, 3, 1, 4, 5]]
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#8.Cat or Dog
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# BUG FIX: the original did `from tensorflow.keras.preprocessing.image import
# ImageDataGenerator, image`, but `image` is not a member of that module and
# the import raises ImportError; the helper module lives one level up.
from tensorflow.keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt

dataset_path = 'Dataset'
img_height, img_width = 150, 150
batch_size = 32

# Augmentation plus an 80/20 train/validation split from the same directory.
datagen = ImageDataGenerator(rescale=1.0/255, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2,
                             shear_range=0.2, zoom_range=0.2, horizontal_flip=True, validation_split=0.2)
train_generator = datagen.flow_from_directory(dataset_path + '/train', target_size=(img_height, img_width),
                                              batch_size=batch_size, class_mode='binary', subset='training')
validation_generator = datagen.flow_from_directory(dataset_path + '/train', target_size=(img_height, img_width),
                                                   batch_size=batch_size, class_mode='binary', subset='validation')

# Three conv/pool stages, then a dense head with a sigmoid cat-vs-dog output.
model = keras.Sequential([layers.Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)),
                          layers.MaxPooling2D(2, 2),
                          layers.Conv2D(64, (3, 3), activation='relu'),
                          layers.MaxPooling2D(2, 2),
                          layers.Conv2D(128, (3, 3), activation='relu'),
                          layers.MaxPooling2D(2, 2),
                          layers.Flatten(),
                          layers.Dense(512, activation='relu'),
                          layers.Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
ehistory = model.fit(train_generator, validation_data=validation_generator, epochs=10)

# Learning curves.
plt.plot(ehistory.history['accuracy'], label='train_accuracy')
plt.plot(ehistory.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

model.save('cat_dog_classifier.h5')
print("Model training complete. Saved as 'cat_dog_classifier.h5'")
model = tf.keras.models.load_model('cat_dog_classifier.h5')


def prepare_image(img_path, img_height=150, img_width=150):
    """Load an image file and scale/shape it into a (1, H, W, 3) float batch."""
    img = image.load_img(img_path, target_size=(img_height, img_width))
    img_array = image.img_to_array(img) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    return img_array


img_path = 'dogtest.jpg'
img_array = prepare_image(img_path)
prediction = model.predict(img_array)
# Sigmoid output > 0.5 means class 1 ("Dog" in alphabetical directory order).
title = "Prediction: Dog" if prediction[0][0] > 0.5 else "Prediction: Cat"
img = image.load_img(img_path)
plt.imshow(img)
plt.title(title)
plt.show()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#9.CIFAR-10
import os
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# CIFAR-10: 32x32 RGB images, 10 classes; scale pixels and one-hot the labels.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
num_classes = 10
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
class_labels = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']

# Two conv/pool stages followed by a small dense head.
input_layer = Input(shape=(32, 32, 3))
conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(input_layer)
pool1 = MaxPooling2D((2, 2))(conv1)
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
pool2 = MaxPooling2D((2, 2))(conv2)
flatten = Flatten()(pool2)
dense1 = Dense(64, activation='relu')(flatten)
output_layer = Dense(num_classes, activation='softmax')(dense1)
model = Model(inputs=input_layer, outputs=output_layer)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_test, y_test))


def predict_custom_image(img_path):
    """Classify a single image file with the trained model and display it.

    Raises FileNotFoundError if the file cannot be read.
    """
    img = cv2.imread(img_path)
    if img is None:
        raise FileNotFoundError(f"Image not found at path: {img_path}")
    # BUG FIX: cv2.imread returns BGR channel order, but the network was
    # trained on RGB CIFAR images — convert before predicting, otherwise
    # the model sees colour-swapped input.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (32, 32))
    img = img.astype('float32') / 255.0
    img = np.expand_dims(img, axis=0)
    prediction = model.predict(img)
    predicted_class_idx = np.argmax(prediction)
    predicted_class = class_labels[predicted_class_idx]
    img_display = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    plt.imshow(img_display)
    plt.title(f'Predicted Class: {predicted_class}')
    plt.axis('off')
    plt.show()
    print(f'Predicted Class: {predicted_class}')


predict_custom_image(r"Automobile.jpg")
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
Epoch 1/10
1563/1563 [==============================] - 14s 9ms/step - loss: 1.4275 -accuracy: 0.4898
- val_loss: 1.1954 - val_accuracy: 0.5832
Epoch 2/10
1563/1563 [==============================] - 14s 9ms/step - loss: 1.0617-accuracy: 0.6271
- val_loss: 1.0365 - val_accuracy: 0.6359
Epoch 3/10
1563/1563 [==============================] - 16s 10ms/step - loss:0.9271- accuracy: 0.6766
-val_loss: 0.9413 - val_accuracy: 0.6670
Epoch 4/10
1563/1563 [==============================] - 16s 10ms/step - loss:0.8355- accuracy: 0.7089
- val_loss: 0.9285 - val_accuracy: 0.6838
Predicted Class: Automobile
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#10.a Human Face Detection
import os
import cv2
import matplotlib.pyplot as plt

# Work around duplicate-OpenMP-runtime crashes on some Windows setups.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

image_path = "group photo.jpg"
img = cv2.imread(image_path)
# BUG FIX: cv2.imread returns None for a missing/unreadable file, and the
# original then crashed inside cvtColor with an opaque error. Fail clearly,
# matching the check used by the DeepFace program in this record.
if img is None:
    raise ValueError(f"Error loading image at path: {image_path}")

# Haar cascades operate on grayscale input.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 4)  # scaleFactor, minNeighbors

# Draw a blue (BGR) rectangle around every detected face.
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

img_rgb_overlay = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb_overlay)
plt.axis("off")
plt.show()
print("Detected Faces:", faces)
OUTPUT:
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#10.b Human Face Detection
# NOTE: a stray page-header line ("REG NO ...") from the lab record had been
# pasted into the middle of this script in the original text, breaking the
# syntax; it is removed here.
import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.transforms import functional as F
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# COCO-pretrained Faster R-CNN; eval() switches off training-only behaviour.
model = fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

image_path = "h1.png"
image = Image.open(image_path).convert("RGB")
image_tensor = F.to_tensor(image).unsqueeze(0)  # add the batch dimension

# Inference only: no gradients needed.
with torch.no_grad():
    predictions = model(image_tensor)

boxes = predictions[0]["boxes"]
labels = predictions[0]["labels"]
scores = predictions[0]["scores"]

# COCO label 1 is "person"; keep only confident detections.
threshold = 0.8
human_boxes = [box for i, box in enumerate(boxes) if labels[i] == 1 and scores[i] > threshold]

fig, ax = plt.subplots(1, figsize=(10, 6))
ax.imshow(image)
for box in human_boxes:
    x1, y1, x2, y2 = box.tolist()
    rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, edgecolor="red", facecolor="none")
    ax.add_patch(rect)
plt.axis("off")
plt.show()
OUTPUT:
REG NO: 211422243XXX NAME: XXXXXXX
PROGRAM:
#11.Chat Bot
import google.generativeai as genai

API_KEY = 'Your-API-Key'
genai.configure(api_key=API_KEY)


def chatbot():
    """Interactive REPL over the Gemini API; the user types 'exit' to quit."""
    print("Chatbot: Hello! Type 'exit' to end the chat.")
    model = genai.GenerativeModel("gemini-1.5-pro-latest")
    # Keep prompting until the user asks to leave, then say goodbye.
    while (user_input := input("You: ")).lower() != "exit":
        response = model.generate_content(user_input)
        print("Chatbot:", response.text)
    print("Chatbot: Goodbye!")


chatbot()
REG NO: 211422243XXX NAME: XXXXXXX
OUTPUT:
Chatbot: Hello! Type 'exit' to end the chat.
You: hi
Chatbot: Hi there! How can I help you today?
You: who is prime minister of india
Chatbot: The current Prime Minister of India is Narendra Modi.
You: what is weather in chennai
Chatbot: Chennai's weather is typically hot and humid.
REG NO: 211422243XXX NAME: XXXXXXX