The document describes implementations of various search algorithms and machine learning models. It includes uninformed search algorithms (BFS and DFS) on a graph, informed search algorithms (A* and memory-bounded A*) on a grid to find the shortest path, a naive Bayes classifier on the Iris dataset, Bayesian networks, linear and random forest regression on the California Housing dataset, decision trees and random forests on the Iris dataset, and a support vector machine on the Iris dataset.


Ex.No : 1 Implementation of Uninformed search algorithms (BFS, DFS)
Date :

PROGRAM:

graph = {
'A': ['B', 'C'],
'B': ['D', 'E'],
'C': ['F'],
'D': [],
'E': ['F'],
'F': []
}

def bfs(graph, start, goal):
    # Breadth-first search: explore paths level by level using a FIFO queue.
    queue = [[start]]
    visited = set()

    while queue:
        path = queue.pop(0)
        node = path[-1]

        if node == goal:
            return path

        if node not in visited:
            visited.add(node)

            for adjacent in graph[node]:
                new_path = list(path)
                new_path.append(adjacent)
                queue.append(new_path)

    return "No path found"

def dfs(graph, start, goal):
    # Depth-first search: explore paths using a LIFO stack.
    stack = [[start]]
    visited = set()

    while stack:
        path = stack.pop()
        node = path[-1]

        if node == goal:
            return path

        if node not in visited:
            visited.add(node)

            for adjacent in graph[node]:
                new_path = list(path)
                new_path.append(adjacent)
                stack.append(new_path)

    return "No path found"

print(bfs(graph, 'A', 'F'))


print(dfs(graph, 'A', 'F'))
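
A quick sanity check can follow the two calls above. Assuming the bfs and dfs definitions from this program are in scope (and a path is found), every consecutive pair of nodes in a returned path should be an edge of the graph:

# Sanity check: each step of the returned path must follow a graph edge.
path = bfs(graph, 'A', 'F')
assert all(b in graph[a] for a, b in zip(path, path[1:]))
path = dfs(graph, 'A', 'F')
assert all(b in graph[a] for a, b in zip(path, path[1:]))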

Output:
Ex.No : 2 Implementation of Informed search algorithms (A*, memory-bounded A*)
Date :

PROGRAM:

import heapq

grid = [[0, 0, 1, 0, 0, 0, 1, 0],


[0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0]]

start = (0, 0)
goal = (5, 7)

# A* Algorithm
def astar(grid, start, goal):
    open_list = [(0, start)]
    closed_list = set()
    g_score = {start: 0}  # start node's g score is 0
    h_score = {start: heuristic(start, goal)}
    parent = {start: None}

    while open_list:
        current = heapq.heappop(open_list)[1]

        if current == goal:
            return reconstruct_path(parent, current)

        closed_list.add(current)

        for neighbor in get_neighbors(grid, current):
            if neighbor in closed_list:
                continue

            new_g_score = g_score[current] + 1
            if neighbor not in [item[1] for item in open_list]:
                heapq.heappush(open_list, (new_g_score + heuristic(neighbor, goal), neighbor))
            elif new_g_score >= g_score[neighbor]:
                continue

            parent[neighbor] = current
            g_score[neighbor] = new_g_score
            h_score[neighbor] = heuristic(neighbor, goal)

    return None

# Memory-Bounded A* Algorithm
def memory_bounded_astar(grid, start, goal, memory_limit):
    open_list = [(0, start)]
    closed_list = set()
    g_score = {start: 0}  # start node's g score is 0
    h_score = {start: heuristic(start, goal)}  # start node's h score is estimated using the heuristic function
    parent = {start: None}

    while open_list:
        current = heapq.heappop(open_list)[1]

        if current == goal:
            return reconstruct_path(parent, current)

        closed_list.add(current)

        for neighbor in get_neighbors(grid, current):
            if neighbor in closed_list:
                continue

            new_g_score = g_score[current] + 1

            if neighbor not in [item[1] for item in open_list]:
                heapq.heappush(open_list, (new_g_score + heuristic(neighbor, goal), neighbor))
            elif new_g_score >= g_score[neighbor]:
                continue

            parent[neighbor] = current
            g_score[neighbor] = new_g_score
            h_score[neighbor] = heuristic(neighbor, goal)

        while len(open_list) > memory_limit:
            node_to_remove = heapq.nlargest(1, open_list)[0]  # get the node with the highest f score
            open_list.remove(node_to_remove)  # remove the node from the open list
            del g_score[node_to_remove[1]]  # remove the node's g score from the dictionary

    return None

def heuristic(node, goal):
    x1, y1 = node
    x2, y2 = goal
    return abs(x1 - x2) + abs(y1 - y2)

def get_neighbors(grid, node):
    row, col = node
    neighbors = []

    # Check north neighbor
    if row > 0 and grid[row-1][col] == 0:
        neighbors.append((row-1, col))

    # Check south neighbor
    if row < len(grid) - 1 and grid[row+1][col] == 0:
        neighbors.append((row+1, col))

    # Check west neighbor
    if col > 0 and grid[row][col-1] == 0:
        neighbors.append((row, col-1))

    # Check east neighbor
    if col < len(grid[0]) - 1 and grid[row][col+1] == 0:
        neighbors.append((row, col+1))

    return neighbors

def reconstruct_path(parent, current):
    path = [current]
    while parent[current] is not None:
        current = parent[current]
        path.append(current)
    path.reverse()
    return path

memory_limit = 1000000000 # cap on the number of nodes kept in the open list (effectively unlimited here)

result = astar(grid, start, goal)


print(result)

result = memory_bounded_astar(grid, start, goal, memory_limit)


print(result)
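
For a visual check, the path returned above can be overlaid on the grid. This is a small sketch assuming result holds the list of (row, col) tuples produced by astar; obstacle cells are printed as #, path cells as *, free cells as dots:

# Print the grid with the A* path marked (assumes `grid` and `result` from above).
if result:
    path_cells = set(result)
    for r, row in enumerate(grid):
        line = ''
        for c, cell in enumerate(row):
            if (r, c) in path_cells:
                line += '*'
            elif cell == 1:
                line += '#'
            else:
                line += '.'
        print(line)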

OUTPUT:
Ex.No : 3 Implement Naive Bayes model

Date :

PROGRAM:

from sklearn.datasets import load_iris


from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=42)
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy}")

OUTPUT:
Ex.No : 4 Implement Bayesian Networks

Date :

PROGRAM:

# Import required libraries
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import VariableElimination
import numpy as np

# Define the Bayesian Network


model = BayesianModel([('A', 'C'), ('B', 'C'), ('C', 'D'), ('C', 'E')])

# Define the Conditional Probability Distributions (CPDs)


a=[[0.4, 0.6]]
a_=np.reshape(a,(2,1))
b=[[0.7, 0.3]]
b_=np.reshape(b,(2,1))
c_=np.reshape([[0.1, 0.2, 0.7, 0.3],[0.8, 0.7, 0.2, 0.6],[0.1, 0.1, 0.1, 0.1]],(3,4))
cpd_a = TabularCPD(variable='A', variable_card=2, values=a_)
cpd_b = TabularCPD(variable='B', variable_card=2, values=b_)
cpd_c = TabularCPD(variable='C', variable_card=3,
values=c_,
evidence=['A', 'B'], evidence_card=[2, 2])
cpd_d = TabularCPD(variable='D', variable_card=2,
values=[[0.9, 0.3, 0.4], [0.1, 0.7, 0.6]],
evidence=['C'], evidence_card=[3])
cpd_e = TabularCPD(variable='E', variable_card=2,
values=[[0.3, 0.6, 0.8], [0.7, 0.4, 0.2]],
evidence=['C'], evidence_card=[3])

# Add the CPDs to the model


model.add_cpds(cpd_a, cpd_b, cpd_c, cpd_d, cpd_e)

# Check if the model is valid


model.check_model()

# Perform variable elimination inference


inference = VariableElimination(model)
# Compute the probability distribution of D given evidence of A=0 and E=1
query = inference.query(variables=['D'], evidence={'A': 0, 'E': 1})
print(query)
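
The same inference object can answer other queries. For example (a sketch assuming the model above passes check_model), the distribution of E given that B is observed:

# Query another variable with different evidence.
query_e = inference.query(variables=['E'], evidence={'B': 1})
print(query_e)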

Output:
Ex.No : 5 Build Regression models

Date :

PROGRAM:

from sklearn.datasets import fetch_california_housing


from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error

# Load the California Housing dataset


housing = fetch_california_housing()

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(housing.data, housing.target, test_size=0.2, random_state=42)

# Train a linear regression model


lr = LinearRegression()
lr.fit(X_train, y_train)

# Make predictions on the testing set


y_pred_lr = lr.predict(X_test)

# Calculate the mean squared error for the linear regression model
mse_lr = mean_squared_error(y_test, y_pred_lr)
print(f"Linear Regression Mean Squared Error: {mse_lr:.2f}")

# Train a random forest regression model


rf = RandomForestRegressor(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)

# Make predictions on the testing set


y_pred_rf = rf.predict(X_test)

# Calculate the mean squared error for the random forest regression model
mse_rf = mean_squared_error(y_test, y_pred_rf)
print(f"Random Forest Mean Squared Error: {mse_rf:.2f}")
OUTPUT:
Ex.No : 6 Build decision trees and random forests

Date :

PROGRAM:

from sklearn.datasets import load_iris


from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Load the iris dataset


iris = load_iris()
X = iris.data
y = iris.target

# Split the data into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Initialize the decision tree model


dt_model = DecisionTreeClassifier(max_depth=3, random_state=42)

# Train the decision tree model on the training data


dt_model.fit(X_train, y_train)

# Evaluate the performance of the decision tree model on the testing data
dt_pred = dt_model.predict(X_test)
dt_acc = accuracy_score(y_test, dt_pred)

# Initialize the random forest model


rf_model = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=42)

# Train the random forest model on the training data


rf_model.fit(X_train, y_train)

# Evaluate the performance of the random forest model on the testing data
rf_pred = rf_model.predict(X_test)
rf_acc = accuracy_score(y_test, rf_pred)

# Output the accuracy scores of the models


print(f"Decision Tree Accuracy: {dt_acc}")
print(f"Random Forest Accuracy: {rf_acc}")

OUTPUT
Ex.No : 7 Build SVM model
Date :

PROGRAM:

# import required libraries


from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# load dataset
iris = datasets.load_iris()

# split dataset into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,
test_size=0.3, random_state=0)

# initialize SVM classifier with linear kernel


svm = SVC(kernel='linear')

# train SVM classifier on training set


svm.fit(X_train, y_train)

# make predictions on testing set


y_pred = svm.predict(X_test)
# calculate accuracy of predictions
accuracy = accuracy_score(y_test, y_pred)
# print accuracy of predictions
print('Accuracy:', accuracy)
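
Beyond a single accuracy number, a confusion matrix shows which classes the linear-kernel SVM confuses (assuming y_test and y_pred from above are in scope):

# Per-class breakdown of the SVM predictions.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))
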
Output :
Ex.No : 8 Implement ensembling technique
Date :

PROGRAM:

from sklearn.datasets import load_iris


from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# load dataset
iris = load_iris()

# split dataset into training and testing sets


X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,
test_size=0.3, random_state=0)

# initialize random forest classifier with 10 trees


rfc = RandomForestClassifier(n_estimators=10)

# train the random forest classifier on the training set


rfc.fit(X_train, y_train)
# make predictions on the testing set
y_pred = rfc.predict(X_test)

# calculate accuracy of predictions


accuracy = accuracy_score(y_test, y_pred)

# print accuracy of predictions


print('Accuracy:', accuracy)
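
Bagging with a random forest is one ensembling technique; another common one is voting across several different base models. A minimal sketch, assuming the same train/test split and imports as above:

# Hard-voting ensemble over three different classifiers (illustrative sketch).
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC

voting = VotingClassifier(estimators=[
    ('lr', LogisticRegression(max_iter=1000)),
    ('dt', DecisionTreeClassifier()),
    ('svm', SVC())
], voting='hard')
voting.fit(X_train, y_train)
print('Voting accuracy:', accuracy_score(y_test, voting.predict(X_test)))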

Output:
Ex.No : 9 Implement clustering algorithms
Date :

PROGRAM:

from sklearn.cluster import KMeans


from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
# generate sample data
X, y_true = make_blobs(n_samples=300, centers=4, cluster_std=0.60,
random_state=0)
# initialize k-means clustering algorithm with 4 clusters
kmeans = KMeans(n_clusters=4)

# train the k-means clustering algorithm on the sample data


kmeans.fit(X)

# get the predicted cluster labels for the sample data


y_pred = kmeans.predict(X)

# plot the sample data with predicted cluster labels


plt.scatter(X[:, 0], X[:, 1], c=y_pred, s=50, cmap='viridis')
plt.show()
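
The fitted cluster centres can be drawn on the same scatter plot to make the grouping clearer (assuming kmeans, X and y_pred from above are in scope):

# Overlay the learned cluster centres on the scatter plot.
centers = kmeans.cluster_centers_
plt.scatter(X[:, 0], X[:, 1], c=y_pred, s=50, cmap='viridis')
plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, marker='X')
plt.show()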

Output:
Ex.No : 10 Implement EM for Bayesian networks
Date :

PROGRAM:

import numpy as np
from collections import defaultdict

class BayesianNetwork:
    def __init__(self, structure):
        self.structure = structure
        self.nodes = sorted(list(self.structure.keys()))
        self.parents = defaultdict(list)
        for child, parents in self.structure.items():
            for parent in parents:
                self.parents[child].append(parent)

class EM:
    def __init__(self, bn, data):
        self.bn = bn
        self.data = data

    def run(self, num_iterations=100, tolerance=1e-4):
        # Initialize parameters
        pi = defaultdict(float)
        theta = defaultdict(lambda: defaultdict(float))
        for i in range(len(self.bn.nodes)):
            node = self.bn.nodes[i]
            pi[node] = np.mean(self.data[:, i])
            for parent in self.bn.parents[node]:
                parent_index = self.bn.nodes.index(parent)
                parent_data = self.data[:, parent_index]
                for parent_value in np.unique(parent_data):
                    parent_mask = parent_data == parent_value
                    theta[parent][parent_value, node] = np.mean(self.data[parent_mask, i])

        # Run EM algorithm
        for iteration in range(num_iterations):
            # E-step: Compute expected sufficient statistics
            pi_new = defaultdict(float)
            theta_new = defaultdict(lambda: defaultdict(float))
            for i in range(len(self.bn.nodes)):
                node = self.bn.nodes[i]
                pi_new[node] = np.mean(self.data[:, i])
                for parent in self.bn.parents[node]:
                    parent_index = self.bn.nodes.index(parent)
                    parent_data = self.data[:, parent_index]
                    for parent_value in np.unique(parent_data):
                        parent_mask = parent_data == parent_value
                        theta_new[parent][parent_value, node] = np.mean(self.data[parent_mask, i])

            # M-step: Update parameters
            pi_change = np.abs(np.array(list(pi.values())) - np.array(list(pi_new.values()))).max()
            theta_change = 0
            for node in theta:
                node_theta_change = np.abs(np.array(list(theta[node].values())) - np.array(list(theta_new[node].values()))).max()
                theta_change = max(theta_change, node_theta_change)
            if pi_change < tolerance and theta_change < tolerance:
                break

            pi = pi_new
            theta = theta_new

        return pi, theta

# Example usage
structure = {0: [], 1: [0], 2: [0], 3: [1, 2]}
data = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 1]])
bn = BayesianNetwork(structure)
em = EM(bn, data)
pi, theta = em.run()
print("pi:", pi)
print("theta:", theta)
Output:
Ex.No : 11 Build simple NN models
Date :

PROGRAM:

# Step 1: Load the MNIST dataset using Keras


from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Step 2: Preprocess the data


x_train = x_train / 255.0
x_test = x_test / 255.0

# Step 3: Split the data into training and testing sets


from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2)

# Step 4: Define the neural network architecture


from keras.models import Sequential
from keras.layers import Dense, Flatten

model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

# Step 5: Compile the model


model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Step 6: Train the model


model.fit(x_train, y_train, batch_size=32, epochs=10, validation_data=(x_val, y_val))

# Step 7: Evaluate the model


loss, accuracy = model.evaluate(x_test, y_test)
print("Test accuracy:", accuracy)
OUTPUT:
Ex.No : 12 Build deep learning NN models
Date :

PROGRAM:

# Import necessary libraries


import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

# Load the CIFAR-10 dataset


(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize the data


x_train = x_train / 255.0
x_test = x_test / 255.0

# Define the model architecture


model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Conv2D(128, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(10, activation='softmax')
])

# Compile the model


model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Train the model


model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))

# Evaluate the model


test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test accuracy: {test_acc}')
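
To inspect how many parameters the convolutional network has, and to keep the trained weights for later use, two standard Keras calls can be added (a sketch assuming the model from above; the filename is only an example):

# Show the layer-by-layer architecture and save the trained model to disk.
model.summary()
model.save('cifar10_cnn.h5')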

Output:
