
# Session 1

#a) Solve N-Queen Problem without Recursion

def print_solution(board):
    for row in board:
        print(" ".join(str(x) for x in row))

def is_safe(board, row, col, n):
    # Check the row to the left of this column
    for i in range(col):
        if board[row][i] == 1:
            return False
    # Check the upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(col, -1, -1)):
        if board[i][j] == 1:
            return False
    # Check the lower-left diagonal
    for i, j in zip(range(row, n), range(col, -1, -1)):
        if board[i][j] == 1:
            return False
    return True
def solve_n_queen(n):
    board = [[0] * n for _ in range(n)]
    stack = []   # positions of queens placed so far
    col = 0
    row = 0      # next candidate row in the current column
    while col < n:
        # Find the next safe row in this column
        while row < n and not is_safe(board, row, col, n):
            row += 1
        if row < n:
            # Place a queen and move to the next column
            board[row][col] = 1
            stack.append((row, col))
            col += 1
            row = 0
        else:
            # No safe row: backtrack to the previous column
            if not stack:
                print("No solution exists")
                return
            row, col = stack.pop()
            board[row][col] = 0
            row += 1   # resume the search below the removed queen
    print_solution(board)

solve_n_queen(4)

# b) Backtracking approach for N-Queens

def solve_n_queen_backtracking(board, col, n):
    if col >= n:
        return True
    for row in range(n):
        if is_safe(board, row, col, n):
            board[row][col] = 1
            if solve_n_queen_backtracking(board, col + 1, n):
                return True
            board[row][col] = 0
    return False

def solve_backtracking(n):
    board = [[0] * n for _ in range(n)]
    if solve_n_queen_backtracking(board, 0, n):
        print_solution(board)
    else:
        print("No solution exists")

solve_backtracking(4)

# Session 2
#a) Min-Max Algorithm

def minmax(depth, node_index, is_max, values):
    # Leaf level of a complete binary game tree of height 3
    if depth == 3:
        return values[node_index]
    if is_max:
        return max(minmax(depth + 1, node_index * 2 + i, False, values)
                   for i in range(2))
    else:
        return min(minmax(depth + 1, node_index * 2 + i, True, values)
                   for i in range(2))

values = [3, 5, 6, 9, 1, 2, 0, -1]
print("The optimal value is:", minmax(0, 0, True, values))

# Session 3
#a) Alpha-Beta Pruning Algorithm

def alpha_beta_pruning(depth, node_index, is_max, values, alpha, beta):
    if depth == 3:
        return values[node_index]
    if is_max:
        best = float('-inf')
        for i in range(2):
            val = alpha_beta_pruning(depth + 1, node_index * 2 + i, False, values, alpha, beta)
            best = max(best, val)
            alpha = max(alpha, best)
            if beta <= alpha:
                break
        return best
    else:
        best = float('inf')
        for i in range(2):
            val = alpha_beta_pruning(depth + 1, node_index * 2 + i, True, values, alpha, beta)
            best = min(best, val)
            beta = min(beta, best)
            if beta <= alpha:
                break
        return best

values = [3, 5, 6, 9, 1, 2, 0, -1]
print("The optimal value with Alpha-Beta Pruning is:",
      alpha_beta_pruning(0, 0, True, values, float('-inf'), float('inf')))

# Session 4
#a) Breadth-First Search (BFS)
from collections import deque
def bfs(graph, start):
    visited = set()
    queue = deque([start])
    visited.add(start)
    while queue:
        node = queue.popleft()
        print(node, end=" ")
        for neighbor in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)

graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
print("Breadth-First Search:")
bfs(graph, 'A')

# Session 5
#a) Depth-First Search (DFS)

def dfs(graph, node, visited):
    if node not in visited:
        print(node, end=" ")
        visited.add(node)
        for neighbor in graph[node]:
            dfs(graph, neighbor, visited)

visited = set()
print("\nDepth-First Search:")
dfs(graph, 'A', visited)

# Session 6
#a) Iterative Deepening Depth-First Search (IDDFS)

def iddfs(graph, start, max_depth):
    # Depth-limited search: return the nodes exactly `depth` levels below `node`
    def dls(node, depth):
        if depth == 0:
            return [node]
        if depth < 0:
            return []
        path = []
        for neighbor in graph.get(node, []):
            path.extend(dls(neighbor, depth - 1))
        return path
    for depth in range(max_depth + 1):
        result = dls(start, depth)
        if result:
            print(f"Depth {depth}:", result)

graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}
print("\nIterative Deepening Depth-First Search:")
iddfs(graph, 'A', 3)

# Session 7
#a) Best-First Search
import heapq

def best_first_search(graph, start, goal):
    open_list = [(0, start)]   # priority queue ordered by edge cost
    heapq.heapify(open_list)
    visited = set()
    while open_list:
        _, current = heapq.heappop(open_list)
        if current in visited:
            continue
        print(current, end=" ")
        visited.add(current)
        if current == goal:
            return
        for neighbor, cost in graph.get(current, []):
            if neighbor not in visited:
                heapq.heappush(open_list, (cost, neighbor))

graph = {
    'A': [('B', 1), ('C', 3)],
    'B': [('D', 4), ('E', 2)],
    'C': [('F', 5)],
    'D': [],
    'E': [('F', 1)],
    'F': []
}
print("\nBest-First Search:")
best_first_search(graph, 'A', 'F')

# Session 8
#a) A* Algorithm
def a_star(graph, start, goal, h):
    open_list = [(0, start)]
    heapq.heapify(open_list)
    came_from = {}
    g_score = {start: 0}
    while open_list:
        _, current = heapq.heappop(open_list)
        if current == goal:
            path = []
            while current in came_from:
                path.append(current)
                current = came_from[current]
            path.append(start)
            print("Path:", " -> ".join(reversed(path)))
            return
        for neighbor, cost in graph.get(current, []):
            tentative_g_score = g_score.get(current, float('inf')) + cost
            if tentative_g_score < g_score.get(neighbor, float('inf')):
                came_from[neighbor] = current
                g_score[neighbor] = tentative_g_score
                f_score = tentative_g_score + h.get(neighbor, float('inf'))
                heapq.heappush(open_list, (f_score, neighbor))

graph = {
    'A': [('B', 1), ('C', 3)],
    'B': [('D', 4), ('E', 2)],
    'C': [('F', 5)],
    'D': [],
    'E': [('F', 1)],
    'F': []
}
h = {'A': 6, 'B': 2, 'C': 4, 'D': 6, 'E': 1, 'F': 0}
print("\nA* Algorithm:")
a_star(graph, 'A', 'F', h)

# Session 9
#a) AO* Algorithm
def ao_star(graph, start):
    # Simplified AO*-style expansion: explore every AND/OR option of a node
    def dfs(node, visited):
        if node in visited:
            return []
        visited.add(node)
        if not graph.get(node):   # terminal node: part of the solution
            return [node]
        path = [node]
        for option in graph[node]:      # each option is a list of AND-nodes
            for sub_node in option:
                path.extend(dfs(sub_node, visited))
        return path
    visited = set()
    solution = dfs(start, visited)
    print("Solution Path:", solution)

graph = {
    'A': [['B', 'C'], ['D']],
    'B': [['E']],
    'C': [['G']],
    'D': [['G']],
    'E': [['F']],
    'F': [],
    'G': []
}
print("\nAO* Algorithm:")
ao_star(graph, 'A')

# Session 10
#a) IDA* Algorithm
def ida_star(graph, start, goal, h):
    def dfs_f_limit(node, g, f_limit, path):
        path.append(node)
        f = g + h[node]
        if f > f_limit:
            path.pop()
            return f
        if node == goal:
            print("Path:", " -> ".join(path))
            return True
        min_limit = float('inf')
        for neighbor, cost in graph.get(node, []):
            if neighbor not in path:
                result = dfs_f_limit(neighbor, g + cost, f_limit, path)
                if result is True:
                    return True
                if isinstance(result, (int, float)):
                    min_limit = min(min_limit, result)
        path.pop()
        return min_limit
    f_limit = h[start]
    while True:
        path = []
        result = dfs_f_limit(start, 0, f_limit, path)
        if result is True:
            return
        if result == float('inf'):
            print("No Solution")
            return
        f_limit = result
graph = {
    'A': [('B', 1), ('C', 3)],
    'B': [('D', 4), ('E', 2)],
    'C': [('F', 5)],
    'D': [],
    'E': [('F', 1)],
    'F': []
}
h = {'A': 6, 'B': 2, 'C': 4, 'D': 6, 'E': 1, 'F': 0}
print("\nIDA* Algorithm:")
ida_star(graph, 'A', 'F', h)

# Session 11
#a) K-Nearest Neighbor Algorithm
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# Load dataset
iris = load_iris()
X = iris.data
y = iris.target
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# KNN Classifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("K-Nearest Neighbors Accuracy:", accuracy_score(y_test, y_pred))

# b) Naïve Bayes Algorithm

from sklearn.naive_bayes import GaussianNB
# Naïve Bayes Classifier
nb = GaussianNB()
nb.fit(X_train, y_train)
y_pred_nb = nb.predict(X_test)
print("Naïve Bayes Accuracy:", accuracy_score(y_test, y_pred_nb))

# Session 12
#a) Decision Trees
from sklearn.tree import DecisionTreeClassifier
# Decision Tree Classifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_test)
print("Decision Tree Accuracy:", accuracy_score(y_test, y_pred_dt))

# b) Logistic Regression
from sklearn.linear_model import LogisticRegression
# Logistic Regression (max_iter raised so the lbfgs solver converges on iris)
lr = LogisticRegression(max_iter=1000)
lr.fit(X_train, y_train)
y_pred_lr = lr.predict(X_test)
print("Logistic Regression Accuracy:", accuracy_score(y_test, y_pred_lr))

# Session 13
#a) Support Vector Machines
from sklearn.svm import SVC
# SVM Classifier
svm = SVC()
svm.fit(X_train, y_train)
y_pred_svm = svm.predict(X_test)
print("SVM Accuracy:", accuracy_score(y_test, y_pred_svm))

# Session 14
#a) Linear Regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# Linear Regression
X_reg = X[:, :1] # Use only one feature for simplicity
y_reg = X[:, 1] # Target variable
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(
    X_reg, y_reg, test_size=0.3, random_state=42)
lr_reg = LinearRegression()
lr_reg.fit(X_train_reg, y_train_reg)
y_pred_reg = lr_reg.predict(X_test_reg)
print("Linear Regression MSE:", mean_squared_error(y_test_reg, y_pred_reg))

# b) Polynomial Regression
from sklearn.preprocessing import PolynomialFeatures
# Polynomial Regression
poly = PolynomialFeatures(degree=2)
X_poly = poly.fit_transform(X_reg)
X_train_poly, X_test_poly, y_train_poly, y_test_poly = train_test_split(
    X_poly, y_reg, test_size=0.3, random_state=42)
poly_reg = LinearRegression()
poly_reg.fit(X_train_poly, y_train_poly)
y_pred_poly = poly_reg.predict(X_test_poly)
print("Polynomial Regression MSE:", mean_squared_error(y_test_poly, y_pred_poly))

# Session 15
#a) Support Vector Regression
from sklearn.svm import SVR
# SVR
svr = SVR()
svr.fit(X_train_reg, y_train_reg)
y_pred_svr = svr.predict(X_test_reg)
print("Support Vector Regression MSE:", mean_squared_error(y_test_reg, y_pred_svr))

# Session 16
#a) Artificial Neural Network
from sklearn.neural_network import MLPClassifier
# ANN Classifier
ann = MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=500)
ann.fit(X_train, y_train)
y_pred_ann = ann.predict(X_test)
print("Artificial Neural Network Accuracy:", accuracy_score(y_test, y_pred_ann))

# b) Feed Forward Neural Network

# The same implementation as the ANN above can be extended using TensorFlow/Keras
# for custom architectures.
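
# A minimal sketch of such a feed-forward network on the same iris split, assuming
# TensorFlow/Keras is installed (layer sizes and epoch count are illustrative):
from tensorflow import keras

ffnn = keras.Sequential([
    keras.layers.Input(shape=(4,)),              # 4 iris features
    keras.layers.Dense(10, activation='relu'),   # first hidden layer
    keras.layers.Dense(10, activation='relu'),   # second hidden layer
    keras.layers.Dense(3, activation='softmax')  # 3 iris classes
])
ffnn.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
             metrics=['accuracy'])
ffnn.fit(X_train, y_train, epochs=100, verbose=0)
loss, acc = ffnn.evaluate(X_test, y_test, verbose=0)
print("Feed Forward Neural Network Accuracy:", acc)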

# Session 17
#a) Principal Component Analysis
from sklearn.decomposition import PCA
# PCA
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
print("PCA reduced shape:", X_pca.shape)
# b) Linear Discriminant Analysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# LDA
lda = LinearDiscriminantAnalysis(n_components=2)
X_lda = lda.fit_transform(X, y)
print("LDA reduced shape:", X_lda.shape)

# Session 18
#a) Apriori Algorithm
from mlxtend.frequent_patterns import apriori, association_rules
import pandas as pd
# Example Data
data = {
    'Milk': [1, 0, 1, 0, 1],
    'Bread': [1, 1, 1, 1, 1],
    'Butter': [0, 1, 0, 1, 0]
}
df = pd.DataFrame(data)

# Apriori
frequent_itemsets = apriori(df, min_support=0.5, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)
print("Apriori Frequent Itemsets:")
print(frequent_itemsets)
print("Association Rules:")
print(rules)

# b) FP-Tree Algorithm
# A full FP-Tree can be implemented manually or via a specialized library;
# mlxtend's FP-growth builds the FP-tree internally.
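
# A minimal sketch on the same one-hot DataFrame, assuming mlxtend is available
# (min_support mirrors the Apriori example above):
from mlxtend.frequent_patterns import fpgrowth

frequent_itemsets_fp = fpgrowth(df, min_support=0.5, use_colnames=True)
print("FP-Growth Frequent Itemsets:")
print(frequent_itemsets_fp)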

# Session 19
#a) K-Means Algorithm
from sklearn.cluster import KMeans
# K-Means Clustering (n_init set explicitly to avoid version-dependent warnings)
kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
kmeans.fit(X)
print("K-Means Cluster Centers:", kmeans.cluster_centers_)

# Session 20
#a) DBSCAN Algorithm
from sklearn.cluster import DBSCAN
# DBSCAN Clustering
dbscan = DBSCAN(eps=0.5, min_samples=5)
dbscan.fit(X)
print("DBSCAN Labels:", dbscan.labels_)

# Session 1 to Session 4: Simple Exercises using R


# Question 1
vector <- c(5, 7, 9, 11, 13, 13, 11, 9, 7, 5)
sum_vector <- sum(vector)
mean_vector <- mean(vector)
max_value <- max(vector)
min_value <- min(vector)
length_vector <- length(vector)
# Variance and standard deviation using formulas
variance_manual <- sum((vector - mean_vector)^2) / length_vector
std_dev_manual <- sqrt(variance_manual)
# Variance and standard deviation using R functions
variance_r <- var(vector)
std_dev_r <- sd(vector)
# Sorting in decreasing order
sorted_vector <- sort(vector, decreasing = TRUE)

# Question 2
# Vector of first 50 even numbers
even_numbers <- seq(2, 100, by = 2)
# Vector 30 down to 1
down_to_one <- 30:1

# Question 3
vector_with_na <- c(1, 2, 3, 4, NA, 6, NA, 8, 9, 10)
missing_locations <- is.na(vector_with_na)

# Question 4
char_vector <- c("This", "is", "a", "character", "vector")
index_is <- which(char_vector == "is")

# Question 5
seven_point_scale <- 1:7
names(seven_point_scale) <- c("Bad", "Somewhat bad", "Not good", "Ok", "Good",
                              "Very good", "Excellent")
feedback <- seven_point_scale[c("Good", "Very good", "Excellent", "Ok", "Bad")]
average_feedback <- mean(feedback)

# Question 6
string1 <- "Hello"
string2 <- "World"
concatenated_string <- paste(string1, string2)

# Question 7
long_string <- "Hello, world! Welcome to R programming."
cleaned_string <- gsub("[[:punct:]]", "", long_string)
words <- unlist(strsplit(cleaned_string, " "))
word_count <- length(words)
distinct_words <- unique(words)
distinct_word_count <- length(distinct_words)

# Question 8
# Save and read data
vector_data <- c(1, 2, 3, 4, 5)
list_data <- list(a = 1, b = 2:5, c = "R")
array_data <- array(1:8, dim = c(2, 2, 2))
data_frame_data <- data.frame(Name = c("Alice", "Bob"), Age = c(25, 30))
factor_data <- factor(c("High", "Low", "Medium", "High"))
write(vector_data, "vector.txt")
write.csv(data_frame_data, "data_frame.csv")
read_vector <- scan("vector.txt")
read_data_frame <- read.csv("data_frame.csv")
# Operations
data_frame_data <- data_frame_data[order(data_frame_data$Age), ]
list_length <- length(list_data)
new_list <- c(list_data, d = "New")
array_element <- array_data[1, 1, 1]
data_frame_component <- data_frame_data$Name

# Question 9
matrix1 <- matrix(1:25, nrow = 5, byrow = TRUE)
matrix2 <- matrix(25:1, nrow = 5, byrow = TRUE)
matrix_add <- matrix1 + matrix2
matrix_subtract <- matrix1 - matrix2
matrix_multiply <- matrix1 * matrix2

# Question 10
transpose_matrix <- t(matrix1)

# Question 11
# matrix1[1:3, 1:3] is singular (row 3 = 2 * row 2 - row 1), so invert a
# non-singular matrix instead
invertible_matrix <- matrix(c(2, 1, 1, 3, 2, 1, 2, 1, 3), nrow = 3)
inverse_matrix <- solve(invertible_matrix)

# Question 12
factors <- factor(c("A", "B", "A", "C", "B", "A"))
occurrences <- table(factors)

# Question 13
find_min_max <- function(arr) {
  min_value <- min(arr)
  max_value <- max(arr)
  return(list(min = min_value, max = max_value))
}
array3d <- array(1:27, dim = c(3, 3, 3))
min_max <- find_min_max(array3d)

# Question 14
symmetric_matrix <- matrix(c(4, 1, 1, 3), nrow = 2)
eigen_values <- eigen(symmetric_matrix)$values
eigen_vectors <- eigen(symmetric_matrix)$vectors

# Question 15
states <- sample(c("State1", "State2", "State3", "State4", "State5"), 20, replace = TRUE)
factor_states <- factor(states)
state_frequency <- table(factor_states)

# Question 16
income <- c(20000, 30000, 40000, 80000, 120000, 55000, 10000)
income_factors <- cut(income, breaks = c(10000, 50000, 100000, 150000),
                      labels = c("10K-50K", "50K-100K", "100K-150K"),
                      include.lowest = TRUE)   # keep income == 10000 in the first bin
income_frequency <- table(income_factors)

# Question 17
# Exploring string functions
string <- "Learning R is fun"
str_length <- nchar(string)
sub_string <- substr(string, 1, 8)
upper_case <- toupper(string)
lower_case <- tolower(string)
# Exploring plotting
x <- 1:10
y <- x^2
plot(x, y, type = "b", col = "blue", main = "Plot Example", xlab = "X", ylab = "Y")

# Session 5 and Session 6: Analysis Using R


# Question 18
# Reusing Income factor from Question 16
income <- c(20000, 30000, 40000, 80000, 120000, 55000, 10000)
income_factors <- cut(income, breaks = c(10000, 50000, 100000, 150000),
                      labels = c("10K-50K", "50K-100K", "100K-150K"),
                      include.lowest = TRUE)
# Creating a State factor for these individuals
states <- factor(c("State1", "State2", "State1", "State3", "State2", "State3", "State1"))
# Two-way frequency table
frequency_table <- table(income_factors, states)
print(frequency_table)

# Question 19
# Summary, type, and stem-and-leaf display of the vector from Question 1
summary_vector <- summary(vector)
data_type_vector <- typeof(vector)
stem(vector)

# Question 20
# Generating marks data
set.seed(123)
marks <- rbinom(50, size = 100, prob = 0.6)
grades <- cut(marks, breaks = c(-Inf, 40, 60, 80, Inf), labels = c("D", "C", "B", "A"))
# Random seriousness data
seriousness <- factor(sample(c("Very Serious", "Serious", "Not Serious"), 50,
                             replace = TRUE))
# Chi-square test
chi_square_test <- chisq.test(table(grades, seriousness))
print(chi_square_test)

# Question 21
# Generating data for class
set.seed(123)
marks <- rnorm(50, mean = 70, sd = 10)
gender <- factor(sample(c("Male", "Female"), 50, replace = TRUE))
data <- data.frame(Marks = marks, Gender = gender)
# Box plot
boxplot(Marks ~ Gender, data = data, main = "Boxplot of Marks by Gender",
        col = c("blue", "pink"), xlab = "Gender", ylab = "Marks")

# Question 22
# Using airquality dataset
data(airquality)
airquality_clean <- na.omit(airquality)

# Scatter plot
plot(airquality_clean$Ozone, airquality_clean$Solar.R, main = "Ozone vs Solar Radiation",
     xlab = "Ozone", ylab = "Solar Radiation", col = "blue", pch = 19)

# Linear regression
linear_model <- lm(Ozone ~ Solar.R, data = airquality_clean)
summary(linear_model)

# Question 23
# Multiple regression
multi_regression <- lm(Ozone ~ Solar.R + Wind + Temp, data = airquality_clean)
summary(multi_regression)

# Question 24
# Using iris dataset
data(iris)
iris_subset <- iris[1:100, ]
iris_subset$IsSetosa <- ifelse(iris_subset$Species == "setosa", 1, 0)

# Logistic regression
logistic_model <- glm(IsSetosa ~ Sepal.Length, data = iris_subset, family = binomial)
summary(logistic_model)

# Question 25
# Plotting date/month vs temperature
plot(airquality$Temp, type = "l", main = "Temperature over Time",
     xlab = "Index", ylab = "Temperature")
# Moving average
moving_avg <- filter(airquality$Temp, rep(1/3, 3), sides = 2)
lines(moving_avg, col = "red")

# Question 26
# Toy dataset (read 10,000 rows)
library(rpart)
toy_data <- read.csv("path/to/dataset.csv", nrows = 10000)
summary(toy_data)

decision_tree <- rpart(Illness ~ ., data = toy_data)
print(decision_tree)

# Question 27
# Random forest
library(randomForest)
random_forest_model <- randomForest(Illness ~ ., data = toy_data)
print(random_forest_model)

# Confusion matrix
print(random_forest_model$confusion)

# Question 28
# Rainfall dataset
rainfall_data <- read.csv("path/to/rainfall_dataset.csv")

# Decision tree
rainfall_tree <- rpart(Rainfall ~ ., data = rainfall_data)

# Random forest
rainfall_forest <- randomForest(Rainfall ~ ., data = rainfall_data)

# Compare results
print(rainfall_tree)
print(rainfall_forest)

# Question 29
# E-commerce dataset
customer_data <- read.csv("path/to/ecommerce_dataset.csv")

# Classification
library(e1071)
native_bayes <- naiveBayes(Spender ~ ., data = customer_data)

# KNN
library(class)
knn_result <- knn(train = customer_data[, -1], test = customer_data[, -1],
                  cl = customer_data$Spender, k = 5)

# Question 30
# K-means clustering
set.seed(123)
kmeans_result <- kmeans(customer_data[,-1], centers = 5)
print(kmeans_result)

# Question 31
# Classification and clustering with built-in datasets
# Example with iris dataset
iris_kmeans <- kmeans(iris[, -5], centers = 3)
print(iris_kmeans)

iris_naive_bayes <- naiveBayes(Species ~ ., data = iris)
print(iris_naive_bayes)
