In [2]:
# Program 1
class Graph:
    def __init__(self, adjac_lis):
        self.adjac_lis = adjac_lis

    def get_neighbours(self, v):
        return self.adjac_lis[v]

    def h(self, n):
        # heuristic estimate; constant here, so A* behaves like uniform-cost search
        H = {'A': 1, 'B': 1, 'C': 1, 'D': 1}
        return H[n]

    def a_star_algorithm(self, start, stop):
        open_lst = set([start])
        closed_lst = set([])
        dist = {}
        dist[start] = 0
        prenode = {}
        prenode[start] = start
        while len(open_lst) > 0:
            # pick the open node with the lowest f = g + h
            n = None
            for v in open_lst:
                if n is None or dist[v] + self.h(v) < dist[n] + self.h(n):
                    n = v
            if n is None:
                print("Path does not exist")
                return None
            if n == stop:
                reconst_path = []
                while prenode[n] != n:
                    reconst_path.append(n)
                    n = prenode[n]
                reconst_path.append(start)
                reconst_path.reverse()
                print("path found: {}".format(reconst_path))
                return reconst_path
            for (m, weight) in self.get_neighbours(n):
                if m not in open_lst and m not in closed_lst:
                    open_lst.add(m)
                    prenode[m] = n
                    dist[m] = dist[n] + weight
                else:
                    # found a cheaper route to m: update and reopen if needed
                    if dist[m] > dist[n] + weight:
                        dist[m] = dist[n] + weight
                        prenode[m] = n
                        if m in closed_lst:
                            closed_lst.remove(m)
                            open_lst.add(m)
            open_lst.remove(n)
            closed_lst.add(n)
        print("Path does not exist")
        return None

adjac_lis = {'A': [('B', 1), ('C', 3), ('D', 7)], 'B': [('D', 5)], 'C': [('D', 12)]}
graph1 = Graph(adjac_lis)
graph1.a_star_algorithm('A', 'D')
path found: ['A', 'B', 'D']
Out[2]:
['A', 'B', 'D']
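With h(n) = 1 for every node, the search above behaves like uniform-cost (Dijkstra) search. A minimal sketch of plugging in a non-uniform heuristic instead — the H values below are illustrative assumptions, admissible for this graph but not taken from the manual:

class GraphWithHeuristic(Graph):
    def h(self, n):
        # hypothetical admissible estimates of remaining cost to 'D'
        H = {'A': 4, 'B': 3, 'C': 5, 'D': 0}
        return H[n]

graph2 = GraphWithHeuristic(adjac_lis)
graph2.a_star_algorithm('A', 'D')   # still prints path found: ['A', 'B', 'D']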
In [3]:
# Program 2
def recAOStar(n):
    global finalPath
    print("Expanding Node:", n)
    and_nodes = []
    or_nodes = []
    if n in allNodes:
        if 'AND' in allNodes[n]:
            and_nodes = allNodes[n]['AND']
        if 'OR' in allNodes[n]:
            or_nodes = allNodes[n]['OR']
    if len(and_nodes) == 0 and len(or_nodes) == 0:
        return

    solvable = False
    marked = {}

    while not solvable:
        # every successor group tried: settle for the cheapest overall
        if len(marked) == len(and_nodes) + len(or_nodes):
            min_cost_least, min_cost_group_least = least_cost_group(and_nodes, or_nodes, {})
            solvable = True
            change_heuristic(n, min_cost_least)
            optimal_child_group[n] = min_cost_group_least
            continue
        min_cost, min_cost_group = least_cost_group(and_nodes, or_nodes, marked)
        is_expanded = False
        if len(min_cost_group) > 1:
            # AND pair: expand both children if they have successors
            if min_cost_group[0] in allNodes:
                is_expanded = True
                recAOStar(min_cost_group[0])
            if min_cost_group[1] in allNodes:
                is_expanded = True
                recAOStar(min_cost_group[1])
        else:
            if min_cost_group in allNodes:
                is_expanded = True
                recAOStar(min_cost_group)
        if is_expanded:
            # re-check: is this group still the cheapest after expansion?
            min_cost_verify, min_cost_group_verify = least_cost_group(and_nodes, or_nodes, {})
            if min_cost_group == min_cost_group_verify:
                solvable = True
                change_heuristic(n, min_cost_verify)
                optimal_child_group[n] = min_cost_group
        else:
            solvable = True
            change_heuristic(n, min_cost)
            optimal_child_group[n] = min_cost_group
        marked[min_cost_group] = 1
    return heuristic(n)

def least_cost_group(and_nodes, or_nodes, marked):
    node_wise_cost = {}
    for node_pair in and_nodes:
        if node_pair[0] + node_pair[1] not in marked:
            # AND group: both children plus one edge cost each
            cost = heuristic(node_pair[0]) + heuristic(node_pair[1]) + 2
            node_wise_cost[node_pair[0] + node_pair[1]] = cost
    for node in or_nodes:
        if node not in marked:
            # OR branch: child plus one edge cost
            cost = heuristic(node) + 1
            node_wise_cost[node] = cost
    min_cost = 999999
    min_cost_group = None
    for costKey in node_wise_cost:
        if node_wise_cost[costKey] < min_cost:
            min_cost = node_wise_cost[costKey]
            min_cost_group = costKey
    return [min_cost, min_cost_group]

def heuristic(n):
    return H_dist[n]

def change_heuristic(n, cost):
    H_dist[n] = cost
    return

def print_path(node):
    print(optimal_child_group[node], end="")
    node = optimal_child_group[node]
    if len(node) > 1:
        if node[0] in optimal_child_group:
            print("->", end="")
            print_path(node[0])
        if node[1] in optimal_child_group:
            print("->", end="")
            print_path(node[1])
    else:
        if node in optimal_child_group:
            print("->", end="")
            print_path(node)
H_dist = {
    'A': -1,
    'B': 4,
    'C': 2,
    'D': 3,
    'E': 6,
    'F': 8,
    'G': 2,
    'H': 0,
    'I': 0,
    'J': 0
}
allNodes = {
    'A': {'AND': [('C', 'D')], 'OR': ['B']},
    'B': {'OR': ['E', 'F']},
    'C': {'OR': ['G'], 'AND': [('H', 'I')]},
    'D': {'OR': ['J']}
}
optimal_child_group = {}
optimal_cost = recAOStar('A')
print('Nodes which give optimal cost are')
print_path('A')
print('\nOptimal Cost is :: ', optimal_cost)
Expanding Node: A
Expanding Node: B
Expanding Node: C
Expanding Node: D
Nodes which give optimal cost are
CD->HI->J
Optimal Cost is :: 5
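Sanity check of the reported cost, using the code's own edge costs (+1 per OR branch, +2 per AND pair): C resolves through its AND pair (H, I) at 0 + 0 + 2 = 2, D resolves through J at 0 + 1 = 1, so A's AND group (C, D) costs 2 + 1 + 2 = 5. The OR route via B would cost at least 6 + 1 + 1 = 8 (E at 6 is B's cheaper child), so CD->HI->J is optimal with cost 5, as printed.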
In [4]:
# Program 3
import csv

with open("prog3.csv") as f:
    csv_file = csv.reader(f)
    data = list(csv_file)

specific = data[0][:-1]
general = [['?' for i in range(len(specific))] for j in range(len(specific))]

for i in data:
    if i[-1] == "Yes":
        # positive example: generalize the specific hypothesis
        for j in range(len(specific)):
            if i[j] != specific[j]:
                specific[j] = "?"
                general[j][j] = "?"

    elif i[-1] == "No":
        # negative example: specialize the general boundary
        for j in range(len(specific)):
            if i[j] != specific[j]:
                general[j][j] = specific[j]
            else:
                general[j][j] = "?"

    print("\nStep " + str(data.index(i) + 1) + " of Candidate Elimination Algorithm")
    print(specific)
    print(general)

# keep only general hypotheses that constrain at least one attribute
gh = []
for i in general:
    for j in i:
        if j != '?':
            gh.append(i)
            break

print("\nFinal Specific hypothesis:\n", specific)
print("\nFinal General hypothesis:\n", gh)
Step 1 of Candidate Elimination Algorithm
['sunny', 'warm', 'normal', 'strong', 'warm', 'same']
[['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?',
'?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?']]
Step 2 of Candidate Elimination Algorithm
['sunny', 'warm', '?', 'strong', 'warm', 'same']
[['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?',
'?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?']]
Step 3 of Candidate Elimination Algorithm
['sunny', 'warm', '?', 'strong', 'warm', 'same']
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?',
'?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', 'same']]
Step 4 of Candidate Elimination Algorithm
['sunny', 'warm', '?', 'strong', '?', '?']
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?',
'?', '?', '?'], ['?', '?', '?', '?', '?', '?'], ['?', '?', '?', '?', '?', '?']]
Final Specific hypothesis:
['sunny', 'warm', '?', 'strong', '?', '?']
Final General hypothesis:
[['sunny', '?', '?', '?', '?', '?'], ['?', 'warm', '?', '?', '?', '?']]
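prog3.csv itself is not shown in the manual. Working backwards from the four steps printed above (the file has no header row, since the code treats data[0] as the first training example), it presumably holds the classic EnjoySport data below; the 'high', 'cold', and 'change' values are inferred from the hypothesis updates rather than printed anywhere above:

sunny,warm,normal,strong,warm,same,Yes
sunny,warm,high,strong,warm,same,Yes
rainy,cold,high,strong,warm,change,No
sunny,warm,high,strong,cool,change,Yes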
In [5]:
# Program 4
import pandas as pd
from pprint import pprint
from sklearn.feature_selection import mutual_info_classif
from collections import Counter

def id3(df, target_attribute, attribute_names, default_class=None):
    cnt = Counter(x for x in df[target_attribute])
    if len(cnt) == 1:
        # all remaining rows share one class: return it as a leaf
        return next(iter(cnt))

    elif df.empty or (not attribute_names):
        return default_class

    else:
        # pick the attribute with the highest information gain
        gainz = mutual_info_classif(df[attribute_names], df[target_attribute], discrete_features=True)
        index_of_max = gainz.tolist().index(max(gainz))
        best_attr = attribute_names[index_of_max]
        tree = {best_attr: {}}
        remaining_attribute_names = [i for i in attribute_names if i != best_attr]

        for attr_val, data_subset in df.groupby(best_attr):
            subtree = id3(data_subset, target_attribute, remaining_attribute_names, default_class)
            tree[best_attr][attr_val] = subtree

        return tree

df = pd.read_csv("prog4.csv")

attribute_names = df.columns.tolist()
print("List of attribute names")

attribute_names.remove("PlayTennis")

# encode categorical columns as integer codes
for colname in df.select_dtypes("object"):
    df[colname], _ = df[colname].factorize()

print(df)

tree = id3(df, "PlayTennis", attribute_names)
print("The tree structure")
pprint(tree)
List of attribute names
outlook temp humidity windy PlayTennis
0 0 0 0 False 0
1 0 0 0 True 0
2 1 0 0 False 1
3 2 1 0 False 1
4 2 2 1 False 1
5 2 2 1 True 0
6 1 2 1 True 1
7 0 1 0 False 0
8 0 2 1 False 1
9 2 1 1 False 1
10 0 1 1 True 1
11 1 1 0 True 1
12 1 0 1 False 1
13 2 1 0 True 0
The tree structure
{'outlook': {0: {'humidity': {0: 0, 1: 1}},
1: 1,
2: {'windy': {False: 1, True: 0}}}}
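Because factorize's mapping is discarded above (the `_`), the printed tree is in integer codes. A small variant, assuming the same prog4.csv, keeps the mappings so the tree can be decoded back to the original category names (codes follow order of first appearance in the file):

df2 = pd.read_csv("prog4.csv")
mappings = {}
for colname in df2.select_dtypes("object"):
    df2[colname], uniques = df2[colname].factorize()
    mappings[colname] = dict(enumerate(uniques))
print(mappings)   # e.g. maps outlook code 0 back to whichever value appears first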
In [6]:
# Program 5
import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)

# scale units
X = X / np.amax(X, axis=0)
y = y / 100

class Neural_Network(object):
    def __init__(self):
        self.inputSize = 2
        # note: y has a single column; with 2 outputs, broadcasting in backward()
        # trains both output units toward the same target
        self.outputSize = 2
        self.hiddenSize = 4
        self.W1 = np.random.randn(self.inputSize, self.hiddenSize)
        self.W2 = np.random.randn(self.hiddenSize, self.outputSize)

    def forward(self, X):
        self.z = np.dot(X, self.W1)
        self.z2 = self.sigmoid(self.z)
        self.z3 = np.dot(self.z2, self.W2)
        o = self.sigmoid(self.z3)
        return o

    def sigmoid(self, s):
        return 1 / (1 + np.exp(-s))

    def sigmoidPrime(self, s):
        # derivative of the sigmoid, expressed in terms of its output
        return s * (1 - s)

    def backward(self, X, y, o):
        self.o_error = y - o
        self.o_delta = self.o_error * self.sigmoidPrime(o)
        self.z2_error = self.o_delta.dot(self.W2.T)
        self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)
        self.W1 += X.T.dot(self.z2_delta)
        self.W2 += self.z2.T.dot(self.o_delta)

    def train(self, X, y):
        o = self.forward(X)
        self.backward(X, y, o)

NN = Neural_Network()
print("\nInput: \n" + str(X))
print("\nActual Output: \n" + str(y))
print("\nPredicted Output: \n" + str(NN.forward(X)))
print("\nLoss: \n" + str(np.mean(np.square(y - NN.forward(X)))))
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.49211237 0.37794696]
[0.50715326 0.34596148]
[0.48104659 0.40879093]]
Loss:
0.21074179886944763
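`train` is defined but never called in the cell above, so the predicted output and loss come from the random initial weights. A minimal sketch of actually exercising the class (the epoch count is an arbitrary choice, not from the manual):

NN2 = Neural_Network()
for epoch in range(1000):   # arbitrary number of passes, for illustration
    NN2.train(X, y)
print("Predicted Output after training:\n", NN2.forward(X))
print("Loss after training:", np.mean(np.square(y - NN2.forward(X))))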
In [8]:
# Program 6
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB

data = pd.read_csv('prog6.csv')
print("The first 5 Values of data is :\n", data.head())
X = data.iloc[:, :-1]
print("\nThe First 5 values of the train data is\n", X.head())
y = data.iloc[:, -1]
print("\nThe First 5 values of train output is\n", y.head())

# encode each categorical feature column as integers
le_outlook = LabelEncoder()
X.Outlook = le_outlook.fit_transform(X.Outlook)
le_Temperature = LabelEncoder()
X.Temperature = le_Temperature.fit_transform(X.Temperature)
le_Humidity = LabelEncoder()
X.Humidity = le_Humidity.fit_transform(X.Humidity)
le_Windy = LabelEncoder()
X.Windy = le_Windy.fit_transform(X.Windy)

print("\nNow the Train input is\n", X.head())

le_PlayTennis = LabelEncoder()
y = le_PlayTennis.fit_transform(y)
print("\nNow the Train output is\n", y)

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
classifier = GaussianNB()
classifier.fit(X_train, y_train)

from sklearn.metrics import accuracy_score
print("Accuracy is:", accuracy_score(classifier.predict(X_test), y_test))
The first 5 Values of data is :
Outlook Temperature Humidity Windy PlayTennis
0 Sunny Hot High False No
1 Sunny Hot High True No
2 Overcast Hot High False Yes
3 Rainy Mild High False Yes
4 Rainy Cool Normal False Yes
The First 5 values of the train data is
Outlook Temperature Humidity Windy
0 Sunny Hot High False
1 Sunny Hot High True
2 Overcast Hot High False
3 Rainy Mild High False
4 Rainy Cool Normal False
The First 5 values of train output is
0 No
1 No
2 Yes
3 Yes
4 Yes
Name: PlayTennis, dtype: object
Now the Train input is
Outlook Temperature Humidity Windy
0 2 1 0 0
1 2 1 0 1
2 0 1 0 0
3 1 2 0 0
4 1 0 1 0
Now the Train output is
[0 0 1 1 1 0 1 0 1 1 1 1 1 0]
Accuracy is: 0.6666666666666666
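`train_test_split` is unseeded above and the test split holds only 3 of the 14 rows, so the reported accuracy varies from run to run. Passing a seed makes the figure reproducible — a minimal variant (the seed value itself is arbitrary):

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=42)   # any fixed seed gives repeatable splits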
In [12]:
# Program 7
from sklearn import datasets
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
model = KMeans(n_clusters=3)
model.fit(X_train, y_train)   # KMeans is unsupervised; y_train is accepted but ignored
print('K-Means: ', metrics.accuracy_score(y_test, model.predict(X_test)))

from sklearn.mixture import GaussianMixture
model2 = GaussianMixture(n_components=3)
model2.fit(X_train, y_train)   # y_train is likewise ignored here
print('EM Algorithm:', metrics.accuracy_score(y_test, model2.predict(X_test)))
K-Means: 0.02631578947368421
EM Algorithm: 0.9736842105263158
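The near-zero k-means figure is an artifact of label permutation: cluster IDs (0, 1, 2) are arbitrary and need not line up with the iris class IDs, so accuracy_score compares mismatched labelings (the EM figure happens to align with the class IDs in this run). One common fix, sketched below under the assumption that every cluster captures at least one training point, maps each cluster to the majority true class among its training members before scoring:

import numpy as np

# map each cluster ID to the majority class of its training points
train_clusters = model.predict(X_train)
mapping = {c: np.bincount(y_train[train_clusters == c]).argmax() for c in range(3)}
remapped = np.array([mapping[c] for c in model.predict(X_test)])
print('K-Means (clusters remapped):', metrics.accuracy_score(y_test, remapped))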
In [13]:
# Program 8
from sklearn.datasets import load_iris
iris = load_iris()

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0)

from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(x_train, y_train)

for i, item in enumerate(x_test):
    prediction = knn.predict([item])
    print("Actual : ", iris['target_names'][y_test[i]])
    print("Prediction : ", iris['target_names'][prediction], " \n")

print("Classification Accuracy : ", knn.score(x_test, y_test))
Actual : virginica
Prediction : ['virginica']
Actual : versicolor
Prediction : ['versicolor']
Actual : setosa
Prediction : ['setosa']
Actual : virginica
Prediction : ['virginica']
Actual : setosa
Prediction : ['setosa']
Actual : virginica
Prediction : ['virginica']
Actual : setosa
Prediction : ['setosa']
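The loop above calls knn.predict once per row, which is easy to read but slow for larger test sets. A vectorized equivalent (same model and split assumed) classifies the whole test set in one call:

# vectorized alternative: one predict call for the entire test set
predictions = knn.predict(x_test)
for actual, pred in zip(y_test, predictions):
    print("Actual : ", iris['target_names'][actual],
          "| Prediction : ", iris['target_names'][pred])
print("Classification Accuracy : ", knn.score(x_test, y_test))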
In [14]:
# Program 9
import math
from math import ceil
import numpy as np
from scipy import linalg

def lowess(x, y, f, iterations):
    n = len(x)
    r = int(ceil(f * n))
    # bandwidth: distance to the r-th nearest neighbour of each point
    h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
    # tricube weights on scaled distances
    w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
    w = (1 - w ** 3) ** 3
    yest = np.zeros(n)
    delta = np.ones(n)
    for iteration in range(iterations):
        for i in range(n):
            # weighted linear least-squares fit around point i
            weights = delta * w[:, i]
            b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
            A = np.array([[np.sum(weights), np.sum(weights * x)],
                          [np.sum(weights * x), np.sum(weights * x * x)]])
            beta = linalg.solve(A, b)
            yest[i] = beta[0] + beta[1] * x[i]

        # robustifying pass: down-weight points with large residuals
        residuals = y - yest
        s = np.median(np.abs(residuals))
        delta = np.clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta ** 2) ** 2

    return yest

n = 100
x = np.linspace(0, 2 * math.pi, n)
y = np.sin(x) + 0.3 * np.random.randn(n)
f = 0.25
iterations = 3
yest = lowess(x, y, f, iterations)

import matplotlib.pyplot as plt
plt.plot(x, y, ".", color="green")
plt.plot(x, yest, "-", color="red")
Out[14]:
[<matplotlib.lines.Line2D at 0x1d320484370>]
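As a cross-check (assuming statsmodels is installed; it is not used elsewhere in the manual), the hand-rolled fit can be compared against the library implementation of LOWESS with the same smoothing fraction:

from statsmodels.nonparametric.smoothers_lowess import lowess as sm_lowess

yest2 = sm_lowess(y, x, frac=0.25, it=3, return_sorted=False)
plt.plot(x, yest2, "--", color="blue")   # should track the hand-rolled fit closely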