KEMBAR78
AAL Programs | PDF
0% found this document useful (0 votes)
39 views12 pages

AAL Programs

The programs demonstrate machine learning algorithms like KNN, K-Means clustering, locally weighted regression, backpropagation neural networks and genetic algorithms. Program 6 implements Q-learning reinforcement learning.

Uploaded by

Malavika 20 AIML
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
39 views12 pages

AAL Programs

The programs demonstrate machine learning algorithms like KNN, K-Means clustering, locally weighted regression, backpropagation neural networks and genetic algorithms. Program 6 implements Q-learning reinforcement learning.

Uploaded by

Malavika 20 AIML
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 12

PROGRAM-1

Python Program to Implement and Demonstrate KNN Algorithm

import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics

# Column names for the iris CSV (no header row in the file).
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']

# Read dataset to pandas dataframe.
dataset = pd.read_csv("8-dataset.csv", names=names)
X = dataset.iloc[:, :-1]   # feature columns
y = dataset.iloc[:, -1]    # class-label column
print(X.head())

# Hold out 10% of the rows for testing.
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.10)

# Fit a 5-nearest-neighbour classifier and predict the held-out samples.
classifier = KNeighborsClassifier(n_neighbors=5).fit(Xtrain, ytrain)
ypred = classifier.predict(Xtest)

separator = "-" * 75
print("\n" + separator)
print('%-25s %-25s %-25s' % ('Original Label', 'Predicted Label', 'Correct/Wrong'))
print(separator)
# zip pairs each true label with its prediction (replaces the manual counter).
for label, predicted in zip(ytest, ypred):
    verdict = 'Correct' if label == predicted else 'Wrong'
    print('%-25s %-25s %-25s' % (label, predicted, verdict))
print(separator)
print("\nConfusion Matrix:\n", metrics.confusion_matrix(ytest, ypred))
print(separator)
print("\nClassification Report:\n", metrics.classification_report(ytest, ypred))
print(separator)
print('Accuracy of the classifier is %0.2f' % metrics.accuracy_score(ytest, ypred))
print(separator)

OUTPUT:
PROGRAM -2

Python Program to Implement and Demonstrate K-Means and EM Algorithm


Machine Learning

from sklearn.cluster import KMeans


from sklearn.mixture import GaussianMixture
import sklearn.metrics as metrics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Column names for the iris CSV (no header row in the file).
names = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width', 'Class']

dataset = pd.read_csv("8-dataset.csv", names=names)

X = dataset.iloc[:, :-1]

# Map the three iris class names onto integer labels 0..2.
label = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
y = [label[c] for c in dataset.iloc[:, -1]]

plt.figure(figsize=(14, 7))
colormap = np.array(['red', 'lime', 'black'])

# REAL PLOT — the true class labels.
plt.subplot(1, 3, 1)
plt.title('Real')
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y])

# K-MEANS PLOT.
model = KMeans(n_clusters=3, random_state=0).fit(X)
plt.subplot(1, 3, 2)
plt.title('KMeans')
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[model.labels_])

# NOTE(review): accuracy_score compares raw cluster ids to the class
# encoding; cluster ids are an arbitrary permutation, so this score is only
# meaningful when the permutation happens to line up with the labels.
print('The accuracy score of K-Mean: ', metrics.accuracy_score(y, model.labels_))
print('The Confusion matrix of K-Mean:\n', metrics.confusion_matrix(y, model.labels_))

# GMM (EM algorithm) PLOT.
gmm = GaussianMixture(n_components=3, random_state=0).fit(X)
y_cluster_gmm = gmm.predict(X)
plt.subplot(1, 3, 3)
plt.title('GMM Classification')
plt.scatter(X.Petal_Length, X.Petal_Width, c=colormap[y_cluster_gmm])

print('The accuracy score of EM: ', metrics.accuracy_score(y, y_cluster_gmm))
print('The Confusion matrix of EM:\n ', metrics.confusion_matrix(y, y_cluster_gmm))

OUTPUT:
-
PROGRAM -3

Python Program to Implement and Demonstrate Locally Weighted


Regression Algorithm

import matplotlib.pyplot as plt


import pandas as pd
import numpy as np

def kernel(point, xmat, k):
    """Return the diagonal weight matrix for one query point.

    Weights follow a Gaussian kernel: training rows close to ``point`` get
    weights near 1 and distant rows decay towards 0; ``k`` is the bandwidth.

    Bug fix: the original indexed the module-level ``X`` instead of the
    ``xmat`` parameter, so it only worked by accident on the global data.
    (``np.asmatrix`` replaces the ``np.mat`` alias removed in NumPy 2.0.)
    """
    m, n = np.shape(xmat)
    weights = np.asmatrix(np.eye(m))
    for j in range(m):
        diff = point - xmat[j]
        # (diff * diff.T) is a 1x1 matrix; extract the scalar explicitly.
        weights[j, j] = np.exp((diff * diff.T)[0, 0] / (-2.0 * k ** 2))
    return weights


def localWeight(point, xmat, ymat, k):
    """Solve the weighted least-squares normal equations for one point.

    Returns the local coefficient vector W = (X'WX)^-1 X'Wy.
    Bug fix: uses the ``xmat`` parameter instead of the module-level ``X``.
    """
    wei = kernel(point, xmat, k)
    W = (xmat.T * (wei * xmat)).I * (xmat.T * (wei * ymat.T))
    return W


def localWeightRegression(xmat, ymat, k):
    """Predict a locally weighted regression value for every row of xmat."""
    m, n = np.shape(xmat)
    ypred = np.zeros(m)
    for i in range(m):
        # xmat[i] (1xn) times W (nx1) is a 1x1 matrix; take its scalar.
        ypred[i] = (xmat[i] * localWeight(xmat[i], xmat, ymat, k))[0, 0]
    return ypred

# Load the tips data set: total bill vs tip amount.
data = pd.read_csv('10-dataset.csv')
bill = np.array(data.total_bill)
tip = np.array(data.tip)

# Build the design matrix: a column of ones (intercept) next to the bills.
mbill = np.asmatrix(bill)
mtip = np.asmatrix(tip)
m = np.shape(mbill)[1]
one = np.asmatrix(np.ones(m))
X = np.hstack((one.T, mbill.T))

# Bandwidth k controls how local the fit is.
ypred = localWeightRegression(X, mtip, 0.5)

# Sort by bill amount so the fitted curve plots left to right.
SortIndex = X[:, 1].argsort(0)
xsort = X[SortIndex][:, 0]

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(bill, tip, color='green')
ax.plot(xsort[:, 1], ypred[SortIndex], color='red', linewidth=5)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show()

OUTPUT:
PROGRAM -4
Build an artificial neural network by implementing the Backpropagation
algorithm and test the same using appropriate datasets.

import numpy as np
# Network dimensions and training length.
inputNeurons = 2
hiddenlayerNeurons = 4
outputNeurons = 2
iteration = 6000

# Random integer inputs in [1, 5) and the desired target output.
# (renamed from `input`, which shadowed the builtin)
input_layer = np.random.randint(1, 5, inputNeurons)
target = np.array([1.0, 0.0])

# Random initial biases and weights in [0, 1).
hidden_bias = np.random.rand(1, hiddenlayerNeurons)
output_bias = np.random.rand(1, outputNeurons)
hidden_weights = np.random.rand(inputNeurons, hiddenlayerNeurons)
output_weights = np.random.rand(hiddenlayerNeurons, outputNeurons)


def sigmoid(layer):
    """Logistic activation: squashes values into (0, 1)."""
    return 1 / (1 + np.exp(-layer))


def gradient(layer):
    """Derivative of the sigmoid, expressed via its own output."""
    return layer * (1 - layer)


for i in range(iteration):
    # Forward pass.
    hidden_layer = sigmoid(np.dot(input_layer, hidden_weights) + hidden_bias)
    output_layer = sigmoid(np.dot(hidden_layer, output_weights) + output_bias)

    # Backward pass (the biases are never updated, as in the original).
    error = target - output_layer
    error_terms_output = gradient(output_layer) * error
    error_terms_hidden = gradient(hidden_layer) * np.dot(error_terms_output,
                                                         output_weights.T)

    gradient_hidden_weights = np.dot(input_layer.reshape(inputNeurons, 1),
                                     error_terms_hidden.reshape(1, hiddenlayerNeurons))
    gradient_output_weights = np.dot(hidden_layer.reshape(hiddenlayerNeurons, 1),
                                     error_terms_output.reshape(1, outputNeurons))

    # Gradient-ascent step on the error with learning rate 0.05.
    hidden_weights = hidden_weights + 0.05 * gradient_hidden_weights
    output_weights = output_weights + 0.05 * gradient_output_weights

    # Only show the first and last ~50 iterations.
    if i < 50 or i > iteration - 50:
        print("**********************")
        print("iteration:", i, "::::", error)
        print("###output###", output_layer)

OUTPUT:
**********************
iteration: 0 :::: [[ 0.1038751 -0.93964688]]
###output######## [[0.8961249 0.93964688]]
**********************
iteration: 1 :::: [[ 0.10375536 -0.93920093]]
###output######## [[0.89624464 0.93920093]]
**********************
iteration: 2 :::: [[ 0.10363609 -0.9387491 ]]
###output######## [[0.89636391 0.9387491 ]]
**********************
iteration: 3 :::: [[ 0.1035173 -0.93829127]]
###output######## [[0.8964827 0.93829127]]
**********************
iteration: 4 :::: [[ 0.10339898 -0.93782735]]
###output######## [[0.89660102 0.93782735]]

**********************
iteration: 5997 :::: [[ 0.02336208 -0.02451314]]
###output######## [[0.97663792 0.02451314]]
**********************
iteration: 5998 :::: [[ 0.02336012 -0.02451085]]
###output######## [[0.97663988 0.02451085]]
**********************
iteration: 5999 :::: [[ 0.02335816 -0.02450856]]
###output######## [[0.97664184 0.02450856]]
PROGRAM -5
DEMONSTRATE GENETIC ALGORITHM BY TAKING A SUITABLE DATA FOR
ANY SIMPLE APPLICATION.
##Program 5
import random

# Number of individuals kept in each generation.
POPULATION_SIZE = 100
# Gene pool: every character a chromosome position may take.
# (restored to a single literal — the PDF line break had injected a stray
# newline character into the pool)
GENES = '''abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890, .-;:_!"#%&/()=?@${[]}'''
# String the genetic algorithm must evolve towards.
TARGET = "I love GeeksforGeeks"

def create_individual():
    """Return a random chromosome with one random gene per target position."""
    return [random.choice(GENES) for _ in TARGET]

def calculate_fitness(individual):
    """Return the number of positions where the chromosome differs from TARGET.

    Lower is better; 0 means the individual matches the target exactly.
    """
    return sum(gene != target_gene for gene, target_gene in zip(individual, TARGET))

def mate(parent1, parent2):
    """Produce a child chromosome by uniform crossover with mutation.

    Each gene comes from parent1 (45%), parent2 (45%), or is replaced by a
    random gene from the pool (10% mutation).
    """
    child = []
    for gene1, gene2 in zip(parent1, parent2):
        roll = random.random()
        if roll < 0.45:
            child.append(gene1)
        elif roll < 0.90:
            child.append(gene2)
        else:
            child.append(random.choice(GENES))
    return child

def main():
    """Evolve a random population until one individual matches TARGET.

    Each generation keeps the best 10% (elitism) and fills the rest with
    children mated from the fittest half of the population.
    """
    generation = 1
    found = False
    population = [create_individual() for _ in range(POPULATION_SIZE)]

    while not found:
        # Lowest fitness (fewest mismatches) first.
        population = sorted(population, key=calculate_fitness)

        if calculate_fitness(population[0]) <= 0:
            found = True
            break

        # Elitism: carry the top 10% over unchanged.
        new_generation = population[:int(0.1 * POPULATION_SIZE)]

        # Breed the remaining 90% from the fitter half.
        for _ in range(int(0.9 * POPULATION_SIZE)):
            parent1 = random.choice(population[:50])
            parent2 = random.choice(population[:50])
            new_generation.append(mate(parent1, parent2))

        population = new_generation

        # (restored: the PDF had split this f-string across two lines,
        # which is a syntax error)
        print(f"Generation: {generation}\tString: {''.join(population[0])}\tFitness: {calculate_fitness(population[0])}")
        generation += 1

    print(f"Generation: {generation}\tString: {''.join(population[0])}\tFitness: {calculate_fitness(population[0])}")


main()

PROGRAM 6
Q LEARNING
##Program 6
import numpy as np
# Terminal (goal) state index of the 2x3 grid world
terminal = 5
# Available actions: up, down, left, right
actions = ['UP','DW','LF','RG']
# Rewards: -1 for every state except the goal, which pays +10
rws = np.array([-1]*6)
rws[5] = 10
# Two training trajectories: (start state, sequence of actions)
paths = [(0, ['UP','UP','UP','RG']), (4, ['RG','RG','LF','UP'])]
# Constants: learning rate (alpha) and discount factor (gamma)
alpha = 0.5
gamma = 0.8

def print_value(value):
    """Print six state values laid out as the 2x3 grid of the world."""
    rows = [
        '[{} {}'.format(value[2], value[5]),
        '{} {}'.format(value[1], value[4]),
        '{} {}]\n'.format(value[0], value[3]),
    ]
    print('\n'.join(rows))

def update_value(value, state, action):
    """Apply one Q-learning update for (state, action).

    States 0..5 form a 2x3 grid (column-major). A move that would leave the
    grid keeps the agent in place and costs -10; otherwise the agent moves
    and collects the reward of the new state. Returns (value, next_state).
    """
    index = actions.index(action)
    # States from which each action would fall off the grid.
    blocked = {
        'UP': (2, 5),
        'DW': (0, 3),
        'LF': (0, 1, 2),
        'RG': (3, 4, 5),
    }
    # State-index offset of each legal move.
    step = {'UP': 1, 'DW': -1, 'LF': -3, 'RG': 3}

    if state in blocked[action]:
        next_state = state
        rw = -10
    else:
        next_state = state + step[action]
        rw = rws[next_state]

    # Standard Q-learning target: reward plus discounted best next value.
    best_next = max(value[i][next_state] for i in range(4))
    value[index][state] += alpha * (rw + gamma * best_next - value[index][state])
    return value, next_state

def return_policy(value):
    """Print the greedy policy (best action per state) as the 2x3 grid.

    Bug fix: the original built the policy with np.array([' ']*6), whose
    dtype is the one-character string '<U1', so every assignment was
    truncated ('+10' printed as '+', 'UP' as 'U', ...). A plain Python list
    keeps the full action names.
    """
    policy = [' '] * 6
    policy[5] = '+10'

    for state in range(5):
        # Pick the action with the highest learned value for this state.
        policy[state] = actions[np.argmax([value[action][state]
                                           for action in range(4)])]

    print(policy[2] + ' ' + policy[5])
    print(policy[1] + ' ' + policy[4])
    print(policy[0] + ' ' + policy[3] + '\n')

def main():
    """Run Q-learning over the two fixed trajectories and show the result."""
    # Initialise the Q table with zeros: one 6-state array per action.
    value = [np.zeros(6), np.zeros(6), np.zeros(6), np.zeros(6)]
    # Bug fix: the loop variable was named `actions`, shadowing the
    # module-level action list that update_value depends on.
    for start_state, moves in paths:
        state = start_state
        for action in moves:
            value, state = update_value(value, state, action)
            if state == terminal:
                break
    # Q values for action UP
    print_value(value[0])
    # Q values for action DW
    print_value(value[1])
    # Q values for action LF
    print_value(value[2])
    # Q values for action RG
    print_value(value[3])
    # Greedy policy
    return_policy(value)

if __name__ == '__main__':
    main()

You might also like