Exp1- Write a program to generate the following logic functions using a McCulloch-Pitts neuron
and appropriate values for weights, bias and threshold.
a) XOR logic function
(A single McCulloch-Pitts neuron cannot realize XOR, since XOR is not linearly separable; the program therefore uses a small two-layer network with sigmoid activations.)
import numpy as np

# XOR truth table: inputs and expected outputs
inputs = np.array([[0,0],[0,1],[1,0],[1,1]])
expected_output = np.array([[0],[1],[1],[0]])
epochs = 100
lr = 0.1

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    # x is assumed to already be a sigmoid activation
    return x * (1 - x)

# random initialization of weights and biases
hidden_weights = np.random.uniform(size=(2,2))
hidden_bias = np.random.uniform(size=(1,2))
output_weights = np.random.uniform(size=(2,1))
output_bias = np.random.uniform(size=(1,1))
print(*hidden_weights)
print(*hidden_bias)
print(*output_weights)
print(*output_bias)
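The listing above only initializes the network; nothing is learned yet. A minimal training loop to actually fit XOR (a sketch, reusing the sigmoid, lr and epochs defined above; the activation variable names are my own):

for _ in range(epochs):
    # forward pass
    hidden_layer = sigmoid(np.dot(inputs, hidden_weights) + hidden_bias)
    predicted_output = sigmoid(np.dot(hidden_layer, output_weights) + output_bias)
    # backward pass: gradient of the squared error through both sigmoid layers
    error = expected_output - predicted_output
    d_output = error * sigmoid_derivative(predicted_output)
    d_hidden = d_output.dot(output_weights.T) * sigmoid_derivative(hidden_layer)
    # gradient-descent updates
    output_weights += hidden_layer.T.dot(d_output) * lr
    output_bias += np.sum(d_output, axis=0, keepdims=True) * lr
    hidden_weights += inputs.T.dot(d_hidden) * lr
    hidden_bias += np.sum(d_hidden, axis=0, keepdims=True) * lr

print(predicted_output)  # approaches [0, 1, 1, 0]; note 100 epochs may be too few to converge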
Exp2. How to Implement a CNN in Python?
1. #importing the required libraries
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
2. #loading data
(X_train,y_train) , (X_test,y_test)=mnist.load_data()
3. #reshaping data
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
X_test = X_test.reshape((X_test.shape[0],X_test.shape[1],X_test.shape[2],1))
4. #checking the shape after reshaping
print(X_train.shape)
print(X_test.shape)
5. #normalizing the pixel values
X_train=X_train/255
X_test=X_test/255
6. #defining model
model = Sequential()
7. #adding convolution layer
model.add(Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)))
8. #adding pooling layer
model.add(MaxPool2D(2,2))
9. #adding fully connected layer
model.add(Flatten())
model.add(Dense(100, activation='relu'))
10. #adding output layer
model.add(Dense(10, activation='softmax'))
11. #compiling the model
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
12. #fitting the model
model.fit(X_train,y_train,epochs=10)
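After fitting, the model's generalization can be checked on the held-out test set. A short follow-up step (model.evaluate is the standard Keras call; the numbering continues the listing above):

13. #evaluating the model on the test set
loss, accuracy = model.evaluate(X_test, y_test)
print("Test loss:", loss)
print("Test accuracy:", accuracy)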
Exp-3) Implement an autoencoder for a task such as:
a) Data Compression
Let's import the required libraries:
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
import matplotlib.pyplot as plt
Declaration of Hidden Layers and Variables
# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# placeholder for an encoded (32-dimensional) input, needed to build the decoder
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
# configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
Preparing the input data (MNIST Dataset)
(x_train, _), (x_test, _) = mnist.load_data()
# normalize all values between 0 and 1 and flatten the 28x28 images into vectors of size 784
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
Training the Autoencoder for 50 epochs
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
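To judge the compression visually, the original digits and their reconstructions can be plotted side by side (a sketch using the matplotlib import above; n and ax are my own names):

n = 10  # how many digits to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # original 28x28 digit
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    ax.axis('off')
    # reconstruction recovered from the 32-float code
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    ax.axis('off')
plt.show()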
Exp-4) Implement an artificial neural network on GPUs
import numpy as np
import tensorflow as tf
from datetime import datetime
# Choose which device you want to test on: either 'cpu' or 'gpu'
devices = ['cpu', 'gpu']
# Choose the sizes of the matrices to benchmark (these match the timings reported below)
shapes = [(50, 50), (500, 500), (1000, 1000), (10000, 10000)]

def compute_operations(device, shape):
    """Run a matrix operation on the selected device and time it.

    Returns
    -------
    out : results of the operations and the time taken
    """
    # Define operations to be computed on selected device
    # (the matmul/reduce_sum benchmark ops below are assumed)
    with tf.device(device):
        random_matrix = tf.random_uniform(shape=shape, minval=0, maxval=1)
        dot_operation = tf.matmul(random_matrix, tf.transpose(random_matrix))
        sum_operation = tf.reduce_sum(dot_operation)
    # Time the actual execution of the graph (TensorFlow 1.x Session API)
    start_time = datetime.now()
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as session:
        result = session.run(sum_operation)
    elapsed_time = datetime.now() - start_time
    return result, elapsed_time
if __name__ == '__main__':
    # Run the computations and print a summary of each run
    for device in devices:
        print("--" * 20)
        for shape in shapes:
            _, time_taken = compute_operations(device, shape)
            # Print the shape used and the time taken on the selected device
            print("Input shape:", shape, "using Device:", device,
                  "took: {:.2f}".format(time_taken.seconds + time_taken.microseconds/1e6))
    print("--" * 20)
Results from running on CPU:
Computation on shape: (…), using Device: 'cpu' took: 32.81s
Results from running on GPU:
Computation on shape: (50, 50), using Device: 'gpu' took: 0.03s
Computation on shape: (500, 500), using Device: 'gpu' took: 0.04s
Computation on shape: (1000, 1000), using Device: 'gpu' took: 0.04s
Computation on shape: (10000, 10000), using Device: 'gpu' took: 0.05s
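The listing above relies on the TensorFlow 1.x graph/session API (tf.random_uniform, tf.Session). On TensorFlow 2.x the same CPU-vs-GPU comparison can be sketched with eager execution (a sketch; it assumes a visible GPU for the '/GPU:0' runs, and mirrors the op choice above):

import tensorflow as tf
from datetime import datetime

def time_matmul(device, shape):
    # run the benchmark ops eagerly on the requested device and time them
    with tf.device(device):
        start = datetime.now()
        m = tf.random.uniform(shape, minval=0, maxval=1)
        result = tf.reduce_sum(tf.matmul(m, tf.transpose(m)))
        _ = result.numpy()  # force execution before stopping the clock
    return datetime.now() - start

for device in ['/CPU:0', '/GPU:0']:
    for shape in [(50, 50), (500, 500), (1000, 1000)]:
        print(device, shape, time_matmul(device, shape))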
Exp-5) Design an RNN or one of its variants, such as LSTM or GRU.
a) LSTM Network for Regression
# LSTM for international airline passengers problem with regression framing
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
import math
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
# fix random seed for reproducibility
tf.random.set_seed(7)
# load the dataset
dataframe = read_csv('airline-passengers.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset to the range [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.67)
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions back to the original passenger scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = np.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = np.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
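To make the windowing performed by create_dataset concrete, here is a tiny worked example (the demo values are illustrative only):

demo = np.array([[10.], [20.], [30.], [40.], [50.]])
X, y = create_dataset(demo, look_back=1)
print(X)  # [[10.] [20.] [30.]]
print(y)  # [20. 30. 40.]

With look_back=1 each sample is one value and its target is the next value; note that the loop bound len(dataset)-look_back-1 leaves the final pair unused.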
Exp-6) Design and implement a CNN for Image Classification
Step 1: Choose a Dataset
from keras.models import Sequential
import tensorflow as tf
import tensorflow_datasets as tfds
tf.enable_eager_execution()  # needed only on TensorFlow 1.x
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os
import cv2
import random
from PIL import Image
Step 2: Prepare Dataset for Training
path_test = "/content/drive/My Drive/semester 5 - ai ml/datasetHomeAssign/TRAIN"
CATEGORIES = ["EOSINOPHIL", "LYMPHOCYTE", "MONOCYTE", "NEUTROPHIL"]
IMG_SIZE = 200
# read one sample image to inspect its shape before resizing
# (the sample path is an assumption: the first image of the first category)
sample_dir = os.path.join(path_test, CATEGORIES[0])
img_array = cv2.imread(os.path.join(sample_dir, os.listdir(sample_dir)[0]))
print(img_array.shape)
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
Step 3: Create Training Data
training = []

def createTrainingData():
    for category in CATEGORIES:
        path = os.path.join(path_test, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            img_array = cv2.imread(os.path.join(path, img))
            new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
            training.append([new_array, class_num])

createTrainingData()
Step 4: Shuffle the Dataset
random.shuffle(training)
Step 5: Assigning Labels and Features
X = []
y = []
for features, label in training:
    X.append(features)
    y.append(label)
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
Step 6: Normalising X and Converting Labels to Categorical Data
X = X.astype('float32')
X /= 255
from keras.utils import np_utils
Y = np_utils.to_categorical(y, 4)
print(Y[100])
print(Y.shape)
Step 7: Split X and Y for Use in CNN
# note: the integer labels y are used here (not the one-hot Y), which matches the
# sparse_categorical_crossentropy loss chosen below
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
Step 8: Define, Compile and Train the CNN Model
batch_size = 16
nb_classes = 4
nb_epochs = 5
img_rows, img_columns = 200, 200
img_channel = 3
nb_filters = 32
nb_pool = 2
nb_conv = 3
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu,
input_shape=(200, 200, 3)),
tf.keras.layers.MaxPooling2D((2, 2), strides=2),
tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu),
tf.keras.layers.MaxPooling2D((2, 2), strides=2),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(4, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, verbose=1, validation_data=(X_test, y_test))
Step 9: Accuracy and Score of Model
score = model.evaluate(X_test, y_test, verbose = 0 )
print("Test Score: ", score[0])
print("Test accuracy: ", score[1])
Exp7- Build a Multiclass classifier using the CNN model. Use MNIST or any other suitable
dataset.
# baseline cnn model for mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
# load train and test dataset
def load_dataset():
    # load dataset
    (trainX, trainY), (testX, testY) = mnist.load_data()
    # reshape dataset to have a single channel
    trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
    testX = testX.reshape((testX.shape[0], 28, 28, 1))
    # one hot encode target values
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
    # convert from integers to floats
    train_norm = train.astype('float32')
    test_norm = test.astype('float32')
    # normalize to range 0-1
    train_norm = train_norm / 255.0
    test_norm = test_norm / 255.0
    # return normalized images
    return train_norm, test_norm
# define cnn model
def define_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='softmax'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
    scores, histories = list(), list()
    # prepare cross validation
    kfold = KFold(n_folds, shuffle=True, random_state=1)
    # enumerate splits
    for train_ix, test_ix in kfold.split(dataX):
        # define model
        model = define_model()
        # select rows for train and test
        trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
        # fit model
        history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
        # evaluate model
        _, acc = model.evaluate(testX, testY, verbose=0)
        print('> %.3f' % (acc * 100.0))
        # store scores
        scores.append(acc)
        histories.append(history)
    return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
    for i in range(len(histories)):
        # plot loss
        plt.subplot(2, 1, 1)
        plt.title('Cross Entropy Loss')
        plt.plot(histories[i].history['loss'], color='blue', label='train')
        plt.plot(histories[i].history['val_loss'], color='orange', label='test')
        # plot accuracy
        plt.subplot(2, 1, 2)
        plt.title('Classification Accuracy')
        plt.plot(histories[i].history['accuracy'], color='blue', label='train')
        plt.plot(histories[i].history['val_accuracy'], color='orange', label='test')
    plt.show()

# summarize model performance
def summarize_performance(scores):
    # print summary
    print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
    # box and whisker plots of results
    plt.boxplot(scores)
    plt.show()
# run the test harness for evaluating a model
def run_test_harness():
    # load dataset
    trainX, trainY, testX, testY = load_dataset()
    # prepare pixel data
    trainX, testX = prep_pixels(trainX, testX)
    # evaluate model
    scores, histories = evaluate_model(trainX, trainY)
    # learning curves
    summarize_diagnostics(histories)
    # summarize estimated performance
    summarize_performance(scores)
# entry point, run the test harness
run_test_harness()
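Once the cross-validation estimate is satisfactory, a final model can be trained on the full training set and scored once on the held-out MNIST test set (a sketch that reuses the helper functions above):

# fit a final model and evaluate it on the held-out test set
trainX, trainY, testX, testY = load_dataset()
trainX, testX = prep_pixels(trainX, testX)
final_model = define_model()
final_model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=0)
_, acc = final_model.evaluate(testX, testY, verbose=0)
print('Test accuracy: %.3f' % (acc * 100.0))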