TECHNO-INDIA UNIVERSITY
DEEP LEARNING
LAB ASSIGNMENTS
MCA - A
NIKHIL AGARWAL - 1171
Assignment-6:
1. Write an algorithm for the MNIST dataset: identify handwritten digits
from 28x28 grayscale images using multi-layer logistic regression.
Ans.
from tensorflow.keras.datasets import mnist
(training_dataset_x, training_dataset_y), (test_dataset_x, test_dataset_y) = mnist.load_data()
# downloading the MNIST dataset
import matplotlib.pyplot as plt
figure = plt.gcf()
figure.set_size_inches(10, 10)
for i in range(1, 10):
    plt.subplot(3, 3, i)
    axis = plt.gca()
    axis.set_title(str(training_dataset_y[i]))
    plt.imshow(training_dataset_x[i].reshape(28, 28), cmap='gray')
plt.show()
training_dataset_x = training_dataset_x.reshape(-1, 28 * 28)
test_dataset_x = test_dataset_x.reshape(-1, 28 * 28)
training_dataset_x = training_dataset_x / 255
test_dataset_x = test_dataset_x / 255
from tensorflow.keras.utils import to_categorical
training_dataset_y = to_categorical(training_dataset_y)
test_dataset_y = to_categorical(test_dataset_y)
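# For reference (illustrative, not in the original run): to_categorical
# one-hot encodes integer labels, e.g. label 3 over 10 classes:
# to_categorical(3, num_classes=10) -> [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]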
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(512, input_dim=28 * 28, activation='relu', name='Hidden-1'))
model.add(Dense(256, activation='relu', name='Hidden-2'))
model.add(Dense(10, activation='softmax', name='Output'))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])
hist = model.fit(training_dataset_x, training_dataset_y, epochs=10,
                 batch_size=64, validation_split=0.2)
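# Optional refinement (not part of the original run): early stopping halts
# training when the validation loss stops improving, e.g.
# from tensorflow.keras.callbacks import EarlyStopping
# es = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
# hist = model.fit(training_dataset_x, training_dataset_y, epochs=10,
#                  batch_size=64, validation_split=0.2, callbacks=[es])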
import matplotlib.pyplot as plt
figure = plt.gcf()
figure.set_size_inches((15, 5))
plt.title('Loss vs. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.plot(range(1, len(hist.history['loss']) + 1), hist.history['loss'])
plt.plot(range(1, len(hist.history['val_loss']) + 1), hist.history['val_loss'])
plt.legend(['Loss', 'Validation Loss'])
plt.show()
figure = plt.gcf()
figure.set_size_inches((15, 5))
plt.title('Categorical Accuracy vs. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Categorical Accuracy')
plt.plot(range(1, len(hist.history['categorical_accuracy']) + 1),
         hist.history['categorical_accuracy'])
plt.plot(range(1, len(hist.history['val_categorical_accuracy']) + 1),
         hist.history['val_categorical_accuracy'])
plt.legend(['Categorical Accuracy', 'Validation Categorical Accuracy'])
plt.show()
# Evaluating the model on the test set
eval_result = model.evaluate(test_dataset_x, test_dataset_y)
for i in range(len(eval_result)):
    print(f'{model.metrics_names[i]} ---> {eval_result[i]}')
# Testing the model on a custom image
import numpy as np
import matplotlib.pyplot as plt
img_data = plt.imread('/kaggle/input/testing/digit.bmp')
gray_img_data = np.average(img_data, weights=[0.3, 0.59, 0.11], axis=2)
plt.subplot(1, 2, 1)
plt.imshow(img_data)
plt.subplot(1, 2, 2)
plt.imshow(gray_img_data, cmap='gray')
plt.show()
gray_img_data = gray_img_data / 255
gray_img_data = gray_img_data.reshape((1, 28 * 28))
predict_result = model.predict(gray_img_data)
number = np.argmax(predict_result[0])
print(number)
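# Note (illustrative, not part of the original run): MNIST digits are white
# strokes on a black background; if a custom test image is drawn black-on-white,
# invert it before predicting, e.g.
# gray_img_data = 1 - gray_img_data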
Output:
Assignment-7:
2. Consider an image and apply convolution, activation, and pooling layer
operations to extract its internal features.
Ans.
# import the necessary libraries
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from itertools import product
# set default plot parameters
plt.rc('figure', autolayout=True)
plt.rc('image', cmap='magma')
# define the kernel
kernel = tf.constant([[-1, -1, -1],
                      [-1, 8, -1],
                      [-1, -1, -1]])
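# Note (illustrative check, not in the original): this kernel is an edge
# detector; its entries sum to zero, so a flat region convolves to zero while
# an intensity jump gives a large response, e.g. for a constant patch of 5s:
# np.sum(np.full((3, 3), 5.0) * kernel.numpy()) -> 0.0 (8*5 - 8*5 = 0)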
# load the image
image = tf.io.read_file('Ganesh.jpg')
image = tf.io.decode_jpeg(image, channels=1)
image = tf.image.resize(image, size=[300, 300])
# plot the image
img = tf.squeeze(image).numpy()
plt.figure(figsize=(5, 5))
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.title('Original Grayscale Image')
plt.show()
# Reformat
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.expand_dims(image, axis=0)
kernel = tf.reshape(kernel, [*kernel.shape, 1, 1])
kernel = tf.cast(kernel, dtype=tf.float32)
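# conv2d expects a batched NHWC image, here (1, 300, 300, 1), and an HWIO
# kernel, here (3, 3, 1, 1): height, width, input channels, output channels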
# convolution layer
conv_fn = tf.nn.conv2d
image_filter = conv_fn(input=image,
                       filters=kernel,
                       strides=1,  # or (1, 1)
                       padding='SAME')
plt.figure(figsize=(15, 5))
# Plot the convolved image
plt.subplot(1, 3, 1)
plt.imshow(tf.squeeze(image_filter))
plt.axis('off')
plt.title('Convolution')
# activation layer
relu_fn = tf.nn.relu
# Image detection
image_detect = relu_fn(image_filter)
plt.subplot(1, 3, 2)
# Reformat for plotting
plt.imshow(tf.squeeze(image_detect))
plt.axis('off')
plt.title('Activation')
# Pooling layer
pool = tf.nn.pool
image_condense = pool(input=image_detect,
                      window_shape=(2, 2),
                      pooling_type='MAX',
                      strides=(2, 2),
                      padding='SAME')
plt.subplot(1, 3, 3)
plt.imshow(tf.squeeze(image_condense))
plt.axis('off')
plt.title('Pooling')
plt.show()
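The same three steps can also be written with Keras layers; a minimal sketch
(illustrative only, reusing the image and kernel tensors defined above; the
variable names here are assumptions, not part of the assignment):
conv = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', use_bias=False)
_ = conv(image)                      # build the layer so its weights exist
conv.set_weights([kernel.numpy()])   # install the fixed edge-detection kernel
image_detect2 = tf.nn.relu(conv(image))
image_condense2 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2,
                                            padding='same')(image_detect2)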
Output:
Assignment-8:
3. Determine whether a movie review expresses positive, negative, or neutral
sentiment using a simple RNN.
Ans.
# Importing necessary libraries
import pandas as pd
from tensorflow.keras import layers
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU, Bidirectional, Dense, Embedding
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
import numpy as np
# Getting reviews with words that come under 5000
# most occurring words in the entire corpus of textual review data
vocab_size = 5000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
# Printing the first review from the training set
print(x_train[0])
# Getting all the words from word_index dictionary
word_idx = imdb.get_word_index()
# word_index maps each word to its integer index;
# invert it so that indices map back to words
word_idx = {i: word for word, i in word_idx.items()}
# Printing the first review from the training set in its original words
print([word_idx[i] for i in x_train[0]])
# Get the minimum and the maximum length of reviews
# (x_train and x_test are object arrays, so '+' would concatenate them
# element-wise; convert to lists first to take the union of reviews)
print("Max length of a review:", len(max(list(x_train) + list(x_test), key=len)))
print("Min length of a review:", len(min(list(x_train) + list(x_test), key=len)))
# Importing sequence module for padding sequences
from tensorflow.keras.preprocessing import sequence
# Keeping a fixed length of all reviews to max 400 words
max_words = 400
# Padding sequences to a fixed length of 400 words
x_train = sequence.pad_sequences(x_train, maxlen=max_words)
x_test = sequence.pad_sequences(x_test, maxlen=max_words)
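# For reference (illustrative): pad_sequences pre-pads shorter sequences with
# zeros and truncates longer ones from the front by default, e.g.
# sequence.pad_sequences([[5, 9], [1, 2, 3]], maxlen=4)
# -> [[0 0 5 9]
#     [0 1 2 3]]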
# Splitting the training set into training and validation sets
x_valid, y_valid = x_train[:64], y_train[:64]
x_train_, y_train_ = x_train[64:], y_train[64:]
# Fixing every word's embedding size to be 32
embd_len = 32
# Creating a Sequential model named "Simple_RNN"
RNN_model = Sequential(name="Simple_RNN")
RNN_model.add(Embedding(vocab_size, embd_len, input_length=max_words))
# Adding a SimpleRNN layer with 128 units and 'tanh' activation function
# For a stacked RNN (more than one recurrent layer), use return_sequences=True
RNN_model.add(SimpleRNN(128, activation='tanh', return_sequences=False))
RNN_model.add(Dense(1, activation='sigmoid'))
# Printing the summary of the model architecture
print(RNN_model.summary())
# Compiling the model with binary cross-entropy loss, Adam optimizer, and accuracy metric
RNN_model.compile(
loss="binary_crossentropy",
optimizer='adam',
metrics=['accuracy']
)
# Training the model on the training data
history = RNN_model.fit(x_train_, y_train_,
                        batch_size=64,
                        epochs=5,
                        verbose=1,
                        validation_data=(x_valid, y_valid))
# Printing model score on test data
print()
print("Simple_RNN Score---> ", RNN_model.evaluate(x_test, y_test,
verbose=0))
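The GRU and Bidirectional layers imported above can be swapped in the same way;
a minimal sketch of a bidirectional variant, shown for comparison only and not
trained in this run:
Bi_model = Sequential(name="Bidirectional_RNN")
Bi_model.add(Embedding(vocab_size, embd_len, input_length=max_words))
Bi_model.add(Bidirectional(SimpleRNN(128, activation='tanh')))
Bi_model.add(Dense(1, activation='sigmoid'))
Bi_model.compile(loss="binary_crossentropy", optimizer='adam',
                 metrics=['accuracy'])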
Output:
Assignment-9:
4. Determine whether a movie review expresses positive, negative, or neutral
sentiment using an LSTM.
Ans.
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.preprocessing.text import Tokenizer
np.random.seed(42)
tf.random.set_seed(42)
max_features = 10000
maxlen = 200
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))  # Embedding layer
model.add(LSTM(128, return_sequences=False)) # LSTM layer
model.add(Dense(1, activation='sigmoid')) # Output layer
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()
batch_size = 32
epochs = 5
history = model.fit(x_train, y_train, batch_size=batch_size,
                    epochs=epochs, validation_split=0.2)
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
def predict_sentiment(text, model, tokenizer, maxlen=200):
    # Preprocess the text
    sequences = tokenizer.texts_to_sequences([text])
    padded_sequences = pad_sequences(sequences, maxlen=maxlen)
    # Predict sentiment
    prediction = model.predict(padded_sequences)
    # Interpret the result
    sentiment = 'Positive' if prediction[0][0] > 0.5 else 'Negative'
    print(f'Text: {text}')
    print(f'Sentiment: {sentiment} (Confidence: {prediction[0][0]:.2f})')
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(imdb.get_word_index().keys())
user_input = "very bad movie."
predict_sentiment(user_input, model, tokenizer)
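Caveat: a Tokenizer fit on the word_index keys does not reproduce the index
scheme the model was trained on, because imdb.load_data reserves indices 0-2
(padding, start, unknown) and shifts every word index by 3; this mismatch is
why the raw-text prediction can disagree with the test accuracy. A sketch of
an encoding consistent with the training data (encode_review is a hypothetical
helper, assuming load_data's defaults):
imdb_index = imdb.get_word_index()

def encode_review(text, maxlen=200):
    # Shift by 3 to match imdb.load_data's default index_from;
    # unknown or out-of-vocabulary words map to the OOV index 2
    ids = [imdb_index.get(w, -1) + 3 for w in text.lower().split()]
    ids = [i if 2 < i < max_features else 2 for i in ids]
    return pad_sequences([[1] + ids], maxlen=maxlen)  # 1 is the start token

print(model.predict(encode_review(user_input))[0][0])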
Output:
Model: "sequential_1"
Text: very bad movie.
Sentiment: Negative (Confidence: 0.47)
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================
================
embedding_2 (Embedding) (None, 200, 128) 1280000
lstm_2 (LSTM) (None, 128) 131584
dense_2 (Dense) (None, 1) 129
=================================================
================
Total params: 1411713 (5.39 MB)
Trainable params: 1411713 (5.39 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
Epoch 1/5
625/625 [==============================] - 233s 368ms/step - loss: 0.4855 - accuracy: 0.7689 - val_loss: 0.3985 - val_accuracy: 0.8330
Epoch 2/5
625/625 [==============================] - 229s 367ms/step - loss: 0.2944 - accuracy: 0.8803 - val_loss: 0.3347 - val_accuracy: 0.8614
Epoch 3/5
625/625 [==============================] - 230s 368ms/step - loss: 0.2059 - accuracy: 0.9214 - val_loss: 0.3880 - val_accuracy: 0.8414
Epoch 4/5
625/625 [==============================] - 229s 366ms/step - loss: 0.1476 - accuracy: 0.9463 - val_loss: 0.3879 - val_accuracy: 0.8610
Epoch 5/5
625/625 [==============================] - 230s 368ms/step - loss: 0.0982 - accuracy: 0.9658 - val_loss: 0.4775 - val_accuracy: 0.8552
782/782 [==============================] - 88s 113ms/step - loss: 0.4963 - accuracy: 0.8434
Test score: 0.4963449537754059
Test accuracy: 0.8433600068092346
1/1 [==============================] - 0s 404ms/step
Text: very bad movie.
Sentiment: Positive (Confidence: 0.71)