DL Lab Manual

TASK 1

Implement the Perceptron training algorithm to classify flowers in the IRIS dataset.


from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score

# Load the Iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

# Standardize the features
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)

# Train the Perceptron model
ppn = Perceptron(max_iter=40, eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)

# Predict the test set results
y_pred = ppn.predict(X_test_std)

# Evaluate the accuracy
print(f'Accuracy: {accuracy_score(y_test, y_pred):.2f}')


output: Accuracy: 0.87
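Accuracy alone hides which of the three species the perceptron confuses. scikit-learn's classification_report gives per-class precision and recall; a short sketch, reusing the fitted ppn and predictions from above:

from sklearn.metrics import classification_report

# Sketch: per-class metrics for the fitted perceptron (assumes ppn, y_test, y_pred from above)
print(classification_report(y_test, y_pred, target_names=iris.target_names))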

TASK 2
Implement Activation Functions in Neural Networks and analyse their usage.
import numpy as np
import matplotlib.pyplot as plt

# Sigmoid function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Tanh function
def tanh(x):
    return np.tanh(x)

# ReLU function
def relu(x):
    return np.maximum(0, x)

# Leaky ReLU function
def leaky_relu(x, alpha=0.01):
    return np.where(x > 0, x, x * alpha)

# Plotting the activation functions
x = np.linspace(-10, 10, 100)
plt.figure(figsize=(10, 8))
plt.plot(x, sigmoid(x), label='Sigmoid')
plt.plot(x, tanh(x), label='Tanh')
plt.plot(x, relu(x), label='ReLU')
plt.plot(x, leaky_relu(x), label='Leaky ReLU')
plt.title('Activation Functions')
plt.legend()
plt.grid(True)
plt.show()

output: (a single figure showing the Sigmoid, Tanh, ReLU and Leaky ReLU curves over x in [-10, 10])
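The practical differences between these functions show up in their gradients: sigmoid and tanh saturate (derivatives near zero for large |x|, which causes vanishing gradients in deep networks), while ReLU keeps a constant gradient for positive inputs. A sketch plotting the derivatives, reusing the functions defined above:

# Sketch: plot derivatives to visualize saturation (reuses sigmoid/tanh from above)
x = np.linspace(-10, 10, 100)
plt.figure(figsize=(10, 8))
plt.plot(x, sigmoid(x) * (1 - sigmoid(x)), label="Sigmoid'")
plt.plot(x, 1 - tanh(x) ** 2, label="Tanh'")
plt.plot(x, np.where(x > 0, 1.0, 0.0), label="ReLU'")
plt.plot(x, np.where(x > 0, 1.0, 0.01), label="Leaky ReLU'")
plt.title('Derivatives of Activation Functions')
plt.legend()
plt.grid(True)
plt.show()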

TASK 3
Build a three-layer Artificial Neural Network by implementing the Backpropagation algorithm.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Generate a binary classification dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=1)

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

# Standardize the features
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Build the neural network model: two hidden layers plus a sigmoid output
model = Sequential([
    Dense(64, input_dim=20, activation='relu'),
    Dense(64, activation='relu'),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model (Keras applies backpropagation automatically during fit)
model.fit(X_train, y_train, epochs=100, batch_size=10, verbose=1)

# Evaluate the model
loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f'Test Accuracy: {accuracy:.2f}')

output:

Epoch 100/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 1.0000 - loss: 3.6077e-04
Test Accuracy: 0.80
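The Keras call above relies on the framework's built-in backpropagation. To make the algorithm itself explicit, the following is a minimal NumPy sketch of backpropagation for a small three-layer network, assuming sigmoid activations and squared-error loss (simpler choices than the ReLU/cross-entropy model above); the XOR-style data and layer sizes are illustrative:

import numpy as np

rng = np.random.default_rng(1)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Toy XOR-style data: 4 samples, 2 features
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

# Parameters: input -> hidden (4 units) -> output
W1, b1 = rng.normal(size=(2, 4)), np.zeros((1, 4))
W2, b2 = rng.normal(size=(4, 1)), np.zeros((1, 1))
lr = 0.5

for _ in range(10000):
    # Forward pass
    h = sigmoid(X @ W1 + b1)      # hidden activations
    out = sigmoid(h @ W2 + b2)    # output activations

    # Backward pass: error signals via the chain rule
    d_out = (out - y) * out * (1 - out)    # gradient at the output pre-activation
    d_h = (d_out @ W2.T) * h * (1 - h)     # gradient propagated to the hidden layer

    # Gradient-descent updates
    W2 -= lr * h.T @ d_out
    b2 -= lr * d_out.sum(axis=0, keepdims=True)
    W1 -= lr * X.T @ d_h
    b1 -= lr * d_h.sum(axis=0, keepdims=True)

print(np.round(out, 2))  # should approach [0, 1, 1, 0] as training converges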

TASK 4
Design a GRU-based deep learning model for the IMDB dataset and compare its performance with an LSTM-based model.
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, GRU, LSTM, Dense

# Load the IMDB dataset
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to ensure uniform length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the GRU model
gru_model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    GRU(128, return_sequences=True),
    GRU(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
gru_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the GRU model
gru_model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)

# Evaluate the GRU model
gru_loss, gru_accuracy = gru_model.evaluate(x_test, y_test)
print(f'GRU Test Accuracy: {gru_accuracy:.2f}')

# Build the LSTM model
lstm_model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    LSTM(128, return_sequences=True),
    LSTM(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
lstm_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the LSTM model
lstm_model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)

# Evaluate the LSTM model
lstm_loss, lstm_accuracy = lstm_model.evaluate(x_test, y_test)
print(f'LSTM Test Accuracy: {lstm_accuracy:.2f}')

output (truncated):

Epoch 1/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 952s 2s/step - accuracy: 0.6574 - loss: 0.6075 - val_accuracy: 0.6122 - val_loss: 0.6454
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 953s 1s/step - accuracy: 0.8015 - loss: 0.4347 - val_accuracy: 0.8688 - val_loss: 0.3200
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 926s 1s/step - accuracy: 0.9004 - loss: 0.2498 - val_accuracy: 0.8438 - val_loss: 0.3622
Epoch 4/10
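Part of the comparison can be made without training at all: GRU layers are smaller. A GRU cell has three weight blocks (reset gate, update gate, candidate state) versus the LSTM's four, so each GRU layer carries roughly three quarters of the weights of the equivalent LSTM layer. A short sketch, assuming gru_model and lstm_model have been built as above:

# Sketch: compare model sizes (assumes both models above are built)
print(f'GRU parameters:  {gru_model.count_params():,}')
print(f'LSTM parameters: {lstm_model.count_params():,}')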

TASK 5
Build a Deep Neural Network for multi-class text classification using the Reuters dataset.
from tensorflow.keras.datasets import reuters
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical

# Load the Reuters dataset
max_words = 10000
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words)

# Vectorize the word-index sequences into binary bag-of-words matrices
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')

# Convert labels to categorical format (46 topic classes)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Build the model
model = Sequential([
    Dense(512, input_shape=(max_words,), activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(46, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=5, batch_size=32, validation_split=0.2)

# Evaluate the model
score = model.evaluate(x_test, y_test, batch_size=32)
print(f'Test Accuracy: {score[1]:.2f}')

output:
Epoch 1/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 23s 91ms/step - accuracy: 0.5532 - loss: 1.9485 - val_accuracy: 0.7557 - val_loss: 1.0683
Epoch 2/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 40s 88ms/step - accuracy: 0.8023 - loss: 0.8478 - val_accuracy: 0.7974 - val_loss: 0.9167
Epoch 3/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 21s 92ms/step - accuracy: 0.8870 - loss: 0.5064 - val_accuracy: 0.8169 - val_loss: 0.9042
Epoch 4/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 40s 86ms/step - accuracy: 0.9216 - loss: 0.3146 - val_accuracy: 0.8047 - val_loss: 0.9600
Epoch 5/5
225/225 ━━━━━━━━━━━━━━━━━━━━ 20s 89ms/step - accuracy: 0.9420 - loss: 0.2659 - val_accuracy: 0.8063 - val_loss: 1.0059
71/71 ━━━━━━━━━━━━━━━━━━━━ 1s 13ms/step - accuracy: 0.8090 - loss: 1.0918

Test Accuracy: 0.80
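To see what a vectorized sample originally said, a newswire can be decoded through the dataset's word index. A sketch (the raw sequences must be reloaded, since x_train above was overwritten by the binary matrix):

# Sketch: decode the first training newswire back to text
(raw_train, _), _ = reuters.load_data(num_words=max_words)
word_index = reuters.get_word_index()
# Keras reserves indices 0-2 (padding/start/unknown), hence the offset of 3
reverse_index = {value + 3: key for key, value in word_index.items()}
decoded = ' '.join(reverse_index.get(i, '?') for i in raw_train[0])
print(decoded[:200])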

TASK 6
Design a model for MNIST handwritten digit classification using Deep Convolutional Neural Networks.
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.utils import to_categorical

# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Reshape to (N, 28, 28, 1) and normalize pixel values to [0, 1]
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32') / 255

# Convert labels to categorical format
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Build the CNN model
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')


output (last two epochs shown):
Epoch 9/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 86s 124ms/step - accuracy: 0.9890 - loss: 0.0346 - val_accuracy: 0.9895 - val_loss: 0.0386
Epoch 10/10
375/375 ━━━━━━━━━━━━━━━━━━━━ 44s 116ms/step - accuracy: 0.9897 - loss: 0.0330 - val_accuracy: 0.9899 - val_loss: 0.0358
313/313 ━━━━━━━━━━━━━━━━━━━━ 3s 9ms/step - accuracy: 0.9883 - loss: 0.0321

Test Accuracy: 0.99
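A quick qualitative check is to visualize a few test digits alongside the model's predictions; a sketch reusing model and x_test from above:

import numpy as np
import matplotlib.pyplot as plt

# Sketch: show the first five test digits with their predicted labels
preds = model.predict(x_test[:5])
for i in range(5):
    plt.subplot(1, 5, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    plt.title(f'pred: {np.argmax(preds[i])}')
    plt.axis('off')
plt.show()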

TASK 7
Train a simple Recurrent Neural Network using an Embedding layer and a SimpleRNN layer for the movie review classification problem.
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense

# Load the IMDB dataset
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to ensure uniform length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the Simple RNN model
model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    SimpleRNN(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model (3 epochs, matching the run recorded below)
model.fit(x_train, y_train, epochs=3, batch_size=32, validation_split=0.2)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

output:

Epoch 1/3
625/625 ━━━━━━━━━━━━━━━━━━━━ 163s 258ms/step - accuracy: 0.5608 - loss: 0.6761 - val_accuracy: 0.7552 - val_loss: 0.5849
Epoch 2/3
625/625 ━━━━━━━━━━━━━━━━━━━━ 202s 258ms/step - accuracy: 0.7697 - loss: 0.5009 - val_accuracy: 0.7888 - val_loss: 0.4899
Epoch 3/3
625/625 ━━━━━━━━━━━━━━━━━━━━ 203s 260ms/step - accuracy: 0.8163 - loss: 0.4192 - val_accuracy: 0.8042 - val_loss: 0.4520
782/782 ━━━━━━━━━━━━━━━━━━━━ 49s 63ms/step - accuracy: 0.8095 - loss: 0.4510

Test Accuracy: 0.81
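With epochs this slow, it is common to let validation loss decide when to stop instead of fixing the epoch count; a sketch using Keras's EarlyStopping callback with the model above:

from tensorflow.keras.callbacks import EarlyStopping

# Sketch: halt training once val_loss stops improving for 2 consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
model.fit(x_train, y_train, epochs=10, batch_size=32,
          validation_split=0.2, callbacks=[early_stop])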

TASK 8
Build a Deep Learning model using an LSTM layer in Keras for the IMDB dataset.
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Load the IMDB dataset
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to ensure uniform length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the LSTM model
model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    LSTM(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model (2 epochs, matching the run recorded below)
model.fit(x_train, y_train, epochs=2, batch_size=32, validation_split=0.2)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

output:

Epoch 1/2
625/625 ━━━━━━━━━━━━━━━━━━━━ 656s 1s/step - accuracy: 0.6974 - loss: 0.5501 - val_accuracy: 0.8432 - val_loss: 0.3764
Epoch 2/2
625/625 ━━━━━━━━━━━━━━━━━━━━ 643s 1s/step - accuracy: 0.8397 - loss: 0.3722 - val_accuracy: 0.8312 - val_loss: 0.3874
782/782 ━━━━━━━━━━━━━━━━━━━━ 217s 278ms/step - accuracy: 0.8384 - loss: 0.3834

Test Accuracy: 0.84
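Each LSTM epoch here takes around ten minutes, so persisting the trained model is worthwhile. A minimal sketch (the filename is illustrative; the native .keras format needs a recent Keras version, older ones used HDF5 '.h5' files):

from tensorflow.keras.models import load_model

# Sketch: persist and restore the trained model (filename is an assumption)
model.save('imdb_lstm.keras')
restored = load_model('imdb_lstm.keras')
print(f'Restored accuracy: {restored.evaluate(x_test, y_test, verbose=0)[1]:.2f}')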

TASK 9
Design a Neural Network with various optimization algorithms and analyse their performance using Keras.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Generate a binary classification dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2, random_state=1)

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

# Standardize the features
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Build a fresh model for each optimizer so every run starts from
# untrained weights and the comparison is fair
def build_model():
    return Sequential([
        Dense(64, input_dim=20, activation='relu'),
        Dense(64, activation='relu'),
        Dense(1, activation='sigmoid')
    ])

# Train and evaluate with different optimizers
optimizers = ['sgd', 'adam', 'rmsprop']
for opt in optimizers:
    print(f'\nTraining with {opt} optimizer:')
    model = build_model()
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(X_train, y_train, epochs=50, batch_size=10, verbose=0)
    loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print(f'Test Accuracy with {opt}: {accuracy:.2f}')

output (truncated):

Training with sgd optimizer:

Test Accuracy with sgd: 0.84

Training with adam optimizer:
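A single test-accuracy number says little about convergence behaviour; plotting the loss history per optimizer makes the comparison visible. A sketch reusing build_model and the data above:

import matplotlib.pyplot as plt

# Sketch: plot training-loss curves per optimizer (assumes build_model and data above)
for opt in ['sgd', 'adam', 'rmsprop']:
    m = build_model()
    m.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    history = m.fit(X_train, y_train, epochs=50, batch_size=10, verbose=0)
    plt.plot(history.history['loss'], label=opt)
plt.xlabel('Epoch')
plt.ylabel('Training loss')
plt.legend()
plt.show()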

TASK 10
Design a Deep Learning model to classify movie reviews as positive or negative based on the text content of the reviews, using the IMDB dataset.
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Load the IMDB dataset
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to ensure uniform length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Build the LSTM model
model = Sequential([
    Embedding(max_features, 128, input_length=maxlen),
    LSTM(128),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test Accuracy: {test_acc:.2f}')

output:
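To classify a review that is not in the dataset, the raw text must be encoded with the same word index the dataset uses. A sketch, where encode_review is an illustrative helper (not a Keras API); Keras reserves indices 0-2 for padding, start, and unknown tokens, hence the offset of 3:

# Sketch: score a hand-written review with the trained model
word_index = imdb.get_word_index()

def encode_review(text):
    # Map words to dataset indices (+3 for reserved tokens); 2 marks out-of-vocabulary
    ids = [word_index.get(w, -1) + 3 for w in text.lower().split()]
    ids = [i if 3 <= i < max_features else 2 for i in ids]
    return pad_sequences([[1] + ids], maxlen=maxlen)  # 1 is the start token

prob = model.predict(encode_review('a wonderful film with a great cast'))[0][0]
print(f'Positive probability: {prob:.2f}')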

TASK 11
Apply One-Hot Encoding to categorical sequence data.


from sklearn.preprocessing import OneHotEncoder
import numpy as np

# Example categorical data
categories = np.array(['apple', 'banana', 'orange', 'banana', 'orange', 'apple']).reshape(-1, 1)

# Apply One-Hot Encoding (sparse_output=False returns a dense array;
# the parameter was named `sparse` in scikit-learn versions before 1.2)
encoder = OneHotEncoder(sparse_output=False)
one_hot_encoded = encoder.fit_transform(categories)

print('Original Categories:\n', categories.flatten())
print('One-Hot Encoded:\n', one_hot_encoded)

output:
Original Categories:
['apple' 'banana' 'orange' 'banana' 'orange' 'apple']
One-Hot Encoded:
[[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]
 [0. 1. 0.]
 [0. 0. 1.]
 [1. 0. 0.]]
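Two details worth knowing alongside the basics: the encoder can invert its own encoding, and handle_unknown='ignore' makes transform return an all-zeros row for categories unseen during fit instead of raising an error. A short sketch with the encoder above:

# Sketch: round-trip and unseen-category handling
print(encoder.inverse_transform(one_hot_encoded[:2]))  # [['apple'] ['banana']]

safe_encoder = OneHotEncoder(sparse_output=False, handle_unknown='ignore')
safe_encoder.fit(categories)
# An unseen category encodes to an all-zeros row rather than raising
print(safe_encoder.transform(np.array([['mango']])))   # [[0. 0. 0.]]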

TASK 12
Design a Deep Learning framework for Object Detection.


import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model

# Load the MobileNetV2 backbone pre-trained on ImageNet (without its classifier head)
base_model = MobileNetV2(weights='imagenet', include_top=False)

# Add a custom classification head on top of the backbone; a full object
# detector would additionally predict bounding-box coordinates
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)  # assuming 10 object classes

# Create the final model
model = Model(inputs=base_model.input, outputs=predictions)

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Summary of the model
model.summary()

output: (model.summary() prints the MobileNetV2 backbone layers plus the custom head, with parameter counts)
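Before training the head on real images, two standard steps are freezing the pre-trained backbone and applying MobileNetV2's own input scaling; a sketch (the random batch is purely illustrative):

from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
import numpy as np

# Sketch: freeze the backbone so only the new head trains at first
base_model.trainable = False
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# MobileNetV2 expects inputs scaled to [-1, 1]; preprocess_input handles this
dummy_batch = preprocess_input(np.random.randint(0, 256, (4, 224, 224, 3)).astype('float32'))
print(model.predict(dummy_batch).shape)  # (4, 10): one softmax vector per image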
