NMJ40603 – Artificial Intelligence
Deep learning example code (VGG16)
Case study: Face recognition for three students.
Subject 1: Hui
Subject 2: Kris
Subject 3: Niva
Before running this code, make sure your images are ready in your Google Drive. The images must be divided into three folders: training, validation, and testing.
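For example, the dataset folder on Google Drive could be organised like this (the class subfolder names are examples; each class folder holds that student's face images):

dataset/
    training/
        Hui/  Kris/  Niva/
    validation/
        Hui/  Kris/  Niva/
    testing/
        Hui/  Kris/  Niva/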
Example: VGG 16 training using Google Colab
Line 1
import os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
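These imports on their own do not authenticate anything; drive.mount() in line 3 is enough for this example. If PyDrive access to Drive files is actually needed, one common Colab pattern is the sketch below (the gdrive name is just an example):

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
gdrive = GoogleDrive(gauth)   # PyDrive client for listing/uploading Drive files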
Line 2
# install the Augmentor image-augmentation library
!pip install Augmentor
Line 3
from google.colab import drive
drive.mount('/content/drive')
Line 4
from keras.layers import Input, Lambda, Conv2D, MaxPool2D, Dense, Flatten, Dropout
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
Line 5
Note: Make sure to use your own folder path.
# re-size all the images to this size
IMAGE_SIZE = [224, 224]

train_path = '/content/drive/MyDrive/DeeplearningVGGandResnet/dataset/training'
valid_path = '/content/drive/MyDrive/DeeplearningVGGandResnet/dataset/validation'
Line 6
Note: In the prediction layer, the value 3 is the number of classes; this case study uses three faces, hence 3 is used. (The [3] added to IMAGE_SIZE is the three RGB colour channels, not the number of classes.)
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# don't train existing weights
for layer in vgg.layers:
    layer.trainable = False

# our layers - you can add more if you want
x = Flatten()(vgg.output)
prediction = Dense(3, activation='softmax')(x)

# create a model object
model = Model(inputs=vgg.input, outputs=prediction)

# view the structure of the model
model.summary()

# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
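If the number of students ever changes, the size of the Dense output layer must match the number of class folders. A small sketch (assuming train_path from line 5) that derives it instead of hard-coding 3:

import os
numClass = len(os.listdir(train_path))               # one subfolder per student/class
prediction = Dense(numClass, activation='softmax')(x)
model = Model(inputs=vgg.input, outputs=prediction)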
Line 7
# Using ImageDataGenerator to read images from directories
# Rescale all images by 1/255
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(train_path, target_size=(224, 224),
                                                 batch_size=32, class_mode='categorical')

validation_set = test_datagen.flow_from_directory(valid_path, target_size=(224, 224),
                                                  batch_size=32, class_mode='categorical')
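To double-check which folder is mapped to which output neuron, the generator's class_indices attribute can be printed, for example:

print(training_set.class_indices)   # e.g. {'Hui': 0, 'Kris': 1, 'Niva': 2}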
Line 8
from os import listdir
label = listdir(train_path)
numClass = len(label)
print(label)
Line 9
r = model.fit(training_set, epochs=20, validation_data=validation_set, validation_steps=1)
Line 10
Note: Name and save the deep learning model into your own folder.
model.save("/content/drive/MyDrive/Deeplearning-VGGandResnet/modelVGG16_ep20.h5")
Line 11
import matplotlib as mpl
mpl.rcParams.update(mpl.rcParamsDefault)

# loss
plt.figure(1)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.title("train-val loss graph")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()
# plt.savefig("/content/drive/MyDrive/FYP/8to2ep20/LossVal_loss.png")

# accuracies
plt.figure(2)
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.title("train-val accuracy graph")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.show()
# plt.savefig("/content/drive/MyDrive/FYP/8to2ep20/AccVal_acc.png")
Line 12
# training evaluation - accuracy, specificity, sensitivity, etc.
# find the code by yourselves
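One possible sketch for this exercise (not the only way; it assumes the model and generators defined above): use scikit-learn on an unshuffled validation generator. Per-class sensitivity is the recall in the report, and specificity can be worked out from the confusion matrix.

from sklearn.metrics import classification_report, confusion_matrix

# re-create the validation generator without shuffling so labels stay in file order
eval_set = test_datagen.flow_from_directory(valid_path, target_size=(224, 224),
                                            batch_size=32, class_mode='categorical',
                                            shuffle=False)
y_true = eval_set.classes
y_pred = np.argmax(model.predict(eval_set), axis=1)
print(classification_report(y_true, y_pred, target_names=list(eval_set.class_indices.keys())))
print(confusion_matrix(y_true, y_pred))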
Example: VGG 16 testing using Google Colab
Lines 1 to 4 are the same as in the training code above.
Line 5
Note: Make sure to use your own folder path.
# re-size all the images to this size
IMAGE_SIZE = [224, 224]

train_path = '/content/drive/MyDrive/DeeplearningVGGandResnet/dataset/training'
test_path = '/content/drive/MyDrive/DeeplearningVGGandResnet/dataset/testing'
Line 6
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# don't train existing weights
for layer in vgg.layers:
    layer.trainable = False

# our layers - you can add more if you want
x = Flatten()(vgg.output)
prediction = Dense(3, activation='softmax')(x)

# create a model object
model = Model(inputs=vgg.input, outputs=prediction)

# view the structure of the model (no need to view for testing)
# model.summary()

# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
Line 7
Note: Similar to training code line 7, but the difference is that a testing set variable is used here.
# Using ImageDataGenerator to read images from directories
# Rescale all images by 1/255
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

valid_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(train_path, target_size=(224, 224),
                                                 batch_size=32, class_mode='categorical')

testing_set = valid_datagen.flow_from_directory(test_path, target_size=(224, 224),
                                                batch_size=32, class_mode='categorical')

from os import listdir
label = listdir(train_path)
numClass = len(label)
print(label)
Note: Use a ModelCheckpoint callback pointing at the saved model file so that the best weights are stored there. Make sure to have the testing set as the validation data.
from keras.callbacks import ModelCheckpoint, EarlyStopping

# checkpoint - writes the best weights (by validation loss) to the saved model file
checkpoint = ModelCheckpoint("/content/drive/MyDrive/Deeplearning-VGGandResnet/modelVGG16_ep20.h5",
                             verbose=1, save_best_only=True)

# # don't train existing weights
# for layer in vgg.layers:
#     layer.trainable = False

r = model.fit(training_set, epochs=20, validation_data=testing_set,
              validation_steps=1, shuffle=True,
              callbacks=[checkpoint])
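After this run, the best weights are in the .h5 file written by the checkpoint. A minimal sketch (assuming the same path as above) for reloading that model and scoring it on the testing set:

from keras.models import load_model

best_model = load_model("/content/drive/MyDrive/Deeplearning-VGGandResnet/modelVGG16_ep20.h5")
loss, acc = best_model.evaluate(testing_set)
print("testing loss:", loss, "testing accuracy:", acc)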
# confusion matrix
# find the code by yourselves
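One possible sketch (not the official solution): predict on an unshuffled testing generator and plot the confusion matrix with scikit-learn.

from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

# re-create the testing generator without shuffling so labels stay in file order
test_eval_set = valid_datagen.flow_from_directory(test_path, target_size=(224, 224),
                                                  batch_size=32, class_mode='categorical',
                                                  shuffle=False)
y_true = test_eval_set.classes
y_pred = np.argmax(model.predict(test_eval_set), axis=1)
cm = confusion_matrix(y_true, y_pred)
ConfusionMatrixDisplay(cm, display_labels=list(test_eval_set.class_indices.keys())).plot()
plt.show()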