import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import utils
import os
%matplotlib inline
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from IPython.display import SVG, Image
from livelossplot import PlotLossesKerasTF
import tensorflow as tf
print("Tensorflow version:", tf.__version__)
Tensorflow version: 2.11.0
def plot_example_images(plt):
    """Draw a grid of sample training images, five per expression class.

    Parameters
    ----------
    plt : module
        The ``matplotlib.pyplot`` module, passed in so the caller keeps
        control of figure/backend state.

    Returns
    -------
    module
        The same pyplot module with the example figure drawn on it.
    """
    img_size = 48
    plt.figure(0, figsize=(12, 20))
    ctr = 0
    for expression in os.listdir("train/"):
        # Skip macOS metadata entries (.DS_Store) and any other hidden files.
        if expression.startswith('.'):
            continue
        # List the class directory once, not on every inner iteration.
        files = os.listdir(os.path.join("train", expression))
        for i in range(1, 6):
            ctr += 1
            plt.subplot(7, 5, ctr)
            img = load_img(os.path.join("train", expression, files[i]),
                           target_size=(img_size, img_size))
            plt.imshow(img, cmap="gray")
    plt.tight_layout()
    return plt
plot_example_images(plt)
<module 'matplotlib.pyplot' from '/Users/thomas/miniforge3/lib/python3.9/site-packages/matplotlib/pyplot.py'>
# Report how many training images exist for each expression class.
for expression in os.listdir("train/"):
    if expression == '.DS_Store':
        continue
    print(f"{len(os.listdir('train/' + expression))} {expression} images")
7214 happy images 4830 sad images 4097 fear images 3171 surprise images 4965 neutral images 3995 angry images 436 disgust images
img_size = 48
batch_size = 64

# Training data: random horizontal flips as augmentation.
datagen_train = ImageDataGenerator(horizontal_flip=True)
train_generator = datagen_train.flow_from_directory(
    "train/",
    target_size=(img_size, img_size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

# Validation data: NO augmentation — evaluation metrics must be computed
# on unmodified images (the original applied horizontal_flip here too,
# which randomly perturbs the validation set every epoch).
datagen_validation = ImageDataGenerator()
validation_generator = datagen_validation.flow_from_directory(
    "test/",
    target_size=(img_size, img_size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)
Found 28708 images belonging to 7 classes. Found 7178 images belonging to 7 classes.
# Initialising the CNN: four Conv -> BN -> ReLU -> MaxPool -> Dropout stages
# followed by two fully-connected stages and a 7-way softmax head
# (one unit per expression class).
model = Sequential()

# 1 - Convolution (input: 48x48 single-channel grayscale images)
model.add(Conv2D(64, (3, 3), padding='same', input_shape=(48, 48, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 2nd Convolution layer
model.add(Conv2D(128, (5, 5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 3rd Convolution layer
model.add(Conv2D(512, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# 4th Convolution layer
model.add(Conv2D(512, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Flattening
model.add(Flatten())

# Fully connected layer 1st layer
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))

# Fully connected layer 2nd layer
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))

# Output: one probability per expression class.
model.add(Dense(7, activation='softmax'))

# `lr` is deprecated in TF 2.x optimizers (the original emitted an absl
# deprecation warning); use `learning_rate` instead.
opt = Adam(learning_rate=0.0005)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
WARNING:absl:`lr` is deprecated, please use `learning_rate` instead, or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 48, 48, 64) 640 batch_normalization (BatchN (None, 48, 48, 64) 256 ormalization) activation (Activation) (None, 48, 48, 64) 0 max_pooling2d (MaxPooling2D (None, 24, 24, 64) 0 ) dropout (Dropout) (None, 24, 24, 64) 0 conv2d_1 (Conv2D) (None, 24, 24, 128) 204928 batch_normalization_1 (Batc (None, 24, 24, 128) 512 hNormalization) activation_1 (Activation) (None, 24, 24, 128) 0 max_pooling2d_1 (MaxPooling (None, 12, 12, 128) 0 2D) dropout_1 (Dropout) (None, 12, 12, 128) 0 conv2d_2 (Conv2D) (None, 12, 12, 512) 590336 batch_normalization_2 (Batc (None, 12, 12, 512) 2048 hNormalization) activation_2 (Activation) (None, 12, 12, 512) 0 max_pooling2d_2 (MaxPooling (None, 6, 6, 512) 0 2D) dropout_2 (Dropout) (None, 6, 6, 512) 0 conv2d_3 (Conv2D) (None, 6, 6, 512) 2359808 batch_normalization_3 (Batc (None, 6, 6, 512) 2048 hNormalization) activation_3 (Activation) (None, 6, 6, 512) 0 max_pooling2d_3 (MaxPooling (None, 3, 3, 512) 0 2D) dropout_3 (Dropout) (None, 3, 3, 512) 0 flatten (Flatten) (None, 4608) 0 dense (Dense) (None, 256) 1179904 batch_normalization_4 (Batc (None, 256) 1024 hNormalization) activation_4 (Activation) (None, 256) 0 dropout_4 (Dropout) (None, 256) 0 dense_1 (Dense) (None, 512) 131584 batch_normalization_5 (Batc (None, 512) 2048 hNormalization) activation_5 (Activation) (None, 512) 0 dropout_5 (Dropout) (None, 512) 0 dense_2 (Dense) (None, 7) 3591 ================================================================= Total params: 4,478,727 Trainable params: 4,474,759 Non-trainable params: 3,968 _________________________________________________________________
%%time
epochs = 15
# Integer division: one pass over the full batches each epoch.
steps_per_epoch = train_generator.n // train_generator.batch_size
validation_steps = validation_generator.n // validation_generator.batch_size

# Shrink the learning rate 10x whenever val_loss plateaus for 2 epochs.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              patience=2, min_lr=0.00001, mode='auto')
# save_best_only=True keeps only the weights with the best validation
# accuracy; the original saved every epoch unconditionally, so the final
# checkpoint was the last epoch's weights rather than the best ones.
checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_accuracy',
                             save_weights_only=True, save_best_only=True,
                             mode='max', verbose=1)
callbacks = [PlotLossesKerasTF(), checkpoint, reduce_lr]

history = model.fit(
    x=train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_steps,
    callbacks=callbacks
)
accuracy training (min: 0.337, max: 0.666, cur: 0.664) validation (min: 0.254, max: 0.640, cur: 0.636) Loss training (min: 0.892, max: 1.727, cur: 0.892) validation (min: 0.972, max: 1.973, cur: 0.991) Epoch 15: saving model to model_weights.h5 448/448 [==============================] - 249s 556ms/step - loss: 0.8915 - accuracy: 0.6642 - val_loss: 0.9915 - val_accuracy: 0.6357 - lr: 1.0000e-04 CPU times: user 6h 8min 53s, sys: 20min 55s, total: 6h 29min 49s Wall time: 1h 3min 2s
# Serialize the model architecture (not the weights) to disk as JSON so it
# can be reloaded later with model_from_json.
architecture_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(architecture_json)