Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
2023-286
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
1
Merge Requests
1
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
2023-286
2023-286
Commits
383f8035
Commit
383f8035
authored
May 27, 2023
by
Fernando W.H.K - IT20116606
💻
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
.py file added successfully
parent
0c887a48
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
360 additions
and
0 deletions
+360
-0
facial-expression-recognition-cnn.py
facial-expression-recognition-cnn.py
+360
-0
No files found.
facial-expression-recognition-cnn.py
0 → 100644
View file @
383f8035
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#import necessary libraries
import
numpy
as
np
import
os
import
matplotlib.pyplot
as
plt
import
tensorflow
as
tf
import
keras
import
cv2
from
sklearn.metrics
import
classification_report
,
confusion_matrix
from
keras.models
import
Sequential
from
keras.layers
import
Conv2D
,
MaxPooling2D
,
Flatten
,
Dense
,
Dropout
from
keras.preprocessing.image
import
ImageDataGenerator
from
tensorflow.keras.callbacks
import
EarlyStopping
,
ModelCheckpoint
,
ReduceLROnPlateau
# In[2]:
# Locate the FER-2013 dataset splits on disk.
# BUG FIX: the original called os.path.join(data_path, '/train') — a leading
# slash makes the second component absolute, so os.path.join silently
# discarded data_path and returned '/train'. Use relative names instead.
data_path = './facial_dataset'
train_dir = os.path.join(data_path, 'train')
test_dir = os.path.join(data_path, 'test')
# In[3]:
img_shape
=
48
batch_size
=
64
train_data_path
=
'./facial_dataset/train/'
test_data_path
=
'./facial_dataset/test/'
# In[7]:
# Training-image preprocessor: scales pixels into [0, 1] and applies light
# geometric augmentation (rotation, zoom, shifts, horizontal mirroring).
train_datagen = ImageDataGenerator(
    rescale=1 / 255.,
    rotation_range=10,
    zoom_range=0.2,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest',
)
# Test-image preprocessor: rescaling only — no augmentation at eval time.
test_datagen = ImageDataGenerator(
    rescale=1 / 255.,
)
# Stream shuffled, augmented training batches straight from the directory
# tree (one sub-folder per emotion class, one-hot labels).
train_data = train_datagen.flow_from_directory(
    train_data_path,
    class_mode="categorical",
    target_size=(img_shape, img_shape),
    color_mode='rgb',
    shuffle=True,
    batch_size=batch_size,
    subset='training',
)
# Test batches are NOT shuffled so predictions later line up with
# test_data.labels for the confusion matrix / classification report.
test_data = test_datagen.flow_from_directory(
    test_data_path,
    class_mode="categorical",
    target_size=(img_shape, img_shape),
    color_mode="rgb",
    shuffle=False,
    batch_size=batch_size,
)
# In[8]:
from keras.layers import BatchNormalization


def Create_CNN_Model():
    """Assemble the 7-class facial-expression CNN.

    Architecture: three convolutional stages — each is
    conv -> BN -> conv(padding='same') -> BN -> max-pool -> dropout —
    followed by a dense head that narrows 1024 -> 32 and ends in a
    7-way softmax. Layer sequence and hyper-parameters are unchanged
    from the original hand-unrolled version.
    """
    model = Sequential()

    # Stage 1 — the first conv also pins the (img_shape, img_shape, 3) input.
    model.add(Conv2D(32, (3, 3), activation='relu',
                     input_shape=(img_shape, img_shape, 3)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Dropout(0.25))

    # Stages 2 and 3 repeat the same layout with growing filter counts.
    for narrow, wide in ((64, 128), (128, 256)):
        model.add(Conv2D(narrow, (3, 3), activation='relu'))
        model.add(BatchNormalization())
        model.add(Conv2D(wide, (3, 3), activation='relu', padding='same'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
        model.add(Dropout(0.25))

    # Classifier head: progressively narrower fully-connected layers,
    # each regularised with batch-norm and 25% dropout.
    model.add(Flatten())
    for units in (1024, 512, 256, 128, 64, 32):
        model.add(Dense(units, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.25))

    # Output layer: one probability per emotion class.
    model.add(Dense(7, activation='softmax'))

    return model
# In[9]:
# Instantiate the network, show its layer summary, and configure training.
CNN_Model = Create_CNN_Model()
CNN_Model.summary()

# Categorical cross-entropy matches the one-hot labels produced by the
# generators (class_mode="categorical").
CNN_Model.compile(
    optimizer="adam",
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# In[10]:
# Create Callback Checkpoint: keep only the weights with the best
# validation accuracy observed so far.
checkpoint_path = "CNN_Model_Checkpoint"
Checkpoint = ModelCheckpoint(
    checkpoint_path,
    monitor="val_accuracy",
    save_best_only=True,
)
# Stop training after 15 epochs without a val_accuracy improvement and
# roll the model back to its best weights.
Early_Stopping = EarlyStopping(
    monitor='val_accuracy',
    patience=15,
    restore_best_weights=True,
    verbose=1,
)
# Multiply the learning rate by 0.2 whenever val_loss plateaus for 2 epochs
# (helps against overfitting / training stalls). Uses the ReduceLROnPlateau
# name already imported from tensorflow.keras.callbacks at the top of the
# file — identical class to tf.keras.callbacks.ReduceLROnPlateau.
Reducing_LR = ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.2,
    patience=2,
    verbose=1,
)
# BUG FIX: the ModelCheckpoint above ("Checkpoint") was constructed but
# never registered with fit(), so no checkpoints were ever written during
# training. Include it alongside the other callbacks.
callbacks = [Early_Stopping, Reducing_LR, Checkpoint]
# Number of whole batches per epoch for each split (floor division drops
# any final partial batch).
steps_per_epoch = train_data.n // train_data.batch_size
validation_steps = test_data.n // test_data.batch_size
# In[12]:
# Train for up to 80 epochs (early stopping usually ends sooner).
# BUG FIX: the original also passed batch_size=batch_size here, but the
# batch size is already fixed by the directory iterators, and tf.keras
# rejects/ignores `batch_size` for generator-style inputs.
CNN_history = CNN_Model.fit(
    train_data,
    validation_data=test_data,
    epochs=80,
    callbacks=callbacks,
    steps_per_epoch=steps_per_epoch,
    validation_steps=validation_steps,
)
# In[13]:
# Final loss/accuracy on the held-out test split.
CNN_Score = CNN_Model.evaluate(test_data)

print(" Test Loss: {:.5f}".format(CNN_Score[0]))
# NOTE(review): the scraped source split this format string around the '%';
# reconstructed as "... {:.2f} %" — confirm against the original notebook.
print("Test Accuracy: {:.2f} %".format(CNN_Score[1] * 100))
# In[14]:
# Same metrics on the training split, to gauge the train/test gap.
CNN_Score = CNN_Model.evaluate(train_data)

print(" Train Loss: {:.5f}".format(CNN_Score[0]))
# NOTE(review): format string reconstructed the same way as the test-split
# print above — confirm against the original notebook.
print("Train Accuracy: {:.2f} %".format(CNN_Score[1] * 100))
# In[15]:
def plot_curves(history):
    """Plot training/validation loss and accuracy side by side.

    `history` is the object returned by Model.fit(); only its
    .history dict (keys "loss", "val_loss", "accuracy", "val_accuracy")
    is read.
    """
    hist = history.history
    epochs = range(len(hist["loss"]))

    plt.figure(figsize=(15, 5))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(epochs, hist["loss"], label="training_loss")
    plt.plot(epochs, hist["val_loss"], label="val_loss")
    plt.title("Loss")
    plt.xlabel("epochs")
    plt.legend()

    # Right panel: accuracy curves.
    plt.subplot(1, 2, 2)
    plt.plot(epochs, hist["accuracy"], label="training_accuracy")
    plt.plot(epochs, hist["val_accuracy"], label="val_accuracy")
    plt.title("Accuracy")
    plt.xlabel("epochs")
    plt.legend()
# In[16]:
plot_curves(CNN_history)

# In[17]:
# Predict class probabilities over the (unshuffled) test set, then collapse
# each softmax row to the index of its most probable class.
probabilities = CNN_Model.predict(test_data)
CNN_Predictions = np.argmax(probabilities, axis=1)
# In[18]:
# Inspect the class-name -> integer-index mapping used by the generator.
test_data.class_indices

# In[19]:
import seaborn as sns
from sklearn.metrics import confusion_matrix

# Heatmap of true vs. predicted labels on the test split.
fig, ax = plt.subplots(figsize=(15, 10))
cm = confusion_matrix(test_data.labels, CNN_Predictions)
sns.heatmap(cm, annot=True, fmt='g', ax=ax)

axis_label_style = dict(fontsize=15, fontweight='bold')
ax.set_xlabel('Predicted labels', **axis_label_style)
ax.set_ylabel('True labels', **axis_label_style)
ax.set_title('CNN Confusion Matrix', fontsize=20, fontweight='bold')
# In[20]:
# Per-class precision / recall / F1 on the test split.
print('Classification report:')
print(classification_report(test_data.labels, CNN_Predictions))
# In[21]:
# Human-readable class names; alphabetical, which presumably matches
# flow_from_directory's alphabetical class ordering — verify against
# test_data.class_indices above.
Emotion_Classes = ['Angry', 'Disgust', 'Fear', 'Happy',
                   'Neutral', 'Sad', 'Surprise']
# In[22]:
# A second, SHUFFLED test iterator so the sample grid below shows a
# different mix of classes each run (the main test_data stays unshuffled).
test_preprocessor = ImageDataGenerator(
    rescale=1 / 255.,
)

test_generator = test_preprocessor.flow_from_directory(
    test_data_path,
    class_mode="categorical",
    target_size=(img_shape, img_shape),
    color_mode="rgb",
    shuffle=True,
    batch_size=batch_size,
)
# In[23]:
# Display 10 random test pictures with their true and predicted labels.
Random_batch = np.random.randint(0, len(test_generator) - 1)
Random_Img_Index = np.random.randint(0, batch_size - 1, 10)

# PERF FIX: fetch the chosen batch once — the original indexed
# test_generator[Random_batch] inside the loop, re-reading and
# re-decoding the whole batch from disk for every subplot.
batch_images, batch_labels = test_generator[Random_batch]

fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(25, 10),
                         subplot_kw={'xticks': [], 'yticks': []})

for i, ax in enumerate(axes.flat):
    Random_Img = batch_images[Random_Img_Index[i]]
    Random_Img_Label = np.argmax(batch_labels[Random_Img_Index[i]])
    Model_Prediction = np.argmax(
        CNN_Model.predict(tf.expand_dims(Random_Img, axis=0), verbose=0))

    ax.imshow(Random_Img)
    # Green title when the prediction matches the true label, red otherwise.
    if Emotion_Classes[Random_Img_Label] == Emotion_Classes[Model_Prediction]:
        color = "green"
    else:
        color = "red"
    ax.set_title(
        f"True: {Emotion_Classes[Random_Img_Label]}\nPredicted: {Emotion_Classes[Model_Prediction]}",
        color=color)

# BUG FIX: tight_layout() must run BEFORE show() to affect the rendered
# figure; the original called it after show(), making it a no-op.
plt.tight_layout()
plt.show()
# In[24]:
# Persist the trained network (HDF5 format, inferred from the .h5 suffix).
CNN_Model.save("Facial_Expressions.h5")
# In[25]:
from IPython.display import FileLink

# Render a clickable download link for the saved model in notebook UIs.
FileLink("Facial_Expressions.h5")
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment