Commit 8e64eec6 authored by PiumikaAlwis

Add Facial Emotion Recognition

parent 79298611
# Default ignored files
/shelf/
/workspace.xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="jdk" jdkName="Python 3.8" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="HtmlUnknownTag" enabled="true" level="WARNING" enabled_by_default="true">
<option name="myValues">
<value>
<list size="7">
<item index="0" class="java.lang.String" itemvalue="nobr" />
<item index="1" class="java.lang.String" itemvalue="noembed" />
<item index="2" class="java.lang.String" itemvalue="comment" />
<item index="3" class="java.lang.String" itemvalue="noscript" />
<item index="4" class="java.lang.String" itemvalue="embed" />
<item index="5" class="java.lang.String" itemvalue="script" />
<item index="6" class="java.lang.String" itemvalue="from" />
</list>
</value>
</option>
<option name="myCustomValuesEnabled" value="true" />
</inspection_tool>
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="N802" />
<option value="N803" />
<option value="N806" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredIdentifiers">
<list>
<option value="object.__setitem__" />
<option value="object.__getitem__" />
<option value="model.values" />
</list>
</option>
</inspection_tool>
</profile>
</component>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" />
  <component name="PyCharmProfessionalAdvertiser">
    <option name="shown" value="true" />
  </component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/Emotion_detection_with_CNN-main.iml" filepath="$PROJECT_DIR$/.idea/Emotion_detection_with_CNN-main.iml" />
    </modules>
  </component>
</project>
\ No newline at end of file
import cv2
import os
import random
from flask import Flask, request, send_from_directory
from flask_cors import CORS, cross_origin
from emotion_detection import predict_image

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

path = os.getcwd()

# File upload and render folders, created on first run
UPLOAD_FOLDER = os.path.join(path, 'uploads')
TEMP_FOLDER = os.path.join(path, 'rendered')
if not os.path.isdir(TEMP_FOLDER):
    os.mkdir(TEMP_FOLDER)
if not os.path.isdir(UPLOAD_FOLDER):
    os.mkdir(UPLOAD_FOLDER)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['TEMP_FOLDER'] = TEMP_FOLDER

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}


def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/upload', methods=['POST'])
@cross_origin()
def image_upload():
    # Check that the POST request actually carries a file part
    if 'file' not in request.files:
        return {'data': 'No file part'}
    file = request.files['file']
    if file.filename == '':
        return {'data': 'No file selected for uploading'}
    if file and allowed_file(file.filename):
        upl_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
        file.save(upl_path)
        predicted_image, result_image = predict_image(upl_path)
        # Write the annotated image under a random name to avoid collisions
        _hash = str(random.getrandbits(128))
        result_name = 'result_' + _hash + '.jpg'
        cv2.imwrite(os.path.join(app.config['TEMP_FOLDER'], result_name), result_image)
        return send_from_directory(app.config['TEMP_FOLDER'], result_name)
    return {'data': 'Allowed file types are png, jpeg, jpg'}


@app.route('/test')
def index():
    # Simple health check
    return 'OK'


if __name__ == "__main__":
    app.run(host='127.0.0.1', port=5000, debug=True)
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Initialize image data generators with rescaling
train = ImageDataGenerator(rescale=1. / 255)
validation = ImageDataGenerator(rescale=1. / 255)

# Preprocess all training images
trainGen = train.flow_from_directory(
    'data/train',
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')

# Preprocess all validation (test) images
valGen = validation.flow_from_directory(
    'data/test',
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')

# Create the model structure
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))

cv2.ocl.setUseOpenCL(False)

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(learning_rate=0.0001, decay=1e-6),
              metrics=['accuracy'])

# Train the model (fit_generator is deprecated; fit accepts generators directly)
model_info = model.fit(
    trainGen,
    steps_per_epoch=28709 // 64,
    epochs=5,
    validation_data=valGen,
    validation_steps=7178 // 64)

# Save the model structure in a JSON file
model_json = model.to_json()
with open("emotion_model.json", "w") as json_file:
    json_file.write(model_json)

# Save the trained model weights in an .h5 file
model.save_weights('emotion_model.h5')
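
A minimal sketch, not part of the commit, for reloading the saved architecture and weights and sanity-checking them against the validation generator defined above:

from tensorflow.keras.models import model_from_json

# Rebuild the architecture from the saved JSON, then restore the weights.
with open('emotion_model.json') as f:
    reloaded = model_from_json(f.read())
reloaded.load_weights('emotion_model.h5')

# Recompile before evaluating; compile state is not stored in the JSON/weights pair.
reloaded.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
loss, acc = reloaded.evaluate(valGen, steps=7178 // 64)
print('validation accuracy: %.3f' % acc)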
import cv2
import numpy as np
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img

faceExpression = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

# Create the model by loading its JSON description
json_file = open('model/emotion_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
emotion_model = model_from_json(loaded_model_json)

# Load the trained weights
emotion_model.load_weights("model/emotion_model.h5")
print("Loaded model from disk")

face_classifier = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_default.xml')


def emotion_detection(image):
    gray = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # No faces: return empty lists so callers can iterate safely
        return [], [], image
    allfaces = []
    rects = []
    # Process every face detected in the image
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
        allfaces.append(roi_gray)
        rects.append((x, w, y, h))  # note the (x, w, y, h) ordering used below
    return rects, allfaces, image


def predict_image(img_path):
    img = cv2.imread(img_path)
    rects, faces, image = emotion_detection(img)
    i = 0
    for face in faces:
        # Scale pixel values to [0, 1] and add a batch dimension
        roi = face.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        # Emotion prediction
        emotion_prediction = emotion_model.predict(roi)
        maxindex = int(np.argmax(emotion_prediction))
        label = faceExpression[maxindex]
        # Centre the label horizontally over the face, just above its box
        label_position = (rects[i][0] + int(rects[i][1] / 2), abs(rects[i][2] - 10))
        i += 1
        cv2.putText(image, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
    image = image[:, :, ::-1]  # BGR -> RGB for PIL
    return array_to_img(image), img
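
A hypothetical usage sketch for predict_image(); sample.jpg is an assumed test file, not part of the repository:

# predict_image returns (PIL image in RGB, annotated OpenCV image in BGR).
annotated_pil, annotated_bgr = predict_image('sample.jpg')
annotated_pil.save('sample_annotated.jpg')              # save via PIL
cv2.imwrite('sample_annotated_cv.jpg', annotated_bgr)   # or via OpenCV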
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}}, {"class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}}, {"class_name": 
"Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 7, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.5.0", "backend": "tensorflow"}
\ No newline at end of file
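
A small sketch for inspecting the stored architecture above without loading TensorFlow, assuming the file is saved as model/emotion_model.json, where the detection scripts read it from:

import json

with open('model/emotion_model.json') as f:
    cfg = json.load(f)

# Print each layer's type and name from the serialized Sequential config.
for layer in cfg['config']['layers']:
    print(layer['class_name'], layer['config'].get('name'))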
import cv2
import numpy as np
from tensorflow.keras.models import model_from_json

faceExpression = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

# Create the model by loading its JSON description
json_file = open('model/emotion_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
emotion_model = model_from_json(loaded_model_json)

# Load the trained weights
emotion_model.load_weights("model/emotion_model.h5")
print("Loaded model from disk")

# Load the Haar cascade once, outside the capture loop
face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')

# Capture with the PC webcam
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect every face in the frame and draw a bounding box around each one
    num_faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in num_faces:
        cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
        # Emotion prediction
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        cv2.putText(frame, faceExpression[maxindex], (x + 5, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    cv2.imshow('Emotion Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()