Commit 652b1203 authored by Chamod Ishankha's avatar Chamod Ishankha

Initial commit & complete AI model and FastAPIs

parents
*.keras filter=lfs diff=lfs merge=lfs -text
.keras
\ No newline at end of file
{"fear": 0, "happy": 1, "neutral": 2, "sad": 3, "surprise": 4}
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Real-time Emotion Detection</title>
</head>
<body>
    <h1>Real-time Emotion Detection</h1>
    <video id="video" width="640" height="480" autoplay></video>
    <canvas id="canvas" width="640" height="480" style="display: none;"></canvas>
    <h2 id="emotionData">Emotion: </h2>
    <script>
        const video = document.getElementById('video');
        const canvas = document.getElementById('canvas');
        const emotionData = document.getElementById('emotionData');
        const ctx = canvas.getContext('2d');

        // Get access to the webcam.
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            navigator.mediaDevices.getUserMedia({ video: true })
                .then(function(stream) {
                    video.srcObject = stream;
                    video.play();
                })
                .catch(function(error) {
                    // FIX: the original left camera failures (permission denied,
                    // no device) as unhandled promise rejections.
                    console.error('Error accessing camera:', error);
                });
        }

        const ws = new WebSocket('ws://localhost:8000/ws/emotion');

        // Server replies with a JSON array of {emotion, bounding_box} objects.
        ws.onmessage = function(event) {
            const emotions = JSON.parse(event.data);
            console.log('Emotions:', emotions[0]?.emotion);
            emotionData.innerText = 'Emotions:' + emotions[0]?.emotion;
        };

        // Capture and send one JPEG frame roughly every 100 ms while playing.
        video.addEventListener('play', () => {
            const processVideo = () => {
                if (video.paused || video.ended) {
                    return;
                }
                ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
                canvas.toBlob(blob => {
                    // FIX: guard the send — ws.send() throws InvalidStateError
                    // while the socket is still CONNECTING or after it closes.
                    if (ws.readyState === WebSocket.OPEN) {
                        ws.send(blob);
                    }
                }, 'image/jpeg');
                setTimeout(processVideo, 100);
            };
            processVideo();
        });
    </script>
</body>
</html>
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Live Video Streaming</title>
</head>
<body>
    <video id="video" width="640" height="480" autoplay></video>
    <script>
        const video = document.getElementById('video');
        const canvas = document.createElement('canvas');
        const ctx = canvas.getContext('2d');

        // Access the user's camera.
        navigator.mediaDevices.getUserMedia({ video: true })
            .then((stream) => {
                video.srcObject = stream;
            })
            .catch((error) => {
                console.error('Error accessing camera:', error);
            });

        // Open WebSocket connection to the backend.
        const socket = new WebSocket('ws://localhost:8080/api/v1/baby-care/emotional/video-process');
        socket.binaryType = 'blob'; // Set binary type for sending binary data arraybuffer

        socket.onopen = () => {
            console.log('WebSocket connection opened');
            // Send one JPEG frame roughly every 100 ms while the video plays.
            video.addEventListener('play', () => {
                // FIX: a createElement canvas defaults to 300x150, so every
                // frame was silently downscaled; size it to the actual stream.
                canvas.width = video.videoWidth || 640;
                canvas.height = video.videoHeight || 480;
                const processVideo = () => {
                    if (video.paused || video.ended) {
                        return;
                    }
                    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
                    canvas.toBlob(blob => {
                        // FIX: guard against a socket that closed mid-stream;
                        // send() throws once readyState leaves OPEN.
                        if (socket.readyState === WebSocket.OPEN) {
                            socket.send(blob);
                        }
                    }, 'image/jpeg');
                    setTimeout(processVideo, 100);
                };
                processVideo();
            });
        };

        socket.onclose = (event) => {
            console.log('WebSocket connection closed with code:', event.code);
            console.log('Reason:', event.reason);
        };
        socket.onerror = (error) => {
            console.error('WebSocket error:', error);
        };

        // NOTE(review): removed the unused dataURItoBlob helper — frames are
        // produced directly by canvas.toBlob above, so it was dead code.
    </script>
</body>
</html>
File added
{"accuracy": [0.2976909875869751, 0.375, 0.39547497034072876, 0.453125, 0.4639185965061188, 0.59375, 0.4998708665370941, 0.609375, 0.5229092240333557, 0.4375, 0.5442946553230286, 0.546875, 0.5589648485183716, 0.515625, 0.567539632320404, 0.5625, 0.5845343470573425, 0.5625, 0.595640242099762, 0.625, 0.6085025072097778, 0.640625, 0.6178521513938904, 0.640625, 0.628286600112915, 0.703125, 0.6353633999824524, 0.703125, 0.6455911993980408, 0.71875, 0.6548892259597778, 0.546875, 0.6587633490562439, 0.625, 0.6655818819999695, 0.6875, 0.6753964424133301, 0.703125, 0.6831448078155518, 0.765625, 0.6908931136131287, 0.734375, 0.6958004236221313, 0.671875, 0.7059248685836792, 0.703125, 0.7119169235229492, 0.625, 0.7193036675453186, 0.703125, 0.729376494884491, 0.6875, 0.7390877604484558, 0.703125, 0.7389844655990601, 0.75, 0.7525182366371155, 0.75, 0.7589751482009888, 0.78125, 0.764708936214447, 0.78125, 0.7712692022323608, 0.78125, 0.7799989581108093, 0.6875, 0.7864042520523071, 0.765625, 0.7930161952972412, 0.84375, 0.796270489692688, 0.8125, 0.8103724122047424, 0.796875, 0.813471794128418, 0.859375, 0.8185856938362122, 0.859375, 0.8265406489372253, 0.8125, 0.8329975605010986, 0.765625, 0.8328425884246826, 0.875, 0.8410041928291321, 0.8125, 0.8495273590087891, 0.78125, 0.8531948924064636, 0.84375, 0.860891580581665, 0.8125, 0.8626478910446167, 0.9375, 0.8686915636062622, 0.90625, 0.8728756904602051, 0.828125, 0.8786610960960388, 0.828125], "loss": [1.5705541372299194, 1.5174508094787598, 1.425675868988037, 1.2873256206512451, 1.306281328201294, 1.1237505674362183, 1.233146071434021, 1.0533636808395386, 1.1799224615097046, 1.385098934173584, 1.1345518827438354, 1.1147291660308838, 1.096571683883667, 1.324110507965088, 1.070943832397461, 1.1431936025619507, 1.0342344045639038, 1.2034275531768799, 1.0119702816009521, 0.9350066184997559, 0.9866832494735718, 0.9566265344619751, 0.9634737968444824, 0.9221519231796265, 0.9390735626220703, 0.9650204181671143, 0.9218453168869019, 
0.7349858283996582, 0.8994826078414917, 0.8936837315559387, 0.8829118013381958, 0.9801700115203857, 0.8669087290763855, 0.9203119874000549, 0.8541799783706665, 0.8441341519355774, 0.8285936117172241, 0.8060116767883301, 0.8134356737136841, 0.7687097787857056, 0.7937242984771729, 0.7549698352813721, 0.7823596596717834, 0.8036171197891235, 0.7634396553039551, 0.7248154878616333, 0.7442789673805237, 0.8795504570007324, 0.7267162799835205, 0.6231706142425537, 0.7054077386856079, 0.7689968943595886, 0.6901741623878479, 0.7451149225234985, 0.680387556552887, 0.7440769672393799, 0.6549450755119324, 0.4908699691295624, 0.6426704525947571, 0.5166120529174805, 0.6239100098609924, 0.536339282989502, 0.6060417294502258, 0.617926299571991, 0.5873093008995056, 0.6737983226776123, 0.5704875588417053, 0.643718957901001, 0.5526097416877747, 0.3981083929538727, 0.5420911312103271, 0.5096111297607422, 0.519573986530304, 0.5688757300376892, 0.5020158290863037, 0.3178268074989319, 0.49052736163139343, 0.40116506814956665, 0.47370514273643494, 0.4882490634918213, 0.4540075957775116, 0.4395477771759033, 0.4508621394634247, 0.35633400082588196, 0.4305827021598816, 0.596126139163971, 0.4150138795375824, 0.47679102420806885, 0.4001390039920807, 0.5011431574821472, 0.3858100473880768, 0.4814748167991638, 0.3739665150642395, 0.3376801908016205, 0.36109063029289246, 0.2875820994377136, 0.3467596769332886, 0.4400397539138794, 0.3341004252433777, 0.4914041757583618], "val_accuracy": [0.32229167222976685, 0.3636363744735718, 0.45645833015441895, 0.34545454382896423, 0.4827083349227905, 0.5090909004211426, 0.5172916650772095, 0.5272727012634277, 0.5391666889190674, 0.5090909004211426, 0.5506250262260437, 0.4909090995788574, 0.5652083158493042, 0.5636363625526428, 0.5664583444595337, 0.4727272689342499, 0.5879166722297668, 0.581818163394928, 0.5991666913032532, 0.5090909004211426, 0.6022916436195374, 0.4727272689342499, 0.6052083373069763, 0.4727272689342499, 0.6104166507720947, 0.581818163394928, 
0.6129166483879089, 0.6363636255264282, 0.6177083253860474, 0.581818163394928, 0.6193749904632568, 0.5636363625526428, 0.6235416531562805, 0.6545454263687134, 0.6335416436195374, 0.6909090876579285, 0.6287500262260437, 0.581818163394928, 0.6343749761581421, 0.6727272868156433, 0.6397916674613953, 0.5090909004211426, 0.6327083110809326, 0.6363636255264282, 0.6427083611488342, 0.5454545617103577, 0.6389583349227905, 0.6909090876579285, 0.6416666507720947, 0.6363636255264282, 0.6449999809265137, 0.5636363625526428, 0.6433333158493042, 0.7272727489471436, 0.6347916722297668, 0.6363636255264282, 0.6481249928474426, 0.7454545497894287, 0.6508333086967468, 0.6000000238418579, 0.6454166769981384, 0.7454545497894287, 0.6456249952316284, 0.6727272868156433, 0.6583333611488342, 0.6181818246841431, 0.6568750143051147, 0.581818163394928, 0.6556249856948853, 0.6181818246841431, 0.6504166722297668, 0.5272727012634277, 0.6433333158493042, 0.6181818246841431, 0.6545833349227905, 0.5636363625526428, 0.6512500047683716, 0.6181818246841431, 0.6527083516120911, 0.6363636255264282, 0.6589583158493042, 0.7090908885002136, 0.6616666913032532, 0.6181818246841431, 0.6625000238418579, 0.7818182110786438, 0.6539583206176758, 0.6181818246841431, 0.6600000262260437, 0.6727272868156433, 0.6608333587646484, 0.6909090876579285, 0.6612499952316284, 0.6727272868156433, 0.6585416793823242, 0.7636363506317139, 0.6637499928474426, 0.6000000238418579, 0.6541666388511658, 0.6727272868156433], "val_loss": [1.5343430042266846, 1.5266432762145996, 1.3304814100265503, 1.435189962387085, 1.2585406303405762, 1.2127329111099243, 1.195613980293274, 1.2213810682296753, 1.1420385837554932, 1.1847765445709229, 1.112355351448059, 1.2022355794906616, 1.07564377784729, 1.062819004058838, 1.0621610879898071, 1.243219256401062, 1.0370583534240723, 1.042972445487976, 1.0089151859283447, 1.0751644372940063, 0.992677628993988, 1.1268575191497803, 1.0001654624938965, 1.2536569833755493, 0.975214958190918, 
1.0192499160766602, 0.9671199321746826, 0.8942896127700806, 0.9592579007148743, 1.105083703994751, 0.9499025344848633, 0.9062893986701965, 0.9552563428878784, 0.8546270132064819, 0.9339199066162109, 0.7568639516830444, 0.9286920428276062, 0.9396328926086426, 0.9221140742301941, 0.9606314301490784, 0.908830463886261, 1.0448306798934937, 0.9080472588539124, 1.0469626188278198, 0.9121472239494324, 0.990936279296875, 0.9188786745071411, 0.7588252425193787, 0.9191960692405701, 0.7755096554756165, 0.9024714231491089, 1.1123439073562622, 0.9015814065933228, 0.7833319902420044, 0.9072504639625549, 0.9857335090637207, 0.9006909132003784, 0.811176598072052, 0.9015006422996521, 0.9603177309036255, 0.9080201983451843, 0.7693710923194885, 0.9176881909370422, 0.8324093818664551, 0.906954824924469, 0.7968869805335999, 0.9069080352783203, 1.109345555305481, 0.9316781759262085, 0.8779458999633789, 0.9250050783157349, 1.5164889097213745, 0.9442240595817566, 0.8470063805580139, 0.9197851419448853, 1.1143767833709717, 0.9364324808120728, 1.0861839056015015, 0.9356476068496704, 0.7916710376739502, 0.945420503616333, 0.88861083984375, 0.958299458026886, 1.0336371660232544, 0.94699627161026, 0.5913316607475281, 0.9511658549308777, 1.130181908607483, 0.9670429229736328, 0.8489187359809875, 0.9646119475364685, 0.8428248167037964, 0.9838746190071106, 1.0784193277359009, 0.9925997853279114, 0.5360360741615295, 0.9869673848152161, 1.2523311376571655, 0.983586311340332, 1.2178685665130615]}
\ No newline at end of file
import json

import cv2
import numpy as np
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from keras.models import load_model
from keras.utils import img_to_array
# Initialize the FastAPI app.
app = FastAPI()

# Load the trained Keras emotion-classification model from disk.
emotion_model = load_model('emotion_model.keras')

# Load the {label: index} mapping saved during training.
# FIX: pass an explicit encoding instead of relying on the platform default.
with open('class_indices.json', 'r', encoding='utf-8') as f:
    class_indices = json.load(f)

# Invert the mapping so that emotion_labels[index] -> label name.
# Assumes indices are contiguous 0..N-1, as in the saved class_indices.json.
emotion_labels = [None] * len(class_indices)
for class_name, index in class_indices.items():
    emotion_labels[index] = class_name

# Haar cascade for frontal-face detection (bundled with OpenCV).
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
@app.websocket("/ws/emotion")
async def websocket_endpoint(websocket: WebSocket):
    """Stream emotion predictions for binary image frames sent by the client.

    Protocol: the client sends one encoded (e.g. JPEG) frame per message; for
    each frame the server replies with a JSON array of
    ``{"emotion", "bounding_box"}`` objects, one per detected face (an empty
    array when no face is found or the frame cannot be decoded).
    """
    await websocket.accept()
    try:
        while True:
            # Receive one encoded image frame from the WebSocket.
            data = await websocket.receive_bytes()
            # Decode the raw bytes into a BGR image.
            nparr = np.frombuffer(data, np.uint8)
            frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            if frame is None:
                # FIX: imdecode returns None for corrupt/undecodable input;
                # the original crashed in cvtColor. Report no detections.
                await websocket.send_text(json.dumps([]))
                continue
            # Haar cascades operate on grayscale images.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            emotions = [_classify_face(gray, x, y, w, h) for (x, y, w, h) in faces]
            # Send the emotion predictions back to the client.
            await websocket.send_text(json.dumps(emotions))
    except WebSocketDisconnect:
        # FIX: a normal client disconnect was previously swallowed by a bare
        # except and answered with close() on an already-closed socket.
        pass
    except Exception:
        # Unexpected server-side failure: close the connection explicitly.
        await websocket.close()


def _classify_face(gray, x, y, w, h):
    """Classify one face ROI of *gray* and return its prediction payload."""
    # Extract the grayscale ROI and resize to the model's 48x48 input.
    face_gray = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
    # Add a batch dimension and normalize pixel values to [0, 1].
    img = np.expand_dims(img_to_array(face_gray), axis=0) / 255.0
    prediction = emotion_model.predict(img)
    # Index of the highest-probability class maps back into emotion_labels.
    max_index = int(np.argmax(prediction[0]))
    return {
        "emotion": emotion_labels[max_index],
        "bounding_box": {
            "x": int(x),
            "y": int(y),
            "width": int(w),
            "height": int(h),
        },
    }
# Run the API with uvicorn when executed as a script (listens on all
# interfaces, port 8000 — the URL the frontend WebSocket connects to).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment