Commit 845aac2c authored by Ishankha K.C

update main.py and write script to retrieve live feed from cam module

parent ca60fa65
 import json
 import cv2
 import numpy as np
+import logging
 from fastapi import FastAPI, WebSocket
 from keras.models import load_model
 from keras.utils import img_to_array
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Initialize the FastAPI app
 app = FastAPI()
@@ -77,10 +81,13 @@ async def websocket_endpoint(websocket: WebSocket):
                     }
                 })
+            logging.info(f"Detected emotions: {emotions}")
             # Send the emotion predictions back to the client
             await websocket.send_text(json.dumps(emotions))
     except Exception as e:
+        logging.error(f"An error occurred: {e}")
         await websocket.close()
 
 if __name__ == "__main__":
...
import asyncio
import json
import traceback

import cv2
import numpy as np
import logging
from fastapi import FastAPI, WebSocket
from keras.models import load_model
from keras.utils import img_to_array
import urllib.request

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Initialize the FastAPI app
app = FastAPI()

# Load the model
emotion_model = load_model('emotion_model.keras')

# Load class indices used during training
with open('class_indices.json', 'r') as f:
    class_indices = json.load(f)

# Convert indices back to labels
emotion_labels = [None] * len(class_indices)
for class_name, index in class_indices.items():
    emotion_labels[index] = class_name
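
# For illustration only (hypothetical contents, not part of this commit):
# if training used Keras' flow_from_directory, class_indices.json might look like
#   {"angry": 0, "disgust": 1, "fear": 2, "happy": 3, "neutral": 4, "sad": 5, "surprise": 6}
# in which case the loop above yields
#   emotion_labels == ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']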
# Load the haarcascade file for face detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Replace with your IP camera stream URL
url = 'http://192.168.1.7/cam-hi.jpg'


@app.websocket("/ws/emotion")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    try:
        while True:
            try:
                # Capture a frame from the IP camera stream
                img_resp = urllib.request.urlopen(url)
                imgnp = np.array(bytearray(img_resp.read()), dtype=np.uint8)
                frame = cv2.imdecode(imgnp, cv2.IMREAD_COLOR)

                # Flip the image vertically and horizontally (flipCode=-1)
                flipped_frame = cv2.flip(frame, -1)

                # Convert color to grayscale
                gray = cv2.cvtColor(flipped_frame, cv2.COLOR_BGR2GRAY)

                # Perform face detection
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)

                emotions = []
                for (x, y, w, h) in faces:
                    # Extract the grayscale face ROI
                    face_gray = gray[y:y + h, x:x + w]

                    # Resize the image to 48x48 for the model
                    face_gray = cv2.resize(face_gray, (48, 48))

                    # Convert the image to an array
                    img = img_to_array(face_gray)

                    # Reshape the image into the batch format the model expects
                    img = np.expand_dims(img, axis=0)

                    # Normalize the image
                    img /= 255

                    # Get the prediction from the model
                    prediction = emotion_model.predict(img)

                    # Get the index of the highest predicted value
                    max_index = np.argmax(prediction[0])

                    # Get the label corresponding to the prediction
                    emotion_prediction = emotion_labels[max_index]

                    emotions.append({
                        "emotion": emotion_prediction,
                        "bounding_box": {
                            "x": int(x),
                            "y": int(y),
                            "width": int(w),
                            "height": int(h)
                        }
                    })

                logging.info(f"Detected emotions: {emotions}")

                # Send the emotion predictions back to the client
                await websocket.send_text(json.dumps(emotions))
            except Exception as e:
                logging.error(f"Error processing frame: {e}")
                traceback.print_exc()
                # Stop streaming on a frame error; the socket is closed once, in `finally`
                break

            await asyncio.sleep(1)
    except Exception as e:
        logging.error(f"WebSocket error: {e}")
        traceback.print_exc()
    finally:
        logging.info("Closing WebSocket connection")
        await websocket.close()


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
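
For reference, a minimal client sketch for exercising the endpoint above. It is not part of this commit; it assumes the server is running on localhost:8000 and that the third-party websockets package is installed (pip install websockets).

import asyncio
import json

import websockets  # third-party client library: pip install websockets


async def main():
    # Hypothetical address; adjust host/port to wherever the FastAPI app runs
    async with websockets.connect("ws://localhost:8000/ws/emotion") as ws:
        while True:
            detections = json.loads(await ws.recv())
            for d in detections:
                box = d["bounding_box"]
                print(f'{d["emotion"]} at ({box["x"]}, {box["y"]}), '
                      f'{box["width"]}x{box["height"]}')


asyncio.run(main())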
fastapi==0.112.1
uvicorn==0.30.6
absl-py==2.1.0
appdirs==1.4.4
astunparse==1.6.3
atomicwrites==1.4.0
backports.weakref==1.0.post1
bkcharts==0.2
black==19.10b0
brotlipy==0.7.0
certifi==2021.10.8
comtypes==1.1.10
constantly==15.1.0
cssselect==1.1.0
cytoolz==0.11.0
daal==2021.4.0
daal4py==2021.5.0
datashape
et-xmlfile==1.1.0
flatbuffers==24.3.25
fonttools==4.25.0
gast==0.5.4
google-pasta==0.2.0
graphviz==0.20.3
grpcio
h5py==3.11.0
inflection==0.5.1
keras==3.2.1
markdown-it-py==3.0.0
mccabe==0.6.1
mdurl==0.1.2
ml-dtypes==0.3.2
mpmath==1.2.1
munkres==1.1.4
mypy-extensions==0.4.3
namex==0.0.8
numpy==1.23.4
opencv-python==4.9.0.80
opt-einsum==3.3.0
optree==0.11.0
pathspec==0.7.0
patsy==0.5.2
pep8==1.7.1
Pillow==9.0.1
protobuf==4.25.3
pyasn1-modules==0.2.8
pycosat==0.6.3
pycurl==7.44.1
PyDispatcher==2.0.5
pydot==2.0.0
Pygments==2.17.2
pyls-spyder==0.4.0
pyparsing==3.1.2
pyreadline==2.1
pytest==7.1.1
python-lsp-jsonrpc==1.0.0
python-lsp-server==1.2.4
pytz==2021.3
pywin32==302
PyYAML==6.0
queuelib==1.5.0
rich==13.7.1
scikit-learn-intelex==2021.20220215.102710
scipy==1.13.0
sip==4.19.13
statsmodels==0.13.2
tables==3.6.1
tabulate==0.8.9
tbb==2021.12.0
tensorboard==2.16.2
tensorboard-data-server==0.7.2
tensorflow==2.16.1
tensorflow-intel==2.16.1
tensorflow-io-gcs-filesystem==0.31.0
termcolor==2.4.0
typing_extensions==4.11.0
webencodings==0.5.1
win-unicode-console==0.5
wincertstore==0.2
xlwings==0.24.9
zict==2.0.0
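
Assuming the pin list above is saved as requirements.txt, the environment installs with the standard pip command:

    pip install -r requirements.txt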
import cv2
import urllib.request
import numpy as np

# Replace the URL with the IP camera's snapshot URL
url = 'http://192.168.1.7/cam-hi.jpg'

cv2.namedWindow("live Cam Testing", cv2.WINDOW_AUTOSIZE)

# Read and display video frames
# (frames are fetched one JPEG at a time via urllib, so no VideoCapture is needed)
while True:
    # Fetch a single JPEG snapshot from the camera
    img_resp = urllib.request.urlopen(url)
    imgnp = np.array(bytearray(img_resp.read()), dtype=np.uint8)
    im = cv2.imdecode(imgnp, -1)

    # Flip the image vertically and horizontally (flipCode=-1)
    flipped_im = cv2.flip(im, -1)

    cv2.imshow('live Cam Testing', flipped_im)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break

cv2.destroyAllWindows()
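
This script polls the camera's still-image endpoint one JPEG at a time. If the camera firmware also exposes a continuous MJPEG stream, cv2.VideoCapture can read it directly; a sketch under that assumption (the stream URL below is hypothetical and depends on the firmware):

import cv2

# Hypothetical MJPEG endpoint; path and port depend on the camera firmware
stream_url = 'http://192.168.1.7:81/stream'

cap = cv2.VideoCapture(stream_url)
if not cap.isOpened():
    raise SystemExit("Failed to open the IP camera stream")

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Same flip as the snapshot-based script above
    cv2.imshow('live Cam Testing (stream)', cv2.flip(frame, -1))
    if cv2.waitKey(5) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()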