Commit 3b6c555b authored by Shehan Liyanage's avatar Shehan Liyanage

add all changes

parent dd70590a
File added
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or using Docker, you probably want to ignore it.
Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# dotenv
.env
.env.*.local
# virtualenv
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyderworkspace
# Rope project settings
.ropeproject
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
# Note: Comment this out if you have project-level dependencies in a `.python-version` file.
.python-version
# Pylint
pylint.log
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
from flask import Flask, request, jsonify
import base64
import cv2
import numpy as np
import mediapipe as mp
import tensorflow as tf
import logging
app = Flask(__name__)
# Load the trained sign-language classifier (Keras model serialized as HDF5).
model = tf.keras.models.load_model('./finalModel.h5')
# MediaPipe Holistic: pose + face + both hands landmark detection.
mp_holistic = mp.solutions.holistic
holistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)
# Module-level rolling state shared across requests.
# NOTE(review): mutable module globals are not safe under a multi-worker /
# multi-threaded WSGI deployment — confirm this runs single-process.
sequence = []      # last keypoint vectors, trimmed to 50 frames in process_frame
sentence = []      # recognized words, trimmed to the last 5
predictions = []   # history of argmax class indices
threshold = 0.7    # minimum softmax confidence to accept a prediction
# Class labels, indexed by the model's output argmax (Sinhala words).
actions = ["haloo", "dannwa", "mama", "obata", "puluwan", "suba", "udaasanak"]
def mediapipe_detection(image, model):
    """Run a MediaPipe model on a BGR frame.

    Converts the frame to RGB (MediaPipe's expected order), runs the model,
    and converts back, returning ``(bgr_frame, results)``.
    """
    rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Mark the buffer read-only while MediaPipe processes it, then restore.
    rgb_frame.flags.writeable = False
    results = model.process(rgb_frame)
    rgb_frame.flags.writeable = True
    bgr_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
    return bgr_frame, results
def extract_keypoints(results):
    """Flatten MediaPipe Holistic landmarks into one 1662-element vector.

    Concatenates pose (33*4: x, y, z, visibility), face (468*3), left hand
    (21*3) and right hand (21*3). Any group missing from ``results`` is
    replaced by a zero block of the matching size.
    """
    def _block(group, to_row, size):
        # Zero-fill absent landmark groups so the output length is constant.
        if not group:
            return np.zeros(size)
        return np.array([to_row(lm) for lm in group.landmark]).flatten()

    pose = _block(results.pose_landmarks,
                  lambda lm: [lm.x, lm.y, lm.z, lm.visibility], 33 * 4)
    face = _block(results.face_landmarks,
                  lambda lm: [lm.x, lm.y, lm.z], 468 * 3)
    lh = _block(results.left_hand_landmarks,
                lambda lm: [lm.x, lm.y, lm.z], 21 * 3)
    rh = _block(results.right_hand_landmarks,
                lambda lm: [lm.x, lm.y, lm.z], 21 * 3)
    return np.concatenate([pose, face, lh, rh])
@app.route('/endpoint', methods=['POST'])
def process_frame():
    """Accept a base64-encoded video frame and run sign-language inference.

    Expects JSON ``{"frame": "<base64 image>"}``. Appends the frame's
    keypoints to a rolling 50-frame window; once full, predicts a word and,
    when the last 25 predictions agree with sufficient confidence, appends
    it to the rolling sentence.

    Returns:
        200 with ``{"message", "sentence"}`` on success,
        500 with the error message on any failure.
    """
    # sequence/sentence are REBOUND below (slice-trimming), so without this
    # declaration Python would treat them as locals and the very first
    # sequence.append(...) would raise UnboundLocalError.
    global sequence, sentence, predictions
    try:
        data = request.json
        frame_data = data['frame']
        # Repair base64 padding that is commonly stripped in transit.
        frame_data += "=" * ((4 - len(frame_data) % 4) % 4)
        decoded_data = base64.b64decode(frame_data)
        np_data = np.frombuffer(decoded_data, dtype=np.uint8)
        frame = cv2.imdecode(np_data, cv2.IMREAD_COLOR)
        if frame is None:
            raise ValueError("Failed to decode image")
        # Extract holistic keypoints and keep only the last 50 frames.
        image, results = mediapipe_detection(frame, holistic)
        keypoints = extract_keypoints(results)
        sequence.append(keypoints)
        sequence = sequence[-50:]
        if len(sequence) == 50:
            res = model.predict(np.expand_dims(sequence, axis=0))[0]
            predictions.append(np.argmax(res))
            # Accept a word only when the last 25 predictions are unanimous
            # AND match the current argmax. (The original compared only
            # np.unique(...)[0] — the lowest class id — which passes even
            # when recent predictions disagree.)
            recent = np.unique(predictions[-25:])
            if len(recent) == 1 and recent[0] == np.argmax(res):
                if res[np.argmax(res)] > threshold:
                    # Avoid repeating the same word back-to-back.
                    if not sentence or actions[np.argmax(res)] != sentence[-1]:
                        sentence.append(actions[np.argmax(res)])
            if len(sentence) > 5:
                sentence = sentence[-5:]
            # Console trace for debugging during development.
            print(f"Predicted: {actions[np.argmax(res)]}")
            print(f"Sentence: {sentence}")
        return jsonify({'message': 'Frame processed', 'sentence': sentence}), 200
    except Exception as e:
        logging.exception("Exception in /endpoint")
        return jsonify({'message': 'Error processing frame', 'error': str(e)}), 500
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True, port=8000)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.