Commit e0f7c98c authored by Thavananthan Sivaselvayoganathan's avatar Thavananthan Sivaselvayoganathan

Merge branch 'master' of http://gitlab.sliit.lk/2020_077/2020_077 into thavananthan_dev

parents 2912e253 793cc69c
...@@ -20,3 +20,4 @@ dataq/mask_rcnn_coco.h5 ...@@ -20,3 +20,4 @@ dataq/mask_rcnn_coco.h5
/reveng/output /reveng/output
*.pyc *.pyc
dataq/utils.zip dataq/utils.zip
/static/output
from werkzeug.utils import secure_filename
import requests
import shutil import shutil
from flask import send_file from flask import send_file
from reveng.processInput import checkCommon, processInput from reveng.processInput import checkCommon, processInput
from flask import Flask, render_template, request, redirect from flask import url_for, flash, Flask, render_template, request, redirect
import json import json
import sys import sys
import os import os
from flask_cors import CORS, cross_origin
REVENG_DIR = os.path.dirname(os.path.abspath(__file__)) REVENG_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE = os.path.join(REVENG_DIR, "\\templates") TEMPLATE = os.path.join(REVENG_DIR, "\\templates")
...@@ -12,24 +16,29 @@ STATIC = os.path.join(REVENG_DIR, "\\static") ...@@ -12,24 +16,29 @@ STATIC = os.path.join(REVENG_DIR, "\\static")
app = Flask(__name__) app = Flask(__name__)
app.config["DEBUG"] = True app.config["DEBUG"] = True
cors = CORS(app)
@app.route('/test', methods=['GET']) @app.route('/test', methods=['GET'])
def testApi(): def testApi():
return "<h1>The API is working</h1>" return "<h1>The API is working</h1>"
##DATAQ # DATAQ
# route to redirect Home page # route to redirect Home page
@app.route('/') @app.route('/')
def home(): def home():
return render_template('homePage.html') return render_template('homePage.html')
@app.route('/cam') @app.route('/cam')
def openCam(): def openCam():
os.system('python dataq\detect.py') os.system('python dataq\detect.py')
return render_template('homePage.html')
# END DATAQ
## END DATAQ
# route to redirect About Us page # route to redirect About Us page
@app.route('/about') @app.route('/about')
...@@ -37,24 +46,32 @@ def about(): ...@@ -37,24 +46,32 @@ def about():
return render_template('about.html') return render_template('about.html')
# route to redirect Sign Translation page
@app.route('/signToText')
def signToTextEngine():
return render_template('signToText.html')
# route to redirect Text Translation page # route to redirect Text Translation page
@app.route('/tts') @app.route('/tts')
def textToSignLanguage(): def textToSignEngine():
return render_template('index.html') return render_template('textToSign.html')
# route to display GIF image to the user # route to display GIF image to the user
@app.route('/tts/response/', methods=['POST']) @app.route('/tts/get/<msg>', methods=['GET', 'POST'])
def response(): def response(msg):
clearoutputfolder()
print(msg)
responseGIF = processInput(msg)
clearoutputfolder() clearoutputfolder()
message = request.get_json() return responseGIF
responseGIF = processInput(message['message'])
return send_file(responseGIF, mimetype='image/gif')
# clear the OUTPUT folder after displaying the GIF image # clear the OUTPUT folder after displaying the GIF image
def clearoutputfolder(): def clearoutputfolder():
folder = os.path.join(os.path.dirname(__file__), 'reveng\output') folder = os.path.join(os.path.dirname(__file__), 'static\output')
for filename in os.listdir(folder): for filename in os.listdir(folder):
file_path = os.path.join(folder, filename) file_path = os.path.join(folder, filename)
try: try:
...@@ -66,24 +83,12 @@ def clearoutputfolder(): ...@@ -66,24 +83,12 @@ def clearoutputfolder():
print('Failed to delete %s. Reason: %s' % (file_path, e)) print('Failed to delete %s. Reason: %s' % (file_path, e))
# route to redirect Sign Translation page
@app.route('/textToSign')
def textToSignEngine():
return render_template('textToSign.html')
# route to redirect Contact Us page # route to redirect Contact Us page
@app.route('/contact') @app.route('/contactUs')
def contactUs(): def contactUs():
return render_template('contactUs.html') return render_template('contactUs.html')
# for testing purpose
@app.route('/signToText')
def signToTextEngine():
return render_template('signToText.html')
host = "localhost" host = "localhost"
app.run(host=host, port=3000) app.run(host=host, port=3000)
......
# Standalone Flask front-end used only to serve the site's template pages.
# No translation logic lives here; every route just renders a static template.
from flask import Flask, render_template

app = Flask(__name__)


# Home page.
@app.route('/')
def index():
    return render_template('homePage.html')


# About Us page.
@app.route('/about')
def about():
    return render_template('about.html')


# SSL-to-English translator page.
@app.route('/signToText')
def signToTextEngine():
    return render_template('signToText.html')


# English-to-SSL translator page.
@app.route('/textToSign')
def textToSignEngine():
    return render_template('textToSign.html')


# POST endpoint for the translator form.
# NOTE(review): this renders the About page instead of returning a
# translation result — presumably a placeholder for testing; confirm.
@app.route('/textToSign/response/', methods=['POST'])
def response():
    return render_template('about.html')


# Contact Us page.
@app.route('/contactUs')
def contactUs():
    return render_template('contactUs.html')


if __name__ == "__main__":
    app.run(debug=True)
...@@ -17,10 +17,13 @@ from distutils.version import StrictVersion ...@@ -17,10 +17,13 @@ from distutils.version import StrictVersion
from PIL import Image from PIL import Image
from tensorflow import keras from tensorflow import keras
sys.path.append('.') sys.path.append('.')
from translation.modelUp import sign_predict #from translation.modelUp import sign_predict
from translation.trans import Translation
from object_detection.utils import label_map_util from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util from object_detection.utils import visualization_utils as vis_util
from skimage.transform import resize
#Set Base Path #Set Base Path
PATH = os.path.dirname(os.path.abspath(__file__)) PATH = os.path.dirname(os.path.abspath(__file__))
...@@ -80,9 +83,11 @@ def run_inference_for_single_image(image, graph): ...@@ -80,9 +83,11 @@ def run_inference_for_single_image(image, graph):
output_dict['detection_masks'] = output_dict['detection_masks'][0] output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict return output_dict
translation = Translation()
#initialize camera #initialize camera
cap = cv2.VideoCapture(0) cap = cv2.VideoCapture(0)
from PIL import Image
try: try:
with detection_graph.as_default(): with detection_graph.as_default():
with tf.compat.v1.Session() as sess: with tf.compat.v1.Session() as sess:
...@@ -102,40 +107,51 @@ try: ...@@ -102,40 +107,51 @@ try:
tensor_name) tensor_name)
while True: while True:
ret, image_np = cap.read() # session for detect.py
with sess.as_default():
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0) ret, image_np = cap.read()
# Actual detection. temp_image = image_np.copy()
output_dict = run_inference_for_single_image(image_np, detection_graph) # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
# Visualization of the results of a detection. image_np_expanded = np.expand_dims(image_np, axis=0)
vis_util.visualize_boxes_and_labels_on_image_array( # Actual detection.
image_np, output_dict = run_inference_for_single_image(image_np, detection_graph)
output_dict['detection_boxes'], # Visualization of the results of a detection.
output_dict['detection_classes'], vis_util.visualize_boxes_and_labels_on_image_array(
output_dict['detection_scores'], image_np,
category_index, output_dict['detection_boxes'],
instance_masks=output_dict.get('detection_masks'), output_dict['detection_classes'],
use_normalized_coordinates=True, output_dict['detection_scores'],
line_thickness=8) category_index,
instance_masks=output_dict.get('detection_masks'),
score = round(100*output_dict['detection_scores'][0]) use_normalized_coordinates=True,
#send the request to translation component here line_thickness=8)
###
# I will be sending a POST request to u. a hand picture score = round(100*output_dict['detection_scores'][0])
if score > 80: #send the request to translation component here
print(image_np_expanded.shape) ###
# sign_predict(image_np) <-- still under development # I will be sending a POST request to u. a hand picture
#waiting for the API on that component to be built print(category_index)
# end send request if score > 80:
#print(image_np_expanded.shape)
## Press Q to close the camera img = Image.fromarray(temp_image)
cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600))) #print(image_np.shape)
img.save(os.getcwd() + "\\" + "capture.jpg")
if cv2.waitKey(25) & 0xFF == ord('q'): # array_for_translation = resize(image_np,(224,224,3))
cap.release() # print(array_for_translation.shape)
cv2.destroyAllWindows() translation.sign_predict(os.getcwd() + "\\" + "capture.jpg")
break
# sign_predict(image_np) <-- still under development
#waiting for the API on that component to be built
# end send request
## Press Q to close the camera
cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
except Exception as e: except Exception as e:
print(e) print(e)
cap.release() cap.release()
\ No newline at end of file
<!-- Minimal Socket.IO test page: connects to a local server, logs any
     'image' events to the console, and offers a button that opens the
     /google route. For testing only. -->
<html>
<body>
HELLO
<img id="image">
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.3.0/socket.io.js">
</script>
<script>
// Connect to the locally running Socket.IO server.
const socket = io.connect('http://localhost:5000');
// Log image payloads pushed by the server.
// NOTE(review): the received data is never written into #image — confirm
// whether displaying it here is still intended.
socket.on('image',(data)=>{
console.log('data',data);
});
// Open the /google route in a new window.
function callGoogle(){
window.open('/google')
}
</script>
<button onclick="callGoogle()">
google
</button>
</body>
</html>
\ No newline at end of file
#from reveng.processInput import processInput #from reveng.processInput import processInput
from reveng.processSentence import processCommonSentence from reveng.processSentence import processCommonSentence
from reveng.processWord import processWord from reveng.processWord import processCommonWord
from reveng.getImages import getImagesCommonSentence from reveng.getImages import getImagesCommonSentence
from reveng.getImages import getImagesCommonWord from reveng.getImages import getImagesCommonWord
from reveng.getImages import getImagesRareWord from reveng.getImages import getImagesRareWord
......
### ###
# This file is used as a main file for the user-inputs which have same words/sentences # This file is used as a main file for the user-inputs which have same words/sentences
# appears in common.json file # appears in common.json file
# @author Amashi Bastiansz | IT17143950 # @author Amashi Bastiansz | IT17143950
# @version 1.2 # @version 1.2
# @since 2020-07-30 # @since 2020-07-30
### ###
import json import json
import os import os
from reveng.processWord import processWord #from reveng.processWord import processWord
from reveng.processSentence import processCommonSentence, processRareSentence from reveng.processSentence import processCommonSentence, processRareSentence
REVENG = os.path.dirname(os.path.abspath(__file__)) REVENG = os.path.dirname(os.path.abspath(__file__))
...@@ -19,10 +19,10 @@ jsonPath = os.path.join(REVENG, 'common.json') ...@@ -19,10 +19,10 @@ jsonPath = os.path.join(REVENG, 'common.json')
def checkInJson(message): def checkInJson(message):
### ###
# This method is used to create a GIF image for the user-input which is similar # This method is used to create a GIF image for the user-input which is similar
# with a sentence appears in common.json file # with a sentence appears in common.json file
# Also, this method is used to create a GIF image for the user-input which is similar # Also, this method is used to create a GIF image for the user-input which is similar
# with a word appears in common.json file # with a word appears in common.json file
### ###
......
### ###
# This file is used to create firbase configuration for the application # This file is used to create firbase configuration for the application
# Each application has unique firbase configuration. # Each application has unique firbase configuration.
# @author Amashi Bastiansz | IT17143950 # @author Amashi Bastiansz | IT17143950
# @version 1.0 # @version 1.0
# @since 2020-08-13 # @since 2020-08-13
### ###
import json import json
...@@ -54,3 +54,8 @@ def getStorageInstance(): ...@@ -54,3 +54,8 @@ def getStorageInstance():
### ###
return store return store
def getStorageBucket():
return bucket
...@@ -62,21 +62,29 @@ def getImagesForCommonWord(message): ...@@ -62,21 +62,29 @@ def getImagesForCommonWord(message):
# @return call to generateGIF() method with selected hand images # @return call to generateGIF() method with selected hand images
### ###
images = store.child().list_files()
sendingToGIF = []
imageRes = [] imageRes = []
images = store.child().list_files()
for i in images: for i in images:
if i.name.startswith(message) and len(i.name) > 8: for word in message:
print("common image name =" + i.name) if i.name.startswith(word) and len(i.name) > 8:
imageRes.append(i) print(i.name)
returnArr = [] print("image name =" + i.name)
imageRes.append(i)
print(imageRes)
for i in imageRes: for i in imageRes:
url = i.generate_signed_url(datetime.timedelta(300), method='GET') url = i.generate_signed_url(datetime.timedelta(300), method='GET')
response = requests.get(url) response = requests.get(url)
images = io.BytesIO(response.content) imagesfromFirebase = io.BytesIO(response.content)
img = pigm.open(images) img = pigm.open(imagesfromFirebase)
returnArr.append(img) sendingToGIF.append(img)
return returnArr print("Type of array ", type(sendingToGIF))
return generateGIF(sendingToGIF)
def getImagesForRareWord(message): def getImagesForRareWord(message):
......
### ###
# This file is used to display the created GIF to user # This file is used to display the created GIF to user
# This file is still at TESTING PHASE # This file is still at TESTING PHASE
# @author Amashi Bastiansz | IT17143950 # @author Amashi Bastiansz | IT17143950
...@@ -34,7 +34,7 @@ def allowed_file(filename): ...@@ -34,7 +34,7 @@ def allowed_file(filename):
@app.route('/', methods=['POST']) @app.route('/', methods=['POST'])
def upload_image(): def upload_image():
### ###
# This method is used upload the selected file to the exact folder # This method is used upload the selected file to the exact folder
# after checking whether the extensions are matched. # after checking whether the extensions are matched.
# @return redirect to upload.html # @return redirect to upload.html
...@@ -64,7 +64,7 @@ def display_image(filename): ...@@ -64,7 +64,7 @@ def display_image(filename):
# @return display the file in the interface # @return display the file in the interface
### ###
return redirect(url_for('static', filename='uploads/' + filename), code=301) return redirect(url_for('static', filename='uploads/' + filename), code=301)
......
### ###
# This file is used to display the created GIF to the user on the interface # This file is used to display the created GIF to the user on the interface
# Still in TESTING phase # Still in TESTING phase
# @author Amashi Bastiansz | IT17143950 # @author Amashi Bastiansz | IT17143950
# @version 1.5 # @version 1.5
# @since 2020-10-01 # @since 2020-10-01
### ###
from flask import Flask from flask import Flask
......
### ###
# This file is used to: # This file is used to:
# generate a GIF image # generate a GIF image
# send the generated GIF to the folder # send the generated GIF to the folder
# delete the GIF from cache # delete the GIF from cache
# @author Amashi Bastiansz | IT17143950 # @author Amashi Bastiansz | IT17143950
# @version 1.5 # @version 1.5
# @since 2020-10-01 # @since 2020-10-01
### ###
import imageio import imageio
...@@ -16,41 +16,62 @@ import random ...@@ -16,41 +16,62 @@ import random
import cv2 import cv2
import PIL.Image as pigm import PIL.Image as pigm
import PIL.GifImagePlugin as gifHandler import PIL.GifImagePlugin as gifHandler
from gcloud.storage.blob import Blob
from reveng.firebaseConfig import getStorageInstance, getStorageBucket
PATH = os.path.dirname(os.path.abspath(__file__)) PATH = os.path.dirname(os.path.abspath(__file__))
gifName = ''.join(random.choices(string.ascii_uppercase +
string.digits, k=15)) STATIC_PATH = os.getcwd() + "\\static"
Blob.generate_signed_url
store = getStorageInstance()
bucket = getStorageBucket()
def generateGIF(images): def generateGIF(images):
### ###
# This method is used to generate a path for the created GIF image # This method is used to generate a path for the created GIF image
# @return the generated GIF path # @return the generated GIF path
### ###
gifName = ''.join(random.choices(string.ascii_uppercase +
gifPath = os.path.join(PATH + "\\output\\" + gifName + '.gif') string.digits, k=15))
blob = ''
gifPath = ''
blob = bucket.blob("output/"+gifName + ".gif")
gifPath = STATIC_PATH + "\\output\\" + gifName + ".gif"
imageio.mimwrite(gifPath, images, duration=0.5) imageio.mimwrite(gifPath, images, duration=0.5)
print(gifPath)
return gifPath with open(gifPath, 'rb') as gifImage:
blob.upload_from_file(gifImage)
blob.make_public()
response = {
"gifName": gifName,
"gifLocalPath": gifPath,
"gifPublicUrl": blob.public_url
}
return response
def sendGIF(): def sendGIF():
### ###
# This method is used to send the generated GIF to the exact folder to store it # This method is used to send the generated GIF to the exact folder to store it
### ###
x = imageio.mimread(os.path.join(PATH + "\\output\\" + gifName + '.gif')) x = imageio.mimread(os.path.join(PATH + "\\output\\" + gifName + '.gif'))
return x return x
def deleteFromCache(): def deleteFromCache():
### ###
# This method is used to delete the generated GIF from cache after uploading it to # This method is used to delete the generated GIF from cache after uploading it to
# the exact project folder # the exact project folder
# @return call to generateGIF() method with selected hand images # @return call to generateGIF() method with selected hand images
### ###
os.remove(os.path.join(PATH + "\\output\\" + gifName + '.gif')) os.remove(os.path.join(PATH + "\\output\\" + gifName + '.gif'))
import os

# Resolve the application's static folder relative to the current working
# directory (Windows-style separator, matching the rest of the project).
path_to_static = "{}\\static".format(os.getcwd())
print(path_to_static)
\ No newline at end of file
...@@ -19,6 +19,7 @@ import PIL.Image as pigm ...@@ -19,6 +19,7 @@ import PIL.Image as pigm
from reveng.firebaseConfig import getStorageInstance from reveng.firebaseConfig import getStorageInstance
from reveng.checkCommon import checkInJson from reveng.checkCommon import checkInJson
from reveng.processSentence import processCommonSentence from reveng.processSentence import processCommonSentence
from reveng.processWord import processCommonWord
from reveng.getImages import getImagesForCommonWord, getImagesForRareWord from reveng.getImages import getImagesForCommonWord, getImagesForRareWord
from reveng.gifMaker import generateGIF from reveng.gifMaker import generateGIF
...@@ -121,3 +122,36 @@ def processInput(message): ...@@ -121,3 +122,36 @@ def processInput(message):
img = pigm.open(imagesfromFirebase) img = pigm.open(imagesfromFirebase)
sendingToGIF.append(img) sendingToGIF.append(img)
return generateGIF(sendingToGIF) return generateGIF(sendingToGIF)
else:
if checkInCommonWord(message):
return processCommonSentence(message)
else:
imageArray = []
sendingToGIF = []
images = store.child().list_files()
for i in images:
for item in message.split(" "):
if checkInCommonWord(item):
if i.name.startswith(item):
print("image name =" + i.name)
imageArray.append(i)
break
else:
for letter in list(item):
print("Length ", len(i.name))
if i.name.startswith(letter) and len(i.name) < 8:
print("image name =" + i.name)
imageArray.append(i)
break
for i in imageArray:
url = i.generate_signed_url(
datetime.timedelta(300), method='GET')
response = requests.get(url)
imagesfromFirebase = io.BytesIO(response.content)
img = pigm.open(imagesfromFirebase)
sendingToGIF.append(img)
return generateGIF(sendingToGIF)
###
# This file is used to check a user-entered sentence with json file
# and return the relevant GIF image
# @author Amashi Bastiansz | IT17143950
# @version 2.0
# @since 2020-07-15
###
import json
import os
from reveng.getImages import getImagesForCommonWord
REVENG = os.path.dirname(os.path.abspath(__file__))
def processCommonWord(message):
    ###
    # Process a user message whose words appear in common.json: split it
    # into individual words and build the GIF from the sign images that
    # match each word.
    # @return the generated GIF (result of getImagesForCommonWord)
    ###
    return getImagesForCommonWord(message.split(" "))
/* Stylesheet for the Text-to-SSL chat page: page headings, the chat log
   (#chatbox) and the input row (#userInput / #textInput / #buttonInput). */
html {
  background-color: white;
  color: black;
}

/* Page title and subtitle. */
h1 {
  color: black;
  margin-bottom: 0;
  margin-top: 0;
  text-align: center;
  font-size: 40px;
}

h3 {
  color: black;
  font-size: 20px;
  margin-top: 3px;
  text-align: center;
}

/* Centered chat log. */
#chatbox {
  background-color: white;
  margin-left: auto;
  margin-right: auto;
  width: 40%;
  margin-top: 60px;
}

/* Centered input row below the chat log. */
#userInput {
  margin-left: auto;
  margin-right: auto;
  width: 40%;
  margin-top: 60px;
}

#textInput {
  width: 87%;
  border: none;
  border-bottom: 1px solid #009688;
  font-family: monospace;
  font-size: 17px;
}

#buttonInput {
  padding: 3px;
  font-family: monospace;
  font-size: 17px;
  background-color: white;
  color: green;
  border-color: green;
  border-radius: 2px;
}

/* NOTE(review): the space before :hover makes this a descendant selector;
   it probably should be "#buttonInput:hover" to style the button itself —
   confirm before changing. */
#buttonInput :hover {
  background-color: green;
  color: white;
}

/* User messages: right-aligned with teal bubbles. */
.userText {
  color: black;
  font-family: monospace;
  font-size: 17px;
  text-align: right;
  line-height: 30px;
}

.userText span {
  background-color: #009688;
  padding: 10px;
  border-radius: 2px;
}

/* Bot messages: left-aligned bubbles. */
.botText {
  color: black;
  font-family: monospace;
  font-size: 17px;
  text-align: left;
  line-height: 30px;
}

.botText span {
  background-color: white;
  padding: 10px;
  border-radius: 2px;
  color: black;
}

/* Element pinned to the bottom-right corner of the page. */
#tidbit {
  position: absolute;
  bottom: 0;
  right: 0;
  width: 300px;
}
...@@ -65,7 +65,7 @@ ...@@ -65,7 +65,7 @@
$.ajax({ $.ajax({
url: "http://127.0.0.1:3000/tts/response/", url: "http://127.0.0.1:3000/tts/response/",
data: JSON.stringify({ message: message }), data: JSON.stringify({ message: message }),
method: "POST", method: "GET",
contentType: "application/json", contentType: "application/json",
success: function (message) { success: function (message) {
var text = $("#textInput").val(); var text = $("#textInput").val();
......
<!--
 * The file is to create a form for uploading and display files
 * For TESTING purposes
 *
 * @author Amashi Bastiansz | IT17143950
 * @version 1.0
 * @since 2020-10-01
-->
<!doctype html>
<html>

<head>
  <title>Python Flask File Upload Example</title>
</head>

<body>
  <h2>Select a file to upload</h2>
  <!-- Flash messages from Flask (e.g. validation errors raised by the
       upload route). -->
  <p>
    {% with messages = get_flashed_messages() %}
    {% if messages %}
    <ul class=flashes>
      {% for message in messages %}
      <li>{{ message }}</li>
      {% endfor %}
    </ul>
    {% endif %}
    {% endwith %}
  </p>
  <!-- Preview of the file that was just uploaded, when one exists. -->
  {% if filename %}
  <div>
    <img src="{{ url_for('display_image', filename=filename) }}">
  </div>
  {% endif %}
  <!-- Upload form: POSTs the chosen file back to the root route. -->
  <form method="post" action="/" enctype="multipart/form-data">
    <dl>
      <p>
        <input type="file" name="file" autocomplete="off" required>
      </p>
    </dl>
    <p>
      <input type="submit" value="Submit">
    </p>
  </form>
</body>

</html>
/* WhatsApp-style chat widget stylesheet used on the test chat page. */

/* Global reset. */
* {
  margin: 0;
  padding: 0;
  border: 0;
  outline: 0;
}

/* Chat frame: fixed 300x500 panel centered in the viewport with a
   wallpaper background. */
.chat {
  width: 300px;
  height: 500px;
  background-image: url("http://cdn9.staztic.com/app/a/2063/2063481/whatsapp-wallpaper-pack-hd-600519-0-s-156x156.jpg");
  background-size: cover;
  background-position: center center;
  position: absolute;
  border: 5px solid #000;
  border-radius: 20px;
  overflow: hidden;
  box-shadow: 0 0 20px #000;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  margin: auto;
  box-sizing: border-box;
}

/* Message list pinned above the 60px input bar. */
.chat .messages {
  position: absolute;
  left: 0;
  bottom: 60px;
  height: auto;
  width: 100%;
}

.chat .messages .message {
  padding: 20px 10px;
  width: 100%;
  box-sizing: border-box;
}

/* Shared bubble styling for user (.you) and bot (.bot) messages. */
.chat .messages .message .you,
.chat .messages .message .bot {
  border-radius: 10px;
  padding: 10px;
  width: 75%;
  position: relative;
}

.chat .messages .message .you img,
.chat .messages .message .bot img {
  width: 100%;
  border-radius: 10px;
  display: block;
}

/* Small "You:" / "Bot:" label floating above each bubble. */
.chat .messages .message .you:before,
.chat .messages .message .bot:before {
  padding: 5px;
  border-radius: 5px;
  position: absolute;
  top: -15px;
  font-weight: bold;
  color: #fff;
  font-size: 12px;
}

/* User bubbles: green, right-aligned. */
.chat .messages .message .you {
  background: #2ecc71;
  float: right;
}

.chat .messages .message .you:before {
  content: "You:";
  background-color: #2ecc71;
  right: 10px;
}

/* Bot bubbles: blue, left-aligned. */
.chat .messages .message .bot {
  background-color: #3498db;
  float: left;
}

.chat .messages .message .bot:before {
  content: "Bot:";
  background-color: #3498db;
  left: 10px;
}

/* Clearfix so floated bubbles don't collapse the message row. */
.chat .messages .message:after {
  content: "";
  clear: both;
  display: table;
}

/* Input bar fixed to the bottom of the chat frame. */
.chat .input {
  position: absolute;
  left: 0;
  bottom: 0;
  height: 60px;
  width: 100%;
  background: #ecf0f1;
}

.chat .input .text {
  background-color: #fff;
  border-radius: 20px;
  height: 40px;
  position: absolute;
  top: 10px;
  left: 10px;
  width: 270px;
  box-shadow: inset 0 0 5px #aaa;
  line-height: 40px;
  padding: 0 20px;
  box-sizing: border-box;
}
...@@ -69,7 +69,7 @@ ...@@ -69,7 +69,7 @@
</a> </a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown"> <div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="signToText">SSL to English Translator</a> <a class="dropdown-item" href="signToText">SSL to English Translator</a>
<a class="dropdown-item" href="textToSign">English to SSL Translator</a> <a class="dropdown-item" href="tts">English to SSL Translator</a>
</div> </div>
</li> </li>
<li class="nav-item @@contact"> <li class="nav-item @@contact">
......
...@@ -70,7 +70,7 @@ ...@@ -70,7 +70,7 @@
</a> </a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown"> <div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="signToText">SSL to English Translator</a> <a class="dropdown-item" href="signToText">SSL to English Translator</a>
<a class="dropdown-item" href="textToSign">English to SSL Translator</a> <a class="dropdown-item" href="tts">English to SSL Translator</a>
</div> </div>
</li> </li>
<li class="nav-item active"> <li class="nav-item active">
......
...@@ -70,10 +70,10 @@ ...@@ -70,10 +70,10 @@
</a> </a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown"> <div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="signToText">SSL to English Translator</a> <a class="dropdown-item" href="signToText">SSL to English Translator</a>
<a class="dropdown-item" href="textToSign">English to SSL Translator</a> <a class="dropdown-item" href="tts">English to SSL Translator</a>
</div> </div>
</li> </li>
<li class="nav-item active"> <li class="nav-item">
<a class="nav-link" href="contactUs">Contact Us</a> <a class="nav-link" href="contactUs">Contact Us</a>
</li> </li>
</ul> </ul>
...@@ -120,7 +120,7 @@ ...@@ -120,7 +120,7 @@
<div class="col-md-8"> <div class="col-md-8">
<h1 class="text-white" data-animation-out="fadeOutDown" data-delay-out="5" data-duration-in=".3" data-animation-in="fadeInUp" data-delay-in=".1">You can learn sign language by your own!</h1> <h1 class="text-white" data-animation-out="fadeOutDown" data-delay-out="5" data-duration-in=".3" data-animation-in="fadeInUp" data-delay-in=".1">You can learn sign language by your own!</h1>
<p class="text-muted mb-4" data-animation-out="fadeOutDown" data-delay-out="5" data-duration-in=".3" data-animation-in="fadeInUp" data-delay-in=".4">We are offering you a chance to learn sign language easily through our translator</p> <p class="text-muted mb-4" data-animation-out="fadeOutDown" data-delay-out="5" data-duration-in=".3" data-animation-in="fadeInUp" data-delay-in=".4">We are offering you a chance to learn sign language easily through our translator</p>
<a href="textToSign" class="btn btn-primary" data-animation-out="fadeOutDown" data-delay-out="5" data-duration-in=".3" data-animation-in="zoomIn" data-delay-in=".7">Let's Learn Sign Language</a> <a href="tts" class="btn btn-primary" data-animation-out="fadeOutDown" data-delay-out="5" data-duration-in=".3" data-animation-in="zoomIn" data-delay-in=".7">Let's Learn Sign Language</a>
</div> </div>
</div> </div>
</div> </div>
......
<!--
 * The file is to create a form for Text to SSL Translator
 * For TESTING purposes
 *
 * @author Amashi Bastiansz | IT17143950
 * @version 1.0
 * @since 2020-10-01
-->
<!DOCTYPE html>
<html>

<head>
  <link
    rel="stylesheet"
    type="text/css"
    href="{{url_for('static', filename='styles/style.css')}}"
  />
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
</head>

<body>
  <h1 style="color: black">Text to SSL Translator</h1>
  <div>
    <!-- Chat log: bot welcome message plus user messages appended by the
         AJAX success handler below. -->
    <div id="chatbox">
      <p class="botText">
        <span
          >Welcome to Text to Sign Convertor.
          <br />
          Enter the sentence you need to translate into SSL</span
        >
      </p>
    </div>
    <!-- Input row: text field + TRANSLATE button; submission is
         intercepted and sent via AJAX. -->
    <form action="" method="POST" id="form">
      <div id="userInput">
        <input
          id="textInput"
          name="msg"
          maxlength="50"
          placeholder="Enter text here"
        />
        <br /><br />
        <button id="buttonInput" type="submit">TRANSLATE</button>
      </div>
    </form>
  </div>
  <script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
  <script>
    // NOTE(review): getResponseSign() is not defined anywhere on this page —
    // confirm it is supplied by another script, otherwise the Enter-key and
    // click handlers below throw a ReferenceError.
    $("#textInput").keypress(function (e) {
      if (e.which == 13) {
        getResponseSign();
      }
    });
    $("#buttonInput").click(function () {
      getResponseSign();
    });
    // Intercept the form submit and POST the message to the translator API.
    $("#form").on("submit", function (e) {
      var message = $("#textInput").val();
      e.preventDefault();
      $.ajax({
        url: "http://localhost:3000/tts/response/",
        data: JSON.stringify({ message: message }),
        method: "POST",
        contentType: "application/json",
        success: function (message) {
          // Echo the user's text into the chat log, clear the field, and
          // scroll the input row into view.
          var text = $("#textInput").val();
          var userHtml = '<p class="userText"><span>' + text + "</span></p>";
          $("#textInput").val("");
          $("#chatbox").append(userHtml);
          document
            .getElementById("userInput")
            .scrollIntoView({ block: "start", behavior: "smooth" });
        },
      });
    });
  </script>
</body>

</html>
...@@ -39,7 +39,15 @@ ...@@ -39,7 +39,15 @@
<!--Favicon--> <!--Favicon-->
<link rel="shortcut icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon"> <link rel="shortcut icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
<link rel="icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon"> <link rel="icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
<script>
function goPython(){
$.ajax({
url: ""
}).done(function() {
alert('finished python script');;
});
}
</script>
</head> </head>
<body> <body>
...@@ -71,7 +79,7 @@ ...@@ -71,7 +79,7 @@
</a> </a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown"> <div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="signToText">SSL to English Translator</a> <a class="dropdown-item" href="signToText">SSL to English Translator</a>
<a class="dropdown-item" href="textToSign">English to SSL Translator</a> <a class="dropdown-item" href="tts">English to SSL Translator</a>
</div> </div>
</li> </li>
<li class="nav-item @@contact"> <li class="nav-item @@contact">
...@@ -107,7 +115,7 @@ ...@@ -107,7 +115,7 @@
<div class="col-12"> <div class="col-12">
<form id="cameraForm" class="justify-content-center"> <form id="cameraForm" class="justify-content-center">
<div class="text-center"> <div class="text-center">
<button type="submit" class="btn btn-primary mb-2" id="userButtonInput" style="height: 75%;">Open My Webcamera</button> <button type="submit" class="btn btn-primary mb-2" id="userButtonInput" style="height: 75%;" onclick="goPython()">Open My Webcamera</button>
</div> </div>
</form> </form>
</div> </div>
......
...@@ -39,12 +39,22 @@ ...@@ -39,12 +39,22 @@
<link rel="shortcut icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon"> <link rel="shortcut icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
<link rel="icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon"> <link rel="icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
<link rel="stylesheet" href="static/styles/chatbot-test.css"/>
<script>window.console = window.console || function(t) {};</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/prefixfree/1.0.7/prefixfree.min.js"></script>
<script>
if (document.location.search.match(/type=embed/gi)) {
window.parent.postMessage("resize", "*");
}
</script>
<script src="chrome-extension://mooikfkahbdckldjjndioackbalphokd/assets/prompt.js"></script>
</head> </head>
<body> <body>
<!-- header --> <!-- header -->
<header class="fixed-top"> <!--<header class="fixed-top">
<!-- navbar -->
<div class="navigation w-100"> <div class="navigation w-100">
<div class="container"> <div class="container">
<nav class="navbar navbar-expand-lg navbar-dark p-0"> <nav class="navbar navbar-expand-lg navbar-dark p-0">
...@@ -69,7 +79,7 @@ ...@@ -69,7 +79,7 @@
</a> </a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown"> <div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="signToText">SSL to English Translator</a> <a class="dropdown-item" href="signToText">SSL to English Translator</a>
<a class="dropdown-item" href="textToSign">English to SSL Translator</a> <a class="dropdown-item" href="tts">English to SSL Translator</a>
</div> </div>
</li> </li>
<li class="nav-item @@contact"> <li class="nav-item @@contact">
...@@ -80,58 +90,33 @@ ...@@ -80,58 +90,33 @@
</nav> </nav>
</div> </div>
</div> </div>
</header> </header>-->
<!-- /header --> <!-- /header -->
<!-- page title -->
<section class="page-title-section overlay" data-background="static/images/backgrounds/page-title.jpg">
<div class="container">
<div class="row">
<div class="col-md-8">
<ul class="list-inline custom-breadcrumb">
<li class="list-inline-item"><a class="h2 text-primary font-secondary" href="textToSign">English to SSL Translator</a></li>
</ul>
<br/>
<p class="text-lighten">Now you can translate any English text to SSL only by entering it here!!</p>
</div>
</div>
</div>
</section>
<!-- /page title -->
<!-- Text Translator --> <!-- Text Translator -->
<section class="section">
<div class="container"> <div class="chat">
<div class="row"> <div class="messages">
<div class="col-12"> <div class="message">
<h2 class="section-title text-center" style="margin-top: -5%;">Let's Start!!</h2> <div class="bot">
<div id="chatbox"> Welcome to Text to Sign Language Translator!! Now you can enter any phrase you need to translate into sign language
<p class="botText"> <br/>
<span>Enter the sentence you need to translate into SSL</span> Make sure to enter phrase like "Good Morning" or "Sorry"
</p>
</div>
<br/>
<form id="userForm" class="justify-content-center">
<div class="form-row align-items-center">
<div class="col-auto" id="userInput">
<label class="sr-only" for="userTextInput">Enter Your Text</label>
<input type="text" class="form-control-mb-2" id="userTextInput" placeholder="Enter any text" style="height: 80%; width:100%"/>
</div>
<div class="col-auto">
<button type="submit" class="btn btn-primary mb-2" id="userButtonInput" style="height: 75%;">Translate to Sign</button>
</div> </div>
</div> </div>
</form> </div>
</div> <div class="input">
</div> <form action="#" id="chat" method="post">
</div> <input class="text" contenteditable="" placeholder="Type your message here...">
</section> </form>
</div>
</div>
<!-- /Text Translator --> <!-- /Text Translator -->
<!-- footer --> <!-- footer -->
<footer> <!--<footer>
<!-- copyright -->
<div class="copyright py-4 bg-footer"> <div class="copyright py-4 bg-footer">
<div class="container"> <div class="container">
<div class="row"> <div class="row">
...@@ -154,7 +139,7 @@ ...@@ -154,7 +139,7 @@
</div> </div>
</div> </div>
</div> </div>
</footer> </footer>-->
<!-- /footer --> <!-- /footer -->
<!-- jQuery --> <!-- jQuery -->
...@@ -173,37 +158,64 @@ ...@@ -173,37 +158,64 @@
<!-- Main Script --> <!-- Main Script -->
<script src="static/js/script.js"></script> <script src="static/js/script.js"></script>
<script> <script src="https://cpwebassets.codepen.io/assets/common stopExecutionOnTimeout-157cd5b220a5c80d4ff8e0e70ac069bffd87a61252088146915e8726e5d9f147.js"></script>
$("#userTextInput").keypress(function(e){
if(e.which == 13){ <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
//getResponseSign();
} <script id="rendered-js">
}); (function() {
var app;
$("userButtonInput").click(function(){ $(document).ready(function() {
//getResponseSign(); return app.init();
}); });
$("#userForm").on("submit", function (e) { app = {
var message = $("#textInput").val(); init: function() {
e.preventDefault(); return this.bind_events();
$.ajax({ },
url: "http://127.0.0.1:5000/textToSign/response/",
data: JSON.stringify({ message: message }), bind_events: function() {
method: "POST", return $(document).on("submit", "#chat", function(e) {
contentType: "application/json", app.send_message();
success: function (message) { return e.preventDefault();
var text = $("#userTextInput").val();
var userHtml = '<p class="userText"><span>' + text + "</span></p>";
$("#userTextInput").val("");
$("#chatbox").append(userHtml);
document
.getElementById("userInput")
.scrollIntoView({ block: "start", behavior: "smooth" });
},
}); });
}); },
</script>
send_message: function() {
var msg;
msg = $(".text").val().trim();
if (msg) {
$(".text").val("");
$(".messages").append("<div class='message'><div class='you'>" + msg + "</div></div>");
return this.check(msg);
}
},
check: function(msg) {
var keyword;
if (msg != null) {
return this.get_gif(msg);
} else {
return this.bot_post("Wrong syntax ''gif me keyword''.");
}
},
bot_post: function(msg) {
return $(".messages").append("<div class='message'><div class='bot'>" + msg + "</div></div>");
},
get_gif: function(keyword) {
console.log(keyword)
return $.get(`http://localhost:3000/tts/get/${keyword}` , function(data) {
var index;
console.log(data)
return app.bot_post("<img src='" + data.gifPublicUrl + "' alt='' />");
});
}
};
}).call(this);
</script>
</body> </body>
</html> </html>
\ No newline at end of file
from textvoice.textVoiceAs import TextVoiceAs
\ No newline at end of file
# -*- coding: utf-8 -*-
import functools
import sys
from .OneGramDist import OneGramDist
from . import spell
#import spell
from . import temp
from . import speech
import os
class TextVoiceAs:
def genarate(self,letters):
sentence = ''.join(sys.argv[1:]).lower()
if not sentence:
sentence = letters
# onegrams = OneGramDist(filename='count_10M_gb.txt')
#onegrams = OneGramDist(filename='count_1M_gb.txt.gz')
onegrams = OneGramDist(filename= os.path.join(os.path.dirname(os.path.abspath(__file__)),'count_1w.txt'))
onegram_fitness = functools.partial(temp.onegram_log, onegrams)
#print(sentence)
words = temp.segment(sentence, word_seq_fitness=onegram_fitness)
# print(words)
output = [spell.correction(words[index]) for index in range(0,len(words))]
speech.inputs(' '.join(output))
print(' '.join(output))
return
2020_077
Component for sign classification and translation.
K. Bavanraj | IT17032766
\ No newline at end of file
...@@ -38,3 +38,5 @@ def myCam(): ...@@ -38,3 +38,5 @@ def myCam():
cv2.destroyAllWindows() cv2.destroyAllWindows()
return return
myCam()
0 one 0 A
1 two 1 B
2 five3 2 C
3 face4 3 D
4 three5 4 E
5 F
6 G
7 H
8 I
9 J
10 K
11 L
12 M
13 N
14 O
15 P
16 Q
17 R
18 S
19 T
20 U
21 V
22 W
23 X
24 Y
25 Z
...@@ -5,19 +5,39 @@ ...@@ -5,19 +5,39 @@
# * @since 2020-08-15 # * @since 2020-08-15
# imports # imports
import tensorflow as tf
import tensorflow.keras import tensorflow.keras
from PIL import Image, ImageOps from PIL import Image, ImageOps
import numpy as np import numpy as np
import os import os
PATH = os.path.dirname(__file__) import sys
sys.path.append('.')
from textvoice.textVoiceAs import TextVoiceAs
import string
tensorflow.compat.v1.disable_eager_execution()
def sign_predict(): class Translation:
LOCATION = os.path.dirname(__file__)
Letters_ = ""
Fletter_ = ""
Sletter_ = ""
Tex = TextVoiceAs()
# def sign_predict():
# Disable scientific notation for clarity # Disable scientific notation for clarity
np.set_printoptions(suppress=True) np.set_printoptions(suppress=True)
# Load the model # Load the model
model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5') g2=tf.Graph()
# with g2.as_default():
sess2 = tf.compat.v1.Session()
with sess2.as_default():
model_p = tensorflow.keras.models.load_model(LOCATION + '/converted_keras/keras_model.h5')
model_p._make_predict_function()
# print(model_p.summary())
#model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')
# Create the array of the right shape to feed into the keras model # Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is # The 'length' or number of images you can put into the array is
...@@ -26,30 +46,56 @@ def sign_predict(): ...@@ -26,30 +46,56 @@ def sign_predict():
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32) data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
#data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32) #data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)
# Replace this with the path to your image def sign_predict(self, loc):
image = Image.open('C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png') #with self.g2.as_default():
with self.sess2.as_default():
# resize the image to a 224x224 with the same strategy as in TM2: # Replace this with the path to your image
# resizing the image to be at least 224x224 and then cropping from the center # 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
image = Image.open(loc)
size = (224, 224) # resize the image to a 224x224 with the same strategy as in TM2:
#size = (640, 480) # resizing the image to be at least 224x224 and then cropping from the center
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array size = (224, 224)
image_array = np.asarray(image) #size = (640, 480)
print(image_array) image = ImageOps.fit(image, size, Image.ANTIALIAS)
# display the resized image # turn the image into a numpy array
image.show() image_array = np.asarray(image)
#print(image_array)
# Normalize the image # display the resized image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1 #image.show()
# Load the image into the array # Normalize the image
data[0] = normalized_image_array normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# run the inference # Load the image into the array
prediction = model.predict(data) self.data[0] = normalized_image_array
print(prediction) # run the inference
return #sess2.run(tf.global_variables_initializer())
prediction = self.model_p.predict(self.data)
#print(prediction.shape)
#print(type(prediction))
#print(prediction[0, 2])
for alpha in range(26):
if prediction[0, alpha] >= 0.8:
self.Sletter_ = string.ascii_lowercase[alpha]
if alpha == 25:
self.Tex.genarate(self.Letters_)
print(self.Letters_)
self.Letters_ = ""
self.Sletter_ = ""
self.Fletter_ = ""
if self.Sletter_ != self.Fletter_:
print(self.Sletter_)
self.Letters_ = self.Letters_ + self.Sletter_
#break
self.Fletter_ = self.Sletter_
# if os.path.exists(loc):
# os.remove(loc)
# else:
# print("The file does not exist")
return
from trans import Translation
import sys
sys.path.append('.')
#from textvoice.textVoiceAs import TextVoiceAs
def transMain():
t1 = Translation()
#Tex = TextVoiceAs()
location1 = 'C:/Users/user/Documents/GitHub/2020_077/a.jpg'
location2 = 'C:/Users/user/Documents/GitHub/2020_077/c.jpg'
location3 = 'C:/Users/user/Documents/GitHub/2020_077/d.jpg'
location4 = 'C:/Users/user/Documents/GitHub/2020_077/e.jpg'
location5 = 'C:/Users/user/Documents/GitHub/2020_077/g.jpg'
location6 = 'C:/Users/user/Documents/GitHub/2020_077/i.jpg'
location7 = 'C:/Users/user/Documents/GitHub/2020_077/n.jpg'
location8 = 'C:/Users/user/Documents/GitHub/2020_077/o.jpg'
location9 = 'C:/Users/user/Documents/GitHub/2020_077/y.jpg'
location10 = 'C:/Users/user/Documents/GitHub/2020_077/z.jpg'
#Tex.genarate('cab')
t1.sign_predict(location7)
t1.sign_predict(location6)
t1.sign_predict(location2)
t1.sign_predict(location4)
t1.sign_predict(location3)
t1.sign_predict(location1)
t1.sign_predict(location9)
t1.sign_predict(location10)
# t1.sign_predict(location3)
#Tex.genarate(t1.sign_predict(location5))
t1.sign_predict(location5)
t1.sign_predict(location8)
t1.sign_predict(location8)
t1.sign_predict(location3)
t1.sign_predict(location1)
t1.sign_predict(location9)
t1.sign_predict(location10)
transMain()
\ No newline at end of file
# * The trans.py file is created to predict signs with images
# *
# * @author K.Bavanraj | IT17032766
# * @version 1.0
# * @since 2020-08-15
# imports
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import string
tensorflow.compat.v1.disable_eager_execution()
def sign_predict(loc):
PATH = os.path.dirname(__file__)
# def sign_predict():
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)
# Load the model
model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
#model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
#data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)
# Replace this with the path to your image
# 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
image = Image.open(loc)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
#size = (640, 480)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# print(image_array)
# display the resized image
# image.show()
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
#self.model.run_eagerly = False
# run the inference
print("Before model*****************")
prediction = model.predict(data)
print("After model*****************")
# print(prediction)
# print(prediction.shape)
# print(type(prediction))
#print(prediction[0, 2])
for alpha in range(26):
if prediction[0, alpha] >= 0.8:
print(string.ascii_uppercase[alpha])
# return string.ascii_uppercase[alpha]
break
print("After Classification*****************")
# if os.path.exists(loc):
# os.remove(loc)
# else:
# print("The file does not exist")
# return
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment