Commit f3e62b8f authored by Manoj Kumar

Merge branch 'master' into 'manoj_dev'

# Conflicts:
#   dataq/detect.py
parents 6e44faa1 793cc69c
@@ -62,17 +62,11 @@ def textToSignEngine():
@app.route('/tts/get/<msg>', methods=['GET', 'POST'])
def response(msg):
    clearoutputfolder()
    print(msg)
    responseGIF = processInput(msg)
    clearoutputfolder()
    return responseGIF
# clear the OUTPUT folder after displaying the GIF image
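The simplified route takes the message straight from the URL and returns the GIF path as the response body. A minimal sketch of how a client might call it, assuming the Flask app runs on port 3000 as the front-end pages later in this commit do:

# Hypothetical client call for the /tts/get/<msg> route; the host and
# port are assumptions taken from the front-end pages in this commit.
import requests

resp = requests.get('http://localhost:3000/tts/get/hello')
print(resp.text)  # the GIF path returned by processInput()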
@@ -107,47 +107,51 @@ try:
                tensor_name)
        while True:
            # session for detect.py
            with sess.as_default():
                ret, image_np = cap.read()
                temp_image = image_np.copy()
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                output_dict = run_inference_for_single_image(image_np, detection_graph)
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    output_dict['detection_boxes'],
                    output_dict['detection_classes'],
                    output_dict['detection_scores'],
                    category_index,
                    instance_masks=output_dict.get('detection_masks'),
                    use_normalized_coordinates=True,
                    line_thickness=8)

                score = round(100 * output_dict['detection_scores'][0])
                # Send the request to the translation component here:
                # a POST request with the captured hand picture.
                print(category_index)
                if score > 80:
                    img = Image.fromarray(temp_image)
                    img.save(os.getcwd() + "\\" + "capture.jpg")
                    translation.sign_predict(os.getcwd() + "\\" + "capture.jpg")
                    # sign_predict(image_np) <-- still under development,
                    # waiting for the API on that component to be built
                # end send request

                ## Press Q to close the camera
                cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))

                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cap.release()
                    cv2.destroyAllWindows()
                    break
except Exception as e:
    print(e)
    cap.release()
\ No newline at end of file
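The merged loop wraps each frame's inference in with sess.as_default():. In TF1-style graph mode that makes the pre-built session the implicit one for run calls inside the block, so the detector's session and the translation model's own session (see trans.py below) stay out of each other's way. A minimal sketch of the pattern, assuming plain tf.compat.v1 (names here are illustrative, not from the repo):

# Minimal sketch of the default-session pattern used in detect.py above.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=(), name='x')
    y = x * 2.0
sess = tf.Session(graph=graph)

with sess.as_default():
    # Inside this block tf.get_default_session() returns `sess`, so
    # library code can run ops without being handed the session.
    print(tf.get_default_session().run(y, feed_dict={x: 21.0}))  # 42.0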
<html>
<body>
HELLO
<img id="image">
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.3.0/socket.io.js">
</script>
<script>
const socket = io.connect('http://localhost:5000');
socket.on('image',(data)=>{
console.log('data',data);
});
function callGoogle(){
window.open('/google')
}
</script>
<button onclick="callGoogle()">
google
</button>
</body>
</html>
\ No newline at end of file
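This test page subscribes to an 'image' Socket.IO event, but nothing in this commit emits one. A hedged sketch of a server that would feed it, assuming Flask-SocketIO (not part of this repo's code shown here):

# Hypothetical Flask-SocketIO counterpart for the 'image' event the page
# listens on; flask_socketio is an assumption, not part of this commit.
import base64
from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins='*')

def push_frame(jpeg_bytes):
    # Send one camera frame to all connected pages as base64 text
    socketio.emit('image', base64.b64encode(jpeg_bytes).decode('ascii'))

if __name__ == '__main__':
    socketio.run(app, port=5000)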
html {
background-color: white;
color: black;
}
h1 {
color: black;
margin-bottom: 0;
margin-top: 0;
text-align: center;
font-size: 40px;
}
h3 {
color: black;
font-size: 20px;
margin-top: 3px;
text-align: center;
}
#chatbox {
background-color: white;
margin-left: auto;
margin-right: auto;
width: 40%;
margin-top: 60px;
}
#userInput {
margin-left: auto;
margin-right: auto;
width: 40%;
margin-top: 60px;
}
#textInput {
width: 87%;
border: none;
border-bottom: 1px solid #009688;
font-family: monospace;
font-size: 17px;
}
#buttonInput {
padding: 3px;
font-family: monospace;
font-size: 17px;
background-color: white;
color: green;
border-color: green;
border-radius: 2px;
}
#buttonInput:hover {
background-color: green;
color: white;
}
.userText {
color: black;
font-family: monospace;
font-size: 17px;
text-align: right;
line-height: 30px;
}
.userText span {
background-color: #009688;
padding: 10px;
border-radius: 2px;
}
.botText {
color: black;
font-family: monospace;
font-size: 17px;
text-align: left;
line-height: 30px;
}
.botText span {
background-color: white;
padding: 10px;
border-radius: 2px;
color: black;
}
#tidbit {
position: absolute;
bottom: 0;
right: 0;
width: 300px;
}
<!--
* This file creates a form for uploading and displaying files
* For TESTING purposes
*
* @author Amashi Bastiansz | IT17143950
* @version 1.0
* @since 2020-10-01
-->
<!doctype html>
<html>
<head>
<title>Python Flask File Upload Example</title>
</head>
<body>
<h2>Select a file to upload</h2>
<p>
{% with messages = get_flashed_messages() %}
{% if messages %}
<ul class=flashes>
{% for message in messages %}
<li>{{ message }}</li>
{% endfor %}
</ul>
{% endif %}
{% endwith %}
</p>
{% if filename %}
<div>
<img src="{{ url_for('display_image', filename=filename) }}">
</div>
{% endif %}
<form method="post" action="/" enctype="multipart/form-data">
<dl>
<p>
<input type="file" name="file" autocomplete="off" required>
</p>
</dl>
<p>
<input type="submit" value="Submit">
</p>
</form>
</body>
</html>
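The template expects a flash()-based message list and a display_image endpoint; a minimal sketch of the matching Flask side, with names inferred from the template (the route bodies, upload directory, and template file name are assumptions):

# Hypothetical upload handler matching this template's
# url_for('display_image') and get_flashed_messages() usage.
import os
from flask import Flask, flash, redirect, render_template, request, send_from_directory
from werkzeug.utils import secure_filename

app = Flask(__name__)
app.secret_key = 'change-me'   # placeholder
UPLOAD_DIR = 'uploads'         # assumed location

@app.route('/', methods=['GET', 'POST'])
def upload():
    if request.method == 'POST':
        file = request.files.get('file')
        if file and file.filename:
            name = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_DIR, name))
            flash('Image successfully uploaded')
            return render_template('upload.html', filename=name)
        flash('No file selected')
        return redirect(request.url)
    return render_template('upload.html')

@app.route('/display/<filename>')
def display_image(filename):
    return send_from_directory(UPLOAD_DIR, filename)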
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="static/styles/chatbot-test.css"/>
<script>
window.console = window.console || function(t) {};
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/prefixfree/1.0.7/prefixfree.min.js"></script>
<script>
if (document.location.search.match(/type=embed/gi)) {
window.parent.postMessage("resize", "*");
}
</script>
<script src="chrome-extension://mooikfkahbdckldjjndioackbalphokd/assets/prompt.js"></script>
</head>
<body translate="no" data-new-gr-c-s-check-loaded="14.990.0" data-gr-ext-installed="">
<div class="chat">
<div class="messages">
<div class="message">
<div class="bot">
Send something like "good morning" or "sorry"
</div>
</div>
</div>
<div class="input">
<form action="#" id="chat" method="post">
<input class="text" contenteditable="" placeholder="Type your message here...">
</form>
</div>
</div>
<script src="https://cpwebassets.codepen.io/assets/common stopExecutionOnTimeout-157cd5b220a5c80d4ff8e0e70ac069bffd87a61252088146915e8726e5d9f147.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
<script id="rendered-js">
(function() {
var app;
$(document).ready(function() {
return app.init();
});
app = {
//api_key: "dc6zaTOxFJmzC", // Public API key from giphy.com
init: function() {
return this.bind_events();
},
bind_events: function() {
return $(document).on("submit", "#chat", function(e) {
app.send_message();
return e.preventDefault();
});
},
send_message: function() {
var msg;
msg = $(".text").val().trim();
if (msg) {
$(".text").val("");
$(".messages").append("<div class='message'><div class='you'>" + msg + "</div></div>");
return this.check(msg);
}
},
check: function(msg) {
if (msg != null) {
return this.get_gif(msg);
} else {
return this.bot_post("Wrong syntax ''gif me keyword''.");
}
},
bot_post: function(msg) {
return $(".messages").append("<div class='message'><div class='bot'>" + msg + "</div></div>");
},
get_gif: function(keyword) {
console.log(keyword)
return $.get(`http://localhost:3000/tts/get/${keyword}` , function(data) {
var index;
console.log(data)
return app.bot_post("<img src='" + data.gifPublicUrl + "' alt='' />");
}
);
}
};
}).call(this);
</script>
</body>
</html>
\ No newline at end of file
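Note that this page reads data.gifPublicUrl from the /tts/get/<keyword> response, while the merged Flask route above returns responseGIF as a bare string. If the object form is the intent, a hedged sketch of returning JSON instead (jsonify is standard Flask; the key name comes from this page):

# Hypothetical JSON variant of the /tts/get/<msg> route so that the chat
# page's data.gifPublicUrl lookup works; a sketch, not the repo's route.
from flask import jsonify

@app.route('/tts/get/<msg>', methods=['GET', 'POST'])
def response_json(msg):
    clearoutputfolder()
    gif_path = processInput(msg)
    clearoutputfolder()
    return jsonify({'gifPublicUrl': gif_path})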
<!--
* This file creates a form for the Text to SSL Translator
* For TESTING purposes
*
* @author Amashi Bastiansz | IT17143950
* @version 1.0
* @since 2020-10-01
-->
<!DOCTYPE html>
<html>
<head>
<link
rel="stylesheet"
type="text/css"
href="{{url_for('static', filename='styles/style.css')}}"
/>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
</head>
<body>
<h1 style="color: black">Text to SSL Translator</h1>
<div>
<div id="chatbox">
<p class="botText">
<span
>Welcome to Text to Sign Convertor.
<br />
Enter the sentence you need to translate into SSL</span
>
</p>
</div>
<form action="" method="POST" id="form">
<div id="userInput">
<input
id="textInput"
name="msg"
maxlength="50"
placeholder="Enter text here"
/>
<br /><br />
<button id="buttonInput" type="submit">TRANSLATE</button>
</div>
</form>
</div>
<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
<script>
$("#textInput").keypress(function (e) {
if (e.which == 13) {
getResponseSign();
}
});
$("#buttonInput").click(function () {
getResponseSign();
});
$("#form").on("submit", function (e) {
var message = $("#textInput").val();
e.preventDefault();
$.ajax({
url: "http://localhost:3000/tts/response/",
data: JSON.stringify({ message: message }),
method: "POST",
contentType: "application/json",
success: function (message) {
var text = $("#textInput").val();
var userHtml = '<p class="userText"><span>' + text + "</span></p>";
$("#textInput").val("");
$("#chatbox").append(userHtml);
document
.getElementById("userInput")
.scrollIntoView({ block: "start", behavior: "smooth" });
},
});
});
</script>
</body>
</html>
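The form posts JSON ({ message: ... }) to /tts/response/, a route not shown in this diff. A minimal sketch of what it presumably looks like, mirroring the commented-out request.get_json() branch that was removed from the GET route above (the route body is an assumption):

# Hypothetical /tts/response/ handler for the translator form's AJAX call.
from flask import request

@app.route('/tts/response/', methods=['POST'])
def tts_response():
    message = request.get_json()
    gif_path = processInput(message['message'])
    return gif_path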
from textvoice.textVoiceAs import TextVoiceAs
\ No newline at end of file
# -*- coding: utf-8 -*-
import functools
import os
import sys

from .OneGramDist import OneGramDist
from . import spell
from . import speech
from . import temp


class TextVoiceAs:
    def genarate(self, letters):
        # Prefer letters passed on the command line; fall back to the
        # letters collected by the translation component
        sentence = ''.join(sys.argv[1:]).lower()
        if not sentence:
            sentence = letters
        # onegrams = OneGramDist(filename='count_10M_gb.txt')
        # onegrams = OneGramDist(filename='count_1M_gb.txt.gz')
        onegrams = OneGramDist(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'count_1w.txt'))
        onegram_fitness = functools.partial(temp.onegram_log, onegrams)
        # Split the unspaced letter stream into words, then spell-correct each
        words = temp.segment(sentence, word_seq_fitness=onegram_fitness)
        output = [spell.correction(word) for word in words]
        speech.inputs(' '.join(output))
        print(' '.join(output))
        return
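temp.segment splits the unspaced letter stream by scoring candidate splits with unigram log-probabilities from the count_1w.txt word counts. A self-contained sketch of that technique, with a toy count table standing in for the real file (the helper names are illustrative, not the repo's):

# Toy sketch of unigram word segmentation, the technique temp.segment
# appears to implement; COUNTS stands in for count_1w.txt.
import functools
import math

COUNTS = {'good': 300, 'day': 200, 'go': 150, 'a': 100}
TOTAL = sum(COUNTS.values())

def word_logprob(word):
    if word in COUNTS:
        return math.log10(COUNTS[word] / TOTAL)
    # harsh, length-scaled penalty for unknown words
    return math.log10(1.0 / (TOTAL * 10 ** len(word)))

@functools.lru_cache(maxsize=None)
def segment(text):
    if not text:
        return ()
    splits = ((text[:i], text[i:]) for i in range(1, len(text) + 1))
    candidates = [(first,) + segment(rest) for first, rest in splits]
    return max(candidates, key=lambda ws: sum(word_logprob(w) for w in ws))

print(' '.join(segment('goodday')))  # -> good day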
0 A
1 B
2 C
3 D
4 E
5 F
6 G
7 H
8 I
9 J
10 K
11 L
12 M
13 N
14 O
15 P
16 Q
17 R
18 S
19 T
20 U
21 V
22 W
23 X
24 Y
25 Z
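The merged label file maps class indices 0-25 to the letters A-Z, replacing the earlier five-entry set (one, two, five, face, three). A minimal loader for this "index letter" format; the file name is an assumption:

# Minimal loader for an "index letter" label file like the one above;
# 'labels.txt' is an assumed file name.
def load_labels(path='labels.txt'):
    labels = {}
    with open(path) as fh:
        for line in fh:
            idx, name = line.split(maxsplit=1)
            labels[int(idx)] = name.strip()
    return labels

# e.g. load_labels()[25] -> 'Z'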
@@ -5,25 +5,38 @@
# * @since 2020-08-15

# imports
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import sys
sys.path.append('.')
from textvoice.textVoiceAs import TextVoiceAs
import string

tensorflow.compat.v1.disable_eager_execution()


class Translation:
    LOCATION = os.path.dirname(__file__)
    Letters_ = ""   # letters accumulated so far
    Fletter_ = ""   # letter predicted for the previous frame
    Sletter_ = ""   # letter predicted for the current frame
    Tex = TextVoiceAs()

    # Disable scientific notation for clarity
    np.set_printoptions(suppress=True)

    # Load the model once, in its own graph/session, so predictions can be
    # run later from the detection loop
    g2 = tf.Graph()
    sess2 = tf.compat.v1.Session()
    with sess2.as_default():
        model_p = tensorflow.keras.models.load_model(LOCATION + '/converted_keras/keras_model.h5')
        model_p._make_predict_function()
    # print(model_p.summary())

    # Create the array of the right shape to feed into the keras model
@@ -34,48 +47,55 @@ class Translation:
    #data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)

    def sign_predict(self, loc):
        with self.sess2.as_default():
            # loc is the path to the captured hand image
            image = Image.open(loc)

            # resize the image to 224x224 with the same strategy as in TM2:
            # resize to at least 224x224, then crop from the center
            size = (224, 224)
            image = ImageOps.fit(image, size, Image.ANTIALIAS)

            # turn the image into a numpy array
            image_array = np.asarray(image)

            # Normalize the image
            normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

            # Load the image into the array
            self.data[0] = normalized_image_array

            # run the inference
            prediction = self.model_p.predict(self.data)
            for alpha in range(26):
                if prediction[0, alpha] >= 0.8:
                    self.Sletter_ = string.ascii_lowercase[alpha]
                    if alpha == 25:
                        # 'z' is the end-of-word sign: speak the buffer and reset
                        self.Tex.genarate(self.Letters_)
                        print(self.Letters_)
                        self.Letters_ = ""
                        self.Sletter_ = ""
                        self.Fletter_ = ""
                    if self.Sletter_ != self.Fletter_:
                        # only record a letter when it changed since the last frame
                        print(self.Sletter_)
                        self.Letters_ = self.Letters_ + self.Sletter_
                    self.Fletter_ = self.Sletter_
        # if os.path.exists(loc):
        #     os.remove(loc)
        return
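The merged sign_predict accumulates one letter per confident frame, drops consecutive repeats, and flushes the buffer to the text-to-voice component whenever 'z' is predicted. That bookkeeping, distilled into a standalone sketch (illustrative, not the class itself):

# Distilled sketch of the Letters_/Fletter_/Sletter_ bookkeeping above:
# consecutive duplicates are dropped and 'z' flushes the word buffer.
def accumulate(letter_stream):
    words, buffer, last = [], '', ''
    for letter in letter_stream:
        if letter == 'z':
            words.append(buffer)   # flush on the terminator sign
            buffer, last = '', ''
            continue
        if letter != last:         # debounce repeated frames
            buffer += letter
        last = letter
    return words

print(accumulate('nniicceeddaayz'))  # -> ['niceday']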
from trans import Translation
import sys
sys.path.append('.')
#from textvoice.textVoiceAs import TextVoiceAs


def transMain():
    t1 = Translation()
    location1 = 'C:/Users/user/Documents/GitHub/2020_077/a.jpg'
    location2 = 'C:/Users/user/Documents/GitHub/2020_077/c.jpg'
    location3 = 'C:/Users/user/Documents/GitHub/2020_077/d.jpg'
    location4 = 'C:/Users/user/Documents/GitHub/2020_077/e.jpg'
    location5 = 'C:/Users/user/Documents/GitHub/2020_077/g.jpg'
    location6 = 'C:/Users/user/Documents/GitHub/2020_077/i.jpg'
    location7 = 'C:/Users/user/Documents/GitHub/2020_077/n.jpg'
    location8 = 'C:/Users/user/Documents/GitHub/2020_077/o.jpg'
    location9 = 'C:/Users/user/Documents/GitHub/2020_077/y.jpg'
    location10 = 'C:/Users/user/Documents/GitHub/2020_077/z.jpg'

    # Feed test images letter by letter; each z image flushes the word buffer
    t1.sign_predict(location7)   # n
    t1.sign_predict(location6)   # i
    t1.sign_predict(location2)   # c
    t1.sign_predict(location4)   # e
    t1.sign_predict(location3)   # d
    t1.sign_predict(location1)   # a
    t1.sign_predict(location9)   # y
    t1.sign_predict(location10)  # z (terminator)
    t1.sign_predict(location5)   # g
    t1.sign_predict(location8)   # o
    t1.sign_predict(location8)   # o
    t1.sign_predict(location3)   # d
    t1.sign_predict(location1)   # a
    t1.sign_predict(location9)   # y
    t1.sign_predict(location10)  # z (terminator)


transMain()
# * The trans.py file is created to predict signs with images
# *
# * @author K.Bavanraj | IT17032766
# * @version 1.0
# * @since 2020-08-15
# imports
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import string
tensorflow.compat.v1.disable_eager_execution()
def sign_predict(loc):
PATH = os.path.dirname(__file__)
# def sign_predict():
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)
# Load the model
model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
#model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
#data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)
# Replace this with the path to your image
# 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
image = Image.open(loc)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
#size = (640, 480)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# print(image_array)
# display the resized image
# image.show()
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
#self.model.run_eagerly = False
# run the inference
print("Before model*****************")
prediction = model.predict(data)
print("After model*****************")
# print(prediction)
# print(prediction.shape)
# print(type(prediction))
#print(prediction[0, 2])
for alpha in range(26):
if prediction[0, alpha] >= 0.8:
print(string.ascii_uppercase[alpha])
# return string.ascii_uppercase[alpha]
break
print("After Classification*****************")
# if os.path.exists(loc):
# os.remove(loc)
# else:
# print("The file does not exist")
# return
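One thing worth double-checking in both versions of sign_predict: the normalization divides by 127.0, which maps pixel value 255 to just over 1.0, while Teachable Machine's exported snippets typically use 127.5 for an exact [-1, 1] range. A quick check (numpy only; whether the model is sensitive to this is untested here):

# Quick check of the normalization range; 127.5 is the usual Teachable
# Machine constant, 127.0 is what this file uses.
import numpy as np

px = np.array([0, 127, 255], dtype=np.float32)
print(px / 127.0 - 1)  # [-1.  0.  1.007874]
print(px / 127.5 - 1)  # [-1. -0.00392157  1.]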