Commit f3e62b8f authored by Manoj Kumar's avatar Manoj Kumar

Merge branch 'master' into 'manoj_dev'

# Conflicts:
#   dataq/detect.py
parents 6e44faa1 793cc69c
......@@ -62,17 +62,11 @@ def textToSignEngine():
# Flask route: GET/POST /tts/get/<msg>
# Translates the text captured in the <msg> URL segment into a
# sign-language GIF via processInput() and returns its result directly.
# NOTE(review): processInput() and clearoutputfolder() are defined
# elsewhere in this file — confirm processInput() returns a GIF path/URL.
# NOTE(review): clearoutputfolder() runs both before and after
# processing; presumably it empties the OUTPUT folder of stale GIFs so
# each request starts clean — verify against its definition.
@app.route('/tts/get/<msg>', methods=['GET', 'POST'])
def response(msg):
clearoutputfolder()
# message = request.get_json()
# responseGIF = processInput(message['message'])
# Debug trace of the incoming message.
print(msg)
responseGIF = processInput(msg)
# response = {
# "gifPath": responseGIF
# }
clearoutputfolder()
return responseGIF
# return send_file(responseGIF, mimetype='image/gif')
# clear the OUTPUT folder after displaying the GIF image
......
......@@ -107,6 +107,9 @@ try:
tensor_name)
while True:
# session for detect.py
with sess.as_default():
ret, image_np = cap.read()
temp_image = image_np.copy()
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
......@@ -128,6 +131,7 @@ try:
#send the request to translation component here
###
# I will be sending a POST request to u. a hand picture
print(category_index)
if score > 80:
#print(image_np_expanded.shape)
img = Image.fromarray(temp_image)
......
<html>
<body>
HELLO
<img id="image">
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.3.0/socket.io.js">
</script>
<script>
const socket = io.connect('http://localhost:5000');
socket.on('image',(data)=>{
console.log('data',data);
});
function callGoogle(){
window.open('/google')
}
</script>
<button onclick="callGoogle()">
google
</button>
</body>
</html>
\ No newline at end of file
html {
background-color: white;
color: black;
}
h1 {
color: black;
margin-bottom: 0;
margin-top: 0;
text-align: center;
font-size: 40px;
}
h3 {
color: black;
font-size: 20px;
margin-top: 3px;
text-align: center;
}
#chatbox {
background-color: white;
margin-left: auto;
margin-right: auto;
width: 40%;
margin-top: 60px;
}
#userInput {
margin-left: auto;
margin-right: auto;
width: 40%;
margin-top: 60px;
}
#textInput {
width: 87%;
border: none;
border-bottom: 1px solid #009688;
font-family: monospace;
font-size: 17px;
}
#buttonInput {
padding: 3px;
font-family: monospace;
font-size: 17px;
background-color: white;
color: green;
border-color: green;
border-radius: 2px;
}
/* Hover state for the TRANSLATE button: invert to a green background
   with white text.
   Fix: the original selector "#buttonInput :hover" (note the space)
   used the descendant combinator, so it matched hovered *descendants*
   of the button — the button itself never changed on hover. ":hover"
   must attach directly to the button's id. */
#buttonInput:hover {
    background-color: green;
    color: white;
}
.userText {
color: black;
font-family: monospace;
font-size: 17px;
text-align: right;
line-height: 30px;
}
.userText span {
background-color: #009688;
padding: 10px;
border-radius: 2px;
}
.botText {
color: black;
font-family: monospace;
font-size: 17px;
text-align: left;
line-height: 30px;
}
.botText span {
background-color: white;
padding: 10px;
border-radius: 2px;
color: black;
}
#tidbit {
position: absolute;
bottom: 0;
right: 0;
width: 300px;
}
<!--
* The file is to create a form for uploading and display files
* For TESTING purposes
*
* @author Amashi Bastiansz | IT17143950
* @version 1.0
* @since 2020-10-01
-->
<!doctype html>
<html>
<head>
<title>Python Flask File Upload Example</title>
</head>
<body>
<h2>Select a file to upload</h2>
<p>
{% with messages = get_flashed_messages() %}
{% if messages %}
<ul class=flashes>
{% for message in messages %}
<li>{{ message }}</li>
{% endfor %}
</ul>
{% endif %}
{% endwith %}
</p>
{% if filename %}
<div>
<img src="{{ url_for('display_image', filename=filename) }}">
</div>
{% endif %}
<form method="post" action="/" enctype="multipart/form-data">
<dl>
<p>
<input type="file" name="file" autocomplete="off" required>
</p>
</dl>
<p>
<input type="submit" value="Submit">
</p>
</form>
</body>
</html>
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="static/styles/chatbot-test.css"/>
<script>
window.console = window.console || function(t) {};
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/prefixfree/1.0.7/prefixfree.min.js"></script>
<script>
if (document.location.search.match(/type=embed/gi)) {
window.parent.postMessage("resize", "*");
}
</script>
<script src="chrome-extension://mooikfkahbdckldjjndioackbalphokd/assets/prompt.js"></script>
</head>
<body translate="no" data-new-gr-c-s-check-loaded="14.990.0" data-gr-ext-installed="">
<div class="chat">
<div class="messages">
<div class="message">
<div class="bot">
Send something like "good morning " or "sorry"
</div>
</div>
</div>
<div class="input">
<form action="#" id="chat" method="post">
<input class="text" contenteditable="" placeholder="Type your message here...">
</form>
</div>
</div>
<script src="https://cpwebassets.codepen.io/assets/common stopExecutionOnTimeout-157cd5b220a5c80d4ff8e0e70ac069bffd87a61252088146915e8726e5d9f147.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
<script id="rendered-js">
// Chatbot front-end: wires the #chat form to the text-to-sign backend.
// On submit it echoes the user's message into .messages and fetches the
// matching GIF from the local translation service.
(function() {
var app;
// Defer initialisation until the DOM is ready.
$(document).ready(function() {
return app.init();
});
app = {
//api_key: "dc6zaTOxFJmzC", // Public API key from giphy.com
// Entry point: install the form handler.
init: function() {
return this.bind_events();
},
// Intercept #chat form submission; prevents the default page reload.
bind_events: function() {
return $(document).on("submit", "#chat", function(e) {
app.send_message();
return e.preventDefault();
});
},
// Read the .text input, echo it as a "you" bubble, then look up a GIF.
// NOTE(review): msg is concatenated into HTML unescaped — a message
// containing markup is injected as-is (XSS risk); escape before append.
send_message: function() {
var msg;
msg = $(".text").val().trim();
if (msg) {
$(".text").val("");
$(".messages").append("<div class='message'><div class='you'>" + msg + "</div></div>");
return this.check(msg);
}
},
// Route the message to the GIF lookup.
// NOTE(review): msg is always a non-empty string here (send_message
// guards it), so the null branch / error post appears unreachable.
check: function(msg) {
var keyword;
if (msg != null) {
return this.get_gif(msg);
} else {
return this.bot_post("Wrong syntax ''gif me keyword''.");
}
},
// Append a bot-side bubble containing msg (raw HTML is allowed here —
// get_gif relies on that to insert an <img>).
bot_post: function(msg) {
return $(".messages").append("<div class='message'><div class='bot'>" + msg + "</div></div>");
},
// GET the translation service for this keyword and post the returned
// GIF. Expects the response to carry a gifPublicUrl field — TODO
// confirm against the /tts/get/<msg> route's actual payload.
get_gif: function(keyword) {
console.log(keyword)
return $.get(`http://localhost:3000/tts/get/${keyword}` , function(data) {
var index;
console.log(data)
return app.bot_post("<img src='" + data.gifPublicUrl + "' alt='' />");
}
);
}
};
}).call(this);
</script>
</body>
</html>
\ No newline at end of file
<!--
* The file is to create a form for Text to SSL Translator
* For TESTING purposes
*
* @author Amashi Bastiansz | IT17143950
* @version 1.0
* @since 2020-10-01
-->
<!DOCTYPE html>
<html>
<head>
<link
rel="stylesheet"
type="text/css"
href="{{url_for('static', filename='styles/style.css')}}"
/>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
</head>
<body>
<h1 style="color: black">Text to SSL Translator</h1>
<div>
<div id="chatbox">
<p class="botText">
<span
>Welcome to Text to Sign Convertor.
<br />
Enter the sentence you need to translate into SSL</span
>
</p>
</div>
<form action="" method="POST" id="form">
<div id="userInput">
<input
id="textInput"
name="msg"
maxlength="50"
placeholder="Enter text here"
/>
<br /><br />
<button id="buttonInput" type="submit">TRANSLATE</button>
</div>
</form>
</div>
<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
<script>
// Text-to-SSL translator page wiring: submit the typed sentence to the
// backend and echo it into the chatbox on success.
// NOTE(review): getResponseSign() is not defined anywhere on this page —
// these two handlers will throw at runtime unless it is provided by
// another script; confirm or remove.
// NOTE(review): pressing the button fires BOTH the click handler and the
// form's submit handler — the request may be duplicated.
$("#textInput").keypress(function (e) {
// 13 == Enter key.
if (e.which == 13) {
getResponseSign();
}
});
$("#buttonInput").click(function () {
getResponseSign();
});
// Main path: POST the message as JSON to the translation endpoint.
$("#form").on("submit", function (e) {
var message = $("#textInput").val();
e.preventDefault();
$.ajax({
url: "http://localhost:3000/tts/response/",
data: JSON.stringify({ message: message }),
method: "POST",
contentType: "application/json",
// NOTE(review): the callback parameter shadows the outer `message`;
// the echoed text is re-read from the input, and the server response
// itself is never used — presumably the GIF is served elsewhere.
success: function (message) {
var text = $("#textInput").val();
var userHtml = '<p class="userText"><span>' + text + "</span></p>";
$("#textInput").val("");
$("#chatbox").append(userHtml);
document
.getElementById("userInput")
.scrollIntoView({ block: "start", behavior: "smooth" });
},
});
});
</script>
</body>
</html>
from textvoice.textVoiceAs import TextVoiceAs
\ No newline at end of file
# -*- coding: utf-8 -*-
import functools
import sys
from .OneGramDist import OneGramDist
from . import spell
#import spell
from . import temp
from . import speech
import os
class TextVoiceAs:
    """Turn a run of recognised sign letters into spoken, spell-checked words."""

    def genarate(self, letters):
        """Segment *letters* into words, spell-correct each, then speak and print them.

        Parameters:
            letters: the raw, unspaced lowercase letter sequence produced
                by the sign classifier (e.g. ``"goodmorning"``).

        Returns:
            None — the corrected sentence is passed to ``speech.inputs``
            and printed.
        """
        # Legacy CLI behaviour: command-line arguments, when present,
        # override the *letters* argument — TODO confirm this is still
        # wanted when called as a library.
        sentence = ''.join(sys.argv[1:]).lower()
        if not sentence:
            sentence = letters
        # Unigram frequency table shipped alongside this module; drives
        # the word-segmentation fitness function.
        onegrams = OneGramDist(filename=os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'count_1w.txt'))
        onegram_fitness = functools.partial(temp.onegram_log, onegrams)
        words = temp.segment(sentence, word_seq_fitness=onegram_fitness)
        # Idiom fix: iterate the words directly instead of indexing with
        # range(0, len(words)).
        output = [spell.correction(word) for word in words]
        speech.inputs(' '.join(output))
        print(' '.join(output))
0 one
1 two
2 five3
3 face4
4 three5
0 A
1 B
2 C
3 D
4 E
5 F
6 G
7 H
8 I
9 J
10 K
11 L
12 M
13 N
14 O
15 P
16 Q
17 R
18 S
19 T
20 U
21 V
22 W
23 X
24 Y
25 Z
......@@ -5,25 +5,38 @@
# * @since 2020-08-15
# imports
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import sys
sys.path.append('.')
from textvoice.textVoiceAs import TextVoiceAs
import string
tensorflow.compat.v1.disable_eager_execution()
class Translation:
PATH = os.path.dirname(__file__)
LOCATION = os.path.dirname(__file__)
Letters_ = ""
Fletter_ = ""
Sletter_ = ""
Tex = TextVoiceAs()
# def sign_predict():
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)
# Load the model
model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
g2=tf.Graph()
# with g2.as_default():
sess2 = tf.compat.v1.Session()
with sess2.as_default():
model_p = tensorflow.keras.models.load_model(LOCATION + '/converted_keras/keras_model.h5')
model_p._make_predict_function()
# print(model_p.summary())
#model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')
# Create the array of the right shape to feed into the keras model
......@@ -34,6 +47,8 @@ class Translation:
#data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)
def sign_predict(self, loc):
#with self.g2.as_default():
with self.sess2.as_default():
# Replace this with the path to your image
# 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
......@@ -58,22 +73,27 @@ class Translation:
# Load the image into the array
self.data[0] = normalized_image_array
# run the inference
print("Before model*****************")
prediction = self.model.predict(self.data)
print("After model*****************")
#print(prediction)
#sess2.run(tf.global_variables_initializer())
prediction = self.model_p.predict(self.data)
#print(prediction.shape)
#print(type(prediction))
#print(prediction[0, 2])
for alpha in range(26):
if prediction[0, alpha] >= 0.8:
print(string.ascii_uppercase[alpha])
#return string.ascii_uppercase[alpha]
break
print("After Classification*****************")
self.Sletter_ = string.ascii_lowercase[alpha]
if alpha == 25:
self.Tex.genarate(self.Letters_)
print(self.Letters_)
self.Letters_ = ""
self.Sletter_ = ""
self.Fletter_ = ""
if self.Sletter_ != self.Fletter_:
print(self.Sletter_)
self.Letters_ = self.Letters_ + self.Sletter_
#break
self.Fletter_ = self.Sletter_
# if os.path.exists(loc):
# os.remove(loc)
# else:
......
from trans import Translation
import TextVoice
import sys
sys.path.append('.')
#from textvoice.textVoiceAs import TextVoiceAs
if __name__ == '__main__':
def transMain():
t1 = Translation()
Tex = TextVoice()
#Tex = TextVoiceAs()
location1 = 'C:/Users/user/Documents/GitHub/2020_077/test1.jpg'
location2 = 'C:/Users/user/Documents/GitHub/2020_077/test2.jpg'
location3 = 'C:/Users/user/Documents/GitHub/2020_077/test3.jpg'
location4 = 'C:/Users/user/Documents/GitHub/2020_077/test4.jpg'
location5 = 'C:/Users/user/Documents/GitHub/2020_077/test55.jpg'
location1 = 'C:/Users/user/Documents/GitHub/2020_077/a.jpg'
location2 = 'C:/Users/user/Documents/GitHub/2020_077/c.jpg'
location3 = 'C:/Users/user/Documents/GitHub/2020_077/d.jpg'
location4 = 'C:/Users/user/Documents/GitHub/2020_077/e.jpg'
location5 = 'C:/Users/user/Documents/GitHub/2020_077/g.jpg'
location6 = 'C:/Users/user/Documents/GitHub/2020_077/i.jpg'
location7 = 'C:/Users/user/Documents/GitHub/2020_077/n.jpg'
location8 = 'C:/Users/user/Documents/GitHub/2020_077/o.jpg'
location9 = 'C:/Users/user/Documents/GitHub/2020_077/y.jpg'
location10 = 'C:/Users/user/Documents/GitHub/2020_077/z.jpg'
Tex.getInput()
#getInput('t1.sign_predict(location1)')
# t1.sign_predict(location2)
# t1.sign_predict(location3)
# t1.sign_predict(location4)
# t1.sign_predict(location5)
# t1.sign_predict(location2)
#Tex.genarate('cab')
t1.sign_predict(location7)
t1.sign_predict(location6)
t1.sign_predict(location2)
t1.sign_predict(location4)
t1.sign_predict(location3)
t1.sign_predict(location1)
t1.sign_predict(location9)
t1.sign_predict(location10)
# t1.sign_predict(location3)
# t1.sign_predict(location4)
# t1.sign_predict(location5)
# t1.sign_predict(location1)
# t1.sign_predict(location4)
\ No newline at end of file
#Tex.genarate(t1.sign_predict(location5))
t1.sign_predict(location5)
t1.sign_predict(location8)
t1.sign_predict(location8)
t1.sign_predict(location3)
t1.sign_predict(location1)
t1.sign_predict(location9)
t1.sign_predict(location10)
transMain()
\ No newline at end of file
# * The trans.py file is created to predict signs with images
# *
# * @author K.Bavanraj | IT17032766
# * @version 1.0
# * @since 2020-08-15
# imports
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import string
tensorflow.compat.v1.disable_eager_execution()
def sign_predict(loc):
    """Classify the hand-sign image at *loc* into a letter A-Z.

    Loads the exported Teachable-Machine Keras model, fits the image to
    the model's 224x224 input, runs inference, and prints the first
    letter whose confidence is >= 0.8.

    Parameters:
        loc: filesystem path of the image to classify.

    Returns:
        None — the predicted letter is printed, not returned.
    """
    PATH = os.path.dirname(__file__)
    # Disable scientific notation so printed predictions stay readable.
    np.set_printoptions(suppress=True)
    # NOTE(review): the model is re-loaded from disk on every call, which
    # is very slow — consider caching it at module level.
    model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
    # One image per batch; the model expects shape (1, 224, 224, 3).
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    image = Image.open(loc)
    # Resize to at least 224x224 then crop from the centre (TM2 strategy).
    # Fix: Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # Image.LANCZOS is the long-standing alias for the same filter.
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.LANCZOS)
    # Convert to a numpy array and normalise pixels to roughly [-1, 1].
    # NOTE(review): Teachable Machine's reference code divides by 127.5,
    # not 127.0 — confirm this divisor is intentional.
    image_array = np.asarray(image)
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
    data[0] = normalized_image_array
    # Run the inference.
    print("Before model*****************")
    prediction = model.predict(data)
    print("After model*****************")
    # Report the first class scoring >= 0.8; indices 0-25 map to A-Z.
    for alpha in range(26):
        if prediction[0, alpha] >= 0.8:
            print(string.ascii_uppercase[alpha])
            break
    print("After Classification*****************")
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment