Commit a8fd4b8c authored by Manoj Kumar

Merge branch 'master' into 'amashi_dev'

# Conflicts:
#   reveng/processInput.py
parents 5afa6014 3604f065
......@@ -35,6 +35,8 @@ def home():
@app.route('/cam')
def openCam():
    os.system('python dataq\\detect.py')  # backslash doubled so '\d' is not read as an escape sequence
    return render_template('homePage.html')
# END DATAQ
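# Note: os.system blocks this request handler until detect.py exits, so the
# page only renders after the camera window is closed; a non-blocking
# alternative would be subprocess.Popen(['python', 'dataq\\detect.py']).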
......
......@@ -107,48 +107,51 @@ try:
tensor_name)
    while True:
        ret, image_np = cap.read()
        temp_image = image_np.copy()
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        output_dict = run_inference_for_single_image(image_np, detection_graph)
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)
        score = round(100 * output_dict['detection_scores'][0])
        # Send the request to the translation component here:
        # a POST request carrying the captured hand picture.
        print(category_index)
        if score > 80:
            # print(image_np_expanded.shape)
            img = Image.fromarray(temp_image)
            # print(image_np.shape)
            img.save(os.getcwd() + "\\" + "capture.jpg")
            # array_for_translation = resize(image_np, (224, 224, 3))
            # print(array_for_translation.shape)
            translation.sign_predict(os.getcwd() + "\\" + "capture.jpg")
        # session for detect.py
        with sess.as_default():
            ret, image_np = cap.read()
            temp_image = image_np.copy()
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            output_dict = run_inference_for_single_image(image_np, detection_graph)
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                output_dict['detection_boxes'],
                output_dict['detection_classes'],
                output_dict['detection_scores'],
                category_index,
                instance_masks=output_dict.get('detection_masks'),
                use_normalized_coordinates=True,
                line_thickness=8)
        # sign_predict(image_np) <-- still under development,
        # waiting for the API on that component to be built
        # end send request
        # Press Q to close the camera
        cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break
            score = round(100 * output_dict['detection_scores'][0])
            # Send the request to the translation component here:
            # a POST request carrying the captured hand picture.
            print(category_index)
            if score > 80:
                # print(image_np_expanded.shape)
                img = Image.fromarray(temp_image)
                # print(image_np.shape)
                img.save(os.getcwd() + "\\" + "capture.jpg")
                # array_for_translation = resize(image_np, (224, 224, 3))
                # print(array_for_translation.shape)
                translation.sign_predict(os.getcwd() + "\\" + "capture.jpg")
            # sign_predict(image_np) <-- still under development,
            # waiting for the API on that component to be built
            # end send request
            # Press Q to close the camera
            cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
                break
except Exception as e:
    print(e)
    cap.release()
\ No newline at end of file
......@@ -39,15 +39,7 @@
<!--Favicon-->
<link rel="shortcut icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
<link rel="icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
    <script>
        function goPython(){
            $.ajax({
                url: ""
            }).done(function() {
                alert('finished python script');
            });
        }
    </script>
</head>
<body>
......@@ -115,7 +107,9 @@
<div class="col-12">
<form id="cameraForm" class="justify-content-center">
<div class="text-center">
<button type="submit" class="btn btn-primary mb-2" id="userButtonInput" style="height: 75%;" onclick="goPython()">Open My Webcamera</button>
<a href="http://localhost:3000/cam">
open cam
</a>
</div>
</form>
</div>
......
from textvoice.textVoiceAs import TextVoiceAs
\ No newline at end of file
# -*- coding: utf-8 -*-
import functools
import os
import sys

from .OneGramDist import OneGramDist
from . import spell
# import spell
from . import temp
from . import speech


class TextVoiceAs:
    def genarate(self, letters):
        sentence = ''.join(sys.argv[1:]).lower()
        if not sentence:
            sentence = letters
        # onegrams = OneGramDist(filename='count_10M_gb.txt')
        # onegrams = OneGramDist(filename='count_1M_gb.txt.gz')
        onegrams = OneGramDist(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'count_1w.txt'))
        onegram_fitness = functools.partial(temp.onegram_log, onegrams)
        # print(sentence)
        words = temp.segment(sentence, word_seq_fitness=onegram_fitness)
        # print(words)
        output = [spell.correction(word) for word in words]
        speech.inputs(' '.join(output))
        print(' '.join(output))
        return
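# Example (hypothetical input): TextVoiceAs().genarate('thisisatest') segments
# the letter stream into words via the unigram model, spell-corrects each word,
# then speaks and prints "this is a test".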
0 one
1 two
2 five
3 face
4 three
0 A
1 B
2 C
3 D
4 E
5 F
6 G
7 H
8 I
9 J
10 K
11 L
12 M
13 N
14 O
15 P
16 Q
17 R
18 S
19 T
20 U
21 V
22 W
23 X
24 Y
25 Z
......@@ -5,25 +5,38 @@
# * @since 2020-08-15
# imports
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import sys
sys.path.append('.')
from textvoice.textVoiceAs import TextVoiceAs
import string
tensorflow.compat.v1.disable_eager_execution()
class Translation:
    PATH = os.path.dirname(__file__)
    LOCATION = os.path.dirname(__file__)
    Letters_ = ""
    Fletter_ = ""
    Sletter_ = ""
    Tex = TextVoiceAs()

    # def sign_predict():
    # Disable scientific notation for clarity
    np.set_printoptions(suppress=True)
    # Load the model
    model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
    g2 = tf.Graph()
    # with g2.as_default():
    sess2 = tf.compat.v1.Session()
    with sess2.as_default():
        model_p = tensorflow.keras.models.load_model(LOCATION + '/converted_keras/keras_model.h5')
        model_p._make_predict_function()
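        # _make_predict_function() (a private, TF1-era Keras method) builds the
        # predict graph eagerly here, inside sess2, so later predict() calls in
        # sign_predict can run under this same session without "tensor is not
        # an element of this graph" errors.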
    # print(model_p.summary())
    # model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')
    # Create the array of the right shape to feed into the keras model
......@@ -34,48 +47,55 @@ class Translation:
    # data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)
    def sign_predict(self, loc):
        # Replace this with the path to your image
        # 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
        image = Image.open(loc)
        # Resize the image to 224x224 with the same strategy as in TM2:
        # resizing the image to be at least 224x224 and then cropping from the center
        size = (224, 224)
        # size = (640, 480)
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
        # Turn the image into a numpy array
        image_array = np.asarray(image)
        # print(image_array)
        # Display the resized image
        # image.show()
        # Normalize the image
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
        # Load the image into the array
        self.data[0] = normalized_image_array
        # Run the inference
        print("Before model*****************")
        prediction = self.model.predict(self.data)
        print("After model*****************")
        # print(prediction)
        # print(prediction.shape)
        # print(type(prediction))
        # print(prediction[0, 2])
        for alpha in range(26):
            if prediction[0, alpha] >= 0.8:
                print(string.ascii_uppercase[alpha])
                # return string.ascii_uppercase[alpha]
                break
        print("After Classification*****************")
        # if os.path.exists(loc):
        #     os.remove(loc)
        # else:
        #     print("The file does not exist")
        # with self.g2.as_default():
        with self.sess2.as_default():
            # Replace this with the path to your image
            # 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
            image = Image.open(loc)
            # Resize the image to 224x224 with the same strategy as in TM2:
            # resizing the image to be at least 224x224 and then cropping from the center
            size = (224, 224)
            # size = (640, 480)
            image = ImageOps.fit(image, size, Image.ANTIALIAS)
            # Turn the image into a numpy array
            image_array = np.asarray(image)
            # print(image_array)
            # Display the resized image
            # image.show()
            # Normalize the image
            normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
            # Load the image into the array
            self.data[0] = normalized_image_array
            # Run the inference
            # sess2.run(tf.global_variables_initializer())
            prediction = self.model_p.predict(self.data)
            # print(prediction.shape)
            # print(type(prediction))
            # print(prediction[0, 2])
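            # The loop below (reading of the apparent intent): collect each
            # confidently predicted letter, skip immediate repeats of the
            # previous letter, and treat 'z' (index 25) as the end-of-sentence
            # trigger that hands the accumulated letters to text-to-speech.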
            for alpha in range(26):
                if prediction[0, alpha] >= 0.8:
                    self.Sletter_ = string.ascii_lowercase[alpha]
                    if alpha == 25:
                        self.Tex.genarate(self.Letters_)
                        print(self.Letters_)
                        self.Letters_ = ""
                        self.Sletter_ = ""
                        self.Fletter_ = ""
                    if self.Sletter_ != self.Fletter_:
                        print(self.Sletter_)
                        self.Letters_ = self.Letters_ + self.Sletter_
                    # break
                    self.Fletter_ = self.Sletter_
            # if os.path.exists(loc):
            #     os.remove(loc)
            # else:
            #     print("The file does not exist")
        return
from trans import Translation
from TextVoice import TextVoice  # assumes TextVoice.py defines a TextVoice class; plain 'import TextVoice' made TextVoice() call the module
import sys
sys.path.append('.')
# from textvoice.textVoiceAs import TextVoiceAs

if __name__ == '__main__':
    def transMain():
        t1 = Translation()
        Tex = TextVoice()
        # Tex = TextVoiceAs()
        location1 = 'C:/Users/user/Documents/GitHub/2020_077/test1.jpg'
        location2 = 'C:/Users/user/Documents/GitHub/2020_077/test2.jpg'
        location3 = 'C:/Users/user/Documents/GitHub/2020_077/test3.jpg'
        location4 = 'C:/Users/user/Documents/GitHub/2020_077/test4.jpg'
        location5 = 'C:/Users/user/Documents/GitHub/2020_077/test55.jpg'
        location1 = 'C:/Users/user/Documents/GitHub/2020_077/a.jpg'
        location2 = 'C:/Users/user/Documents/GitHub/2020_077/c.jpg'
        location3 = 'C:/Users/user/Documents/GitHub/2020_077/d.jpg'
        location4 = 'C:/Users/user/Documents/GitHub/2020_077/e.jpg'
        location5 = 'C:/Users/user/Documents/GitHub/2020_077/g.jpg'
        location6 = 'C:/Users/user/Documents/GitHub/2020_077/i.jpg'
        location7 = 'C:/Users/user/Documents/GitHub/2020_077/n.jpg'
        location8 = 'C:/Users/user/Documents/GitHub/2020_077/o.jpg'
        location9 = 'C:/Users/user/Documents/GitHub/2020_077/y.jpg'
        location10 = 'C:/Users/user/Documents/GitHub/2020_077/z.jpg'
        Tex.getInput()
        # getInput('t1.sign_predict(location1)')
        # t1.sign_predict(location2)
        # Tex.genarate('cab')
        t1.sign_predict(location7)
        t1.sign_predict(location6)
        t1.sign_predict(location2)
        t1.sign_predict(location4)
        t1.sign_predict(location3)
        t1.sign_predict(location1)
        t1.sign_predict(location9)
        t1.sign_predict(location10)
        # t1.sign_predict(location3)
        # t1.sign_predict(location4)
        # t1.sign_predict(location5)
        # t1.sign_predict(location2)
        # t1.sign_predict(location3)
        # t1.sign_predict(location4)
        # t1.sign_predict(location5)
        # t1.sign_predict(location1)
        # t1.sign_predict(location4)
\ No newline at end of file
        # Tex.genarate(t1.sign_predict(location5))
        t1.sign_predict(location5)
        t1.sign_predict(location8)
        t1.sign_predict(location8)
        t1.sign_predict(location3)
        t1.sign_predict(location1)
        t1.sign_predict(location9)
        t1.sign_predict(location10)
    transMain()
\ No newline at end of file
# * The trans.py file is created to predict signs with images
# *
# * @author K.Bavanraj | IT17032766
# * @version 1.0
# * @since 2020-08-15
# imports
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import string
tensorflow.compat.v1.disable_eager_execution()
def sign_predict(loc):
    PATH = os.path.dirname(__file__)
    # def sign_predict():
    # Disable scientific notation for clarity
    np.set_printoptions(suppress=True)
    # Load the model
    model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
    # model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')
    # Create the array of the right shape to feed into the keras model.
    # The 'length' or number of images you can put into the array is
    # determined by the first position in the shape tuple, in this case 1.
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    # data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)
    # Replace this with the path to your image
    # 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
    image = Image.open(loc)
    # Resize the image to 224x224 with the same strategy as in TM2:
    # resizing the image to be at least 224x224 and then cropping from the center
    size = (224, 224)
    # size = (640, 480)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)
    # Turn the image into a numpy array
    image_array = np.asarray(image)
    # print(image_array)
    # Display the resized image
    # image.show()
    # Normalize the image
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
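    # (x / 127.0) - 1 maps 8-bit pixel values from [0, 255] to roughly
    # [-1, 1.008]; kept as-is since it mirrors the preprocessing in the
    # exported Teachable Machine snippet this file appears to be based on.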
    # Load the image into the array
    data[0] = normalized_image_array
    # self.model.run_eagerly = False
    # Run the inference
    print("Before model*****************")
    prediction = model.predict(data)
    print("After model*****************")
    # print(prediction)
    # print(prediction.shape)
    # print(type(prediction))
    # print(prediction[0, 2])
    for alpha in range(26):
        if prediction[0, alpha] >= 0.8:
            print(string.ascii_uppercase[alpha])
            # return string.ascii_uppercase[alpha]
            break
    print("After Classification*****************")
    # if os.path.exists(loc):
    #     os.remove(loc)
    # else:
    #     print("The file does not exist")
    # return