Commit a8fd4b8c authored by Manoj Kumar

Merge branch 'master' into 'amashi_dev'

# Conflicts:
#   reveng/processInput.py
parents 5afa6014 3604f065
@@ -35,6 +35,8 @@ def home():
 @app.route('/cam')
 def openCam():
     os.system('python dataq\detect.py')
+    return render_template('homePage.html')
 # END DATAQ
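Worth noting for reviewers: `os.system` blocks until detect.py exits, so the added `return render_template(...)` only fires once the camera loop ends. A minimal non-blocking sketch, assuming the same `app` object and script path (not part of this commit):

```python
# Sketch only: launch the detector in the background so the request returns
# immediately. `app` is the existing Flask application from this module.
import subprocess
import sys

from flask import render_template

@app.route('/cam')
def openCam():
    subprocess.Popen([sys.executable, 'dataq/detect.py'])  # returns at once
    return render_template('homePage.html')
```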
@@ -107,48 +107,51 @@ try:
             tensor_name)
     while True:
+        # session for detect.py
+        with sess.as_default():
             ret, image_np = cap.read()
             temp_image = image_np.copy()
             # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
             image_np_expanded = np.expand_dims(image_np, axis=0)
             # Actual detection.
             output_dict = run_inference_for_single_image(image_np, detection_graph)
             # Visualization of the results of a detection.
             vis_util.visualize_boxes_and_labels_on_image_array(
                 image_np,
                 output_dict['detection_boxes'],
                 output_dict['detection_classes'],
                 output_dict['detection_scores'],
                 category_index,
                 instance_masks=output_dict.get('detection_masks'),
                 use_normalized_coordinates=True,
                 line_thickness=8)

             score = round(100 * output_dict['detection_scores'][0])
             # send the request to the translation component here:
             # a POST request carrying a hand picture
             print(category_index)
             if score > 80:
                 #print(image_np_expanded.shape)
                 img = Image.fromarray(temp_image)
                 #print(image_np.shape)
                 img.save(os.getcwd() + "\\" + "capture.jpg")
                 # array_for_translation = resize(image_np,(224,224,3))
                 # print(array_for_translation.shape)
                 translation.sign_predict(os.getcwd() + "\\" + "capture.jpg")
                 # sign_predict(image_np) <-- still under development;
                 # waiting for the API on that component to be built
             # end send request

             ## Press Q to close the camera
             cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))

             if cv2.waitKey(25) & 0xFF == ord('q'):
                 cap.release()
                 cv2.destroyAllWindows()
                 break
 except Exception as e:
     print(e)
     cap.release()
\ No newline at end of file
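The point of the new `with sess.as_default():` block: with eager execution disabled, a loaded Keras model can only be run under the graph and session it was created in, so detect.py re-enters that session on every frame. A minimal sketch of the pairing (model path illustrative):

```python
# Sketch of the graph/session pairing this commit relies on (TF1-compat style).
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    sess = tf.compat.v1.Session()
    with sess.as_default():
        # Path is illustrative, not taken from the repo.
        model = tf.keras.models.load_model('keras_model.h5')

def predict(batch):
    # Predictions must re-enter the graph/session the model was loaded under;
    # running under a different default raises "not an element of this graph".
    with graph.as_default(), sess.as_default():
        return model.predict(batch)
```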
@@ -39,15 +39,7 @@
     <!--Favicon-->
     <link rel="shortcut icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
     <link rel="icon" href="static/images/EasyTalkFavicon_v1.png" type="image/x-icon">
-    <script>
-        function goPython(){
-            $.ajax({
-                url: ""
-            }).done(function() {
-                alert('finished python script');;
-            });
-        }
-    </script>
 </head>
 <body>
@@ -115,7 +107,9 @@
     <div class="col-12">
         <form id="cameraForm" class="justify-content-center">
             <div class="text-center">
-                <button type="submit" class="btn btn-primary mb-2" id="userButtonInput" style="height: 75%;" onclick="goPython()">Open My Webcamera</button>
+                <a href="http://localhost:3000/cam">
+                    open cam
+                </a>
             </div>
         </form>
     </div>
from textvoice.textVoiceAs import TextVoiceAs
\ No newline at end of file
# -*- coding: utf-8 -*-
import functools
import os
import sys

from .OneGramDist import OneGramDist
from . import spell
from . import temp
from . import speech


class TextVoiceAs:
    def genarate(self, letters):
        sentence = ''.join(sys.argv[1:]).lower()
        if not sentence:
            sentence = letters
        # onegrams = OneGramDist(filename='count_10M_gb.txt')
        # onegrams = OneGramDist(filename='count_1M_gb.txt.gz')
        onegrams = OneGramDist(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'count_1w.txt'))
        onegram_fitness = functools.partial(temp.onegram_log, onegrams)
        words = temp.segment(sentence, word_seq_fitness=onegram_fitness)
        output = [spell.correction(word) for word in words]
        speech.inputs(' '.join(output))
        print(' '.join(output))
        return
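For context, `genarate` segments an unspaced letter string with a one-gram language model, spell-corrects each word, and hands the sentence to the speech component. A hedged usage sketch ('thecatsat' is an illustrative input; note that command-line arguments, if present, take precedence over `letters`):

```python
# Assumes count_1w.txt sits next to the module and speech.inputs() is available.
from textvoice.textVoiceAs import TextVoiceAs

tv = TextVoiceAs()
tv.genarate('thecatsat')  # expected to print roughly: the cat sat
```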
-0 one
-1 two
-2 five3
-3 face4
-4 three5
+0 A
+1 B
+2 C
+3 D
+4 E
+5 F
+6 G
+7 H
+8 I
+9 J
+10 K
+11 L
+12 M
+13 N
+14 O
+15 P
+16 Q
+17 R
+18 S
+19 T
+20 U
+21 V
+22 W
+23 X
+24 Y
+25 Z
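The labels file now maps the 26 class indices directly to letters A–Z. A small sketch of reading it into an index → letter dict (the file's location is an assumption, not stated by the diff):

```python
# Path is assumed; the diff does not say where labels.txt lives.
labels = {}
with open('labels.txt') as fh:
    for line in fh:
        index, letter = line.split(maxsplit=1)
        labels[int(index)] = letter.strip()

print(labels[0], labels[25])  # -> A Z
```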
@@ -5,25 +5,38 @@
 # * @since 2020-08-15

 # imports
+import tensorflow as tf
 import tensorflow.keras
 from PIL import Image, ImageOps
 import numpy as np
 import os
+import sys
+sys.path.append('.')
+from textvoice.textVoiceAs import TextVoiceAs
 import string

 tensorflow.compat.v1.disable_eager_execution()

 class Translation:
-    PATH = os.path.dirname(__file__)
+    LOCATION = os.path.dirname(__file__)
+    Letters_ = ""
+    Fletter_ = ""
+    Sletter_ = ""
+    Tex = TextVoiceAs()

     # def sign_predict():
     # Disable scientific notation for clarity
     np.set_printoptions(suppress=True)
     # Load the model
-    model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
+    g2 = tf.Graph()
+    # with g2.as_default():
+    sess2 = tf.compat.v1.Session()
+    with sess2.as_default():
+        model_p = tensorflow.keras.models.load_model(LOCATION + '/converted_keras/keras_model.h5')
+        model_p._make_predict_function()
+        # print(model_p.summary())
     #model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')

 # Create the array of the right shape to feed into the keras model
@@ -34,48 +47,55 @@ class Translation:
     #data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)

     def sign_predict(self, loc):
+        #with self.g2.as_default():
+        with self.sess2.as_default():
             # Replace this with the path to your image
             # 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
             image = Image.open(loc)

             # resize the image to a 224x224 with the same strategy as in TM2:
             # resizing the image to be at least 224x224 and then cropping from the center
             size = (224, 224)
             #size = (640, 480)
             image = ImageOps.fit(image, size, Image.ANTIALIAS)

             # turn the image into a numpy array
             image_array = np.asarray(image)
             #print(image_array)

             # display the resized image
             #image.show()

             # Normalize the image
             normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

             # Load the image into the array
             self.data[0] = normalized_image_array

             # run the inference
-            print("Before model*****************")
-            prediction = self.model.predict(self.data)
-            print("After model*****************")
-            #print(prediction)
+            #sess2.run(tf.global_variables_initializer())
+            prediction = self.model_p.predict(self.data)
             #print(prediction.shape)
             #print(type(prediction))
             #print(prediction[0, 2])
             for alpha in range(26):
                 if prediction[0, alpha] >= 0.8:
-                    print(string.ascii_uppercase[alpha])
-                    #return string.ascii_uppercase[alpha]
-                    break
-
-            print("After Classification*****************")
+                    self.Sletter_ = string.ascii_lowercase[alpha]
+                    if alpha == 25:
+                        self.Tex.genarate(self.Letters_)
+                        print(self.Letters_)
+                        self.Letters_ = ""
+                        self.Sletter_ = ""
+                        self.Fletter_ = ""
+                    if self.Sletter_ != self.Fletter_:
+                        print(self.Sletter_)
+                        self.Letters_ = self.Letters_ + self.Sletter_
+                    #break
+                    self.Fletter_ = self.Sletter_
             # if os.path.exists(loc):
             #     os.remove(loc)
             # else:
             #     print("The file does not exist")
         return
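To make the new control flow easier to review: `Sletter_` holds the current letter, `Fletter_` the previous one (so consecutive repeats of the same sign are dropped), `Letters_` accumulates the word, and class 25 ('z') acts as a flush sentinel that sends the buffer to `TextVoiceAs.genarate`. The same logic distilled into a standalone sketch (`flush` stands in for `Tex.genarate`):

```python
import string

letters, previous = '', ''

def on_detection(alpha, flush=print):
    """Mirror of the accumulation above; state kept in module globals."""
    global letters, previous
    current = string.ascii_lowercase[alpha]
    if alpha == 25:          # 'z' flushes the buffered word
        flush(letters)
        letters, previous = '', ''
        return
    if current != previous:  # skip consecutive repeats of one sign
        letters += current
    previous = current

for a in [2, 2, 0, 1, 25]:   # c, c, a, b, then flush
    on_detection(a)          # prints: cab
```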
 from trans import Translation
-import TextVoice
+import sys
+sys.path.append('.')
+#from textvoice.textVoiceAs import TextVoiceAs

-if __name__ == '__main__':
+def transMain():
     t1 = Translation()
-    Tex = TextVoice()
-    location1 = 'C:/Users/user/Documents/GitHub/2020_077/test1.jpg'
-    location2 = 'C:/Users/user/Documents/GitHub/2020_077/test2.jpg'
-    location3 = 'C:/Users/user/Documents/GitHub/2020_077/test3.jpg'
-    location4 = 'C:/Users/user/Documents/GitHub/2020_077/test4.jpg'
-    location5 = 'C:/Users/user/Documents/GitHub/2020_077/test55.jpg'
-    Tex.getInput()
-    # t1.sign_predict(location2)
+    #Tex = TextVoiceAs()
+    location1 = 'C:/Users/user/Documents/GitHub/2020_077/a.jpg'
+    location2 = 'C:/Users/user/Documents/GitHub/2020_077/c.jpg'
+    location3 = 'C:/Users/user/Documents/GitHub/2020_077/d.jpg'
+    location4 = 'C:/Users/user/Documents/GitHub/2020_077/e.jpg'
+    location5 = 'C:/Users/user/Documents/GitHub/2020_077/g.jpg'
+    location6 = 'C:/Users/user/Documents/GitHub/2020_077/i.jpg'
+    location7 = 'C:/Users/user/Documents/GitHub/2020_077/n.jpg'
+    location8 = 'C:/Users/user/Documents/GitHub/2020_077/o.jpg'
+    location9 = 'C:/Users/user/Documents/GitHub/2020_077/y.jpg'
+    location10 = 'C:/Users/user/Documents/GitHub/2020_077/z.jpg'
+    #Tex.genarate('cab')
+    #getInput('t1.sign_predict(location1)')
+    t1.sign_predict(location7)
+    t1.sign_predict(location6)
+    t1.sign_predict(location2)
+    t1.sign_predict(location4)
+    t1.sign_predict(location3)
+    t1.sign_predict(location1)
+    t1.sign_predict(location9)
+    t1.sign_predict(location10)
     # t1.sign_predict(location3)
-    # t1.sign_predict(location4)
-    # t1.sign_predict(location5)
-    # t1.sign_predict(location2)
-    # t1.sign_predict(location3)
-    # t1.sign_predict(location4)
-    # t1.sign_predict(location5)
-    # t1.sign_predict(location1)
-    # t1.sign_predict(location4)
+    #Tex.genarate(t1.sign_predict(location5))
+    t1.sign_predict(location5)
+    t1.sign_predict(location8)
+    t1.sign_predict(location8)
+    t1.sign_predict(location3)
+    t1.sign_predict(location1)
+    t1.sign_predict(location9)
+    t1.sign_predict(location10)
+
+transMain()
\ No newline at end of file
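One review note: `transMain()` now runs at import time. The pre-merge `__main__` guard avoided that side effect; a sketch of restoring it:

```python
# Guarding the call keeps `import` side-effect free (assumption: no other
# module relies on predictions running at import time).
if __name__ == '__main__':
    transMain()
```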
# * The trans.py file is created to predict signs with images
# *
# * @author K.Bavanraj | IT17032766
# * @version 1.0
# * @since 2020-08-15

# imports
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os
import string

tensorflow.compat.v1.disable_eager_execution()


def sign_predict(loc):
    PATH = os.path.dirname(__file__)

    # Disable scientific notation for clarity
    np.set_printoptions(suppress=True)

    # Load the model
    model = tensorflow.keras.models.load_model(PATH + '/converted_keras/keras_model.h5')
    #model = tensorflow.keras.models.load_model('C:/Users/user/Documents/A to Y 1000 dataset/keras_model.h5')

    # Create the array of the right shape to feed into the keras model.
    # The 'length' or number of images you can put into the array is
    # determined by the first position in the shape tuple, in this case 1.
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    #data = np.ndarray(shape=(1, 480, 640, 3), dtype=np.float32)

    # Replace this with the path to your image
    # 'C:/Users/user/Documents/GitHub/2020_077/opencv_frame_0.png'
    image = Image.open(loc)

    # resize the image to a 224x224 with the same strategy as in TM2:
    # resizing the image to be at least 224x224 and then cropping from the center
    size = (224, 224)
    #size = (640, 480)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    # turn the image into a numpy array
    image_array = np.asarray(image)

    # display the resized image
    # image.show()

    # Normalize the image
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

    # Load the image into the array
    data[0] = normalized_image_array
    #self.model.run_eagerly = False

    # run the inference
    print("Before model*****************")
    prediction = model.predict(data)
    print("After model*****************")
    #print(prediction[0, 2])

    for alpha in range(26):
        if prediction[0, alpha] >= 0.8:
            print(string.ascii_uppercase[alpha])
            # return string.ascii_uppercase[alpha]
            break

    print("After Classification*****************")
    # if os.path.exists(loc):
    #     os.remove(loc)
    # else:
    #     print("The file does not exist")
    # return
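The preprocessing here maps pixels from 0–255 into roughly [-1, 1] via `(x / 127.0) - 1`, the range this Teachable Machine export apparently expects. A standalone sketch of the same steps; note `Image.ANTIALIAS` was removed in Pillow 10, with `Image.Resampling.LANCZOS` as the replacement:

```python
import numpy as np
from PIL import Image, ImageOps

def preprocess(path, size=(224, 224)):
    # Center-crop/resize, then scale 0..255 to roughly [-1.0, 1.008].
    image = ImageOps.fit(Image.open(path), size, Image.Resampling.LANCZOS)
    array = np.asarray(image).astype(np.float32)
    return np.expand_dims(array / 127.0 - 1, axis=0)  # shape (1, 224, 224, 3)
```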