Commit 9561c00e authored by Manoj Kumar

Manoj dev

parent 285412d7
......@@ -12,4 +12,7 @@ dataq/mask_rcnn_coco.h5
/dataq/training
*.record
/dataq/faster_rcnn_inception_v2_coco_2018_01_28
/dataq/inference_graph
\ No newline at end of file
/dataq/inference_graph
/dataset
/dataq/__pycache__
/textvoice/__pycache__
\ No newline at end of file
{
"type": "service_account",
"project_id": "storage-9eed9",
"private_key_id": "e01388cf9d88ab888f72d3cbd76fc2e1a7f421e7",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDh0qJNzyDw2MO4\nmBVANrJexrlavoLO1gZHaoPzzCoduJOEhkWP1E5/LqEulc1fiYkHwM5GCvnp0I48\n6/t/RLjmkoEBNQR4O/RvCLFNk6R6xwcC+IEU8i1gscorU1oouPYQhVUrjRJu8t6e\nOjmNW3jYwpl/2pM3148QBzNH+WctEGG8AH8qN4G0e9ky4QyArLxEOkyYIrlkI22Z\nj1yILJJsvZSRACXk3836SzyFtqpXZ2RWrKKcNY4bTWpytyhhipjnvrv2OAdBVCmi\nXRb7lT63UUu+h1vlZopTwXAALB6uJ7veiQ+fev7hL22rorhkx4TKk//EFdVgH4t6\n55f1n8yXAgMBAAECggEAGNY506T8UoEExcC0VaOuao4gxMI/UNBpUGF+scm6uBoc\n326FKBSJraNLRvxi4icWUyv5g+qhIf42Jq0ttAmy7GkglyllnHO7ppqmJYofhQFu\n2/zI9azNIN1tBubb+66cIaUIgQm7BUjhAzYULl/pIq4Eg/NeTb3/lhu0oC3mEyaN\nM34USNkchE3cdrTiaWQMXOp9+h/FNVvbj3YwQmotOhXd4iZVFPJNz41maT3wj6mT\nmMQdJqUO98e/q1zKA6epnwUndYVgfDZYCnadEvYy0gU6wZR8w1ihBMXnRE5Mgs+j\nKFmi/Ye98110d0XDpPzq9m70Dt5DUXvQ+mJbbhLigQKBgQD0tjMZIFA4IDPWfptf\ndDzHz7g0K6RyBj5WquPom+xAZismraeZYpLeIRnh+M0Vw0+KZMKdCjIcaBnBK3s0\nCZ3XuZOMJ0FM7g+FIybZOVq2VVTxOZLTx3wVQ3Jq+D5BYZHCO1/y54+FTNe9fFmt\nc3tMbKgQtW+TuSXfv+u1p8bTzwKBgQDsPWAEViR3++dhKCYZFCna2WogNyg2IwrY\nlxiRc2j2q0G84BIcut0tJ3+PuxIlkb6E6EOYpq0nXtx0CSLCmutuQvQeqmxtjdca\nGPkrhL2YSUPIzbgSNZdWL5ni22hLgG8fJI2TOs5ewtGV4ICg3WOFPyrsAll2yxch\n/et7byaEuQKBgDuQXb+v7da7f+Klzovdby3zkZKhTNW0T1gpIHBuA4NpWvd/LVxY\ncgc/Z11pggHaGHJHNFelM61WaW5rdwGjlZf8w71JkMt45q4xcDNPkpTvEE3nBaYm\n2D0UvbTr17tyVFhCu4gUdQqtOZYVvn5JralciUwH3bDHUEC2JlmBDRRhAoGBAI1W\n4x11FHicxABC7xhOEgSX04DWvz40lQakz9QmAoSiTcBdJzj9K9saKJ2rxN2pZx1J\ngTfRcQN/I9JgrOVVnsnnUPsP57NcC3fXvSwmTPDR1LZNLnXmjlbIP/+UWb/iZZly\nWyndIx9a294Z5ZtZGXPdKSbu+Ouz8nEp5DuDcr4xAoGAM8CvtzQL+J8Dc9Tt82SO\noV0PRrWNy6ISuJ3zcrZklabO7zaJrrvf9WRac557mPPkCX9aNxSqduivS2NIPEp1\nXGPqkvBRPRJKq1CnvifFZAnFPuiqmR/gHqGot9x3Z2ikoEZLa4rSUoVIU8M5AQrW\nNvp60LjIlEaojktgfAdWC7o=\n-----END PRIVATE KEY-----\n",
"client_email": "firebase-adminsdk-sstjw@storage-9eed9.iam.gserviceaccount.com",
"client_id": "100932427645847514490",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-sstjw%40storage-9eed9.iam.gserviceaccount.com"
}
# imports
import os
import sys
import json
import shutil
from xml_to_csv import converter

PATH = os.path.dirname(__file__)
FILEPATH = os.path.join(PATH, 'detect.py')

# Create the train/test image directories
if not os.path.exists('dataq\\image'):
    os.makedirs('dataq\\image\\train')
    os.makedirs('dataq\\image\\test')
#XML to CSV
converter()
# Create the label map (written once, when the training directory is first created)
if not os.path.exists('dataq\\training'):
    os.makedirs('dataq\\training')
    with open("dataq\\training\\labelmap.pbtxt", "w") as f:
        f.write("item {\n"
                "  id: 1\n"
                "  name: 'hand'\n"
                "}\n")
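For reference, the block above writes this labelmap.pbtxt, which detect.py later reads through label_map_util.create_category_index_from_labelmap:

item {
  id: 1
  name: 'hand'
}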
#Move RCNN Config
shutil.copyfile('dataq\\faster_rcnn_inception_v2_pets.config','dataq\\training\\faster_rcnn_inception_v2_pets.config')
# Edit the FRCNN config to fit this project; the absolute paths below must be
# updated to match your machine.
with open("dataq\\training\\faster_rcnn_inception_v2_pets.config") as frcnn_config:
    configFile = frcnn_config.readlines()
print(configFile[105])  # sanity check: this should be the fine_tune_checkpoint line
configFile[105] = ' fine_tune_checkpoint:"D:/manobran technologies/SLIIT 4th/cdap/2020_077/dataq/object_detection/faster_rcnn_inception_v2_coco_2018_01_28/model.ckpt"\n'
configFile[122] = ' input_path: "D:/manobran technologies/SLIIT 4th/cdap/2020_077/dataq/train.record"\n'
configFile[124] = ' label_map_path: "D:/manobran technologies/SLIIT 4th/cdap/2020_077/dataq/training/labelmap.pbtxt"\n'
configFile[134] = ' input_path: "D:/manobran technologies/SLIIT 4th/cdap/2020_077/dataq/test.record"\n'
configFile[136] = ' label_map_path: "D:/manobran technologies/SLIIT 4th/cdap/2020_077/dataq/training/labelmap.pbtxt"\n'
with open("dataq\\training\\faster_rcnn_inception_v2_pets.config", "w") as frcnn_config:
    frcnn_config.writelines(configFile)
#start training
#object detection / legacy / train.py
\ No newline at end of file
# Launch the detector (FILEPATH points at detect.py)
exec(open(FILEPATH).read())
\ No newline at end of file
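Indexing configFile by absolute line number (105, 122, 124, 134, 136) silently corrupts the config if the template ever gains or loses a line. A hedged, name-keyed alternative is sketched below; patch_field is a hypothetical helper (not part of this repo), and it assumes the stock config lists the train input_path before the eval one. CKPT_PATH, TRAIN_RECORD, TEST_RECORD and LABELMAP stand in for the absolute paths above.

import re

def patch_field(text, field, values):
    """Replace successive `field: "..."` occurrences with `values`, in order.
    `values` must cover every occurrence of the field in the config."""
    it = iter(values)
    return re.sub(r'(%s\s*:\s*)"[^"]*"' % field,
                  lambda m: m.group(1) + '"%s"' % next(it), text)

with open("dataq\\training\\faster_rcnn_inception_v2_pets.config") as f:
    text = f.read()
text = patch_field(text, 'fine_tune_checkpoint', [CKPT_PATH])
text = patch_field(text, 'input_path', [TRAIN_RECORD, TEST_RECORD])
text = patch_field(text, 'label_map_path', [LABELMAP, LABELMAP])
with open("dataq\\training\\faster_rcnn_inception_v2_pets.config", "w") as f:
    f.write(text)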
......@@ -5,7 +5,7 @@ import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
......@@ -27,7 +27,6 @@ from object_detection.utils import visualization_utils as vis_util
MODEL_NAME = 'D:\manobran technologies\SLIIT 4th\cdap\\2020_077\dataq\inference_graph'
MODEL_FILE = MODEL_NAME + '.tar.gz'
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '\\frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('D:\manobran technologies\SLIIT 4th\cdap\\2020_077\dataq\\training','labelmap.pbtxt')
......@@ -44,20 +43,19 @@ with detection_graph.as_default():
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
def run_inference_for_single_image(image, graph):
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
......@@ -67,7 +65,7 @@ def run_inference_for_single_image(image, graph):
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
# outputs are float32 numpy arrays
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
......@@ -77,10 +75,7 @@ def run_inference_for_single_image(image, graph):
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
#def get_score(array_score):
import cv2
cap = cv2.VideoCapture(0)
try:
with detection_graph.as_default():
......@@ -89,10 +84,12 @@ try:
ops = tf.compat.v1.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
key_detections = [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
]
for key in key_detections:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(
......@@ -120,10 +117,10 @@ try:
###
# A POST request carrying the detected hand picture will be sent to the translate component.
if score > 80:
print('Sending to translate component')
print(image_np_expanded)
#waiting for the API on that component to be built
# end send request
cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
cap.release()
......
const express = require("express");
const pythonShell = require("python-shell");

const app = express();
app.use(express.json()); // needed so req.body is populated for JSON posts
const port = process.env.PORT || 4100;

app.post("/images", (req, res) => {
  const image = req.body.file;
  const options = {
    args: [image],
  };
  pythonShell.PythonShell.run("detect.py", options, function (err, data) {
    if (err) {
      console.log(err);
      return res.status(500).json({ error: "detect.py failed" });
    }
    res.status(200).json({
      data: data,
    });
  });
});

app.listen(port, () => console.log(`Server running on port ${port} 🔥`));
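python-shell buffers everything the script prints to stdout and hands it to the callback as data, so detect.py only needs to print its result. A minimal sketch of the Python side of that contract, assuming the image arrives as the first CLI argument; the JSON shape here is illustrative, not the repo's actual protocol.

# Hypothetical receiving end of the /images route above: python-shell passes
# options.args as sys.argv[1:] and captures whatever we print to stdout.
import json
import sys

if __name__ == "__main__":
    image = sys.argv[1] if len(sys.argv) > 1 else None
    # ... run the detector on `image` here ...
    print(json.dumps({"image": image, "detections": []}))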
{
"name": "dataq",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "http://gitlab.sliit.lk/2020_077/2020_077/dataq"
},
"author": "Manoj",
"license": "ISC",
"dependencies": {
"express": "^4.17.1",
"python-shell": "^2.0.1"
}
}
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
#from tf.keras.Models import

# This is needed since the notebook is stored in the object_detection folder.
if StrictVersion(tf.__version__) < StrictVersion('1.15.3'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.15.3 or later!')

sys.path.append('..')
from object_detection.utils import ops as utils_ops  # used for mask reframing below
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

MODEL_NAME = 'D:\\manobran technologies\\SLIIT 4th\\cdap\\2020_077\\dataq\\inference_graph'
MODEL_FILE = MODEL_NAME + '.tar.gz'
#DOWNLOAD_BASE = "http://download.tensorflow.org/models/object_detection/"
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '\\frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('D:\\manobran technologies\\SLIIT 4th\\cdap\\2020_077\\dataq\\training', 'labelmap.pbtxt')

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.compat.v2.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

# NOTE: tensor_dict and sess are module-level names created in the capture
# block below; this function is only called after they exist.
def run_inference_for_single_image(image, graph):
    if 'detection_masks' in tensor_dict:
        # The following processing is only for a single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframing translates the masks from box coordinates to image coordinates.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
    image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
    # Run inference
    output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
    # All outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict

#def get_score(array_score):

import cv2

cap = cv2.VideoCapture(0)
try:
    with detection_graph.as_default():
        with tf.compat.v1.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.compat.v1.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(tensor_name)
            while True:
                ret, image_np = cap.read()
                # Expand dimensions since the model expects images of shape [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                output_dict = run_inference_for_single_image(image_np, detection_graph)
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    output_dict['detection_boxes'],
                    output_dict['detection_classes'],
                    output_dict['detection_scores'],
                    category_index,
                    instance_masks=output_dict.get('detection_masks'),
                    use_normalized_coordinates=True,
                    line_thickness=8)
                # detection_scores is sorted descending, so index 0 is the best box.
                score = round(100 * output_dict['detection_scores'][0])
                # Send the detected hand frame to Bavan's translate component
                # (a POST request, once that component's API is built).
                if score > 90:
                    print('Sending to translate component')
                cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cap.release()
                    cv2.destroyAllWindows()
                    break
except Exception as e:
    print(e)
    cap.release()
\ No newline at end of file
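In the TF Object Detection API, detection_scores comes back sorted in descending order, which is why the loop above can read the top score as output_dict['detection_scores'][0]. A small hedged helper that generalizes the single-score check; the function name and default threshold are ours, not the repo's.

import numpy as np

def count_confident(output_dict, min_score=0.9):
    """Count detections at or above min_score; scores arrive sorted descending."""
    scores = np.asarray(output_dict['detection_scores'])
    return int(np.sum(scores >= min_score))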
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
#from tf.keras.Models import

# This is needed since the notebook is stored in the object_detection folder.
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')

sys.path.append('..')
from object_detection.utils import ops as utils_ops  # used for mask reframing below
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

MODEL_NAME = 'inference_graph'
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = 'training/labelmap.pbtxt'

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.compat.v2.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

# NOTE: tensor_dict and sess are module-level names created in the capture
# block below; this function is only called after they exist.
def run_inference_for_single_image(image, graph):
    if 'detection_masks' in tensor_dict:
        # The following processing is only for a single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframing translates the masks from box coordinates to image coordinates.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
    image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
    # Run inference
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: np.expand_dims(image, 0)})
    # All outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict

import cv2

cap = cv2.VideoCapture(0)
try:
    with detection_graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            while True:
                ret, image_np = cap.read()
                # Expand dimensions since the model expects images of shape [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                output_dict = run_inference_for_single_image(image_np, detection_graph)
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    output_dict['detection_boxes'],
                    output_dict['detection_classes'],
                    output_dict['detection_scores'],
                    category_index,
                    instance_masks=output_dict.get('detection_masks'),
                    use_normalized_coordinates=True,
                    line_thickness=8)
                cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cap.release()
                    cv2.destroyAllWindows()
                    break
except Exception as e:
    print(e)
    cap.release()
\ No newline at end of file
......@@ -10,7 +10,14 @@
const socket = io.connect('http://localhost:5000');
socket.on('image',(data)=>{
console.log('data',data);
})
});
function callGoogle(){
window.open('/google')
}
</script>
<button onclick="callGoogle()">
google
</button>
</body>
</html>
\ No newline at end of file
......@@ -55,27 +55,29 @@ def getImagesCommonWord(message):
img = pigm.open(images)
sendingToGIF.append(img)
# # img.show()
# print(type(img))
generateGIF(sendingToGIF)
#print(type(img))
return generateGIF(sendingToGIF)
def getImagesRareWord(message):
images = store.child().list_files()
imageRes = []
for letter in message:
for i in images:
if i.name == letter + ".jpg":
print ("image name =" + i.name)
imageRes.append(i)
sendingToGIF =[]
for i in images:
if i.name == message + ".jpg":
print ("image name =" + i.name)
imageRes.append(i)
for i in imageRes:
url = i.generate_signed_url(datetime.timedelta(300), method = 'GET')
response = requests.get(url)
images = io.BytesIO(response.content)
img = pigm.open(images)
sendingToGIF.append(img)
sendingToGIF=img
# # img.show()
# print(type(img))
generateGIF(sendingToGIF)
print("RARE EKE "+ str(type(sendingToGIF)))
return sendingToGIF
#generateGIF(sendingToGIF)
def getImagesCommonSentence(sentence):
sendingToGIF =[]
......@@ -87,7 +89,6 @@ def getImagesCommonSentence(sentence):
print ("image name =" + i.name)
imageRes.append(i)
for i in imageRes:
url = i.generate_signed_url(datetime.timedelta(300), method = 'GET')
response = requests.get(url)
......
......@@ -9,6 +9,9 @@ def generateGIF(images):
imageio.mimwrite(os.path.join(PATH + "\\output\\" + gifName + '.gif'), images, duration = 0.5)
return sendGIF()
# def generateGIFRare(images):
# RARE_GIF = imageio.mimwrite(os.path.join(PATH + "\\output\\" + gifName + '.gif'), images, duration = 0.5)
# return RARE_GIF
def sendGIF():
x = imageio.mimread(os.path.join(PATH + "\\output\\" + gifName + '.gif'))
print(x)
......
......@@ -2,7 +2,7 @@ import json,os
from core.getImages import getImagesCommonWord
from core.getImages import getImagesRareWord
from core.gifMaker import generateGIF
REVENG = os.path.dirname(os.path.abspath(__file__))
jsonPath = os.path.join(REVENG, 'common.json')
......@@ -23,5 +23,8 @@ def processWord(message):
for letter in message:
lower_case_word = letter.lower()
wordArr.append(lower_case_word)
getImagesRareWord(list(wordArr))
\ No newline at end of file
imageSetArray = []
for item in wordArr:
imageSetArray.append(getImagesRareWord(item))
print(type(imageSetArray[0]))
generateGIF(imageSetArray)
\ No newline at end of file
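Net effect of this change: the rare-word path now produces one frame per letter. getImagesRareWord(item) returns a single PIL image, the loop collects them into imageSetArray, and generateGIF assembles the animation. A condensed sketch of the same flow (the wrapper name is ours, the called functions are the repo's):

def rare_word_to_gif(word):
    # One Firebase-stored letter image per character, then one GIF.
    frames = [getImagesRareWord(letter.lower()) for letter in word]
    return generateGIF(frames)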
......@@ -38,7 +38,7 @@
</div>
</form>
<!--<p id="req"></p>-->
<img src="{{ user_image }}" alt="User Image" />
<!-- <img src="{{ user_image }}" alt="User Image" /> -->
</div>
<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
......
......@@ -10,11 +10,8 @@ const path = require('path');
const server = require('http').Server(app);
const io = require('socket.io')(server);
const cv = require('opencv4nodejs');
const port = process.env.PORT || 5000;
const video = new cv.VideoCapture(0);
app.get("/", (req, res) => {
res.sendFile(path.join(__dirname,'index.html'));
......@@ -28,39 +25,40 @@ setInterval(()=>{
server.listen(port, () => {
console.log(`Server running on port ${port}`);
});
// app.get('/test', function (req, res) {
// console.log('Server working');
// res.status(200).json({
// "server_status": "OK"
// })
app.get('/test', function (req, res) {
console.log('Server working');
res.status(200).json({
"server_status": "OK"
})
// })
})
// //version 01
// app.get('/dalembert', callD_almbert);
//version 01
app.get('/google', callD_almbert);
// function callD_almbert(req, res) {
// var spawn = require('child_process').spawn;
// console.log('Creating spawn');
function callD_almbert(req, res) {
var spawn = require('child_process').spawn;
console.log('Creating spawn');
// var filePath = "./test-python-module/google.py";
// var process = spawn('python', [filePath,
// "https://www.google.com"
// ]);
var filePath = "./dataq/detect.py";
var process = spawn('python', [filePath,
"https://www.google.com"
]);
// console.log('request ok!');
console.log('request ok!');
// process.stdout.on('data',function(data){
// res.send(data.toString());
// });
// }
process.stdout.on('data',function(data){
res.send(data.toString());
});
}
// // end of version 01
......
......@@ -8,7 +8,7 @@ import spell
import temp
import speech
import os
if __name__ == '__main__':
......@@ -17,7 +17,7 @@ if __name__ == '__main__':
sentence = 'welcomebacktohoem'
# onegrams = OneGramDist(filename='count_10M_gb.txt')
#onegrams = OneGramDist(filename='count_1M_gb.txt.gz')
onegrams = OneGramDist(filename='count_1w.txt')
onegrams = OneGramDist(filename= os.path.join(os.path.dirname(os.path.abspath(__file__)),'count_1w.txt'))
onegram_fitness = functools.partial(temp.onegram_log, onegrams)
#print(sentence)
words = temp.segment(sentence, word_seq_fitness=onegram_fitness)
......
......@@ -4,12 +4,13 @@ Created on Thu Aug 20 14:14:17 2020
@author: nanthu
"""
import re
import re,os
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
PATH = os.path.dirname(os.path.abspath(__file__))
WORDS = Counter(words(open('big.txt').read()))
WORDS = Counter(words(open(os.path.join(PATH,'big.txt')).read()))
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
......
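The words()/WORDS/P() trio above is the unigram model from Norvig's classic spell corrector: P(word) is the word's relative frequency in big.txt. A hedged completion of how that corrector typically uses P; this follows Norvig's reference implementation (transpositions omitted for brevity) and may differ from what the repo's file defines below the hunk.

def known(words):
    "The subset of `words` that appear in the WORDS counter."
    return set(w for w in words if w in WORDS)

def edits1(word):
    "All edits that are one deletion, replacement, or insertion away."
    letters = 'abcdefghijklmnopqrstuvwxyz'
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + replaces + inserts)

def candidates(word):
    "Known word, else known one-edit variants, else the word itself."
    return known([word]) or known(edits1(word)) or [word]

def correction(word):
    "Most probable spelling correction, scored by the unigram model P."
    return max(candidates(word), key=P)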
......@@ -13,8 +13,8 @@ from skimage.transform import resize
import pandas as pd
import numpy as np
train_dir = 'C:/Users/user/Desktop/REsearch data/asl_dataset_2/asl_dataset/asl_alphabet_train/asl_alphabet_train/'
test_dir = 'C:/Users/user/Desktop/REsearch data/asl_dataset_2/asl_dataset/asl_alphabet_test/asl_alphabet_test/'
train_dir = 'D:\manobran technologies\SLIIT 4th\cdap\\2020_077\dataset\\asl_dataset\\asl_alphabet_train\\asl_alphabet_train'
test_dir = 'D:\manobran technologies\SLIIT 4th\cdap\\2020_077\dataset\\asl_dataset\\asl_alphabet_test\\asl_alphabet_test'
imageSize=50
......@@ -136,7 +136,7 @@ plotHistogram(X_train[1])
from glob import glob
import random
multipleImages = glob('C:/Users/user/Desktop/REsearch data/asl_dataset_2/asl_dataset/asl_alphabet_train/asl_alphabet_train/A/**')
multipleImages = glob('D:\manobran technologies\SLIIT 4th\cdap\\2020_077\dataset\\asl_dataset\\asl_alphabet_train\\asl_alphabet_train\A\**')
def plotThreeImages(images):
r = random.sample(images, 3)
plt.figure(figsize=(16,16))
......@@ -150,7 +150,7 @@ def plotThreeImages(images):
plotThreeImages(multipleImages)
print("A")
multipleImages = glob('C:/Users/user/Desktop/REsearch data/asl_dataset_2/asl_dataset/asl_alphabet_train/asl_alphabet_train/A/**')
multipleImages = glob('D:\manobran technologies\SLIIT 4th\cdap\\2020_077\dataset\\asl_dataset\\asl_alphabet_train\\asl_alphabet_train\A\**')
i_ = 0
plt.rcParams['figure.figsize'] = (10.0, 10.0)
plt.subplots_adjust(wspace=0, hspace=0)
......
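The directory layout referenced above keeps one subfolder per class under asl_alphabet_train. A minimal hedged loader matching this file's imports (glob, skimage's resize, numpy); load_folder is our illustrative helper, not the script's actual function, and imageSize=50 is taken from the hunk.

import os
from glob import glob
import numpy as np
from skimage.io import imread
from skimage.transform import resize

def load_folder(folder, image_size=50):
    """Load class-per-subfolder images, resized to (image_size, image_size)."""
    X, y = [], []
    for label in sorted(os.listdir(folder)):
        for path in glob(os.path.join(folder, label, '*')):
            X.append(resize(imread(path), (image_size, image_size)))
            y.append(label)
    return np.asarray(X), np.asarray(y)

# e.g. X_train, y_train = load_folder(train_dir, imageSize)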