Commit c2f6c991 authored by Pamal-Ranasinghe's avatar Pamal-Ranasinghe

Basic translation app is done

parent c624512c
File added
from nltk.tokenize import word_tokenize from nltk.tokenize import word_tokenize
from loguru import logger from loguru import logger
from .awsOperation import AwsOperation
import os import os
import json import json
import cv2
import numpy as np
import glob
from moviepy.editor import *
class WordModel: class WordModel:
...@@ -18,6 +22,8 @@ class WordModel: ...@@ -18,6 +22,8 @@ class WordModel:
try: try:
logger.info('word_pre_process - hits') logger.info('word_pre_process - hits')
clip_arr = []
            # Identify all the tokens
para_tokenize = word_tokenize(self.para) para_tokenize = word_tokenize(self.para)
logger.info('Tokenized Words : ' ,para_tokenize) logger.info('Tokenized Words : ' ,para_tokenize)
...@@ -33,6 +39,25 @@ class WordModel: ...@@ -33,6 +39,25 @@ class WordModel:
if w not in stop_words: if w not in stop_words:
filtered_sentence.append(w) filtered_sentence.append(w)
aws = AwsOperation('rpserverone')
bucket = aws.s3_connector()
extension = ".mp4"
# sign_names = ["this", "beautiful", "day"]
for i in range(0, len(filtered_sentence)):
print(type(i))
print(str(filtered_sentence[i]) + extension)
bucket.download_file(str(filtered_sentence[i])+extension, 'D:/s3_tute/'+str(filtered_sentence[i])+extension)
for filename in glob.glob('D:/s3_tute/*.mp4'):
clip = VideoFileClip(filename)
clip_arr.append(clip)
final = concatenate_videoclips(clip_arr)
final.write_videofile("final_out.mp4")
return json.loads(json.dumps({ return json.loads(json.dumps({
"filtered_words" : filtered_sentence, "filtered_words" : filtered_sentence,
"tokens" : para_tokenize, "tokens" : para_tokenize,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment