Commit 379b35be authored by Perera J.K.H.A.K.

Initial commit

Pipeline #6193 failed with stages
# Created by .ignore support plugin (hsz.mobi)
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
### VirtualEnv template
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
[Ss]cripts
pyvenv.cfg
.venv
pip-selfcheck.json
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
# idea folder, uncomment if you don't need it
# .idea
# Default ignored files
/shelf/
/workspace.xml
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (lecture_recorder)" project-jdk-type="Python SDK" />
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/lecture_recorder.iml" filepath="$PROJECT_DIR$/.idea/lecture_recorder.iml" />
    </modules>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
# import pyaudio
# import wave
#
# CHUNK = 1024
# FORMAT = pyaudio.paInt16
# CHANNELS = 2
# RATE = 44100
#
#
# def record(trigger):
#     p = pyaudio.PyAudio()
#
#     stream = p.open(format=FORMAT,
#                     channels=CHANNELS,
#                     rate=RATE,
#                     input=True,
#                     frames_per_buffer=CHUNK)
#
#     print("Start recording")
#
#     frames = []
#
#     try:
#         while trigger:
#             data = stream.read(CHUNK)
#             frames.append(data)
#     except KeyboardInterrupt:
#         print("Done recording")
#     except Exception as e:
#         print(str(e))
#
#     sample_width = p.get_sample_size(FORMAT)
#
#     stream.stop_stream()
#     stream.close()
#     p.terminate()
#
#     return sample_width, frames
#
#
# def record_to_file(file_path):
#     wf = wave.open(file_path, 'wb')
#     wf.setnchannels(CHANNELS)
#     sample_width, frames = record(True)
#     wf.setsampwidth(sample_width)
#     wf.setframerate(RATE)
#     wf.writeframes(b''.join(frames))
#     wf.close()
#
#
# if __name__ == '__main__':
#     print('#' * 80)
#     print("Please speak word(s) into the microphone")
#     print('Press Ctrl+C to stop the recording')
#
#     record_to_file('output.wav')
#
#     print("Result written to output.wav")
#     print('#' * 80)
#
# import tkinter
# import tkinter as tk
# import tkinter.messagebox
import pyaudio
import wave
import os
class RecAUD:

    def __init__(self, chunk=1024, frmat=pyaudio.paInt16, channels=2, rate=44100, py=None):
        # Start Tkinter and set Title
        # self.main = tkinter.Tk()
        self.collections = []
        # self.main.geometry('500x300')
        # self.main.title('Record')
        self.CHUNK = chunk
        self.FORMAT = frmat
        self.CHANNELS = channels
        self.RATE = rate
        # Avoid a PyAudio() default argument, which would be constructed once at import time.
        self.p = py if py is not None else pyaudio.PyAudio()
        self.frames = []
        self.st = 1
        # Note: start_record() below opens its own stream; this one is kept from the
        # original code but is otherwise unused.
        self.stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE,
                                  input=True, frames_per_buffer=self.CHUNK)

        # Set Frames
        # self.buttons = tkinter.Frame(self.main, padx=120, pady=20)
        # Pack Frame
        # self.buttons.pack(fill=tk.BOTH)

        # Start and Stop buttons
        # self.strt_rec = tkinter.Button(self.buttons, width=10, padx=10, pady=5, text='Start Recording', command=lambda: self.start_record())
        # self.strt_rec.grid(row=0, column=0, padx=50, pady=5)
        # self.stop_rec = tkinter.Button(self.buttons, width=10, padx=10, pady=5, text='Stop Recording', command=lambda: self.stop())
        # self.stop_rec.grid(row=1, column=0, columnspan=1, padx=50, pady=5)
        # tkinter.mainloop()

    def start_record(self):
        self.st = 1
        self.frames = []
        stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE,
                             input=True, frames_per_buffer=self.CHUNK)
        # Read fixed-size chunks until stop() flips self.st.
        while self.st == 1:
            data = stream.read(self.CHUNK)
            self.frames.append(data)
            print("* recording")
            # self.main.update()
        stream.close()

        # Dump the buffered frames to a WAV file.
        wf = wave.open('test_recording.wav', 'wb')
        wf.setnchannels(self.CHANNELS)
        wf.setsampwidth(self.p.get_sample_size(self.FORMAT))
        wf.setframerate(self.RATE)
        wf.writeframes(b''.join(self.frames))
        wf.close()

    def stop(self):
        self.st = 0


# Create an object of the ProgramGUI class to begin the program.
# guiAUD = RecAUD()
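# A minimal usage sketch (an assumption, not part of the original flow):
# start_record() blocks, so run it on a worker thread and stop it from the
# caller; the output name 'test_recording.wav' is hard-coded above.
# import threading, time
# rec = RecAUD()
# th = threading.Thread(target=rec.start_record)
# th.start()
# time.sleep(5)
# rec.stop()
# th.join()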
from multiprocessing import freeze_support
from tkinter import *
from tkinter import ttk
from pose_identifier.pose_identify import pose_identifier
from screen_recorder.screen_recorder import *
# from audio_recorder.audio_recorder import *
# from audio_recorder.audio_recorder import RecAUD
from multiprocessing import Process
import cv2
import numpy as np
import tkinter as tk
import threading
import moviepy.editor as mpe
import screen_recorder.screen_recorder
root = Tk()
root.title("white board")
root.geometry("1500x870+150+50")
root.configure(bg="#f2f3f5")
root.resizable(False, False)
current_x = 0
current_y = 0
color = 'black'
def locate_xy(work):
    global current_x, current_y
    current_x = work.x
    current_y = work.y


def addLine(work):
    global current_x, current_y
    canvas.create_line((current_x, current_y, work.x, work.y), width=get_current_value(),
                       fill=color, capstyle=ROUND, smooth=TRUE)
    current_x, current_y = work.x, work.y


def show_color(new_color):
    global color
    color = new_color


def new_canvas():
    canvas.delete('all')
    display_pallete()
#icon
image_icon = PhotoImage(file="assets/logo.png")
root.iconphoto(False,image_icon)
eraser=PhotoImage(file="assets/eraser.png")
Button(root, image=eraser, bg="#f2f3f5", height=20, width=20, command=new_canvas).place(x=47,y=340)
colors = Canvas(root, bg="#ffffff", width=37, height=310, bd=0)
colors.place(x=40, y=10)
def display_pallete():
    # Draw a column of clickable swatches; clicking one selects the pen color.
    swatches = ['black', 'gray', 'brown4', 'yellow', 'blue',
                'orange', 'pink', 'brown4', 'purple', 'red']
    for i, swatch in enumerate(swatches):
        top = 10 + i * 30
        swatch_id = colors.create_rectangle((10, top, 30, top + 20), fill=swatch)
        colors.tag_bind(swatch_id, '<Button-1>', lambda event, c=swatch: show_color(c))
display_pallete()
canvas = Canvas(root, width=1350, height=700, background="white", cursor="hand2")
canvas.place(x=120,y=10)
canvas.bind('<Button-1>', locate_xy)
canvas.bind('<B1-Motion>', addLine)
############SLIDER##############
current_value = tk.DoubleVar()


def get_current_value():
    # No leading space in the format, so the value is also usable as a line width.
    return '{:.2f}'.format(current_value.get())


def slider_changed(event):
    value_label.configure(text=get_current_value())


slider = ttk.Scale(root, from_=0, to=100, orient='horizontal', command=slider_changed, variable=current_value)
slider.place(x=10, y=380)

# value label
value_label = ttk.Label(root, text=get_current_value())
value_label.place(x=10, y=410)
# rec_aud = RecAUD()

############WEB CAMERA##############
def start_web_cam():
    # process_one = Process(target=start_pose_identifier)
    # process_one.start()
    th = threading.Thread(target=pose_identifier)
    th.start()


web_cam_start = Button(text="Start Web Camera", command=start_web_cam).place(x=10, y=450)
def combine_audio(vidname="temp_vid.mp4", audname="test_recording.wav", outname="output.mp4", fps=10):
    my_clip = mpe.VideoFileClip(vidname)
    audio_background = mpe.AudioFileClip(audname)
    print(str(audio_background.duration))
    print(str(my_clip.duration))
    print(str(my_clip.duration - audio_background.duration))
    # Tail of the video matching the audio's length; computed but not used below.
    clip = my_clip.subclip(my_clip.duration - audio_background.duration, my_clip.duration)
    final_clip = my_clip.set_audio(audio_background)
    final_clip.write_videofile(outname, fps=fps)
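# A hedged alternative (an assumption, not the original behavior): if the
# screen capture starts before the microphone, muxing the audio onto the
# trimmed tail computed above keeps the two in sync; inside combine_audio
# that would be
#     final_clip = clip.set_audio(audio_background)
# instead of building final_clip from the full my_clip.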
############MERGE AUDIO AND VIDEO##############
merge_things = Button(text="Merge", command=combine_audio).place(x=10, y=480)

############SCREEN RECORDER & AUDIO RECORDER##############
start_cap = Button(root, text='Start Recording', width=30, command=start_screen_capturing)
start_cap.place(x=285, y=760)

stop_cap = Button(root, text='Stop Recording', width=30, command=stop_screen_capturing)
stop_cap.place(x=535, y=760)

pause_cap = Button(root, text='Pause Recording', width=30, command=pause_screen_capturing)
pause_cap.place(x=785, y=760)

resume_cap = Button(root, text='Resume Recording', width=30, command=resume_screen_capturing)
resume_cap.place(x=1035, y=760)
if __name__ == '__main__':
    freeze_support()
    root.mainloop()
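# Assumed setup for running the whiteboard (file names here are hypothetical;
# the commit view does not record them): the working directory needs
# assets/logo.png, assets/eraser.png and pose_identifier/body_language.pkl,
# plus the pyaudio, opencv-python, mediapipe, pandas, moviepy and
# scikit-learn (to unpickle the model) packages.
# python main.py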
File added
import cv2
import pandas as pd
import mediapipe as mp
import numpy as np
# from sklearn.metrics import accuracy_score
import pickle
import threading
# def start_pose_identifier():
# t1=threading.Thread(target=pose_identifier)
# t1.start()
def pose_identifier():
    mp_drawing = mp.solutions.drawing_utils  # Drawing helpers
    mp_holistic = mp.solutions.holistic  # Mediapipe Solutions

    with open('pose_identifier/body_language.pkl', 'rb') as f:
        model = pickle.load(f)

    cap = cv2.VideoCapture(0)
    # Initiate holistic model
    with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Recolor Feed
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image.flags.writeable = False

            # Make Detections
            results = holistic.process(image)
            # print(results.face_landmarks)
            # face_landmarks, pose_landmarks, left_hand_landmarks, right_hand_landmarks

            # Recolor image back to BGR for rendering
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

            # 1. Draw face landmarks
            # mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_CONTOURS,
            #                           mp_drawing.DrawingSpec(color=(80, 110, 10), thickness=1, circle_radius=1),
            #                           mp_drawing.DrawingSpec(color=(80, 256, 121), thickness=1, circle_radius=1)
            #                           )
            #
            # # 2. Right hand
            # mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
            #                           mp_drawing.DrawingSpec(color=(80, 22, 10), thickness=2, circle_radius=4),
            #                           mp_drawing.DrawingSpec(color=(80, 44, 121), thickness=2, circle_radius=2)
            #                           )
            #
            # # 3. Left Hand
            # mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
            #                           mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
            #                           mp_drawing.DrawingSpec(color=(121, 44, 250), thickness=2, circle_radius=2)
            #                           )
            #
            # # 4. Pose Detections
            # mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
            #                           mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=4),
            #                           mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
            #                           )

            # Export coordinates
            try:
                # Extract Pose landmarks
                pose = results.pose_landmarks.landmark
                pose_row = list(np.array(
                    [[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in pose]).flatten())

                # Extract Face landmarks
                face = results.face_landmarks.landmark
                face_row = list(np.array(
                    [[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in face]).flatten())

                # Concatenate rows
                row = pose_row + face_row

                # # Append class name
                # row.insert(0, class_name)
                # # Export to CSV
                # with open('coords.csv', mode='a', newline='') as f:
                #     csv_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                #     csv_writer.writerow(row)

                # Make Detections
                X = pd.DataFrame([row])
                body_language_class = model.predict(X)[0]
                body_language_prob = model.predict_proba(X)[0]
                print(body_language_class, body_language_prob)

                # Grab ear coords
                coords = tuple(np.multiply(
                    np.array(
                        (results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].x,
                         results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_EAR].y)),
                    [640, 480]).astype(int))

                cv2.rectangle(image,
                              (coords[0], coords[1] + 5),
                              (coords[0] + len(body_language_class) * 20, coords[1] - 30),
                              (245, 117, 16), -1)
                cv2.putText(image, body_language_class, coords,
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

                # Get status box
                cv2.rectangle(image, (0, 0), (250, 60), (245, 117, 16), -1)

                # Display Class
                cv2.putText(image, 'CLASS',
                            (95, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
                cv2.putText(image, body_language_class.split(' ')[0],
                            (90, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

                # Display Probability
                cv2.putText(image, 'PROB',
                            (15, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
                cv2.putText(image, str(round(body_language_prob[np.argmax(body_language_prob)], 2)),
                            (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
            except Exception:
                # Some frames have no detectable pose/face landmarks; skip the overlay.
                pass

            cv2.imshow('Raw Webcam Feed', image)

            if cv2.waitKey(10) & 0xFF == ord('q'):
                break

    cap.release()
    cv2.destroyAllWindows()
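# A minimal sketch for running this module standalone (an assumption; in the
# app, pose_identifier() is started on a worker thread from the whiteboard UI):
if __name__ == '__main__':
    pose_identifier()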
from PIL import ImageGrab
from audio_recorder.audio_recorder import RecAUD
from audio_recorder.audio_recorder import *
from multiprocessing import Process
import cv2
import numpy as np
import tkinter as tk
import threading
from time import sleep
rec_aud = RecAUD()
p = ImageGrab.grab()
a, b = p.size
filename = 'temp_vid.mp4'
fourcc = cv2.VideoWriter_fourcc(*'avc1')
frame_rate = 16
out = cv2.VideoWriter()
def screen_capturing():
    global capturing
    capturing = True
    # Capture microphone audio on its own thread while frames are grabbed below.
    th = threading.Thread(target=rec_aud.start_record)
    th.start()
    while capturing:
        img = ImageGrab.grab()
        frame = np.array(img)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        out.write(frame)


def start_screen_capturing():
    if not out.isOpened():
        out.open(filename, fourcc, frame_rate, (a, b))
    print('rec started')
    t1 = threading.Thread(target=screen_capturing)
    # t2 = threading.Thread(target=rec_aud.start_record)
    t1.start()
    # t2.start()

    # process_one = Process(target=screen_capturing)
    # process_two = Process(target=rec_aud.start_record)
    #
    # process_one.start()
    # process_one.join()
    #
    # process_two.start()
    # process_two.join()


def stop_screen_capturing():
    global capturing
    rec_aud.stop()
    capturing = False
    out.release()
    print('complete')


def pause_screen_capturing():
    global capturing
    capturing = False
    print("Paused")


def resume_screen_capturing():
    global capturing
    capturing = True
    if not out.isOpened():
        out.open(filename, fourcc, frame_rate, (a, b))
    # Note: screen_capturing() also restarts the audio thread, so resuming
    # begins a fresh microphone capture and clears previously buffered frames.
    t1 = threading.Thread(target=screen_capturing, daemon=True)
    t1.start()
    print("Resumed")
File added