Easy Quest - Smart Recruitment Tool with AI - Backend

Commit 391d4c85, authored Apr 13, 2023 by H.M.C. Nadunithara Wijerathne

Merge branch 'it19154640' into 'master'

Facial expressions

See merge request !5

Parents: 9d39d3a0, 120e8bb5

16 changed files with 742 additions and 40 deletions (+742, -40)
Changed files:

- NodeServer/src/config/types.ts (+1, -1)
- NodeServer/src/models/Application.ts (+1, -1)
- NodeServer/src/routes/application.ts (+10, -6)
- NodeServer/src/routes/user.ts (+1, -1)
- NodeServer/src/utilities/apis/facial.ts (+9, -0)
- PythonServer/.gitignore (+1, -0)
- PythonServer/README.md (+62, -23)
- PythonServer/baseModels/payloads.py (+5, -0)
- PythonServer/main.py (+18, -1)
- PythonServer/requirements.txt (+211, -0)
- PythonServer/routes/facial.py (+57, -0)
- PythonServer/routes/voice.py (+1, -6)
- PythonServer/scripts/blink_detection.py (+69, -0)
- PythonServer/scripts/parameters.py (+6, -1)
- PythonServer/scripts/web_socket.py (+21, -0)
- PythonServer/server_env.yaml (+269, -0)
NodeServer/src/config/types.ts

@@ -125,7 +125,7 @@ export type ApplicationType = {
     time: string;
     link: string;
     videoRef?: string;
-    voiceVerification?: number;
+    voiceVerification?: string;
   };
   score: {
     primary: number;
NodeServer/src/models/Application.ts

@@ -11,7 +11,7 @@ const applicationSchema = new Schema<ApplicationType>({
     time: String,
     link: String,
     videoRef: String,
-    voiceVerification: Number,
+    voiceVerification: String,
   },
   require: false,
 },
NodeServer/src/routes/application.ts

@@ -75,13 +75,11 @@ router.put(
     let update = req.body.update;
     if (update.interview?.videoRef) {
-      const score: any = await VoiceAPI.verifyVoice({
+      VoiceAPI.verifyVoice({
         video_url: update.interview?.videoRef,
         user_id: req.body.candidateId,
         application_id: req.body.applicationId,
       });
-      update.interview.voiceVerification = score;
     }
     Application.findByIdAndUpdate(req.body.applicationId, {

@@ -102,13 +100,19 @@ router.post(
   authMiddleware,
   async (req: TypedRequest<{}, AnalyseApplicationPayload>, res) => {
     const { applicationId, startTime, endTime } = req.body;
-    const data = await VoiceAPI.analyseVoice({
+    const payload = {
       start: startTime,
       end: endTime,
       application_id: applicationId,
-    });
-    return res.json({ voice: data });
+    };
+    try {
+      const voiceData = await VoiceAPI.analyseVoice(payload);
+      return res.json({ voice: voiceData });
+    } catch (error) {
+      return res.status(500).send(error);
+    }
   }
 );
NodeServer/src/routes/user.ts

@@ -21,7 +21,7 @@ router.post(
       update.resumeData = data;
     }
     if (req.body?.selfIntro) {
-      await VoiceAPI.enrollVoice({
+      VoiceAPI.enrollVoice({
         user_id: req.query.userId,
         video_url: req.body.selfIntro,
       });
NodeServer/src/utilities/apis/facial.ts (new file)

import { request } from "../requests";

export default class FacialAPI {
  static analyseEyeBlinks = (payload: {
    start: number;
    end: number;
    application_id: string;
  }) => request("<BASE_URL>/facial/eye-blinks", "POST", payload);
}
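For illustration, the HTTP call this helper wraps can be reproduced directly against the Python server. The sketch below is not part of the commit; it assumes the FastAPI server stands in for <BASE_URL> at http://localhost:8000 and that an interview clip already exists on the server for the given application id.

# Illustrative sketch only (not part of the commit). Assumes the Python server
# runs at http://localhost:8000 and videos/interviews/<application_id>.mp4
# exists on the server; "demo-application" is a hypothetical id.
import requests

payload = {"start": 0, "end": 30, "application_id": "demo-application"}
resp = requests.post("http://localhost:8000/facial/eye-blinks", json=payload)
print(resp.json())  # the blink count returned by count_blinks()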
PythonServer/.gitignore

@@ -3,6 +3,7 @@ __pycache__
 voices/auth/embed/**
 voices/auth/temp/**
 voices/interviews/**
+videos/interviews/**
 resumes
 models/**
 data/**
PythonServer/README.md (updated file)

## Create conda python 3.9 env

install python 3.9 and set the env path

conda create -n server tensorflow python=3.9

### PIP Packages

- install gfortran : https://fortran-lang.org/learn/os_setup/install_gfortran
- download and install the correct scipy wheel from https://pypi.org/project/scipy/#files : pip install <filename>.whl
- conda install scipy=1.9.3
- conda install -c conda-forge importlib_metadata

pip install gensim==3.8.1
pip install texthero==1.1.0
pip install pyresparser==1.0.6
pip install soundfile==0.10.3.post1
pip install librosa==0.9.2

### CONDA Packages

conda install -c blaze sqlite3
conda install tqdm=4.65.0
conda install pandas=1.5.3
conda install pytesseract
conda install "uvicorn[standard]"
conda install -c conda-forge fastapi
conda install websockets
pip install moviepy
pip install "libclang>=13.0.0"
pip install "tensorflow-io-gcs-filesystem>=0.23.1"
pip install pyannote.audio
pip install pyannote.core
pip install PyAudio
pip install python_speech_features
pip install fer==22.4.0

[OPTIONAL]
pip install pillow==9.0.0
conda install "numpy<1.24.0"
conda install cudatoolkit
conda update --all

### Install dlib

- Install Cmake : https://cmake.org/download/
- Install C++ compiler : https://visualstudio.microsoft.com/visual-cpp-build-tools
- pip install cmake
- pip install dlib==19.24

### End dlib

pip install imutils==0.5.4

## Run server

uvicorn main:app --reload

## Datasets & models

voice: https://drive.google.com/file/d/1wWsrN2Ep7x6lWqOXfr4rpKGYrJhWc8z7/view
models: https://drive.google.com/file/d/1TWjIiyyInXHaXySKymM6VcsIR5YMVQ1V/view?usp=share_link
shape_predictor_68_face_landmarks: https://www.kaggle.com/datasets/sergiovirahonda/shape-predictor-68-face-landmarksdat

## Folders

voices>auth>embed
voices>auth>temp
voices>interviews
models
data>voice

### Other conda commands

DELETE ENV : conda remove -n ENV_NAME --all
EXPORT ENV : conda env export > server_env.yaml
CREATE ENV BY ENV FILE : conda env create -f server_env.yaml
LIST ENV : conda list --export > requirements.txt
INSTALL ENV : conda install --file requirements.txt
PythonServer/baseModels/payloads.py

@@ -25,3 +25,8 @@ class AnalyseVoice(BaseModel):
     start: int
     end: int
     application_id: str
+
+class CountBlinks(BaseModel):
+    start: int
+    end: int
+    application_id: str
PythonServer/main.py (updated file)

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
# from fer import FER
import routes.resume as resumes
import routes.voice as voice
import routes.facial as facial

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(resumes.router)
app.include_router(voice.router)
app.include_router(facial.router)


@app.get("/")
def read_root():
    return {"status": "running"}
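Once the server is started with `uvicorn main:app --reload` (see the README above), the root route can serve as a quick health check. A minimal sketch, not part of the commit, assuming uvicorn's default host and port:

# Health-check sketch (not part of the commit); assumes uvicorn's default
# bind address of 127.0.0.1:8000.
import requests

resp = requests.get("http://127.0.0.1:8000/")
print(resp.json())  # expected: {"status": "running"}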
PythonServer/requirements.txt (new file)
# This file may be used to create an environment using:
# $ conda create --name <env> --file <this file>
# platform: win-64
_tflow_select=2.3.0=mkl
absl-py=1.3.0=py39haa95532_0
aiohttp=3.8.3=py39h2bbff1b_0
aiosignal=1.2.0=pyhd3eb1b0_0
alembic=1.10.3=pypi_0
antlr4-python3-runtime=4.9.3=pypi_0
appdirs=1.4.4=pyhd3eb1b0_0
asteroid-filterbanks=0.4.0=pypi_0
astunparse=1.6.3=py_0
async-timeout=4.0.2=py39haa95532_0
attrs=22.1.0=py39haa95532_0
audioread=3.0.0=pypi_0
backports-cached-property=1.0.2=pypi_0
blas=1.0=mkl
blinker=1.4=py39haa95532_0
blis=0.7.9=pypi_0
bottleneck=1.3.5=py39h080aedc_0
brotlipy=0.7.0=py39h2bbff1b_1003
ca-certificates=2023.01.10=haa95532_0
cachetools=4.2.2=pyhd3eb1b0_0
catalogue=1.0.2=pypi_0
certifi=2022.12.7=py39haa95532_0
cffi=1.15.1=py39h2bbff1b_3
chardet=5.1.0=pypi_0
charset-normalizer=2.0.4=pyhd3eb1b0_0
click=8.0.4=py39haa95532_0
cmaes=0.9.1=pypi_0
colorama=0.4.6=py39haa95532_0
colorlog=6.7.0=pypi_0
commonmark=0.9.1=pypi_0
contourpy=1.0.7=pypi_0
cryptography=39.0.1=py39h21b164f_0
cycler=0.11.0=pypi_0
cymem=2.0.7=pypi_0
decorator=5.1.1=pypi_0
docopt=0.6.2=pypi_0
docx2txt=0.8=pypi_0
einops=0.3.2=pypi_0
fer=22.4.0=pypi_0
filelock=3.11.0=pypi_0
flatbuffers=2.0.0=h6c2663c_0
flit-core=3.8.0=py39haa95532_0
fonttools=4.39.3=pypi_0
frozenlist=1.3.3=py39h2bbff1b_0
fsspec=2023.4.0=pypi_0
gast=0.4.0=pyhd3eb1b0_0
gensim=3.8.1=pypi_0
giflib=5.2.1=h8cc25b3_3
google-auth=2.6.0=pyhd3eb1b0_0
google-auth-oauthlib=0.4.4=pyhd3eb1b0_0
google-pasta=0.2.0=pyhd3eb1b0_0
greenlet=2.0.2=pypi_0
grpcio=1.42.0=py39hc60d5dd_0
h11=0.12.0=pyhd3eb1b0_0
h5py=3.7.0=py39h3de5c98_0
hdf5=1.10.6=h1756f20_1
hmmlearn=0.2.8=pypi_0
huggingface-hub=0.13.4=pypi_0
hyperpyyaml=1.2.0=pypi_0
icc_rt=2022.1.0=h6049295_2
icu=58.2=ha925a31_3
idna=3.4=py39haa95532_0
importlib-metadata=6.0.0=py39haa95532_0
importlib-resources=5.12.0=pypi_0
importlib_metadata=6.0.0=hd3eb1b0_0
intel-openmp=2021.4.0=haa95532_3556
joblib=1.2.0=pypi_0
jpeg=9e=h2bbff1b_1
jsonschema=4.17.3=pypi_0
julius=0.2.7=pypi_0
keras=2.10.0=py39haa95532_0
keras-preprocessing=1.1.2=pyhd3eb1b0_0
kiwisolver=1.4.4=pypi_0
libclang=16.0.0=pypi_0
libcurl=7.88.1=h86230a5_0
libpng=1.6.39=h8cc25b3_0
libprotobuf=3.20.3=h23ce68f_0
librosa=0.9.2=pypi_0
libssh2=1.10.0=hcd4344a_0
llvmlite=0.39.1=pypi_0
mako=1.2.4=pypi_0
markdown=3.4.1=py39haa95532_0
markupsafe=2.1.1=py39h2bbff1b_0
matplotlib=3.7.1=pypi_0
mkl=2021.4.0=haa95532_640
mkl-service=2.4.0=py39h2bbff1b_0
mkl_fft=1.3.1=py39h277e83a_0
mkl_random=1.2.2=py39hf11a4ad_0
mpmath=1.3.0=pypi_0
mtcnn=0.1.1=pypi_0
multidict=6.0.2=py39h2bbff1b_0
murmurhash=1.0.9=pypi_0
networkx=2.8.8=pypi_0
nltk=3.8.1=pypi_0
numba=0.56.4=pypi_0
numexpr=2.8.4=py39h5b0cc5e_0
numpy=1.23.5=py39h3b20f71_0
numpy-base=1.23.5=py39h4da318b_0
oauthlib=3.2.2=py39haa95532_0
omegaconf=2.3.0=pypi_0
opencv-contrib-python=4.7.0.72=pypi_0
opencv-python=4.7.0.72=pypi_0
openssl=1.1.1t=h2bbff1b_0
opt_einsum=3.3.0=pyhd3eb1b0_1
optuna=3.1.1=pypi_0
packaging=23.0=py39haa95532_0
pandas=2.0.0=pypi_0
pdfminer-six=20221105=pypi_0
pillow=9.5.0=pypi_0
pip=23.0.1=py39haa95532_0
plac=1.1.3=pypi_0
plotly=5.14.1=pypi_0
pooch=1.4.0=pyhd3eb1b0_0
preshed=3.0.8=pypi_0
primepy=1.3=pypi_0
protobuf=3.19.6=pypi_0
pyannote-audio=2.1.1=pypi_0
pyannote-core=4.5=pypi_0
pyannote-database=4.1.3=pypi_0
pyannote-metrics=3.2.1=pypi_0
pyannote-pipeline=2.3=pypi_0
pyasn1=0.4.8=pyhd3eb1b0_0
pyasn1-modules=0.2.8=py_0
pyaudio=0.2.13=pypi_0
pycparser=2.21=pyhd3eb1b0_0
pycryptodome=3.17=pypi_0
pydeprecate=0.3.2=pypi_0
pygments=2.14.0=pypi_0
pyjwt=2.4.0=py39haa95532_0
pyopenssl=23.0.0=py39haa95532_0
pyparsing=3.0.9=pypi_0
pyresparser=1.0.6=pypi_0
pyrsistent=0.19.3=pypi_0
pysocks=1.7.1=py39haa95532_0
python=3.9.16=h6244533_2
python-dateutil=2.8.2=pyhd3eb1b0_0
python-flatbuffers=2.0=pyhd3eb1b0_0
python-speech-features=0.6=pypi_0
pytorch-lightning=1.6.5=pypi_0
pytorch-metric-learning=1.7.3=pypi_0
pytz=2023.3=pypi_0
pyyaml=6.0=pypi_0
regex=2023.3.23=pypi_0
requests=2.28.1=py39haa95532_1
requests-oauthlib=1.3.0=py_0
resampy=0.4.2=pypi_0
rich=12.6.0=pypi_0
rsa=4.7.2=pyhd3eb1b0_1
ruamel-yaml=0.17.21=pypi_0
ruamel-yaml-clib=0.2.7=pypi_0
scikit-learn=1.2.2=pypi_0
scipy=1.9.3=py39h321e85e_1
semver=2.13.0=pypi_0
sentencepiece=0.1.97=pypi_0
setuptools=65.6.3=py39haa95532_0
shellingham=1.5.0.post1=pypi_0
simplejson=3.19.1=pypi_0
singledispatchmethod=1.0=pypi_0
six=1.16.0=pyhd3eb1b0_1
smart-open=6.3.0=pypi_0
snappy=1.1.9=h6c2663c_0
sortedcontainers=2.4.0=pypi_0
soundfile=0.10.3.post1=pypi_0
spacy=2.3.9=pypi_0
speechbrain=0.5.14=pypi_0
sqlalchemy=2.0.9=pypi_0
sqlite=3.41.1=h2bbff1b_0
sqlite3=3.8.6=0
srsly=1.0.6=pypi_0
sympy=1.11.1=pypi_0
tabulate=0.9.0=pypi_0
tenacity=8.2.2=pypi_0
tensorboard=2.10.0=py39haa95532_0
tensorboard-data-server=0.6.1=py39haa95532_0
tensorboard-plugin-wit=1.8.1=py39haa95532_0
tensorflow=2.10.0=mkl_py39ha510bab_0
tensorflow-base=2.10.0=mkl_py39h6a7f48e_0
tensorflow-estimator=2.10.0=py39haa95532_0
tensorflow-io-gcs-filesystem=0.31.0=pypi_0
termcolor=2.1.0=py39haa95532_0
texthero=1.1.0=pypi_0
thinc=7.4.6=pypi_0
threadpoolctl=3.1.0=pypi_0
torch=1.13.1=pypi_0
torch-audiomentations=0.11.0=pypi_0
torch-pitch-shift=1.2.3=pypi_0
torchaudio=0.13.1=pypi_0
torchmetrics=0.11.4=pypi_0
tqdm=4.65.0=py39hd4e2768_0
typer=0.7.0=pypi_0
typing_extensions=4.4.0=py39haa95532_0
tzdata=2023.3=pypi_0
unidecode=1.3.6=pypi_0
urllib3=1.26.15=py39haa95532_0
uvicorn=0.20.0=py39haa95532_0
vc=14.2=h21ff451_1
vs2015_runtime=14.27.29016=h5e58377_2
wasabi=0.10.1=pypi_0
websockets=10.4=py39h2bbff1b_1
werkzeug=2.2.3=py39haa95532_0
wheel=0.38.4=py39haa95532_0
win_inet_pton=1.1.0=py39haa95532_0
wincertstore=0.2=py39haa95532_2
wordcloud=1.8.2.2=pypi_0
wrapt=1.14.1=py39h2bbff1b_0
yarl=1.8.1=py39h2bbff1b_0
zipp=3.11.0=py39haa95532_0
zlib=1.2.13=h8cc25b3_0
PythonServer/routes/facial.py (new file)

from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from fer import FER
import cv2
import imutils
from scripts.blink_detection import count_blinks
from baseModels.payloads import CountBlinks
from scripts.web_socket import ConnectionManager
import json

manager = ConnectionManager()
router = APIRouter(prefix='/facial')


@router.post("/eye-blinks")
def countBlinks(payload: CountBlinks):
    video_filename = 'videos/interviews/' + payload.application_id + '.mp4'
    count = count_blinks(video_filename, payload.start, payload.end)
    return count


@router.websocket("/ws/eye-blinks")
async def countBlinks(websocket: WebSocket, application_id: str, start: str, end: str):
    await manager.connect(websocket)
    try:
        while True:
            video_filename = 'videos/interviews/' + application_id + '.mp4'
            count = count_blinks(video_filename, int(start), int(end))
            await manager.send_private(json.dumps({"count": count, "end": True}), websocket)
            await manager.disconnect(websocket)
    except WebSocketDisconnect:
        await manager.send_private(json.dumps({"end": True}), websocket)
        await manager.disconnect(websocket)
        return


@router.websocket("/ws/emotions")
async def emotions(websocket: WebSocket, application_id: str):
    await manager.connect(websocket)
    video_filename = 'videos/interviews/' + application_id + '.mp4'
    face_detector = FER(mtcnn=True)
    cap = cv2.VideoCapture(video_filename)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                await manager.send_private(json.dumps({"data": [], "end": True}), websocket)
                await manager.disconnect(websocket)
                break
            frame = imutils.resize(frame, width=800)
            emotions = face_detector.detect_emotions(frame)
            print(emotions)
            await manager.send_private(json.dumps({"data": emotions, "end": False}), websocket)
    except WebSocketDisconnect:
        await manager.send_private(json.dumps({"data": [], "end": True, "status": "Offline"}), websocket)
        await manager.disconnect(websocket)
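Both WebSocket routes push their results as JSON text frames through the ConnectionManager from scripts/web_socket.py. A minimal client sketch for the eye-blink socket, not part of the commit, assuming the server listens on localhost:8000 and the interview video is already on disk server-side:

# Client sketch (not part of the commit). Assumes the server is on
# localhost:8000 and videos/interviews/<application_id>.mp4 exists server-side;
# "demo-application" is a hypothetical id.
import asyncio
import json
import websockets

async def fetch_blink_count(application_id: str, start: int, end: int) -> int:
    uri = ("ws://localhost:8000/facial/ws/eye-blinks"
           f"?application_id={application_id}&start={start}&end={end}")
    async with websockets.connect(uri) as ws:
        # The route replies with a single JSON frame: {"count": <int>, "end": true}
        reply = json.loads(await ws.recv())
        return reply.get("count")

if __name__ == "__main__":
    print(asyncio.run(fetch_blink_count("demo-application", 0, 30)))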
PythonServer/routes/voice.py

@@ -44,7 +44,7 @@ def enroll(payload:EnrollVoice):
 @router.post("/verify")
 def verify(payload: VerifyVoice):
-    video_filename = 'voices/interviews/' + payload.application_id + '.mp4'
+    video_filename = 'videos/interviews/' + payload.application_id + '.mp4'
     urllib.request.urlretrieve(payload.video_url, video_filename)
     # Download video and save audio

@@ -59,11 +59,6 @@ def verify(payload:VerifyVoice):
     enroll_embs = np.load("voices/auth/embed/" + payload.user_id + ".npy")
     distance = euclidean(test_embs, enroll_embs)
-    try:
-        os.remove(video_filename)
-    except:
-        print('error')
-
     return round(1 - distance, 5)

 @router.post("/analyse")
PythonServer/scripts/blink_detection.py (new file)

import cv2
import dlib
import imutils
import numpy as np
from imutils import face_utils
from scipy.spatial import distance as dist
from scripts.parameters import EYE_AR_THRESH, EYE_AR_CONSEC_FRAMES


def eye_aspect_ratio(eye):
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear


detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat")
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]


def count_blinks(filename, start=0, end=0):
    COUNTER = 0
    TOTAL = 0
    vs = cv2.VideoCapture(filename)
    fps = vs.get(cv2.CAP_PROP_FPS)
    start_frame = start * fps
    end_frame = end * fps
    current_frame = start_frame
    vs.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    while (current_frame <= end_frame):
        ret, frame = vs.read()
        if ret == True:
            frame = imutils.resize(frame, width=800)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 0)
            current_frame = current_frame + 1
            print('FRAME : ' + str(current_frame) + ' Blinks :' + str(TOTAL))
            for rect in rects:
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                leftEye = shape[lStart:lEnd]
                rightEye = shape[rStart:rEnd]
                leftEAR = eye_aspect_ratio(leftEye)
                rightEAR = eye_aspect_ratio(rightEye)
                ear = (leftEAR + rightEAR) / 2.0
                if ear < EYE_AR_THRESH:
                    COUNTER += 1
                else:
                    if COUNTER >= EYE_AR_CONSEC_FRAMES:
                        TOTAL += 1
                    COUNTER = 0
        else:
            break
    vs.release()
    return TOTAL
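count_blinks computes the eye aspect ratio (EAR) per frame from dlib's 68-point landmarks and registers a blink whenever the EAR stays below EYE_AR_THRESH for at least EYE_AR_CONSEC_FRAMES consecutive frames. A usage sketch, not part of the commit, assuming the landmark model has been downloaded to models/ and a local test clip exists:

# Usage sketch (not part of the commit). Assumes
# models/shape_predictor_68_face_landmarks.dat is present and a test clip
# exists at videos/interviews/demo-application.mp4 (hypothetical path).
from scripts.blink_detection import count_blinks

# Count blinks between seconds 0 and 30 of the clip.
total = count_blinks('videos/interviews/demo-application.mp4', start=0, end=30)
print('Blinks detected:', total)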
PythonServer/scripts/parameters.py

@@ -19,3 +19,8 @@ EMBED_LIST_FILE = "voices/auth/embed"
 # Recognition
 THRESHOLD = 0.2
+
+EMOTIONS = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
+
+EYE_AR_THRESH = 0.2
+EYE_AR_CONSEC_FRAMES = 3
PythonServer/scripts/web_socket.py (new file)

from typing import List
from fastapi import WebSocket


class ConnectionManager:
    def __init__(self) -> None:
        self.active_connections: List[WebSocket] = []

    async def connect(self, websocket: WebSocket):
        await websocket.accept()
        self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        self.active_connections.remove(websocket)

    async def send_private(self, message: str, websocket: WebSocket):
        await websocket.send_text(message)

    async def broadcast(self, message: str):
        for connection in self.active_connections:
            await connection.send_text(message)
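Only connect, disconnect, and send_private are exercised by the facial routes above; broadcast is left unused there. A minimal sketch of how broadcast could back a shared channel, not part of the commit (the /ws/status endpoint name is hypothetical):

# Sketch (not part of the commit) showing ConnectionManager.broadcast() in a
# simple relay endpoint; "/ws/status" is a hypothetical route name.
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from scripts.web_socket import ConnectionManager

app = FastAPI()
manager = ConnectionManager()

@app.websocket("/ws/status")
async def status_channel(websocket: WebSocket):
    await manager.connect(websocket)
    try:
        while True:
            # Relay any message a client sends to every connected client.
            message = await websocket.receive_text()
            await manager.broadcast(message)
    except WebSocketDisconnect:
        manager.disconnect(websocket)  # disconnect() is synchronous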
PythonServer/server_env.yaml (new file)

name: pyserver
channels:
  - blaze
  - conda-forge
  - defaults
dependencies:
  - _tflow_select=2.3.0=mkl
  - absl-py=1.4.0=pyhd8ed1ab_0
  - aiohttp=3.8.4=py39ha55989b_0
  - aiosignal=1.3.1=pyhd8ed1ab_0
  - anyio=3.6.2=pyhd8ed1ab_0
  - aom=3.5.0=h63175ca_0
  - astunparse=1.6.3=pyhd8ed1ab_0
  - async-timeout=4.0.2=pyhd8ed1ab_0
  - attrs=22.2.0=pyh71513ae_0
  - blinker=1.5=pyhd8ed1ab_0
  - brotlipy=0.7.0=py39ha55989b_1005
  - bzip2=1.0.8=h8ffe710_4
  - ca-certificates=2022.12.7=h5b45459_0
  - cachetools=5.3.0=pyhd8ed1ab_0
  - certifi=2022.12.7=pyhd8ed1ab_0
  - cffi=1.15.1=py39h68f70e3_3
  - charset-normalizer=2.1.1=pyhd8ed1ab_0
  - click=8.1.3=win_pyhd8ed1ab_2
  - colorama=0.4.6=pyhd8ed1ab_0
  - cryptography=38.0.4=py39h58e9bdb_0
  - decorator=5.1.1=pyhd8ed1ab_0
  - expat=2.5.0=h63175ca_1
  - fastapi=0.95.0=pyhd8ed1ab_0
  - ffmpeg=5.1.2=gpl_h5b1d025_106
  - flatbuffers=23.3.3=h63175ca_0
  - font-ttf-dejavu-sans-mono=2.37=hab24e00_0
  - font-ttf-inconsolata=3.000=h77eed37_0
  - font-ttf-source-code-pro=2.038=h77eed37_0
  - font-ttf-ubuntu=0.83=hab24e00_0
  - fontconfig=2.14.2=hbde0cde_0
  - fonts-conda-ecosystem=1=0
  - fonts-conda-forge=1=0
  - freetype=2.12.1=h546665d_1
  - frozenlist=1.3.3=py39ha55989b_0
  - gast=0.4.0=pyh9f0ad1d_0
  - giflib=5.2.1=h64bf75a_3
  - google-auth=2.17.0=pyh1a96a4e_0
  - google-auth-oauthlib=0.4.6=pyhd8ed1ab_0
  - google-pasta=0.2.0=pyh8c360ce_0
  - grpcio=1.42.0=py39hc60d5dd_0
  - h11=0.14.0=pyhd8ed1ab_0
  - h5py=3.7.0=py39h3de5c98_0
  - hdf5=1.10.6=nompi_h5268f04_1114
  - icu=58.2=ha925a31_3
  - idna=3.4=pyhd8ed1ab_0
  - imageio=2.27.0=pyh24c5eb1_0
  - imageio-ffmpeg=0.4.8=pyhd8ed1ab_0
  - importlib-metadata=6.1.0=pyha770c72_0
  - importlib_metadata=6.1.0=hd8ed1ab_0
  - intel-openmp=2023.0.0=h57928b3_25922
  - jpeg=9e=hcfcfb64_3
  - keras=2.10.0=py39haa95532_0
  - keras-preprocessing=1.1.2=pyhd8ed1ab_0
  - krb5=1.20.1=h6609f42_0
  - lcms2=2.15=ha5c8aab_0
  - lerc=4.0.0=h63175ca_0
  - libblas=3.9.0=16_win64_mkl
  - libcblas=3.9.0=16_win64_mkl
  - libcurl=7.88.1=h68f0423_1
  - libdeflate=1.17=hcfcfb64_0
  - libexpat=2.5.0=h63175ca_1
  - libffi=3.4.2=h8ffe710_5
  - libhwloc=2.9.0=h51c2c0f_0
  - libiconv=1.17=h8ffe710_0
  - liblapack=3.9.0=16_win64_mkl
  - libopus=1.3.1=h8ffe710_1
  - libpng=1.6.39=h19919ed_0
  - libprotobuf=3.20.2=h12be248_0
  - libsqlite=3.40.0=hcfcfb64_0
  - libssh2=1.10.0=h680486a_3
  - libtiff=4.5.0=hf8721a0_2
  - libwebp-base=1.3.0=hcfcfb64_0
  - libxcb=1.13=hcd874cb_1004
  - libxml2=2.10.3=hc3477c8_6
  - libzlib=1.2.13=hcfcfb64_4
  - m2w64-gcc-libgfortran=5.3.0=6
  - m2w64-gcc-libs=5.3.0=7
  - m2w64-gcc-libs-core=5.3.0=7
  - m2w64-gmp=6.1.0=2
  - m2w64-libwinpthread-git=5.0.0.4634.697f757=2
  - markdown=3.4.3=pyhd8ed1ab_0
  - markupsafe=2.1.2=py39ha55989b_0
  - mkl=2022.1.0=h6a75c08_874
  - moviepy=1.0.3=pyhd8ed1ab_1
  - msys2-conda-epoch=20160418=1
  - multidict=6.0.4=py39ha55989b_0
  - oauthlib=3.2.2=pyhd8ed1ab_0
  - openh264=2.3.1=h63175ca_2
  - openjpeg=2.5.0=ha2aaf27_2
  - openssl=1.1.1t=hcfcfb64_0
  - opt_einsum=3.3.0=pyhd8ed1ab_1
  - packaging=23.0=pyhd8ed1ab_0
  - pandas=1.5.3=py39h2ba5b7c_1
  - pillow=9.4.0=py39hcebd2be_1
  - pip=23.0.1=pyhd8ed1ab_0
  - platformdirs=3.2.0=pyhd8ed1ab_0
  - pooch=1.7.0=pyha770c72_3
  - proglog=0.1.9=py_0
  - pthread-stubs=0.4=hcd874cb_1001
  - pthreads-win32=2.9.1=hfa6e2cd_3
  - pyasn1=0.4.8=py_0
  - pyasn1-modules=0.2.7=py_0
  - pycparser=2.21=pyhd8ed1ab_0
  - pydantic=1.10.7=py39ha55989b_0
  - pyjwt=2.6.0=pyhd8ed1ab_0
  - pyopenssl=23.1.1=pyhd8ed1ab_0
  - pysocks=1.7.1=pyh0701188_6
  - pytesseract=0.3.10=pyhd8ed1ab_0
  - python=3.9.16=h6244533_2
  - python-dateutil=2.8.2=pyhd8ed1ab_0
  - python-flatbuffers=23.1.21=pyhd8ed1ab_0
  - python_abi=3.9=2_cp39
  - pytz=2023.3=pyhd8ed1ab_0
  - pyu2f=0.1.5=pyhd8ed1ab_0
  - requests=2.28.2=pyhd8ed1ab_0
  - requests-oauthlib=1.3.1=pyhd8ed1ab_0
  - rsa=4.9=pyhd8ed1ab_0
  - scipy=1.9.3=py39hfbf2dce_2
  - setuptools=67.6.1=pyhd8ed1ab_0
  - six=1.16.0=pyh6c4a22f_0
  - snappy=1.1.10=hfb803bf_0
  - sniffio=1.3.0=pyhd8ed1ab_0
  - sqlite=3.41.1=h2bbff1b_0
  - sqlite3=3.8.6=0
  - starlette=0.26.1=pyhd8ed1ab_0
  - svt-av1=1.4.1=h63175ca_0
  - tbb=2021.8.0=h91493d7_0
  - tensorboard=2.10.0=py39haa95532_0
  - tensorboard-data-server=0.6.1=py39haa95532_0
  - tensorboard-plugin-wit=1.8.1=pyhd8ed1ab_0
  - tensorflow=2.10.0=mkl_py39ha510bab_0
  - tensorflow-base=2.10.0=mkl_py39h6a7f48e_0
  - tensorflow-estimator=2.10.0=py39haa95532_0
  - termcolor=2.2.0=pyhd8ed1ab_0
  - tk=8.6.12=h8ffe710_0
  - tqdm=4.65.0=pyhd8ed1ab_1
  - typing-extensions=4.5.0=hd8ed1ab_0
  - typing_extensions=4.5.0=pyha770c72_0
  - tzdata=2023c=h71feb2d_0
  - ucrt=10.0.22621.0=h57928b3_0
  - urllib3=1.26.15=pyhd8ed1ab_0
  - uvicorn=0.21.1=py39hcbf5309_0
  - vc=14.3=hb6edc58_10
  - vs2015_runtime=14.34.31931=h4c5c07a_10
  - werkzeug=2.2.3=pyhd8ed1ab_0
  - wheel=0.40.0=pyhd8ed1ab_0
  - win_inet_pton=1.1.0=pyhd8ed1ab_6
  - wrapt=1.15.0=py39ha55989b_0
  - x264=1!164.3095=h8ffe710_2
  - x265=3.5=h2d74725_3
  - xorg-libxau=1.0.9=hcd874cb_0
  - xorg-libxdmcp=1.1.3=hcd874cb_0
  - xz=5.2.6=h8d14728_0
  - yarl=1.8.2=py39ha55989b_0
  - zipp=3.15.0=pyhd8ed1ab_0
  - zlib=1.2.13=hcfcfb64_4
  - zstd=1.5.2=h12be248_6
  - pip:
    - alembic==1.10.2
    - antlr4-python3-runtime==4.9.3
    - asteroid-filterbanks==0.4.0
    - audioread==3.0.0
    - backports-cached-property==1.0.2
    - blis==0.7.9
    - catalogue==1.0.2
    - chardet==5.1.0
    - cmaes==0.9.1
    - colorlog==6.7.0
    - commonmark==0.9.1
    - contourpy==1.0.7
    - cycler==0.11.0
    - cymem==2.0.7
    - dlib==19.24.0
    - docopt==0.6.2
    - docx2txt==0.8
    - einops==0.3.2
    - en-core-web-sm==2.3.1
    - fer==22.4.0
    - filelock==3.10.7
    - fonttools==4.39.3
    - fsspec==2023.3.0
    - gensim==3.8.1
    - greenlet==2.0.2
    - hmmlearn==0.2.8
    - huggingface-hub==0.13.3
    - hyperpyyaml==1.1.0
    - importlib-resources==5.12.0
    - imutils==0.5.4
    - joblib==1.2.0
    - jsonschema==4.17.3
    - julius==0.2.7
    - kiwisolver==1.4.4
    - librosa==0.9.2
    - llvmlite==0.39.1
    - mako==1.2.4
    - matplotlib==3.7.1
    - mpmath==1.3.0
    - mtcnn==0.1.1
    - murmurhash==1.0.9
    - networkx==2.8.8
    - nltk==3.8.1
    - numba==0.56.4
    - numpy==1.23.5
    - omegaconf==2.3.0
    - opencv-contrib-python==4.7.0.72
    - opencv-python==4.7.0.72
    - optuna==3.1.0
    - pdfminer-six==20221105
    - plac==1.1.3
    - plotly==5.14.0
    - preshed==3.0.8
    - primepy==1.3
    - protobuf==3.19.6
    - pyannote-audio==2.1.1
    - pyannote-core==4.5
    - pyannote-database==4.1.3
    - pyannote-metrics==3.2.1
    - pyannote-pipeline==2.3
    - pyaudio==0.2.13
    - pycryptodome==3.17
    - pydeprecate==0.3.2
    - pygments==2.14.0
    - pyparsing==3.0.9
    - pyresparser==1.0.6
    - pyrsistent==0.19.3
    - python-speech-features==0.6
    - pytorch-lightning==1.6.5
    - pytorch-metric-learning==1.7.3
    - pyyaml==6.0
    - regex==2023.3.23
    - resampy==0.4.2
    - rich==12.6.0
    - ruamel-yaml==0.17.21
    - ruamel-yaml-clib==0.2.7
    - scikit-learn==1.2.2
    - semver==2.13.0
    - sentencepiece==0.1.97
    - shellingham==1.5.0.post1
    - simplejson==3.18.4
    - singledispatchmethod==1.0
    - smart-open==6.3.0
    - sortedcontainers==2.4.0
    - soundfile==0.10.3.post1
    - spacy==2.3.9
    - speechbrain==0.5.14
    - sqlalchemy==2.0.7
    - srsly==1.0.6
    - sympy==1.11.1
    - tabulate==0.9.0
    - tenacity==8.2.2
    - texthero==1.1.0
    - thinc==7.4.6
    - threadpoolctl==3.1.0
    - torch==1.13.1
    - torch-audiomentations==0.11.0
    - torch-pitch-shift==1.2.3
    - torchaudio==0.13.1
    - torchmetrics==0.11.4
    - typer==0.7.0
    - unidecode==1.3.6
    - wasabi==0.10.1
    - wordcloud==1.8.2.2
prefix: C:\Users\User\miniconda3\envs\pyserver