Commit f053fbef authored by H.M.C. Nadunithara Wijerathne's avatar H.M.C. Nadunithara Wijerathne

Merge branch 'master' of...

Merge branch 'master' of http://gitlab.sliit.lk/22_23-j-36/easy-quest-smart-recruitment-tool-with-ai-backend into it1924312
parents 2d3aa8d3 177d394a
......@@ -11,6 +11,7 @@
"author": "Namit Nathwani",
"license": "ISC",
"dependencies": {
"axios": "^1.3.4",
"bcryptjs": "^2.4.3",
"cors": "^2.8.5",
"express": "^4.18.2",
......
......@@ -13,3 +13,4 @@ export const DEFAULT_CONTROLS = {
use: true,
},
};
export const UTILITY_SERVER = "http://127.0.0.1:8000";
......@@ -47,6 +47,16 @@ export type AuthType = {
controls?: ControlsType;
};
export type ResumeDataType = {
skills: string[] | null;
degree: string[] | null;
designation: string[] | null;
experience: string[] | null;
company_names: string[] | null;
no_of_pages: number;
total_experience: number;
};
export type CandidateType = {
_id?: string;
name: string;
......@@ -60,6 +70,10 @@ export type CandidateType = {
dateOfBirth: string;
jobIds: string[];
profilePicture: string;
state: "INTIAL" | "READY";
resume?: string;
resumeData?: ResumeDataType;
selfIntro?: string;
};
export type OrganizationType = {
......@@ -87,6 +101,48 @@ export type ControlsType = {
};
};
export type JobType = {
_id: string;
title: string;
description: string;
primarySkills: string[];
secondarySkills?: string[];
salary: {
min: number;
max: number;
currency: string;
};
applications: string[];
organization: string;
};
export type ApplicationType = {
candidate: string;
job: string;
status: "Pending" | "Accepted" | "In progress" | "Rejected";
interview?: {
date: string;
time: string;
link: string;
videoRef?: string;
voiceVerification?: string;
};
score: {
primary: number;
primaryMatch: string[];
secondary: number;
secondaryMatch: string[];
similarity: number;
total: number;
};
};
export type AnalyseApplicationPayload = {
applicationId: string;
startTime: number;
endTime: number;
};
export interface TypedRequest<T extends Query, U> extends Request {
body: U;
query: T;
......
......@@ -8,11 +8,11 @@ app.use(cors());
// Routes
const authRoute = require("./routes/auth");
// Environment constants
const userRoute = require("./routes/user");
const jobsRoute = require("./routes/jobs");
const applicationsRoute = require("./routes/application");
// Service Initialisation
mongoose.connect(MONGO_URL, {
useFindAndModify: false,
useNewUrlParser: true,
......@@ -26,5 +26,8 @@ app.use(express.json());
// Routes
app.use("/auth", authRoute);
app.use("/user", userRoute);
app.use("/jobs", jobsRoute);
app.use("/applications", applicationsRoute);
app.listen(API_PORT, () => console.log(`Listening on port ${API_PORT}`));
import { Request, Response, NextFunction } from "express";
import * as jwt from "jsonwebtoken";
import { JWT_SECRET } from "../config/contants";
import { TypedRequest, USER_TYPE } from "../config/types";
import Auth from "../models/Auth";
export const authMiddleware = (
req: Request,
......@@ -21,3 +23,37 @@ export const authMiddleware = (
}
}
};
export const organizationMiddleware = async (
req: TypedRequest<{ userId: string }, any>,
res: Response,
next: NextFunction
) => {
try {
const org = await Auth.findOne({ userId: req.query.userId });
if (org && org.userType === USER_TYPE.ORGANIZATION) {
return next();
} else {
throw new Error("Organization not found");
}
} catch (error) {
return res.status(400).send(error);
}
};
export const candidateMiddleware = async (
req: TypedRequest<{ userId: string }, any>,
res: Response,
next: NextFunction
) => {
try {
const org = await Auth.findOne({ userId: req.query.userId });
if (org && org.userType === USER_TYPE.CANDIDATE) {
return next();
} else {
throw new Error("Candidate not found");
}
} catch (error) {
return res.status(400).send(error);
}
};
import { Schema, model } from "mongoose";
import { ApplicationType } from "../config/types";
const applicationSchema = new Schema<ApplicationType>({
candidate: { type: Schema.Types.ObjectId, ref: "candidates" },
job: { type: Schema.Types.ObjectId, ref: "jobs" },
status: { type: String, required: false, default: "Pending" },
interview: {
type: {
date: String,
time: String,
link: String,
videoRef: String,
voiceVerification: String,
},
required: false,
},
score: {
type: {
primary: Number,
primaryMatch: [String],
secondary: Number,
secondaryMatch: [String],
similarity: Number,
total: Number,
},
required: false,
},
});
const Application = model<ApplicationType>("applications", applicationSchema);
export default Application;
import { Schema, model } from "mongoose";
import { AddressType, CandidateType } from "../config/types";
import { AddressType, CandidateType, ResumeDataType } from "../config/types";
const AddressSchema = new Schema<AddressType>(
{
......@@ -19,6 +19,18 @@ const ContactsSchema = new Schema<AddressType>(
},
{ id: false }
);
const ResumeDataSchema = new Schema<ResumeDataType>(
{
skills: { type: [String], required: false },
degree: { type: [String], required: false },
designation: { type: [String], required: false },
experience: { type: [String], required: false },
company_names: { type: [String], required: false },
no_of_pages: { type: Number, required: false },
total_experience: { type: Number, required: false },
},
{ id: false }
);
const candidateSchema = new Schema<CandidateType>({
name: String,
......@@ -27,6 +39,10 @@ const candidateSchema = new Schema<CandidateType>({
dateOfBirth: String,
jobIds: [{ type: Schema.Types.ObjectId, ref: "jobs" }],
profilePicture: String,
state: { type: String, default: "INTIAL" },
resume: { type: String, require: false },
resumeData: { type: ResumeDataSchema, require: false },
selfIntro: { type: String, require: false },
});
const Candidates = model<CandidateType>("candidates", candidateSchema);
......
import { Schema, model } from "mongoose";
import { JobType } from "../config/types";
const jobSchema = new Schema<JobType>({
title: String,
description: String,
primarySkills: { type: [String], required: true },
secondarySkills: { type: [String], required: false },
salary: {
min: Number,
max: Number,
currency: String,
},
applications: [{ type: Schema.Types.ObjectId, ref: "applications" }],
organization: { type: Schema.Types.ObjectId, ref: "organizations" },
});
const Jobs = model<JobType>("jobs", jobSchema);
export default Jobs;
import { Router } from "express";
import {
ApplicationType,
TypedRequest,
AnalyseApplicationPayload,
} from "../config/types";
import { authMiddleware, candidateMiddleware } from "../middlewares/auth";
import Application from "../models/Application";
import Jobs from "../models/Job";
import ResumeAPI from "../utilities/apis/resume";
import VoiceAPI from "../utilities/apis/voice";
const router = Router();
router.post(
"/apply",
authMiddleware,
candidateMiddleware,
async (
req: TypedRequest<
{ userId: string },
{ application: ApplicationType; resumeUrl: string }
>,
res
) => {
try {
const { application, resumeUrl } = req.body;
const job = await Jobs.findById(application.job);
const data: any = await ResumeAPI.getResumeScores({
user_id: req.query.userId,
resume_url: resumeUrl,
primary_skills: job.primarySkills,
secondary_skills: job.secondarySkills,
job_desc: job.description,
});
const score: ApplicationType["score"] = {
primary: data.primary_score,
primaryMatch: data.primary_match,
secondary: data.secondary_score,
secondaryMatch: data.secondary_match,
similarity: data.similarity,
total: data.primary_score + data.secondary_score + data.similarity,
};
const newApplication = new Application({ ...application, score });
const _application = await newApplication.save();
job.applications.push(_application.id);
await job.save();
return res.json({
success: true,
applicationId: _application.id,
});
} catch (error) {
return res.json({ success: false, error });
}
}
);
router.put(
"/update",
authMiddleware,
async (
req: TypedRequest<
{},
{
applicationId: string;
update: Partial<ApplicationType>;
candidateId: string;
}
>,
res
) => {
let update = req.body.update;
if (update.interview?.videoRef) {
try {
VoiceAPI.verifyVoice({
video_url: update.interview?.videoRef,
user_id: req.body.candidateId,
application_id: req.body.applicationId,
});
} catch (error) {
// Voice verification is best-effort; a failure should not block the update.
}
}
Application.findByIdAndUpdate(req.body.applicationId, {
$set: update,
})
.then((_application) => {
res.json({
success: true,
application: { ..._application, ...update },
});
})
.catch((err) => res.send(err));
}
);
router.put(
"/update/voice-verification",
async (
req: TypedRequest<
{},
{
applicationId: string;
update: number;
}
>,
res
) => {
const update = req.body.update.toString();
Application.findByIdAndUpdate(req.body.applicationId, {
$set: { "interview.voiceVerification": update },
})
.then((_application) => {
res.send("success");
})
.catch((err) => res.send(err));
}
);
router.post(
"/analyse",
authMiddleware,
async (req: TypedRequest<{}, AnalyseApplicationPayload>, res) => {
const { applicationId, startTime, endTime } = req.body;
const payload = {
start: startTime,
end: endTime,
application_id: applicationId,
};
try {
const voiceData = await VoiceAPI.analyseVoice(payload);
return res.json({ voice: voiceData });
} catch (error) {
return res.status(500).send(error);
}
}
);
module.exports = router;
......@@ -137,7 +137,7 @@ router.post("/login", async (req: TypedRequest<{}, SignInPayload>, res) => {
}
const token = await jwt.sign({ userId: auth.userId }, JWT_SECRET, {
expiresIn: "2h",
expiresIn: "5h",
});
return res.json({
......
import { Router } from "express";
import {
ApplicationType,
JobType,
TypedRequest,
USER_TYPE,
} from "../config/types";
import {
authMiddleware,
candidateMiddleware,
organizationMiddleware,
} from "../middlewares/auth";
import Application from "../models/Application";
import Auth from "../models/Auth";
import Jobs from "../models/Job";
import ResumeAPI from "../utilities/apis/resume";
const router = Router();
router.get(
"/",
authMiddleware,
async (req: TypedRequest<{ userId: string }, null>, res) => {
try {
const user = await Auth.findOne({ userId: req.query.userId });
let jobs;
if (user.userType === USER_TYPE.CANDIDATE) {
jobs = await Jobs.find()
.populate({
path: "applications",
select: ["candidate", "status"],
})
.populate({ path: "organization" });
} else {
jobs = await Jobs.find({ organization: req.query.userId }).populate({
path: "applications",
populate: {
path: "candidate",
select: [
"name",
"contacts",
"dateOfBirth",
"profilePicture",
"resume",
"resumeData",
"selfIntro",
],
},
});
}
return res.json({ jobs, success: true });
} catch (error) {
return res.json({ error, success: false });
}
}
);
router.post(
"/",
authMiddleware,
organizationMiddleware,
async (req: TypedRequest<{ userId: string }, JobType>, res) => {
try {
const newJob = new Jobs({ ...req.body, organization: req.query.userId });
const job = await newJob.save();
return res.json({ success: true, job });
} catch (error) {
return res.json({ success: false, error });
}
}
);
router.put(
"/",
authMiddleware,
organizationMiddleware,
async (req: TypedRequest<{ userId: string }, JobType>, res) => {
try {
const job = await Jobs.findByIdAndUpdate(req.body._id, {
$set: req.body,
});
return res.json({ success: true, job });
} catch (error) {
return res.json({ success: false, error });
}
}
);
router.delete(
"/",
authMiddleware,
organizationMiddleware,
async (req: TypedRequest<{ userId: string }, { jobId: string }>, res) => {
try {
await Jobs.deleteOne({
organization: req.query.userId,
_id: req.body.jobId,
});
return res.json({ success: true });
} catch (error) {
return res.json({ success: false });
}
}
);
module.exports = router;
import { Router } from "express";
import { CandidateType, TypedRequest } from "../config/types";
import { authMiddleware } from "../middlewares/auth";
import Candidates from "../models/Candidate";
import ResumeAPI from "../utilities/apis/resume";
import VoiceAPI from "../utilities/apis/voice";
const router = Router();
router.post(
"/candidate",
authMiddleware,
async (req: TypedRequest<{ userId: string }, CandidateType>, res) => {
try {
const update = req.body;
if (req.body?.resume) {
const data: any = await ResumeAPI.extractResumeData({
user_id: req.query.userId,
resume_url: req.body.resume,
});
update.resumeData = data;
}
await Candidates.findByIdAndUpdate(req.query.userId, { $set: update });
if (req.body?.selfIntro) {
try {
VoiceAPI.enrollVoice({
user_id: req.query.userId,
video_url: req.body.selfIntro,
});
} catch (error) {
// Voice enrollment is best-effort; ignore failures here.
}
}
return res.status(200).json({ data: req.body });
} catch (error) {
return res.status(400).send(error);
}
}
);
module.exports = router;
import { request } from "../requests";
export default class FacialAPI {
static analyseEyeBlinks = (payload: {
start: number;
end: number;
application_id: string;
}) => request("<BASE_URL>/facial/eye-blinks", "POST", payload);
}
import { request } from "../requests";
export default class ResumeAPI {
static extractResumeData = (payload: {
resume_url: string;
user_id: string;
}) => request("<BASE_URL>/resume/extract", "POST", payload);
static getResumeScores = (payload: {
resume_url: string;
user_id: string;
primary_skills: string[];
secondary_skills: string[];
job_desc: string;
}) => request("<BASE_URL>/resume/get-scores", "POST", payload);
}
import { request } from "../requests";
export default class VoiceAPI {
static enrollVoice = (payload: { video_url: string; user_id: string }) =>
request("<BASE_URL>/voice/enroll", "POST", payload);
static verifyVoice = (payload: {
video_url: string;
user_id: string;
application_id: string;
}) => request("<BASE_URL>/voice/verify", "POST", payload);
static analyseVoice = (payload: {
start: number;
end: number;
application_id: string;
}) => request("<BASE_URL>/voice/analyse", "POST", payload);
}
......@@ -401,8 +401,15 @@ export const processAttempt = ({
accepted: false,
};
result.accepted =
result.standard.inRange.full || result.fullStandard.inRange.full;
const standardCheck = controls.standard.use
? result.standard.inRange.full
: true;
const fullStandardCheck = controls.fullStandard.use
? result.fullStandard.inRange.full
: true;
result.accepted = standardCheck || fullStandardCheck;
return result;
};
......
import axios, { AxiosError, AxiosResponse, AxiosRequestConfig } from "axios";
import { UTILITY_SERVER } from "../config/contants";
axios.interceptors.response.use(
(response) => response,
(error: AxiosError) => {
return Promise.reject(error);
}
);
export const request = (
url: AxiosRequestConfig["url"],
method: AxiosRequestConfig["method"],
requestData?: AxiosRequestConfig["data"] | AxiosRequestConfig["params"],
contentType?: string
) =>
new Promise(async (resolve, reject) => {
const endpoint = url?.replace?.("<BASE_URL>", UTILITY_SERVER);
const params = method === "GET" ? requestData : null;
const data = method === "GET" ? null : requestData;
const headers = {
"Content-Type": contentType || "application/json",
};
axios({
url: endpoint,
method,
data,
params,
headers,
timeout: 90000,
})
.then(async (response: AxiosResponse) => {
resolve(response.data);
})
.catch(async (error: AxiosError) => {
if (error?.response) {
return reject(error?.response?.data);
}
reject(error);
});
});
......@@ -276,6 +276,20 @@ astral-regex@^2.0.0:
resolved "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz"
integrity sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==
asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
axios@^1.3.4:
version "1.3.4"
resolved "https://registry.yarnpkg.com/axios/-/axios-1.3.4.tgz#f5760cefd9cfb51fd2481acf88c05f67c4523024"
integrity sha512-toYm+Bsyl6VC5wSkfkbbNB6ROv7KY93PEBBL6xyDczaIHasAiv4wPqQ/c4RjoQzipxRD2W5g21cOqQulZ7rHwQ==
dependencies:
follow-redirects "^1.15.0"
form-data "^4.0.0"
proxy-from-env "^1.1.0"
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz"
......@@ -441,6 +455,13 @@ color-name@~1.1.4:
resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
combined-stream@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
dependencies:
delayed-stream "~1.0.0"
complex.js@^2.0.11:
version "2.1.1"
resolved "https://registry.npmjs.org/complex.js/-/complex.js-2.1.1.tgz"
......@@ -546,6 +567,11 @@ define-properties@^1.1.3, define-properties@^1.1.4:
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
delayed-stream@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
denque@^1.4.1:
version "1.5.1"
resolved "https://registry.npmjs.org/denque/-/denque-1.5.1.tgz"
......@@ -934,6 +960,20 @@ flatted@^3.1.0:
resolved "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz"
integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==
follow-redirects@^1.15.0:
version "1.15.2"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13"
integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==
form-data@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452"
integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.8"
mime-types "^2.1.12"
forwarded@0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz"
......@@ -1410,7 +1450,7 @@ mime-db@1.52.0:
resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
mime-types@~2.1.24, mime-types@~2.1.34:
mime-types@^2.1.12, mime-types@~2.1.24, mime-types@~2.1.34:
version "2.1.35"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
......@@ -1682,6 +1722,11 @@ proxy-addr@~2.0.7:
forwarded "0.2.0"
ipaddr.js "1.9.1"
proxy-from-env@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2"
integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==
pstree.remy@^1.1.8:
version "1.1.8"
resolved "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz"
......
__pycache__
**/__pycache__
voices/auth/embed/**
voices/auth/temp/**
voices/interviews/**
videos/interviews/**
resumes
models/**
data/**
\ No newline at end of file
## Install Python
Install Python 3.9 and add it to the environment PATH.
## Create conda Python 3.9 env
conda create -n server tensorflow python=3.9
conda install scipy=1.9.3
- Alternatively, download the matching SciPy wheel from https://pypi.org/project/scipy/#files and install it: pip install <filename>.whl
pip install gensim==3.8.1
pip install texthero==1.1.0
conda install -c conda-forge importlib_metadata
pip install pyresparser==1.0.6
pip install soundfile==0.10.3.post1
pip install librosa==0.9.2
conda install -c blaze sqlite3
conda install tqdm=4.65.0
conda install pandas=1.5.3
conda install pytesseract
conda install "uvicorn[standard]"
conda install -c conda-forge fastapi
conda install websockets
pip install moviepy
pip install "libclang>=13.0.0"
pip install "tensorflow-io-gcs-filesystem>=0.23.1"
pip install pyannote.audio
pip install pyannote.core
pip install PyAudio
pip install python_speech_features
pip install fer==22.4.0
[OPTIONAL]
pip install pillow==9.0.0
conda install "numpy<1.24.0"
conda install cudatoolkit
conda update --all
### Install dlib
- Install Cmake : https://cmake.org/download/
- Install C++ compiler : https://visualstudio.microsoft.com/visual-cpp-build-tools
- pip install cmake
- pip install dlib==19.24
### End dlib
pip install imutils==0.5.4
## Run server
uvicorn main:app --reload
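A quick smoke test, assuming the server is on the default UTILITY_SERVER address (http://127.0.0.1:8000); the root route in main.py answers with a status message:

import requests
print(requests.get("http://127.0.0.1:8000/").json())  # expect {"status": "running"}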
## Datasets & models
voice: https://drive.google.com/file/d/1wWsrN2Ep7x6lWqOXfr4rpKGYrJhWc8z7/view
models: https://drive.google.com/file/d/1TWjIiyyInXHaXySKymM6VcsIR5YMVQ1V/view?usp=share_link
shape_predictor_68_face_landmarks: https://www.kaggle.com/datasets/sergiovirahonda/shape-predictor-68-face-landmarksdat
## Folders
voices/auth/embed
voices/auth/temp
voices/interviews
models
data/voice
### Other conda commands
DELETE ENV : conda remove -n ENV_NAME --all
EXPORT ENV : conda env export > server_env.yaml
CREATE ENV BY ENV FILE : conda env create -f server_env.yaml
EXPORT PACKAGE LIST : conda list --export > requirements.txt
INSTALL ENV : conda install --file requirements.txt
from typing import List, Union
from pydantic import BaseModel
class ExtractResume(BaseModel):
resume_url: str
user_id:str
class ResumeScores(BaseModel):
resume_url: str
user_id:str
primary_skills:List[str] = []
secondary_skills:List[str] = []
job_desc:str
class EnrollVoice(BaseModel):
video_url: str
user_id:str
class VerifyVoice(BaseModel):
video_url: str
application_id:str
user_id:str
class AnalyseVoice(BaseModel):
start: int
end:int
application_id:str
class CountBlinks(BaseModel):
start: int
end:int
application_id:str
\ No newline at end of file
# This file may be used to create an environment using:
# $ conda create --name <env> --file <this file>
# platform: win-64
absl-py=1.4.0=pypi_0
aiohttp=3.8.4=pypi_0
aiosignal=1.3.1=pypi_0
alembic=1.10.2=pypi_0
antlr4-python3-runtime=4.9.3=pypi_0
anyio=3.6.2=pyhd8ed1ab_0
aom=3.5.0=h63175ca_0
asteroid-filterbanks=0.4.0=pypi_0
astunparse=1.6.3=pypi_0
async-timeout=4.0.2=pypi_0
attrs=22.2.0=pypi_0
audioread=3.0.0=pypi_0
backports-cached-property=1.0.2=pypi_0
blis=0.7.9=pypi_0
brotli=1.0.9=hcfcfb64_8
brotli-bin=1.0.9=hcfcfb64_8
brotlipy=0.7.0=py39ha55989b_1005
bzip2=1.0.8=h8ffe710_4
ca-certificates=2022.12.7=h5b45459_0
cachetools=5.3.0=pypi_0
catalogue=1.0.2=pypi_0
certifi=2022.12.7=pyhd8ed1ab_0
cffi=1.15.1=py39h68f70e3_3
chardet=5.1.0=pypi_0
charset-normalizer=3.1.0=pypi_0
click=8.1.3=win_pyhd8ed1ab_2
cmaes=0.9.1=pypi_0
colorama=0.4.6=pyhd8ed1ab_0
colorlog=6.7.0=pypi_0
commonmark=0.9.1=pypi_0
contourpy=1.0.7=py39h1f6ef14_0
cryptography=40.0.1=py39hb6bd5e6_0
cycler=0.11.0=pyhd8ed1ab_0
cymem=2.0.7=pypi_0
decorator=5.1.1=pyhd8ed1ab_0
docopt=0.6.2=pypi_0
docx2txt=0.8=pypi_0
einops=0.3.2=pypi_0
en-core-web-sm=2.3.1=pypi_0
expat=2.5.0=h1537add_0
fastapi=0.95.0=pyhd8ed1ab_0
ffmpeg=5.1.2=gpl_h5b1d025_106
filelock=3.10.7=pypi_0
flatbuffers=23.3.3=pypi_0
font-ttf-dejavu-sans-mono=2.37=hab24e00_0
font-ttf-inconsolata=3.000=h77eed37_0
font-ttf-source-code-pro=2.038=h77eed37_0
font-ttf-ubuntu=0.83=hab24e00_0
fontconfig=2.14.2=hbde0cde_0
fonts-conda-ecosystem=1=0
fonts-conda-forge=1=0
fonttools=4.39.3=py39ha55989b_0
freetype=2.12.1=h546665d_1
frozenlist=1.3.3=pypi_0
fsspec=2023.3.0=pypi_0
future=0.18.3=pyhd8ed1ab_0
gast=0.4.0=pypi_0
gensim=3.8.1=pypi_0
gettext=0.21.1=h5728263_0
glib=2.74.1=h12be248_1
glib-tools=2.74.1=h12be248_1
google-auth=2.17.0=pypi_0
google-auth-oauthlib=0.4.6=pypi_0
google-pasta=0.2.0=pypi_0
greenlet=2.0.2=pypi_0
grpcio=1.53.0=pypi_0
gst-plugins-base=1.22.0=h001b923_2
gstreamer=1.22.0=h6b5321d_2
h11=0.14.0=pyhd8ed1ab_0
h5py=3.8.0=pypi_0
hmmlearn=0.2.8=pypi_0
huggingface-hub=0.13.3=pypi_0
hyperpyyaml=1.1.0=pypi_0
icu=70.1=h0e60522_0
idna=3.4=pyhd8ed1ab_0
imageio=2.27.0=pyh24c5eb1_0
imageio-ffmpeg=0.4.8=pyhd8ed1ab_0
importlib-metadata=6.1.0=pyha770c72_0
importlib-resources=5.12.0=pyhd8ed1ab_0
importlib_metadata=6.1.0=hd8ed1ab_0
importlib_resources=5.12.0=pyhd8ed1ab_0
intel-openmp=2023.0.0=h57928b3_25922
joblib=1.2.0=pyhd8ed1ab_0
jsonschema=4.17.3=pypi_0
julius=0.2.7=pypi_0
keras=2.10.0=pypi_0
keras-preprocessing=1.1.2=pypi_0
kiwisolver=1.4.4=py39h1f6ef14_1
krb5=1.20.1=heb0366b_0
lcms2=2.15=h3e3b177_1
lerc=4.0.0=h63175ca_0
libblas=3.9.0=16_win64_mkl
libbrotlicommon=1.0.9=hcfcfb64_8
libbrotlidec=1.0.9=hcfcfb64_8
libbrotlienc=1.0.9=hcfcfb64_8
libcblas=3.9.0=16_win64_mkl
libclang=16.0.0=pypi_0
libclang13=15.0.7=default_h77d9078_1
libdeflate=1.18=hcfcfb64_0
libffi=3.4.2=h8ffe710_5
libglib=2.74.1=he8f3873_1
libhwloc=2.9.0=h51c2c0f_0
libiconv=1.17=h8ffe710_0
libjpeg-turbo=2.1.5.1=hcfcfb64_0
liblapack=3.9.0=16_win64_mkl
libogg=1.3.4=h8ffe710_1
libopus=1.3.1=h8ffe710_1
libpng=1.6.39=h19919ed_0
librosa=0.9.2=pypi_0
libsqlite=3.40.0=hcfcfb64_0
libtiff=4.5.0=h6c8260b_6
libvorbis=1.3.7=h0e60522_0
libwebp-base=1.3.0=hcfcfb64_0
libxcb=1.13=hcd874cb_1004
libxml2=2.10.3=hc3477c8_6
libzlib=1.2.13=hcfcfb64_4
llvmlite=0.39.1=pypi_0
m2w64-gcc-libgfortran=5.3.0=6
m2w64-gcc-libs=5.3.0=7
m2w64-gcc-libs-core=5.3.0=7
m2w64-gmp=6.1.0=2
m2w64-libwinpthread-git=5.0.0.4634.697f757=2
mako=1.2.4=pypi_0
markdown=3.4.3=pypi_0
markupsafe=2.1.2=pypi_0
matplotlib=3.7.1=py39hcbf5309_0
matplotlib-base=3.7.1=py39haf65ace_0
mkl=2022.1.0=h6a75c08_874
moviepy=1.0.3=pyhd8ed1ab_1
mpmath=1.3.0=pypi_0
msys2-conda-epoch=20160418=1
multidict=6.0.4=pypi_0
munkres=1.1.4=pyh9f0ad1d_0
murmurhash=1.0.9=pypi_0
networkx=2.8.8=pypi_0
nltk=3.8.1=pypi_0
numba=0.56.4=pypi_0
numpy=1.23.5=pypi_0
oauthlib=3.2.2=pypi_0
olefile=0.46=pypi_0
omegaconf=2.3.0=pypi_0
openh264=2.3.1=h63175ca_2
openjpeg=2.5.0=ha2aaf27_2
openssl=3.1.0=hcfcfb64_0
opt-einsum=3.3.0=pypi_0
optuna=3.1.0=pypi_0
packaging=23.0=pyhd8ed1ab_0
pandas=1.5.3=py39h2ba5b7c_0
pcre2=10.40=h17e33f8_0
pdfminer=20191125=pyhd8ed1ab_1
pdfminer-six=20221105=pypi_0
pillow=9.4.0=py39haa1d754_2
pip=23.0.1=pyhd8ed1ab_0
plac=1.1.3=pypi_0
platformdirs=3.2.0=pypi_0
plotly=5.13.1=pypi_0
ply=3.11=py_1
pooch=1.7.0=pypi_0
preprocess=1.2.3=py_1
preshed=3.0.8=pypi_0
primepy=1.3=pypi_0
proglog=0.1.9=py_0
protobuf=3.19.6=pypi_0
pthread-stubs=0.4=hcd874cb_1001
pthreads-win32=2.9.1=hfa6e2cd_3
pyannote-audio=2.1.1=pypi_0
pyannote-core=4.5=pypi_0
pyannote-database=4.1.3=pypi_0
pyannote-metrics=3.2.1=pypi_0
pyannote-pipeline=2.3=pypi_0
pyasn1=0.4.8=pypi_0
pyasn1-modules=0.2.8=pypi_0
pyaudio=0.2.13=pypi_0
pycparser=2.21=pyhd8ed1ab_0
pycryptodome=3.17=pypi_0
pydantic=1.10.7=py39ha55989b_0
pydeprecate=0.3.2=pypi_0
pygments=2.14.0=pypi_0
pyopenssl=23.1.1=pyhd8ed1ab_0
pyparsing=3.0.9=pyhd8ed1ab_0
pyqt=5.15.7=py39hb77abff_3
pyqt5-sip=12.11.0=py39h99910a6_3
pyresparser=1.0.6=pypi_0
pyrsistent=0.19.3=pypi_0
pysocks=1.7.1=pyh0701188_6
python=3.9.16=h4de0772_0_cpython
python-dateutil=2.8.2=pyhd8ed1ab_0
python-speech-features=0.6=pypi_0
python_abi=3.9=3_cp39
pytorch-lightning=1.6.5=pypi_0
pytorch-metric-learning=1.7.3=pypi_0
pytz=2023.3=pyhd8ed1ab_0
pyyaml=6.0=pypi_0
qt-main=5.15.8=h88fe7eb_7
regex=2023.3.23=pypi_0
requests=2.28.2=pyhd8ed1ab_0
requests-oauthlib=1.3.1=pypi_0
resampy=0.4.2=pypi_0
rich=12.6.0=pypi_0
rsa=4.9=pypi_0
ruamel-yaml=0.17.21=pypi_0
ruamel-yaml-clib=0.2.7=pypi_0
scikit-learn=1.2.0=py39hd77b12b_1
scipy=1.9.3=py39hfbf2dce_2
semver=2.13.0=pypi_0
sentencepiece=0.1.97=pypi_0
setuptools=67.6.1=pyhd8ed1ab_0
shellingham=1.5.0.post1=pypi_0
simplejson=3.18.4=pypi_0
singledispatchmethod=1.0=pypi_0
sip=6.7.7=py39h99910a6_0
six=1.16.0=pyh6c4a22f_0
smart-open=6.3.0=pypi_0
sniffio=1.3.0=pyhd8ed1ab_0
sortedcontainers=2.4.0=pypi_0
soundfile=0.10.3.post1=pypi_0
spacy=2.3.9=pypi_0
speechbrain=0.5.14=pypi_0
sqlalchemy=2.0.7=pypi_0
sqlite3=3.8.6=0
srsly=1.0.6=pypi_0
starlette=0.26.1=pyhd8ed1ab_0
svt-av1=1.4.1=h63175ca_0
sympy=1.11.1=pypi_0
tabulate=0.9.0=pypi_0
tbb=2021.8.0=h91493d7_0
tenacity=8.2.2=pypi_0
tensorboard=2.10.1=pypi_0
tensorboard-data-server=0.6.1=pypi_0
tensorboard-plugin-wit=1.8.1=pypi_0
tensorflow=2.10.1=pypi_0
tensorflow-estimator=2.10.0=pypi_0
tensorflow-io-gcs-filesystem=0.31.0=pypi_0
termcolor=2.2.0=pypi_0
texthero=1.1.0=pypi_0
thinc=7.4.6=pypi_0
threadpoolctl=3.1.0=pyh8a188c0_0
tk=8.6.12=h8ffe710_0
toml=0.10.2=pyhd8ed1ab_0
torch=1.13.1=pypi_0
torch-audiomentations=0.11.0=pypi_0
torch-pitch-shift=1.2.2=pypi_0
torchaudio=0.13.1=pypi_0
torchmetrics=0.11.4=pypi_0
tornado=6.2=py39ha55989b_1
tqdm=4.65.0=pyhd8ed1ab_1
typer=0.7.0=pypi_0
typing-extensions=4.5.0=hd8ed1ab_0
typing_extensions=4.5.0=pyha770c72_0
tzdata=2023c=h71feb2d_0
ucrt=10.0.22621.0=h57928b3_0
unicodedata2=15.0.0=py39ha55989b_0
unidecode=1.3.6=pypi_0
urllib3=1.26.15=pyhd8ed1ab_0
uvicorn=0.21.1=py39hcbf5309_0
vc=14.3=hb6edc58_10
vs2015_runtime=14.34.31931=h4c5c07a_10
wasabi=0.10.1=pypi_0
werkzeug=2.2.3=pypi_0
wheel=0.40.0=pyhd8ed1ab_0
win_inet_pton=1.1.0=pyhd8ed1ab_6
wordcloud=1.8.2.2=pypi_0
wrapt=1.15.0=pypi_0
x264=1!164.3095=h8ffe710_2
x265=3.5=h2d74725_3
xorg-libxau=1.0.9=hcd874cb_0
xorg-libxdmcp=1.1.3=hcd874cb_0
xz=5.2.6=h8d14728_0
yarl=1.8.2=pypi_0
zipp=3.15.0=pyhd8ed1ab_0
zstd=1.5.2=h12be248_6
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
# from fer import FER
import routes.resume as resumes
import routes.voice as voice
import routes.facial as facial
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(resumes.router)
app.include_router(voice.router)
app.include_router(facial.router)
@app.get("/")
def read_root():
return {"status": "running"}
absl-py==1.4.0
aiohttp==3.8.4
aiosignal==1.3.1
alembic==1.10.2
antlr4-python3-runtime==4.9.3
anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1666191106763/work/dist
asteroid-filterbanks==0.4.0
astunparse==1.6.3
async-timeout==4.0.2
attrs==22.2.0
audioread==3.0.0
backports.cached-property==1.0.2
blis==0.7.9
brotlipy @ file:///D:/bld/brotlipy_1666764815687/work
cachetools==5.3.0
catalogue==1.0.2
certifi==2022.12.7
cffi @ file:///D:/bld/cffi_1671179514672/work
chardet==5.1.0
charset-normalizer==3.1.0
click @ file:///D:/bld/click_1666798499870/work
cmaes==0.9.1
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work
colorlog==6.7.0
commonmark==0.9.1
contourpy @ file:///D:/bld/contourpy_1673633852898/work
cryptography @ file:///D:/bld/cryptography-split_1679811407000/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work
cymem==2.0.7
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
docopt==0.6.2
docx2txt==0.8
einops==0.3.2
en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.3.1/en_core_web_sm-2.3.1.tar.gz
fastapi @ file:///home/conda/feedstock_root/build_artifacts/fastapi_1679196090342/work
filelock==3.10.7
flatbuffers==23.3.3
fonttools @ file:///D:/bld/fonttools_1680021390608/work
frozenlist==1.3.3
fsspec==2023.3.0
future @ file:///home/conda/feedstock_root/build_artifacts/future_1673596611778/work
gast==0.4.0
gensim==3.8.1
google-auth==2.17.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
greenlet==2.0.2
grpcio==1.53.0
h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1664132893548/work
h5py==3.8.0
hmmlearn==0.2.8
huggingface-hub==0.13.3
HyperPyYAML==1.1.0
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1663625384323/work
imageio @ file:///home/conda/feedstock_root/build_artifacts/imageio_1679914882579/work
imageio-ffmpeg @ file:///home/conda/feedstock_root/build_artifacts/imageio-ffmpeg_1673483481485/work
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1679167925176/work
importlib-resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1676919000169/work
joblib @ file:///home/conda/feedstock_root/build_artifacts/joblib_1663332044897/work
jsonschema==4.17.3
julius==0.2.7
keras==2.10.0
Keras-Preprocessing==1.1.2
kiwisolver @ file:///D:/bld/kiwisolver_1666805897768/work
libclang==16.0.0
librosa==0.9.2
llvmlite==0.39.1
Mako==1.2.4
Markdown==3.4.3
MarkupSafe==2.1.2
matplotlib @ file:///D:/bld/matplotlib-suite_1678135799522/work
moviepy @ file:///home/conda/feedstock_root/build_artifacts/moviepy_1665160419595/work
mpmath==1.3.0
multidict==6.0.4
munkres==1.1.4
murmurhash==1.0.9
networkx==2.8.8
nltk==3.8.1
numba==0.56.4
numpy==1.23.5
oauthlib==3.2.2
olefile==0.46
omegaconf==2.3.0
opt-einsum==3.3.0
optuna==3.1.0
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1673482170163/work
pandas @ file:///D:/bld/pandas_1674136542219/work
pdfminer @ file:///home/conda/feedstock_root/build_artifacts/pdfminer_1613401440402/work
pdfminer.six==20221105
Pillow @ file:///D:/bld/pillow_1678273632076/work
plac==1.1.3
platformdirs==3.2.0
plotly==5.13.1
ply==3.11
pooch==1.7.0
preprocess==1.2.3
preshed==3.0.8
primePy==1.3
proglog==0.1.9
protobuf==3.19.6
pyannote.audio==2.1.1
pyannote.core==4.5
pyannote.database==4.1.3
pyannote.metrics==3.2.1
pyannote.pipeline==2.3
pyasn1==0.4.8
pyasn1-modules==0.2.8
PyAudio==0.2.13
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
pycryptodome==3.17
pydantic @ file:///D:/bld/pydantic_1679565539355/work
pyDeprecate==0.3.2
Pygments==2.14.0
pyOpenSSL @ file:///home/conda/feedstock_root/build_artifacts/pyopenssl_1680037383858/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1652235407899/work
PyQt5==5.15.7
PyQt5-sip @ file:///D:/bld/pyqt-split_1674666735227/work/pyqt_sip
pyresparser==1.0.6
pyrsistent==0.19.3
PySocks @ file:///D:/bld/pysocks_1661604991356/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
python-speech-features==0.6
pytorch-lightning==1.6.5
pytorch-metric-learning==1.7.3
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1680088766131/work
PyYAML==6.0
regex==2023.3.23
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1673863902341/work
requests-oauthlib==1.3.1
resampy==0.4.2
rich==12.6.0
rsa==4.9
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
scikit-learn @ file:///C:/b/abs_e01rh8f1vi/croot/scikit-learn_1675454931501/work
scipy==1.9.3
semver==2.13.0
sentencepiece==0.1.97
shellingham==1.5.0.post1
simplejson==3.18.4
singledispatchmethod==1.0
sip @ file:///D:/bld/sip_1675696791179/work
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
smart-open==6.3.0
sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1662051266223/work
sortedcontainers==2.4.0
SoundFile==0.10.3.post1
spacy==2.3.9
speechbrain==0.5.14
SQLAlchemy==2.0.7
srsly==1.0.6
starlette @ file:///home/conda/feedstock_root/build_artifacts/starlette-recipe_1678817698143/work
sympy==1.11.1
tabulate==0.9.0
tenacity==8.2.2
tensorboard==2.10.1
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.10.1
tensorflow-estimator==2.10.0
tensorflow-io-gcs-filesystem==0.31.0
termcolor==2.2.0
texthero==1.1.0
thinc==7.4.6
threadpoolctl @ file:///home/conda/feedstock_root/build_artifacts/threadpoolctl_1643647933166/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work
torch==1.13.1
torch-audiomentations==0.11.0
torch-pitch-shift==1.2.2
torchaudio==0.13.1
torchmetrics==0.11.4
tornado @ file:///D:/bld/tornado_1666788767305/work
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1677948868469/work
typer==0.7.0
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1678559861143/work
unicodedata2 @ file:///D:/bld/unicodedata2_1667240049903/work
Unidecode==1.3.6
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1678635778344/work
uvicorn @ file:///D:/bld/uvicorn-split_1678984112139/work
wasabi==0.10.1
Werkzeug==2.2.3
win-inet-pton @ file:///D:/bld/win_inet_pton_1667051142467/work
wordcloud==1.8.2.2
wrapt==1.15.0
yarl==1.8.2
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work
# This file may be used to create an environment using:
# $ conda create --name <env> --file <this file>
# platform: win-64
_tflow_select=2.3.0=mkl
absl-py=1.3.0=py39haa95532_0
aiohttp=3.8.3=py39h2bbff1b_0
aiosignal=1.2.0=pyhd3eb1b0_0
alembic=1.10.3=pypi_0
antlr4-python3-runtime=4.9.3=pypi_0
appdirs=1.4.4=pyhd3eb1b0_0
asteroid-filterbanks=0.4.0=pypi_0
astunparse=1.6.3=py_0
async-timeout=4.0.2=py39haa95532_0
attrs=22.1.0=py39haa95532_0
audioread=3.0.0=pypi_0
backports-cached-property=1.0.2=pypi_0
blas=1.0=mkl
blinker=1.4=py39haa95532_0
blis=0.7.9=pypi_0
bottleneck=1.3.5=py39h080aedc_0
brotlipy=0.7.0=py39h2bbff1b_1003
ca-certificates=2023.01.10=haa95532_0
cachetools=4.2.2=pyhd3eb1b0_0
catalogue=1.0.2=pypi_0
certifi=2022.12.7=py39haa95532_0
cffi=1.15.1=py39h2bbff1b_3
chardet=5.1.0=pypi_0
charset-normalizer=2.0.4=pyhd3eb1b0_0
click=8.0.4=py39haa95532_0
cmaes=0.9.1=pypi_0
colorama=0.4.6=py39haa95532_0
colorlog=6.7.0=pypi_0
commonmark=0.9.1=pypi_0
contourpy=1.0.7=pypi_0
cryptography=39.0.1=py39h21b164f_0
cycler=0.11.0=pypi_0
cymem=2.0.7=pypi_0
decorator=5.1.1=pypi_0
docopt=0.6.2=pypi_0
docx2txt=0.8=pypi_0
einops=0.3.2=pypi_0
fer=22.4.0=pypi_0
filelock=3.11.0=pypi_0
flatbuffers=2.0.0=h6c2663c_0
flit-core=3.8.0=py39haa95532_0
fonttools=4.39.3=pypi_0
frozenlist=1.3.3=py39h2bbff1b_0
fsspec=2023.4.0=pypi_0
gast=0.4.0=pyhd3eb1b0_0
gensim=3.8.1=pypi_0
giflib=5.2.1=h8cc25b3_3
google-auth=2.6.0=pyhd3eb1b0_0
google-auth-oauthlib=0.4.4=pyhd3eb1b0_0
google-pasta=0.2.0=pyhd3eb1b0_0
greenlet=2.0.2=pypi_0
grpcio=1.42.0=py39hc60d5dd_0
h11=0.12.0=pyhd3eb1b0_0
h5py=3.7.0=py39h3de5c98_0
hdf5=1.10.6=h1756f20_1
hmmlearn=0.2.8=pypi_0
huggingface-hub=0.13.4=pypi_0
hyperpyyaml=1.2.0=pypi_0
icc_rt=2022.1.0=h6049295_2
icu=58.2=ha925a31_3
idna=3.4=py39haa95532_0
importlib-metadata=6.0.0=py39haa95532_0
importlib-resources=5.12.0=pypi_0
importlib_metadata=6.0.0=hd3eb1b0_0
intel-openmp=2021.4.0=haa95532_3556
joblib=1.2.0=pypi_0
jpeg=9e=h2bbff1b_1
jsonschema=4.17.3=pypi_0
julius=0.2.7=pypi_0
keras=2.10.0=py39haa95532_0
keras-preprocessing=1.1.2=pyhd3eb1b0_0
kiwisolver=1.4.4=pypi_0
libclang=16.0.0=pypi_0
libcurl=7.88.1=h86230a5_0
libpng=1.6.39=h8cc25b3_0
libprotobuf=3.20.3=h23ce68f_0
librosa=0.9.2=pypi_0
libssh2=1.10.0=hcd4344a_0
llvmlite=0.39.1=pypi_0
mako=1.2.4=pypi_0
markdown=3.4.1=py39haa95532_0
markupsafe=2.1.1=py39h2bbff1b_0
matplotlib=3.7.1=pypi_0
mkl=2021.4.0=haa95532_640
mkl-service=2.4.0=py39h2bbff1b_0
mkl_fft=1.3.1=py39h277e83a_0
mkl_random=1.2.2=py39hf11a4ad_0
mpmath=1.3.0=pypi_0
mtcnn=0.1.1=pypi_0
multidict=6.0.2=py39h2bbff1b_0
murmurhash=1.0.9=pypi_0
networkx=2.8.8=pypi_0
nltk=3.8.1=pypi_0
numba=0.56.4=pypi_0
numexpr=2.8.4=py39h5b0cc5e_0
numpy=1.23.5=py39h3b20f71_0
numpy-base=1.23.5=py39h4da318b_0
oauthlib=3.2.2=py39haa95532_0
omegaconf=2.3.0=pypi_0
opencv-contrib-python=4.7.0.72=pypi_0
opencv-python=4.7.0.72=pypi_0
openssl=1.1.1t=h2bbff1b_0
opt_einsum=3.3.0=pyhd3eb1b0_1
optuna=3.1.1=pypi_0
packaging=23.0=py39haa95532_0
pandas=2.0.0=pypi_0
pdfminer-six=20221105=pypi_0
pillow=9.5.0=pypi_0
pip=23.0.1=py39haa95532_0
plac=1.1.3=pypi_0
plotly=5.14.1=pypi_0
pooch=1.4.0=pyhd3eb1b0_0
preshed=3.0.8=pypi_0
primepy=1.3=pypi_0
protobuf=3.19.6=pypi_0
pyannote-audio=2.1.1=pypi_0
pyannote-core=4.5=pypi_0
pyannote-database=4.1.3=pypi_0
pyannote-metrics=3.2.1=pypi_0
pyannote-pipeline=2.3=pypi_0
pyasn1=0.4.8=pyhd3eb1b0_0
pyasn1-modules=0.2.8=py_0
pyaudio=0.2.13=pypi_0
pycparser=2.21=pyhd3eb1b0_0
pycryptodome=3.17=pypi_0
pydeprecate=0.3.2=pypi_0
pygments=2.14.0=pypi_0
pyjwt=2.4.0=py39haa95532_0
pyopenssl=23.0.0=py39haa95532_0
pyparsing=3.0.9=pypi_0
pyresparser=1.0.6=pypi_0
pyrsistent=0.19.3=pypi_0
pysocks=1.7.1=py39haa95532_0
python=3.9.16=h6244533_2
python-dateutil=2.8.2=pyhd3eb1b0_0
python-flatbuffers=2.0=pyhd3eb1b0_0
python-speech-features=0.6=pypi_0
pytorch-lightning=1.6.5=pypi_0
pytorch-metric-learning=1.7.3=pypi_0
pytz=2023.3=pypi_0
pyyaml=6.0=pypi_0
regex=2023.3.23=pypi_0
requests=2.28.1=py39haa95532_1
requests-oauthlib=1.3.0=py_0
resampy=0.4.2=pypi_0
rich=12.6.0=pypi_0
rsa=4.7.2=pyhd3eb1b0_1
ruamel-yaml=0.17.21=pypi_0
ruamel-yaml-clib=0.2.7=pypi_0
scikit-learn=1.2.2=pypi_0
scipy=1.9.3=py39h321e85e_1
semver=2.13.0=pypi_0
sentencepiece=0.1.97=pypi_0
setuptools=65.6.3=py39haa95532_0
shellingham=1.5.0.post1=pypi_0
simplejson=3.19.1=pypi_0
singledispatchmethod=1.0=pypi_0
six=1.16.0=pyhd3eb1b0_1
smart-open=6.3.0=pypi_0
snappy=1.1.9=h6c2663c_0
sortedcontainers=2.4.0=pypi_0
soundfile=0.10.3.post1=pypi_0
spacy=2.3.9=pypi_0
speechbrain=0.5.14=pypi_0
sqlalchemy=2.0.9=pypi_0
sqlite=3.41.1=h2bbff1b_0
sqlite3=3.8.6=0
srsly=1.0.6=pypi_0
sympy=1.11.1=pypi_0
tabulate=0.9.0=pypi_0
tenacity=8.2.2=pypi_0
tensorboard=2.10.0=py39haa95532_0
tensorboard-data-server=0.6.1=py39haa95532_0
tensorboard-plugin-wit=1.8.1=py39haa95532_0
tensorflow=2.10.0=mkl_py39ha510bab_0
tensorflow-base=2.10.0=mkl_py39h6a7f48e_0
tensorflow-estimator=2.10.0=py39haa95532_0
tensorflow-io-gcs-filesystem=0.31.0=pypi_0
termcolor=2.1.0=py39haa95532_0
texthero=1.1.0=pypi_0
thinc=7.4.6=pypi_0
threadpoolctl=3.1.0=pypi_0
torch=1.13.1=pypi_0
torch-audiomentations=0.11.0=pypi_0
torch-pitch-shift=1.2.3=pypi_0
torchaudio=0.13.1=pypi_0
torchmetrics=0.11.4=pypi_0
tqdm=4.65.0=py39hd4e2768_0
typer=0.7.0=pypi_0
typing_extensions=4.4.0=py39haa95532_0
tzdata=2023.3=pypi_0
unidecode=1.3.6=pypi_0
urllib3=1.26.15=py39haa95532_0
uvicorn=0.20.0=py39haa95532_0
vc=14.2=h21ff451_1
vs2015_runtime=14.27.29016=h5e58377_2
wasabi=0.10.1=pypi_0
websockets=10.4=py39h2bbff1b_1
werkzeug=2.2.3=py39haa95532_0
wheel=0.38.4=py39haa95532_0
win_inet_pton=1.1.0=py39haa95532_0
wincertstore=0.2=py39haa95532_2
wordcloud=1.8.2.2=pypi_0
wrapt=1.14.1=py39h2bbff1b_0
yarl=1.8.1=py39h2bbff1b_0
zipp=3.11.0=py39haa95532_0
zlib=1.2.13=h8cc25b3_0
import pandas as pd
import os
from scripts.processing import document_processing
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
skills = {
"primary" : ['Python', 'Machine Learning', 'node.js', 'AWS', 'Kubernetese', 'NLP', 'GCP', 'predective', 'OCR'],
"secondary" : ['data', 'science', 'modeling', 'anomaly', 'privacy', 'visualization', 'OCR'],
}
def document_score(df):
# Page score
df.loc[df['no_of_pages'] == 1, ['page_score']] = 100
df.loc[df['no_of_pages'] == 2, ['page_score']] = 60
df.loc[(df['no_of_pages'] > 2) |
(df['no_of_pages'] == 0), ['page_score']] = 30
# Word score
df.loc[(df['words'] >= 200) & (df['words'] < 400),
['word_score']] = 100
df.loc[(df['words'] >= 400) & (df['words'] < 600),
['word_score']] = 70
df.loc[((df['words'] > 0) & (df['words'] < 200))|
(df['words'] > 600) | (df['words'].isnull()),
['word_score']] = 40
df['document_score'] = (df['page_score'] + df['word_score']) * 0.25
df.drop(['word_score', 'page_score'], axis=1, inplace=True)
return df
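# A minimal worked example of the rules above, on hypothetical numbers: a
# one-page resume with 350 words gets page_score=100 and word_score=100,
# so document_score = (100 + 100) * 0.25 = 50.0.
def _document_score_example():
    demo = pd.DataFrame({'no_of_pages': [1], 'words': [350]})
    return document_score(demo)  # demo['document_score'][0] == 50.0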
if __name__=='__main__':
resume_dir = 'resumes/'
jd_file = 'Job_description.txt'
list_of_resumes = os.listdir(resume_dir) # list_of_resumes = ['Dhaval_Thakkar_Resume.pdf', 'asdasdasd.pdf']
df = pd.DataFrame()
for file in tqdm(list_of_resumes):
result = document_processing(resume_dir+file, skills, jd_file)
candidate = result.skills_match()
df = pd.concat([df, candidate], ignore_index=True)
df = document_score(df)
# Final score
df['Score'] = df['primary_score'] + df['secondary_score'] + df['document_score'] + df['document_similarity']
df = df.sort_values('Score', ascending=False)
df = df.reset_index(drop=True)
print(df)
df.to_csv('Candidates_score.csv', index=False)
\ No newline at end of file
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from fer import FER
import cv2
import imutils
from scripts.blink_detection import count_blinks
from baseModels.payloads import CountBlinks
from scripts.web_socket import ConnectionManager
import json
manager = ConnectionManager()
router = APIRouter(prefix='/facial')
@router.post("/eye-blinks")
def countBlinks(payload:CountBlinks):
video_filename = 'videos/interviews/'+payload.application_id+'.mp4'
count = count_blinks(video_filename, payload.start, payload.end)
return count
@router.websocket("/ws/eye-blinks")
async def countBlinksWs(websocket: WebSocket, application_id: str, start:str, end:str):
await manager.connect(websocket)
try:
while True:
video_filename = 'videos/interviews/'+application_id+'.mp4'
count = count_blinks(video_filename, int(start), int(end))
await manager.send_private(json.dumps({"count":count, "end":True}), websocket)
await manager.disconnect(websocket)
break
except WebSocketDisconnect:
await manager.send_private(json.dumps({"end":True}), websocket)
await manager.disconnect(websocket)
return
@router.websocket("/ws/emotions")
async def emotions(websocket: WebSocket, application_id: str):
await manager.connect(websocket)
video_filename = 'videos/interviews/'+application_id+'.mp4'
face_detector = FER(mtcnn=True)
cap = cv2.VideoCapture(video_filename)
try:
while True:
ret, frame = cap.read()
if not ret:
await manager.send_private(json.dumps({"data":[], "end":True}), websocket)
await manager.disconnect(websocket)
break
frame = imutils.resize(frame, width=800)
emotions = face_detector.detect_emotions(frame)
print(emotions)
await manager.send_private(json.dumps({"data":emotions, "end":False}), websocket)
except WebSocketDisconnect:
await manager.send_private(json.dumps({"data":[], "end":True, "status":"Offline"}), websocket)
await manager.disconnect(websocket)
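# A minimal client sketch (not part of the repo) for the eye-blink socket above,
# assuming the utility server runs on 127.0.0.1:8000 and an interview video
# exists for the hypothetical application id "demo". Uses the third-party
# 'websockets' client package.
import asyncio
import json
import websockets

async def watch_blinks():
    uri = "ws://127.0.0.1:8000/facial/ws/eye-blinks?application_id=demo&start=0&end=10"
    async with websockets.connect(uri) as ws:
        print(json.loads(await ws.recv()))  # e.g. {"count": 3, "end": true}

if __name__ == "__main__":
    asyncio.run(watch_blinks())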
from fastapi import APIRouter
import os
from pyresparser import ResumeParser
import urllib.request
from baseModels.payloads import ExtractResume, ResumeScores
from scripts.processing import document_processing
router = APIRouter(prefix='/resume')
@router.post("/extract")
def extract(payload:ExtractResume):
filename = 'resumes/'+payload.user_id+'.pdf'
urllib.request.urlretrieve(payload.resume_url, filename)
pyres_data = ResumeParser(filename).get_extracted_data()
os.remove(filename)
return pyres_data
@router.post("/get-scores")
def get_scores(payload:ResumeScores):
filename = 'resumes/'+payload.user_id+'.pdf'
urllib.request.urlretrieve(payload.resume_url, filename)
skills = {"primary":payload.primary_skills, "secondary":payload.secondary_skills}
result = document_processing(filename, skills, payload.job_desc)
candidate = result.skills_match()
os.remove(filename)
return candidate
\ No newline at end of file
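# A minimal sketch (not part of the repo) of calling the scoring endpoint above
# over HTTP, assuming the default UTILITY_SERVER address; the resume URL and
# skills below are hypothetical placeholders.
import requests

payload = {
    "resume_url": "https://example.com/some-resume.pdf",
    "user_id": "demo-user",
    "primary_skills": ["Python", "AWS"],
    "secondary_skills": ["data", "visualization"],
    "job_desc": "Backend engineer with Python and AWS experience.",
}
response = requests.post("http://127.0.0.1:8000/resume/get-scores", json=payload)
print(response.json())  # primary_score, primary_match, secondary_score, ...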
from fastapi import APIRouter
import moviepy.editor
import pickle
import os
import urllib.request
import numpy as np
from baseModels.payloads import EnrollVoice, AnalyseVoice, VerifyVoice
from scipy.spatial.distance import euclidean
from scripts.processing import extract_input_feature
from keras.models import load_model
from scripts.parameters import MODEL_FILE, MAX_SEC
from scripts.voice_feature_extraction import get_embedding
from scripts.api import SingletonAiohttp
router = APIRouter(prefix='/voice')
@router.post("/enroll")
def enroll(payload:EnrollVoice):
video_filename = 'voices/'+payload.user_id+'.mp4'
urllib.request.urlretrieve(payload.video_url, video_filename)
# Download video and save audio
video = moviepy.editor.VideoFileClip(video_filename)
audio = video.audio
audio_filename = 'voices/auth/temp/'+payload.user_id+'.wav'
audio.write_audiofile(audio_filename, codec='pcm_s16le', bitrate='50k')
# Extract and enroll audio features
model = load_model(MODEL_FILE)
enroll_result = get_embedding(model, audio_filename, MAX_SEC)
enroll_embs = np.array(enroll_result.tolist())
np.save("voices/auth/embed/"+ payload.user_id+".npy", enroll_embs)
# Best-effort cleanup of the temporary files
try:
os.remove(video_filename)
except OSError:
pass
try:
os.remove(audio_filename)
except OSError:
pass
return 'SUCCESS'
@router.post("/verify")
async def verify(payload:VerifyVoice):
video_filename = 'videos/interviews/'+payload.application_id+'.mp4'
urllib.request.urlretrieve(payload.video_url, video_filename)
# Download video and save audio
video = moviepy.editor.VideoFileClip(video_filename)
audio = video.audio
audio_filename = 'voices/interviews/'+payload.application_id+'.wav'
audio.write_audiofile(audio_filename, codec='pcm_s16le', bitrate='50k')
model = load_model(MODEL_FILE)
test_result = get_embedding(model, audio_filename, MAX_SEC)
test_embs = np.array(test_result.tolist())
enroll_embs = np.load("voices/auth/embed/"+ payload.user_id+".npy")
distance = euclidean(test_embs, enroll_embs)
url = "http://localhost:5000/applications/update/voice-verification"
update_payload = {'update':round(1-distance, 5), 'applicationId':payload.application_id }
await SingletonAiohttp.put_url(url, update_payload)
return 'success'
@router.post("/analyse")
def analyse(payload:AnalyseVoice):
model = pickle.load(open("models/voice_classifier.model", "rb"))
filename = 'voices/interviews/'+payload.application_id+'.wav'
features = extract_input_feature(filename, mfcc=True, chroma=True, mel=True, start=float(payload.start), end=float(payload.end)).reshape(1, -1)
result = model.predict(features)[0]
return result
\ No newline at end of file
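# A minimal sketch of the verification arithmetic in /verify above: the value
# sent back to the Node API is round(1 - euclidean_distance, 5), so identical
# embeddings score 1.0 and larger distances score lower. The embeddings below
# are hypothetical stand-ins for get_embedding output.
import numpy as np
from scipy.spatial.distance import euclidean

enroll_embs = np.array([0.1, 0.2, 0.3])
test_embs = np.array([0.1, 0.2, 0.4])
distance = euclidean(test_embs, enroll_embs)  # 0.1
print(round(1 - distance, 5))  # 0.9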
import aiohttp
from socket import AF_INET
from typing import List, Optional, Any, Dict
SIZE_POOL_AIOHTTP = 100
class SingletonAiohttp:
aiohttp_client: Optional[aiohttp.ClientSession] = None
@classmethod
def get_aiohttp_client(cls) -> aiohttp.ClientSession:
if cls.aiohttp_client is None:
timeout = aiohttp.ClientTimeout(total=2)
connector = aiohttp.TCPConnector(family=AF_INET, limit_per_host=SIZE_POOL_AIOHTTP)
cls.aiohttp_client = aiohttp.ClientSession(timeout=timeout, connector=connector)
return cls.aiohttp_client
@classmethod
async def close_aiohttp_client(cls) -> None:
if cls.aiohttp_client:
await cls.aiohttp_client.close()
cls.aiohttp_client = None
@classmethod
async def post_url(cls, url: str) -> Any:
client = cls.get_aiohttp_client()
try:
async with client.post(url) as response:
if response.status != 200:
return {"ERROR OCCURED" + str(await response.text())}
json_result = await response.json()
except Exception as e:
return {"ERROR": e}
return json_result
@classmethod
async def put_url(cls, url: str, payload) -> Any:
client = cls.get_aiohttp_client()
try:
async with client.put(url, data=payload) as response:
if response.status != 200:
return {"ERROR OCCURED" + str(await response.text())}
json_result = await response.json()
except Exception as e:
return {"ERROR": e}
return json_result
\ No newline at end of file
import cv2
import dlib
import imutils
import numpy as np
from imutils import face_utils
from scipy.spatial import distance as dist
from scripts.parameters import EYE_AR_THRESH, EYE_AR_CONSEC_FRAMES
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat")
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
def count_blinks(filename, start=0, end=0):
COUNTER = 0
TOTAL = 0
vs = cv2.VideoCapture(filename)
fps = vs.get(cv2.CAP_PROP_FPS)
start_frame = start * fps
end_frame = end * fps
current_frame = start_frame
vs.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
while (current_frame<=end_frame):
ret, frame = vs.read()
if ret == True:
frame = imutils.resize(frame, width=800)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
current_frame = current_frame + 1
print('FRAME : '+str(current_frame) + ' Blinks :'+ str(TOTAL))
for rect in rects:
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
ear = (leftEAR + rightEAR) / 2.0
if ear < EYE_AR_THRESH:
COUNTER += 1
else:
if COUNTER >= EYE_AR_CONSEC_FRAMES:
TOTAL += 1
COUNTER = 0
else:
break
vs.release()
return TOTAL
\ No newline at end of file
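# A minimal worked example of the EAR formula above, on hypothetical landmark
# coordinates: vertical gaps A = B = 4 and horizontal width C = 6 give
# EAR = (4 + 4) / (2 * 6) ~= 0.667, well above EYE_AR_THRESH = 0.2 (open eye).
if __name__ == "__main__":
    open_eye = np.array([(0, 1), (2, 3), (4, 3), (6, 1), (4, -1), (2, -1)])
    print(eye_aspect_ratio(open_eye))  # ~0.667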
from pyaudio import paInt16
# Signal processing
SAMPLE_RATE = 16000
PREEMPHASIS_ALPHA = 0.97
FRAME_LEN = 0.025
FRAME_STEP = 0.01
NUM_FFT = 512
BUCKET_STEP = 1
MAX_SEC = 10
# Model
MODEL_FILE = "models/voice_auth_model_cnn"
COST_METRIC = "cosine" # euclidean or cosine
INPUT_SHAPE=(NUM_FFT,None,1)
# IO
EMBED_LIST_FILE = "voices/auth/embed"
# Recognition
THRESHOLD = 0.2
EMOTIONS = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
EYE_AR_THRESH = 0.2
EYE_AR_CONSEC_FRAMES = 3
\ No newline at end of file
import pandas as pd
import texthero as hero
import numpy as np
import librosa
from pyresparser.utils import extract_text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from pyannote.audio import Audio
from pyannote.core import Segment
class document_processing:
def __init__(self, resume, skills, job_desc):
self.resume = resume
self.skills = skills
self.job_desc = job_desc
def extract_resume(self):
filepath = self.resume
extension = filepath.split('.')[-1]
extension = '.'+extension
resume_txt = extract_text(filepath, extension=extension)
return resume_txt
def find_unigram(self, df, column):
unigrams = (df[column].str.lower()
.str.replace(r'[^a-z\s]', '')
.str.split(expand=True)
.stack()).reset_index(drop=True)
unigrams = hero.clean(unigrams)
un_df = pd.DataFrame(unigrams, columns = ['text'])
return un_df
def find_match(self, source, match):
# Remove the null values
match.dropna(inplace=True)
match.reset_index(drop=True, inplace=True)
match.columns = ['text']
match['text'] = hero.clean(match['text'])
# Total number of required skills
max_val = len(match)
# Find the skills that appear in both the resume and the skill list
df = pd.merge(source, match, on = 'text')
df.drop_duplicates(inplace=True)
df.reset_index(drop=True, inplace=True)
# Number of matched skills
match_skills = len(df)
if match_skills == 0:
lst_skills = []
score = 0
elif match_skills > 0:
lst_skills = df['text'].tolist()
score = int((match_skills / max_val) * 100)
return score, lst_skills
def fill_data(self, source, target, column):
source.loc[0, column] = str(target[column])
return source
def resume_cosine_score(self, text):
jd_txt = self.job_desc
jd_txt = pd.Series(jd_txt)
jd_txt = hero.clean(jd_txt)
jd_txt = jd_txt[0]
text_list = [text, jd_txt]
cv = CountVectorizer()
count_matrix = cv.fit_transform(text_list)
match_percentage = cosine_similarity(count_matrix)[0][1] * 100
match_percentage = round(match_percentage, 2)
return match_percentage
def skills_match(self):
skills = self.skills
pyres_text = self.extract_resume()
self.text = pyres_text
ocr_ser = pd.Series(pyres_text)
cleaned_words = hero.clean(ocr_ser)
main_df = pd.DataFrame(cleaned_words[0].split(), columns = ['text'])
self.clean_data = main_df
pri_score, pri_match = self.find_match(main_df, pd.DataFrame(skills['primary']))
sec_score, sec_match = self.find_match(main_df, pd.DataFrame(skills['secondary']))
doc_sim = self.resume_cosine_score(cleaned_words[0])
return {'primary_score': pri_score,
'primary_match': pri_match,
'secondary_score': sec_score,
'secondary_match': sec_match,
'similarity': int(doc_sim)}
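# A minimal worked example (hypothetical strings) of the CountVectorizer +
# cosine-similarity step used in resume_cosine_score above: the two texts share
# two of their three terms, giving cosine similarity 2/3, i.e. ~66.67%.
def _cosine_match_example():
    texts = ["python machine learning", "machine learning engineer"]
    matrix = CountVectorizer().fit_transform(texts)
    return round(cosine_similarity(matrix)[0][1] * 100, 2)  # 66.67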
def extract_input_feature(file_name, **kwargs):
mfcc = kwargs.get("mfcc")
chroma = kwargs.get("chroma")
mel = kwargs.get("mel")
contrast = kwargs.get("contrast")
tonnetz = kwargs.get("tonnetz")
start = kwargs.get("start")
end = kwargs.get("end")
sample_rate = 16000
audio = Audio(sample_rate=sample_rate, mono=True)
segment = Segment(start, end)
sound, sample_rate = audio.crop(file_name, segment)
X = sound[0].numpy()
if chroma or contrast:
stft = np.abs(librosa.stft(X))
result = np.array([])
if mfcc:
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result = np.hstack((result, mfccs))
if chroma:
chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
result = np.hstack((result, chroma))
if mel:
mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T,axis=0)
result = np.hstack((result, mel))
if contrast:
contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
result = np.hstack((result, contrast))
if tonnetz:
tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)
result = np.hstack((result, tonnetz))
return result
import glob
import os
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
import librosa
import soundfile
import pickle
EMOTIONS = {
"01": "neutral",
"02": "calm",
"03": "happy",
"04": "sad",
"05": "angry",
"06": "fearful",
"07": "disgust",
"08": "surprised"
}
AVAILABLE_EMOTIONS = {
"angry",
"sad",
"neutral",
"happy"
}
def extract_feature(file_name, **kwargs):
mfcc = kwargs.get("mfcc")
chroma = kwargs.get("chroma")
mel = kwargs.get("mel")
contrast = kwargs.get("contrast")
tonnetz = kwargs.get("tonnetz")
sample_rate = 16000
with soundfile.SoundFile(file_name) as sound_file:
X = sound_file.read(dtype="float32")
if chroma or contrast:
stft = np.abs(librosa.stft(X))
result = np.array([])
if mfcc:
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result = np.hstack((result, mfccs))
if chroma:
chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
result = np.hstack((result, chroma))
if mel:
mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
result = np.hstack((result, mel))
if contrast:
contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
result = np.hstack((result, contrast))
if tonnetz:
tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)
result = np.hstack((result, tonnetz))
return result
# update random_state=9
def load_data(test_size=0.2, random_state=7):
X, y = [], []
for file in glob.glob("../data/voice/Actor_*/*.wav"):
basename = os.path.basename(file)
emotion = EMOTIONS[basename.split("-")[2]]
if emotion not in AVAILABLE_EMOTIONS:
continue
features = extract_feature(file, mfcc=True, chroma=True, mel=True)
X.append(features)
y.append(emotion)
return train_test_split(np.array(X), y, test_size=test_size, random_state=random_state)
def train_voice():
X_train, X_test, y_train, y_test = load_data(test_size=0.25)
model=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=500)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
print("Accuracy: {:.2f}%".format(accuracy*100))
if not os.path.isdir("result"):
os.mkdir("result")
pickle.dump(model, open("../result/mlp_classifier.model", "wb"))
train_voice()
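# Hedged usage sketch: reload the pickled classifier and score a new clip (the
# .wav path is an assumption); extract_feature must be called with the same
# flags used during training.
#
#   model = pickle.load(open("../result/mlp_classifier.model", "rb"))
#   features = extract_feature("sample.wav", mfcc=True, chroma=True, mel=True)
#   model.predict([features])[0]  # one of: "angry", "sad", "neutral", "happy"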
import os
import numpy as np
import pandas as pd
# from scipy.spatial.distance import cdist, euclidean, cosine
from scripts.voice_preprocess import get_fft_spectrum
from scripts.parameters import BUCKET_STEP,FRAME_STEP,MAX_SEC
def buckets(max_time, steptime, frameskip):
buckets = {}
frames_per_sec = int(1/frameskip)
end_frame = int(max_time*frames_per_sec)
step_frame = int(steptime*frames_per_sec)
for i in range(0, end_frame+1, step_frame):
s = i
s = np.floor((s-7+2)/2) + 1 # for first conv layer
s = np.floor((s-3)/2) + 1 # for first maxpool layer
s = np.floor((s-5+2)/2) + 1 # for second conv layer
s = np.floor((s-3)/2) + 1 # for second maxpool layer
s = np.floor((s-3+2)/1) + 1 # for third conv layer
s = np.floor((s-3+2)/1) + 1 # for fourth conv layer
s = np.floor((s-3+2)/1) + 1 # for fifth conv layer
s = np.floor((s-3)/2) + 1 # for fifth maxpool layer
s = np.floor((s-1)/1) + 1 # for sixth fully connected layer
if s > 0:
buckets[i] = int(s)
return buckets
def get_embedding(model, wav_file, max_time):
buckets_var = buckets(MAX_SEC, BUCKET_STEP, FRAME_STEP)
signal = get_fft_spectrum(wav_file, buckets_var)
embedding = np.squeeze(model.predict(signal.reshape(1,*signal.shape,1)))
return embedding
def get_embedding_batch(model, wav_files, max_time):
return [ get_embedding(model, wav_file, max_time) for wav_file in wav_files ]
def get_embeddings_from_list_file(model, list_file, max_time):
buckets_var = buckets(MAX_SEC, BUCKET_STEP, FRAME_STEP)
result = pd.read_csv(list_file, delimiter=",")
result['features'] = result['filename'].apply(lambda x: get_fft_spectrum(x, buckets_var))
result['embedding'] = result['features'].apply(lambda x: np.squeeze(model.predict(x.reshape(1,*x.shape,1))))
return result[['filename','speaker','embedding']]
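# Hedged sketch of speaker verification on top of get_embedding: compare two
# utterance embeddings with cosine distance and accept below a threshold. The
# Keras model object, file paths and the 0.45 threshold are assumptions.
#
#   from scipy.spatial.distance import cosine
#   enroll = get_embedding(model, "enrolled.wav", MAX_SEC)
#   probe = get_embedding(model, "interview.wav", MAX_SEC)
#   same_speaker = cosine(enroll, probe) < 0.45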
import librosa
import numpy as np
from scipy.signal import lfilter, butter
from python_speech_features import sigproc
from scripts.parameters import SAMPLE_RATE, PREEMPHASIS_ALPHA, FRAME_LEN, FRAME_STEP, NUM_FFT
def load(filename, sample_rate):
audio, sr = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.flatten()
return audio
def normalize_frames(m,epsilon=1e-12):
return np.array([(v - np.mean(v)) / max(np.std(v),epsilon) for v in m])
# DC offset and dither removal, ported from the MATLAB implementation at:
# https://github.com/christianvazquez7/ivector/blob/master/MSRIT/rm_dc_n_dither.m
def remove_dc_and_dither(sin, sample_rate):
if sample_rate == 16e3:
alpha = 0.99
elif sample_rate == 8e3:
alpha = 0.999
else:
print("Sample rate must be 16kHz or 8kHz only")
exit(1)
sin = lfilter([1,-1], [1,-alpha], sin)
dither = np.random.random_sample(len(sin)) + np.random.random_sample(len(sin)) - 1
spow = np.std(dither)
sout = sin + 1e-6 * spow * dither
return sout
def get_fft_spectrum(filename, buckets):
signal = load(filename, SAMPLE_RATE)
signal *= 2**15
# get FFT spectrum
signal = remove_dc_and_dither(signal, SAMPLE_RATE)
signal = sigproc.preemphasis(signal, coeff=PREEMPHASIS_ALPHA)
frames = sigproc.framesig(signal, frame_len=FRAME_LEN*SAMPLE_RATE, frame_step=FRAME_STEP*SAMPLE_RATE, winfunc=np.hamming)
fft = abs(np.fft.fft(frames,n=NUM_FFT))
fft_norm = normalize_frames(fft.T)
# truncate to max bucket sizes
rsize = max(k for k in buckets if k <= fft_norm.shape[1])
rstart = int((fft_norm.shape[1]-rsize)/2)
out = fft_norm[:,rstart:rstart+rsize]
return out
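# Hedged usage sketch: this spectrum is what get_embedding feeds to the model
# (the file path is an assumption, and buckets() is assumed importable from
# the embedding module above):
#
#   buckets_var = buckets(MAX_SEC, BUCKET_STEP, FRAME_STEP)
#   spec = get_fft_spectrum("enrolled.wav", buckets_var)
#   spec.shape  # (NUM_FFT, bucket_width), later reshaped to (1, NUM_FFT, bucket_width, 1)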
from typing import List
from fastapi import WebSocket
class ConnectionManager:
def __init__(self) -> None:
self.active_connections: List[WebSocket] = []
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.append(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
async def send_private(self, message: str, websocket: WebSocket):
await websocket.send_text(message)
async def broadcast(self, message: str):
for connection in self.active_connections:
await connection.send_text(message)
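# Hedged usage sketch: a minimal FastAPI websocket route wired to the manager
# above (the "/ws" path and app instance are assumptions for illustration):
#
#   from fastapi import FastAPI, WebSocketDisconnect
#
#   app = FastAPI()
#   manager = ConnectionManager()
#
#   @app.websocket("/ws")
#   async def ws_endpoint(websocket: WebSocket):
#       await manager.connect(websocket)
#       try:
#           while True:
#               await manager.broadcast(await websocket.receive_text())
#       except WebSocketDisconnect:
#           manager.disconnect(websocket)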
name: pyserver
channels:
- blaze
- conda-forge
- defaults
dependencies:
- _tflow_select=2.3.0=mkl
- absl-py=1.4.0=pyhd8ed1ab_0
- aiohttp=3.8.4=py39ha55989b_0
- aiosignal=1.3.1=pyhd8ed1ab_0
- anyio=3.6.2=pyhd8ed1ab_0
- aom=3.5.0=h63175ca_0
- astunparse=1.6.3=pyhd8ed1ab_0
- async-timeout=4.0.2=pyhd8ed1ab_0
- attrs=22.2.0=pyh71513ae_0
- blinker=1.5=pyhd8ed1ab_0
- brotlipy=0.7.0=py39ha55989b_1005
- bzip2=1.0.8=h8ffe710_4
- ca-certificates=2022.12.7=h5b45459_0
- cachetools=5.3.0=pyhd8ed1ab_0
- certifi=2022.12.7=pyhd8ed1ab_0
- cffi=1.15.1=py39h68f70e3_3
- charset-normalizer=2.1.1=pyhd8ed1ab_0
- click=8.1.3=win_pyhd8ed1ab_2
- colorama=0.4.6=pyhd8ed1ab_0
- cryptography=38.0.4=py39h58e9bdb_0
- decorator=5.1.1=pyhd8ed1ab_0
- expat=2.5.0=h63175ca_1
- fastapi=0.95.0=pyhd8ed1ab_0
- ffmpeg=5.1.2=gpl_h5b1d025_106
- flatbuffers=23.3.3=h63175ca_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=hbde0cde_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h546665d_1
- frozenlist=1.3.3=py39ha55989b_0
- gast=0.4.0=pyh9f0ad1d_0
- giflib=5.2.1=h64bf75a_3
- google-auth=2.17.0=pyh1a96a4e_0
- google-auth-oauthlib=0.4.6=pyhd8ed1ab_0
- google-pasta=0.2.0=pyh8c360ce_0
- grpcio=1.42.0=py39hc60d5dd_0
- h11=0.14.0=pyhd8ed1ab_0
- h5py=3.7.0=py39h3de5c98_0
- hdf5=1.10.6=nompi_h5268f04_1114
- icu=58.2=ha925a31_3
- idna=3.4=pyhd8ed1ab_0
- imageio=2.27.0=pyh24c5eb1_0
- imageio-ffmpeg=0.4.8=pyhd8ed1ab_0
- importlib-metadata=6.1.0=pyha770c72_0
- importlib_metadata=6.1.0=hd8ed1ab_0
- intel-openmp=2023.0.0=h57928b3_25922
- jpeg=9e=hcfcfb64_3
- keras=2.10.0=py39haa95532_0
- keras-preprocessing=1.1.2=pyhd8ed1ab_0
- krb5=1.20.1=h6609f42_0
- lcms2=2.15=ha5c8aab_0
- lerc=4.0.0=h63175ca_0
- libblas=3.9.0=16_win64_mkl
- libcblas=3.9.0=16_win64_mkl
- libcurl=7.88.1=h68f0423_1
- libdeflate=1.17=hcfcfb64_0
- libexpat=2.5.0=h63175ca_1
- libffi=3.4.2=h8ffe710_5
- libhwloc=2.9.0=h51c2c0f_0
- libiconv=1.17=h8ffe710_0
- liblapack=3.9.0=16_win64_mkl
- libopus=1.3.1=h8ffe710_1
- libpng=1.6.39=h19919ed_0
- libprotobuf=3.20.2=h12be248_0
- libsqlite=3.40.0=hcfcfb64_0
- libssh2=1.10.0=h680486a_3
- libtiff=4.5.0=hf8721a0_2
- libwebp-base=1.3.0=hcfcfb64_0
- libxcb=1.13=hcd874cb_1004
- libxml2=2.10.3=hc3477c8_6
- libzlib=1.2.13=hcfcfb64_4
- m2w64-gcc-libgfortran=5.3.0=6
- m2w64-gcc-libs=5.3.0=7
- m2w64-gcc-libs-core=5.3.0=7
- m2w64-gmp=6.1.0=2
- m2w64-libwinpthread-git=5.0.0.4634.697f757=2
- markdown=3.4.3=pyhd8ed1ab_0
- markupsafe=2.1.2=py39ha55989b_0
- mkl=2022.1.0=h6a75c08_874
- moviepy=1.0.3=pyhd8ed1ab_1
- msys2-conda-epoch=20160418=1
- multidict=6.0.4=py39ha55989b_0
- oauthlib=3.2.2=pyhd8ed1ab_0
- openh264=2.3.1=h63175ca_2
- openjpeg=2.5.0=ha2aaf27_2
- openssl=1.1.1t=hcfcfb64_0
- opt_einsum=3.3.0=pyhd8ed1ab_1
- packaging=23.0=pyhd8ed1ab_0
- pandas=1.5.3=py39h2ba5b7c_1
- pillow=9.4.0=py39hcebd2be_1
- pip=23.0.1=pyhd8ed1ab_0
- platformdirs=3.2.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proglog=0.1.9=py_0
- pthread-stubs=0.4=hcd874cb_1001
- pthreads-win32=2.9.1=hfa6e2cd_3
- pyasn1=0.4.8=py_0
- pyasn1-modules=0.2.7=py_0
- pycparser=2.21=pyhd8ed1ab_0
- pydantic=1.10.7=py39ha55989b_0
- pyjwt=2.6.0=pyhd8ed1ab_0
- pyopenssl=23.1.1=pyhd8ed1ab_0
- pysocks=1.7.1=pyh0701188_6
- pytesseract=0.3.10=pyhd8ed1ab_0
- python=3.9.16=h6244533_2
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python-flatbuffers=23.1.21=pyhd8ed1ab_0
- python_abi=3.9=2_cp39
- pytz=2023.3=pyhd8ed1ab_0
- pyu2f=0.1.5=pyhd8ed1ab_0
- requests=2.28.2=pyhd8ed1ab_0
- requests-oauthlib=1.3.1=pyhd8ed1ab_0
- rsa=4.9=pyhd8ed1ab_0
- scipy=1.9.3=py39hfbf2dce_2
- setuptools=67.6.1=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.10=hfb803bf_0
- sniffio=1.3.0=pyhd8ed1ab_0
- sqlite=3.41.1=h2bbff1b_0
- sqlite3=3.8.6=0
- starlette=0.26.1=pyhd8ed1ab_0
- svt-av1=1.4.1=h63175ca_0
- tbb=2021.8.0=h91493d7_0
- tensorboard=2.10.0=py39haa95532_0
- tensorboard-data-server=0.6.1=py39haa95532_0
- tensorboard-plugin-wit=1.8.1=pyhd8ed1ab_0
- tensorflow=2.10.0=mkl_py39ha510bab_0
- tensorflow-base=2.10.0=mkl_py39h6a7f48e_0
- tensorflow-estimator=2.10.0=py39haa95532_0
- termcolor=2.2.0=pyhd8ed1ab_0
- tk=8.6.12=h8ffe710_0
- tqdm=4.65.0=pyhd8ed1ab_1
- typing-extensions=4.5.0=hd8ed1ab_0
- typing_extensions=4.5.0=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- ucrt=10.0.22621.0=h57928b3_0
- urllib3=1.26.15=pyhd8ed1ab_0
- uvicorn=0.21.1=py39hcbf5309_0
- vc=14.3=hb6edc58_10
- vs2015_runtime=14.34.31931=h4c5c07a_10
- werkzeug=2.2.3=pyhd8ed1ab_0
- wheel=0.40.0=pyhd8ed1ab_0
- win_inet_pton=1.1.0=pyhd8ed1ab_6
- wrapt=1.15.0=py39ha55989b_0
- x264=1!164.3095=h8ffe710_2
- x265=3.5=h2d74725_3
- xorg-libxau=1.0.9=hcd874cb_0
- xorg-libxdmcp=1.1.3=hcd874cb_0
- xz=5.2.6=h8d14728_0
- yarl=1.8.2=py39ha55989b_0
- zipp=3.15.0=pyhd8ed1ab_0
- zlib=1.2.13=hcfcfb64_4
- zstd=1.5.2=h12be248_6
- pip:
- alembic==1.10.2
- antlr4-python3-runtime==4.9.3
- asteroid-filterbanks==0.4.0
- audioread==3.0.0
- backports-cached-property==1.0.2
- blis==0.7.9
- catalogue==1.0.2
- chardet==5.1.0
- cmaes==0.9.1
- colorlog==6.7.0
- commonmark==0.9.1
- contourpy==1.0.7
- cycler==0.11.0
- cymem==2.0.7
- dlib==19.24.0
- docopt==0.6.2
- docx2txt==0.8
- einops==0.3.2
- en-core-web-sm==2.3.1
- fer==22.4.0
- filelock==3.10.7
- fonttools==4.39.3
- fsspec==2023.3.0
- gensim==3.8.1
- greenlet==2.0.2
- hmmlearn==0.2.8
- huggingface-hub==0.13.3
- hyperpyyaml==1.1.0
- importlib-resources==5.12.0
- imutils==0.5.4
- joblib==1.2.0
- jsonschema==4.17.3
- julius==0.2.7
- kiwisolver==1.4.4
- librosa==0.9.2
- llvmlite==0.39.1
- mako==1.2.4
- matplotlib==3.7.1
- mpmath==1.3.0
- mtcnn==0.1.1
- murmurhash==1.0.9
- networkx==2.8.8
- nltk==3.8.1
- numba==0.56.4
- numpy==1.23.5
- omegaconf==2.3.0
- opencv-contrib-python==4.7.0.72
- opencv-python==4.7.0.72
- optuna==3.1.0
- pdfminer-six==20221105
- plac==1.1.3
- plotly==5.14.0
- preshed==3.0.8
- primepy==1.3
- protobuf==3.19.6
- pyannote-audio==2.1.1
- pyannote-core==4.5
- pyannote-database==4.1.3
- pyannote-metrics==3.2.1
- pyannote-pipeline==2.3
- pyaudio==0.2.13
- pycryptodome==3.17
- pydeprecate==0.3.2
- pygments==2.14.0
- pyparsing==3.0.9
- pyresparser==1.0.6
- pyrsistent==0.19.3
- python-speech-features==0.6
- pytorch-lightning==1.6.5
- pytorch-metric-learning==1.7.3
- pyyaml==6.0
- regex==2023.3.23
- resampy==0.4.2
- rich==12.6.0
- ruamel-yaml==0.17.21
- ruamel-yaml-clib==0.2.7
- scikit-learn==1.2.2
- semver==2.13.0
- sentencepiece==0.1.97
- shellingham==1.5.0.post1
- simplejson==3.18.4
- singledispatchmethod==1.0
- smart-open==6.3.0
- sortedcontainers==2.4.0
- soundfile==0.10.3.post1
- spacy==2.3.9
- speechbrain==0.5.14
- sqlalchemy==2.0.7
- srsly==1.0.6
- sympy==1.11.1
- tabulate==0.9.0
- tenacity==8.2.2
- texthero==1.1.0
- thinc==7.4.6
- threadpoolctl==3.1.0
- torch==1.13.1
- torch-audiomentations==0.11.0
- torch-pitch-shift==1.2.3
- torchaudio==0.13.1
- torchmetrics==0.11.4
- typer==0.7.0
- unidecode==1.3.6
- wasabi==0.10.1
- wordcloud==1.8.2.2
prefix: C:\Users\User\miniconda3\envs\pyserver
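# To recreate this environment, assuming the file is saved as environment.yml:
#   conda env create -f environment.yml
#   conda activate pyserver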