Commit 59c6a55f authored by H.M.C. Nadunithara Wijerathne's avatar H.M.C. Nadunithara Wijerathne

Merge branch 'it19980096' into 'master'

Resume analyser

See merge request !2
parents 7b765318 4bfd17e0
......@@ -11,6 +11,7 @@
"author": "Namit Nathwani",
"license": "ISC",
"dependencies": {
"axios": "^1.3.4",
"bcryptjs": "^2.4.3",
"cors": "^2.8.5",
"express": "^4.18.2",
......
......@@ -13,3 +13,4 @@ export const DEFAULT_CONTROLS = {
use: true,
},
};
export const UTILITY_SERVER = "http://127.0.0.1:8000";
......@@ -47,6 +47,16 @@ export type AuthType = {
controls?: ControlsType;
};
// Fields the Python utility server extracts from a resume PDF
// (POST /resume/extract). The parser may return null for list fields
// it cannot find, hence the `| null` unions.
export type ResumeDataType = {
  skills: string[] | null;
  degree: string[] | null;
  designation: string[] | null;
  experience: string[] | null;
  company_names: string[] | null;
  no_of_pages: number;
  total_experience: number;
};
export type CandidateType = {
_id?: string;
name: string;
......@@ -60,6 +70,9 @@ export type CandidateType = {
dateOfBirth: string;
jobIds: string[];
profilePicture: string;
state: "INTIAL" | "READY";
resume?: string;
resumeData?: ResumeDataType;
};
export type OrganizationType = {
......@@ -87,6 +100,41 @@ export type ControlsType = {
};
};
// A posted job. `organization` holds the owning organization's user id;
// `applications` holds application document ids.
export type JobType = {
  _id: string;
  title: string;
  description: string;
  primarySkills: string[];
  secondarySkills?: string[];
  salary: {
    min: number;
    max: number;
    currency: string;
  };
  applications: string[];
  organization: string;
};
// A candidate's application to a job, including the interview slot (once
// scheduled) and the resume-match scores computed by the utility server.
export type ApplicationType = {
  candidate: string; // candidates._id
  job: string; // jobs._id
  status: "Pending" | "Accepted" | "In progress" | "Rejected";
  interview?: {
    date: string;
    time: string;
    link: string;
    videoRef?: string; // presumably a recording reference — TODO confirm
  };
  score: {
    primary: number; // % of primary skills matched by the resume
    // NOTE(review): "primatyMatch" is a typo for "primaryMatch", but it is
    // part of the stored schema and API contract — renaming would break
    // existing documents and clients.
    primatyMatch: string[];
    secondary: number; // % of secondary skills matched
    secondaryMatch: string[];
    similarity: number; // resume vs job-description cosine similarity (%)
    total: number; // primary + secondary + similarity
  };
};
export interface TypedRequest<T extends Query, U> extends Request {
body: U;
query: T;
......
......@@ -8,11 +8,10 @@ app.use(cors());
// Routes
const authRoute = require("./routes/auth");
// Environment constants
const userRoute = require("./routes/user");
const jobsRoute = require("./routes/jobs");
// Service Initialisation
mongoose.connect(MONGO_URL, {
useFindAndModify: false,
useNewUrlParser: true,
......@@ -20,13 +19,6 @@ mongoose.connect(MONGO_URL, {
useCreateIndex: true,
});
// const db = mongoose.connection;
// db.on("error", (error) => console.log(error, "connection error:"));
// db.once("open", () => {
// console.log("Connected to MongoDB Instance");
// });
// Express Initialisation
app.use(express.urlencoded({ extended: true }));
app.use(express.json());
......@@ -35,5 +27,7 @@ app.use(express.urlencoded({ extended: false }));
// Routes
app.use("/auth", authRoute);
app.use("/user", userRoute);
app.use("/jobs", jobsRoute);
app.listen(API_PORT, () => console.log(`Listening on port ${API_PORT}`));
import { Request, Response, NextFunction } from "express";
import * as jwt from "jsonwebtoken";
import { JWT_SECRET } from "../config/contants";
import { TypedRequest, USER_TYPE } from "../config/types";
import Auth from "../models/Auth";
export const authMiddleware = (
req: Request,
......@@ -21,3 +23,37 @@ export const authMiddleware = (
}
}
};
/**
 * Allows the request through only when the authenticated user is an
 * organization account; otherwise responds 400 with the failure reason.
 */
export const organizationMiddleware = async (
  req: TypedRequest<{ userId: string }, any>,
  res: Response,
  next: NextFunction
) => {
  try {
    const org = await Auth.findOne({ userId: req.query.userId });
    if (org && org.userType === USER_TYPE.ORGANIZATION) {
      return next();
    }
    throw new Error("Organization not found");
  } catch (error) {
    // FIX: Error instances serialize to "{}" via res.send, so the client
    // previously received an empty body; send the message instead.
    const message = error instanceof Error ? error.message : error;
    return res.status(400).send(message);
  }
};
/**
 * Allows the request through only when the authenticated user is a
 * candidate account; otherwise responds 400 with the failure reason.
 */
export const candidateMiddleware = async (
  req: TypedRequest<{ userId: string }, any>,
  res: Response,
  next: NextFunction
) => {
  try {
    const candidate = await Auth.findOne({ userId: req.query.userId });
    if (candidate && candidate.userType === USER_TYPE.CANDIDATE) {
      return next();
    }
    throw new Error("Candidate not found");
  } catch (error) {
    // FIX: Error instances serialize to "{}" via res.send, so the client
    // previously received an empty body; send the message instead.
    const message = error instanceof Error ? error.message : error;
    return res.status(400).send(message);
  }
};
import { Schema, model } from "mongoose";
import { ApplicationType } from "../config/types";
// Mongo collection "applications" — one document per candidate job application.
const applicationSchema = new Schema<ApplicationType>({
  candidate: { type: Schema.Types.ObjectId, ref: "candidates" },
  job: { type: Schema.Types.ObjectId, ref: "jobs" },
  // FIX: the mongoose option is `required`, not `require`; the original
  // `require` keys were silently ignored (harmless here since all were false,
  // but misleading).
  status: { type: String, required: false, default: "Pending" },
  interview: {
    type: {
      date: String,
      time: String,
      link: String,
      videoRef: String,
    },
    required: false,
  },
  score: {
    type: {
      primary: Number,
      // NOTE(review): "primatyMatch" is a typo for "primaryMatch", but it is
      // the stored field name — renaming would break existing documents.
      primatyMatch: [String],
      secondary: Number,
      secondaryMatch: [String],
      similarity: Number,
      total: Number,
    },
    required: false,
  },
});

const Application = model<ApplicationType>("applications", applicationSchema);

export default Application;
import { Schema, model } from "mongoose";
import { AddressType, CandidateType } from "../config/types";
import { AddressType, CandidateType, ResumeDataType } from "../config/types";
const AddressSchema = new Schema<AddressType>(
{
......@@ -19,6 +19,18 @@ const ContactsSchema = new Schema<AddressType>(
},
{ id: false }
);
// Sub-document holding the fields the Python utility server extracts from a
// candidate's resume. `id: false` suppresses the virtual `id` getter.
const ResumeDataSchema = new Schema<ResumeDataType>(
  {
    // FIX: the mongoose option is `required`, not `require`; the original
    // `require` keys were silently ignored (harmless since all were false).
    skills: { type: [String], required: false },
    degree: { type: [String], required: false },
    designation: { type: [String], required: false },
    experience: { type: [String], required: false },
    company_names: { type: [String], required: false },
    no_of_pages: { type: Number, required: false },
    total_experience: { type: Number, required: false },
  },
  { id: false }
);
const candidateSchema = new Schema<CandidateType>({
name: String,
......@@ -27,6 +39,9 @@ const candidateSchema = new Schema<CandidateType>({
dateOfBirth: String,
jobIds: [{ type: Schema.Types.ObjectId, ref: "jobs" }],
profilePicture: String,
state: { type: String, default: "INTIAL" },
resume: { type: String, require: false },
resumeData: { type: ResumeDataSchema, require: false },
});
const Candidates = model<CandidateType>("candidates", candidateSchema);
......
import { Schema, model } from "mongoose";
import { JobType } from "../config/types";
// Mongo collection "jobs" — one document per posted job.
const jobSchema = new Schema<JobType>({
  title: String,
  description: String,
  // FIX: the mongoose option is `required`, not `require`; the original
  // `require: true` was silently ignored, so primarySkills was never
  // actually enforced. It is now.
  primarySkills: { type: [String], required: true },
  secondarySkills: { type: [String], required: false },
  salary: {
    min: Number,
    max: Number,
    currency: String,
  },
  applications: [{ type: Schema.Types.ObjectId, ref: "applications" }],
  // NOTE(review): declared as an array here, but JobType declares a single
  // id and the POST route assigns a scalar — confirm intent before changing
  // the stored shape.
  organization: [{ type: Schema.Types.ObjectId, ref: "organizations" }],
});

const Jobs = model<JobType>("jobs", jobSchema);

export default Jobs;
......@@ -137,7 +137,7 @@ router.post("/login", async (req: TypedRequest<{}, SignInPayload>, res) => {
}
const token = await jwt.sign({ userId: auth.userId }, JWT_SECRET, {
expiresIn: "2h",
expiresIn: "5h",
});
return res.json({
......
import { Router } from "express";
import {
ApplicationType,
JobType,
TypedRequest,
USER_TYPE,
} from "../config/types";
import {
authMiddleware,
candidateMiddleware,
organizationMiddleware,
} from "../middlewares/auth";
import Application from "../models/Application";
import Auth from "../models/Auth";
import Jobs from "../models/Job";
import ResumeAPI from "../utilities/apis/resume";
const router = Router();
// List jobs. Candidates see every job (with only candidate/status per
// application); organizations see only their own jobs, with full candidate
// details populated for each application.
router.get(
  "/",
  authMiddleware,
  async (req: TypedRequest<{ userId: string }, null>, res) => {
    try {
      const user = await Auth.findOne({ userId: req.query.userId });
      if (!user) {
        // FIX: a missing user previously crashed on `user.userType` and
        // surfaced as an opaque TypeError; fail explicitly instead.
        return res.json({ error: "User not found", success: false });
      }
      let jobs;
      if (user.userType === USER_TYPE.CANDIDATE) {
        jobs = await Jobs.find()
          .populate({
            path: "applications",
            select: ["candidate", "status"],
          })
          .populate({ path: "organization" });
      } else {
        jobs = await Jobs.find({ organization: req.query.userId }).populate({
          path: "applications",
          populate: {
            path: "candidate",
            select: [
              "name",
              "contacts",
              "dateOfBirth",
              "profilePicture",
              "resume",
              "resumeData",
            ],
          },
        });
      }
      return res.json({ jobs, success: true });
    } catch (error) {
      return res.json({ error, success: false });
    }
  }
);
router.post(
"/",
authMiddleware,
organizationMiddleware,
async (req: TypedRequest<{ userId: string }, JobType>, res) => {
try {
const newJob = new Jobs({ ...req.body, organization: req.query.userId });
const job = await newJob.save();
return res.json({ success: true, job });
} catch (error) {
return res.json({ success: false, error });
}
}
);
// Update a job posting.
router.put(
  "/",
  authMiddleware,
  organizationMiddleware,
  async (req: TypedRequest<{ userId: string }, JobType>, res) => {
    try {
      // FIX: scope the update to the calling organization (matching the
      // DELETE route) — previously any organization could modify any job by
      // id. Also exclude `_id` from $set: updating the immutable _id path is
      // at best a no-op and at worst an error.
      const { _id, ...fields } = req.body;
      const job = await Jobs.findOneAndUpdate(
        { _id, organization: req.query.userId },
        { $set: fields }
      );
      return res.json({ success: true, job });
    } catch (error) {
      return res.json({ success: false, error });
    }
  }
);
// Delete a job posting; scoped to the calling organization so an
// organization can only delete its own jobs.
router.delete(
  "/",
  authMiddleware,
  organizationMiddleware,
  async (req: TypedRequest<{ userId: string }, { jobId: string }>, res) => {
    try {
      await Jobs.deleteOne({
        organization: req.query.userId,
        _id: req.body.jobId,
      });
      return res.json({ success: true });
    } catch (error) {
      // FIX: the error was previously dropped; surface it like the other
      // routes in this file do.
      return res.json({ success: false, error });
    }
  }
);
// Apply to a job: have the utility server score the candidate's resume
// against the job, persist the application with its scores, and attach the
// application id to the job document.
router.put(
  "/apply",
  authMiddleware,
  candidateMiddleware,
  async (
    req: TypedRequest<
      { userId: string },
      { application: ApplicationType; resumeUrl: string }
    >,
    res
  ) => {
    try {
      const { application, resumeUrl } = req.body;
      const job = await Jobs.findById(application.job);
      if (!job) {
        // FIX: an unknown job id previously crashed on `job.primarySkills`
        // with an opaque TypeError; fail with a clear message instead.
        throw new Error("Job not found");
      }
      const data: any = await ResumeAPI.getResumeScores({
        user_id: req.query.userId,
        resume_url: resumeUrl,
        primary_skills: job.primarySkills,
        secondary_skills: job.secondarySkills,
        job_desc: job.description,
      });
      const score: ApplicationType["score"] = {
        primary: data.primary_score,
        // NOTE(review): "primatyMatch" matches the (typo'd) schema field.
        primatyMatch: data.primary_match,
        secondary: data.secondary_score,
        secondaryMatch: data.secondary_match,
        similarity: data.similarity,
        total: data.primary_score + data.secondary_score + data.similarity,
      };
      const newApplication = new Application({ ...application, score });
      const _application = await newApplication.save();
      job.applications.push(_application.id);
      await job.save();
      return res.json({
        success: true,
        applicationId: _application.id,
      });
    } catch (error) {
      return res.json({ success: false, error });
    }
  }
);
module.exports = router;
import { Router } from "express";
import { CandidateType, TypedRequest } from "../config/types";
import { authMiddleware } from "../middlewares/auth";
import Candidates from "../models/Candidate";
import ResumeAPI from "../utilities/apis/resume";
const router = Router();
// Update the candidate's profile. When a resume URL is supplied, the utility
// server parses it and the extracted fields are persisted as `resumeData`.
router.post(
  "/candidate",
  authMiddleware,
  async (req: TypedRequest<{ userId: string }, CandidateType>, res) => {
    try {
      // FIX: the original assigned `update = req.body` and then persisted
      // `req.body`, relying on the two being the same object for
      // `resumeData` to be saved. Copy and persist `update` explicitly.
      const update: CandidateType = { ...req.body };
      if (req.body?.resume) {
        const data: any = await ResumeAPI.extractResumeData({
          user_id: req.query.userId,
          resume_url: req.body.resume,
        });
        update.resumeData = data;
      }
      await Candidates.findByIdAndUpdate(req.query.userId, { $set: update });
      return res.status(200).json({ data: update });
    } catch (error) {
      return res.status(400).send(error);
    }
  }
);
module.exports = router;
import { ResumeDataType } from "../../config/types";
import { request } from "../requests";
/**
 * Thin client for the Python resume-analysis utility server.
 * The "<BASE_URL>" placeholder is substituted by `request`.
 */
export default class ResumeAPI {
  /** Download and parse a resume; resolves with the extracted fields. */
  static extractResumeData(payload: { resume_url: string; user_id: string }) {
    return request("<BASE_URL>/resume/extract", "POST", payload);
  }

  /** Score a resume against a job's skill lists and description. */
  static getResumeScores(payload: {
    resume_url: string;
    user_id: string;
    primary_skills: string[];
    secondary_skills: string[];
    job_desc: string;
  }) {
    return request("<BASE_URL>/resume/get-scores", "POST", payload);
  }
}
......@@ -401,8 +401,15 @@ export const processAttempt = ({
accepted: false,
};
result.accepted =
result.standard.inRange.full || result.fullStandard.inRange.full;
const standardCheck = controls.standard.use
? result.standard.inRange.full
: true;
const fullStandardCheck = controls.fullStandard.use
? result.fullStandard.inRange.full
: true;
result.accepted = standardCheck || fullStandardCheck;
return result;
};
......
import axios, { AxiosError, AxiosResponse, AxiosRequestConfig } from "axios";
import { UTILITY_SERVER } from "../config/contants";
// Global response interceptor. Currently a pass-through that re-rejects
// errors unchanged — kept as a hook point for future logging/retry logic.
axios.interceptors.response.use(
  (response) => response,
  (error: AxiosError) => {
    return Promise.reject(error);
  }
);
/**
 * Perform an HTTP request against the utility server.
 *
 * Substitutes "<BASE_URL>" in `url` with UTILITY_SERVER; GET payloads travel
 * as query params, all other methods as the request body. Resolves with the
 * response data; rejects with the server's response body when one exists,
 * otherwise with the axios error.
 *
 * FIX: the original wrapped an axios promise in `new Promise` with an async
 * executor — a known anti-pattern (redundant wrapper, and exceptions thrown
 * inside the executor are lost). Return the axios chain directly.
 */
export const request = (
  url: AxiosRequestConfig["url"],
  method: AxiosRequestConfig["method"],
  requestData?: AxiosRequestConfig["data"] | AxiosRequestConfig["params"],
  contentType?: string
) => {
  const endpoint = url?.replace?.("<BASE_URL>", UTILITY_SERVER);
  const params = method === "GET" ? requestData : null;
  const data = method === "GET" ? null : requestData;
  return axios({
    url: endpoint,
    method,
    data,
    params,
    headers: {
      "Content-Type": contentType || "application/json",
    },
    timeout: 30000,
  })
    .then((response: AxiosResponse) => response.data)
    .catch((error: AxiosError) => {
      // Prefer the server's response body as the rejection reason.
      if (error?.response) {
        throw error.response.data;
      }
      throw error;
    });
};
......@@ -276,6 +276,20 @@ astral-regex@^2.0.0:
resolved "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz"
integrity sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==
asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
axios@^1.3.4:
version "1.3.4"
resolved "https://registry.yarnpkg.com/axios/-/axios-1.3.4.tgz#f5760cefd9cfb51fd2481acf88c05f67c4523024"
integrity sha512-toYm+Bsyl6VC5wSkfkbbNB6ROv7KY93PEBBL6xyDczaIHasAiv4wPqQ/c4RjoQzipxRD2W5g21cOqQulZ7rHwQ==
dependencies:
follow-redirects "^1.15.0"
form-data "^4.0.0"
proxy-from-env "^1.1.0"
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz"
......@@ -441,6 +455,13 @@ color-name@~1.1.4:
resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
combined-stream@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
dependencies:
delayed-stream "~1.0.0"
complex.js@^2.0.11:
version "2.1.1"
resolved "https://registry.npmjs.org/complex.js/-/complex.js-2.1.1.tgz"
......@@ -546,6 +567,11 @@ define-properties@^1.1.3, define-properties@^1.1.4:
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
delayed-stream@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
denque@^1.4.1:
version "1.5.1"
resolved "https://registry.npmjs.org/denque/-/denque-1.5.1.tgz"
......@@ -934,6 +960,20 @@ flatted@^3.1.0:
resolved "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz"
integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==
follow-redirects@^1.15.0:
version "1.15.2"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13"
integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==
form-data@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452"
integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.8"
mime-types "^2.1.12"
forwarded@0.2.0:
version "0.2.0"
resolved "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz"
......@@ -1410,7 +1450,7 @@ mime-db@1.52.0:
resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
mime-types@~2.1.24, mime-types@~2.1.34:
mime-types@^2.1.12, mime-types@~2.1.24, mime-types@~2.1.34:
version "2.1.35"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
......@@ -1682,6 +1722,11 @@ proxy-addr@~2.0.7:
forwarded "0.2.0"
ipaddr.js "1.9.1"
proxy-from-env@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2"
integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==
pstree.remy@^1.1.8:
version "1.1.8"
resolved "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz"
......
__pycache__
**/__pycache__
\ No newline at end of file
filename,name,mobile_number,email,company_names,college_name,experience,skills,experience_age,degree,words,primary_score,primary_match,secondary_score,secondary_match,no_of_pages,document_similarity,document_score,Score
resumes/Dhaval_Thakkar_Resume.pdf,Dhaval Thakkar,9191729595,thakkar.dhaval.haresh@gmail.com,['UNIFYND TECHNOLOGIES PVT. LTD'],None,"['UNIFYND TECHNOLOGIES PVT. LTD. | Data Scientist', 'Mumbai, MH, India | June 2018 – Present', '• Led the development of a Templatized OCR Engine with GUI to onboard 2000+ retailers from different malls. The', 'microservice deployed is currently operating at an accuracy of 81%', '• Built a Customer Segmentation model to target customers with relevant coupons, rewards, and content resulting', 'in a 3x increase in revenue and 2x increase in coupon utilization', '• Built a Dynamic Coupon Pricing Engine for malls that led to a 5x increase in coupon consumption on the coupon', 'marketplace', '• Built a Pricing Engine and Customer Segmentation Model for a logistics company which saw a 32% reduction in', 'Customer Attrition and a 12% increase in Repeat Purchase Rate', '• Developed an Automated End to End Reporting system to track KPIs performance for 10 malls that saves 60', 'hours of manual labour each month', 'UNIFYND TECHNOLOGIES PVT. LTD. | Intern Data Scientist Mumbai, MH, India | Sept 2017 - June 2018', '• Built a Smart Cryptocurrency trading platform which used social data and historical prices to optimize current', 'portfolio. 
Boosted overall profit from the portfolio by 30%', '• Worked with Product and Marketing teams to identify the power users of an app which resulted in 43% increase in', 'activity and a 65% increase in revenue from these users', 'ZIFF, INC | Deep Learning Intern', 'Provo, UT, USA | May 2017 – Aug 2017', '• Demonstrated competency in Hyperparameter Optimization, Image Augmentation and Learning Rate decay', 'strategies using the Keras Library', '• Deployed a Multi-Class Image classifier microservice written on Flask as a container on AWS EC2 using Docker']","['Html', 'Data analytics', 'Marketing', 'Segmentation', 'Content', 'Algorithms', 'Numpy', 'Pandas', 'Github', 'R', 'Logistics', 'Css', 'Operating systems', 'Testing', 'Flask', 'Mysql', 'Scrapy', 'Machine learning', 'Security', 'Keras', 'Python', 'Kpis', 'System', 'Docker', 'Reporting', 'Analytics', 'Aws', 'Engineering', 'Anaconda', 'Networking', 'Sql']",5.75,['Bachelor of Engineering'],350,44,"['ocr', 'aws', 'python', 'gcp']",42,"['data', 'ocr', 'science']",1,32,50.0,168.0
resumes/python-developer-resume-2.pdf,Python Developer,456-7890,ggonzalez@email.com,None,None,"['Python Developer Intern', 'Knewton', 'April 2016 - April 2017', '· Worked alongside another developer to implement RESTful APIs', 'Chicago, IL', 'in Django that enabled internal analytics team to increase', 'reporting speed by 24%', '· Using Selenium, built out a unit testing infrastructure for a client', 'web application that reduced the number of bugs reported by', 'the client by 11% month over month']","['Django', 'Math', 'Oracle', 'Requests', 'Github', 'Api', 'Database', 'Css', 'Design', 'Postgresql', 'Testing', 'Agile', 'Apis', 'Selenium', 'Rest', 'Python', 'Writing', 'System', 'Updates', 'Javascript', 'Reporting', 'Analytics', 'Aws', 'Sql', 'Process']",1.0,"['B.S.', 'M.S.']",223,22,"['python', 'aws']",28,"['science', 'data']",1,20,50.0,120.0
resumes/software-engineer-resume-1.pdf,New York,456-7890,cmcturland@email.com,None,None,"['Software Engineer', 'Embark', 'January 2015 - current / New York, NY', 'Worked with product managers to re-architect a multi-page web', 'app into a single page web-app, boosting yearly revenue by $1.4M', 'Constructed the logic for a streamlined ad-serving platform that', 'scaled to our 35M users, which improved the page speed by 15%', 'after implementation', 'Tested software for bugs and operating speed, fixing bugs and', 'documenting processes to increase efficiency by 18%', 'Iterated platform for college admissions, collaborating with a group', 'of 4 engineers to create features across the software', 'Software Engineer', 'MarketSmart', 'April 2012 - January 2015 / Washington, DC', 'Built RESTful APIs that served data to the JavaScript front-end', 'based on dynamically chosen user inputs that handled over 500,000', 'concurrent users', 'Built internal tool using NodeJS and Pupeteer.js to automate QA and', 'monitoring of donor-facing web app, which improved CTR by 3%', 'Reviewed code and conducted testing for 3 additional features on', 'donor-facing web app that increased contributions by 12%', 'Software Engineer Intern', 'Marketing Science Company', 'April 2011 - March 2012 / Pittsburgh, PA', 'Partnered with a developer to implement RESTful APIs in Django,', 'enabling analytics team to increase reporting speed by 24%', 'Using Selenium I built out a unit testing infrastructure for a client', 'application that reduced the number of bugs reported by the client', 'by 11% month over month']","['Django', 'Marketing', 'Unix', 'Nosql', 'R', 'Css', 'Postgresql', 'Testing', 'Mysql', 'Sci', 'Apis', 'Selenium', 'Admissions', 'Python', 'Html5', 'Javascript', 'Reporting', 'Analytics', 'C', 'Sql', 'Aws']",3.67,['B.S.'],233,22,"['python', 'aws']",28,"['science', 'data']",1,10,50.0,110.0
resumes/Santhosh_Narayanan.pdf,SANTHOSH NARAYANAN,417-6755,santhosn@usc.edu,None,None,"['on an EC2 server supported by S3 and RDS.', '\uf0a7 Maintained AWS infrastructure for institute’s annual technical festival website, by hosting the website', 'on an EC2 Ubuntu server.', 'K J Somaiya Inst. of Engg. & I.T – Penetration tester', 'December 2016 – January 2016', '\uf0a7 Conducted penetration testing for institute’s online admission and examination portal.', '\uf0a7 Performed authentication checks, access control checks, per screen checks (XSS, SQL injection.).', '\uf0a7 Delivered error free application, incorporating patches for the respective bugs using ASP.NET']","['Html', 'Jupyter', 'Access', 'Numpy', 'Php', 'Matplotlib', 'Oracle', 'Pandas', 'Computer science', 'Css', 'Purchasing', 'Schedule', 'Scheduling', 'Flask', 'Testing', 'Lan', 'Mysql', 'Scrapy', 'Security', 'Programming', 'Website', 'Keras', 'Python', 'System', 'Wordpress', 'Spyder', 'Technical', 'Ubuntu', 'Javascript', 'Java', 'Aws', 'Engineering', 'Sql', 'Certification']",,None,367,22,"['python', 'aws']",14,['science'],1,7,50.0,93.0
About the job
Borneo.io is building the next-generation ML Powered data privacy platform for hyper-growth companies. The Data Scientist role is at the core of Borneo's engineering. You will be building models, manipulating big data, and working with APIs essential to the Borneo product.
We are growing fast and expanding our data science family with outstanding minds and diverse personalities.
As a Data Scientist at Borneo, you'll have the opportunity to:
Work with some of the largest data sets used by some of the leading global technology companies
Help build a predictive product and inform features at the ground level.
Lead the way in leveraging unstructured data for predictive modeling and anomaly detection, and drive privacy compliance.
Responsibilities:
Identify, automate data collection processes
Dive into complex data sets to analyze trends and identify opportunities for improvement.
Build predictive models and machine-learning algorithms
Present information using data visualization techniques
Propose solutions and strategies to business challenges
Have a data-driven decision making approach
Requirements:
5-8 years of relevant experience, B2B startup experience preferred
Proven experience as a Data Scientist or Data Analyst
Experience in building ML models and deploying them to production
A solid understanding of data science fundamentals, statistical techniques, NLP algorithms
Understand research papers and create quick proof of concept relevant to the product
Expert in implementing quick prototypes that shows business value
Experience with programming languages such as NodeJs /Python/JavaScript: Cloud technologies: AWS/GCP/K8 etc.
### Create conda python 3.9 env
conda create -n server python=3.9
### PIP Packages
gensim==3.8.1
texthero==1.1.0
pyresparser
### CONDA Packages
tqdm
pdf2image
pandas
pytesseract
"uvicorn[standard]"
fastapi
### Run server
uvicorn main:app --reload
from typing import List, Union
from pydantic import BaseModel
class ExtractResume(BaseModel):
    """Request body for POST /resume/extract."""

    # Public URL of the resume PDF to download and parse.
    resume_url: str
    # Used to name the temporary download ("resumes/<user_id>.pdf").
    user_id: str


class ResumeScores(BaseModel):
    """Request body for POST /resume/get-scores."""

    resume_url: str
    user_id: str
    # Skill lists the resume is scored against; default to empty lists.
    primary_skills: List[str] = []
    secondary_skills: List[str] = []
    # Raw job-description text (not a file path).
    job_desc: str
\ No newline at end of file
from fastapi import FastAPI

import routes.resume as resumes

# FastAPI application for the resume-analysis utility server.
app = FastAPI()
app.include_router(resumes.router)


@app.get("/")
def read_root():
    # Health-check endpoint.
    return {"status": "running"}
\ No newline at end of file
tqdm==4.61.2
pdf2image==1.16.0
pandas==1.3.0
pytesseract==0.3.8
import pandas as pd
import os
from scripts.processing import document_processing
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
# Default skill lists used only by this standalone batch script; the API
# (routes/resume.py) passes per-request lists instead. Spelling of the
# entries ('Kubernetese', 'predective') is preserved — they are matched
# literally against resume text.
skills = {
    "primary" : ['Python', 'Machine Learning', 'node.js', 'AWS', 'Kubernetese', 'NLP', 'GCP', 'predective', 'OCR'],
    "secondary" : ['data', 'science', 'modeling', 'anomaly', 'privacy', 'visualization', 'OCR'],
}
def document_score(df):
    """Add a combined layout score for each resume row.

    Scores page count and word count on coarse bands, combines them as
    (page_score + word_score) * 0.25 (max 50) into ``document_score``, and
    drops the intermediate columns. Mutates and returns ``df``.
    """
    # Page score: one page ideal, two acceptable, anything else weak.
    df.loc[df['no_of_pages'] == 1, ['page_score']] = 100
    df.loc[df['no_of_pages'] == 2, ['page_score']] = 60
    df.loc[(df['no_of_pages'] > 2) |
           (df['no_of_pages'] == 0), ['page_score']] = 30

    # Word score: 200-399 ideal, 400-599 acceptable, everything else weak.
    df.loc[(df['words'] >= 200) & (df['words'] < 400),
           ['word_score']] = 100
    df.loc[(df['words'] >= 400) & (df['words'] < 600),
           ['word_score']] = 70
    # FIX: the original used `words > 600`, leaving exactly 600 words in no
    # band, so word_score stayed NaN and document_score became NaN.
    df.loc[((df['words'] > 0) & (df['words'] < 200)) |
           (df['words'] >= 600) | (df['words'].isnull()),
           ['word_score']] = 40

    df['document_score'] = (df['page_score'] + df['word_score']) * 0.25
    df.drop(['word_score', 'page_score'], axis=1, inplace=True)
    return df
if __name__=='__main__':
    # Standalone batch scorer: rank every resume in resumes/ against the job
    # description and write Candidates_score.csv.
    resume_dir = 'resumes/'
    # NOTE(review): jd_file is a file *path*, but document_processing treats
    # job_desc as raw text (it never opens the file) — as written, similarity
    # is computed against the literal filename string. Confirm whether the
    # file contents should be read here.
    jd_file = 'Job_description.txt'

    # e.g. ['Dhaval_Thakkar_Resume.pdf', ...]
    list_of_resumes = os.listdir(resume_dir)

    df = pd.DataFrame()
    for file in tqdm(list_of_resumes):
        result = document_processing(resume_dir+file, skills, jd_file)
        # NOTE(review): skills_match now returns a dict (see
        # scripts/processing.py) while pd.concat expects DataFrames — this
        # script appears to predate the API refactor; verify before running.
        candidate = result.skills_match()
        df = pd.concat([df, candidate], ignore_index=True)

    df = document_score(df)

    # Final score: skill scores + layout score + JD similarity.
    df['Score'] = df['primary_score'] + df['secondary_score'] + df['document_score'] + df['document_similarity']
    df = df.sort_values('Score', ascending=False)
    df = df.reset_index(drop=True)
    print(df)
    df.to_csv('Candidates_score.csv', index=False)
\ No newline at end of file
from fastapi import APIRouter
import os
from pyresparser import ResumeParser
import urllib
from baseModels.payloads import ExtractResume, ResumeScores
from scripts.processing import document_processing
router = APIRouter(prefix='/resume')
@router.post("/extract")
def extract(payload:ExtractResume):
    """Download the resume at payload.resume_url and return the fields
    pyresparser extracts from it."""
    filename = 'resumes/'+payload.user_id+'.pdf'
    urllib.request.urlretrieve(payload.resume_url, filename)
    try:
        pyres_data = ResumeParser(filename).get_extracted_data()
    finally:
        # FIX: always remove the downloaded file — the original leaked it
        # whenever parsing raised.
        os.remove(filename)
    return pyres_data
@router.post("/get-scores")
def get_scores(payload:ResumeScores):
    """Download the resume and score it against the supplied primary and
    secondary skill lists and job description."""
    filename = 'resumes/'+payload.user_id+'.pdf'
    urllib.request.urlretrieve(payload.resume_url, filename)
    try:
        skills = {"primary":payload.primary_skills, "secondary":payload.secondary_skills}
        result = document_processing(filename, skills, payload.job_desc)
        candidate = result.skills_match()
    finally:
        # FIX: always remove the downloaded file — the original leaked it
        # whenever extraction/scoring raised.
        os.remove(filename)
    return candidate
\ No newline at end of file
import pandas as pd
import texthero as hero
from pyresparser.utils import extract_text
from PIL import Image
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# skills = {
# "primary" : ['Python', 'Machine Learning', 'node.js', 'AWS', 'Kubernetese', 'NLP', 'GCP', 'predective', 'OCR'],
# "secondary" : ['data', 'science', 'modeling', 'anomaly', 'privacy', 'visualization', 'OCR'],
# }
class document_processing:
    """Scores a single resume against primary/secondary skill lists and a
    job description (raw text)."""

    def __init__(self, resume, skills, job_desc):
        # resume: path to the resume file.
        # skills: {"primary": [...], "secondary": [...]}.
        # job_desc: job-description text, compared as-is (not a file path).
        self.resume = resume
        self.skills = skills
        self.job_desc = job_desc

    def extract_resume(self):
        """Extract plain text from the resume via pyresparser's helper; the
        extension is derived from the file path."""
        filepath = self.resume
        extension = '.' + filepath.split('.')[-1]
        return extract_text(filepath, extension=extension)

    def find_unigram(df, column):
        # NOTE(review): missing `self` — calling this on an instance would
        # bind the instance to `df` and fail. It is unused by the visible
        # code; signature left unchanged pending confirmation.
        unigrams = (df[column].str.lower()
                    .str.replace(r'[^a-z\s]', '')
                    .str.split(expand=True)
                    .stack()).reset_index(drop=True)
        unigrams = hero.clean(unigrams)
        un_df = pd.DataFrame(unigrams, columns=['text'])
        return un_df

    def find_match(self, source, match):
        """Return (score, matched_words): the percentage of `match` words
        present in `source` (both single-column 'text' frames) and the list
        of matched words."""
        match.dropna(inplace=True)
        match.reset_index(drop=True)
        match.columns = ['text']
        match['text'] = hero.clean(match['text'])
        max_val = len(match)
        # Words common to the resume and the skill list, deduplicated.
        df = pd.merge(source, match, on='text')
        df.drop_duplicates(inplace=True)
        df.reset_index(drop=True)
        match_skills = len(df)
        if match_skills == 0:
            lst_skills = []
            score = 0
        else:
            lst_skills = df['text'].tolist()
            score = int((match_skills / max_val) * 100)
        return score, lst_skills

    def fill_data(self, source, target, column):
        """Copy one field from `target` into row 0 of `source` as a string."""
        source.loc[0, column] = str(target[column])
        return source

    def resume_cosine_score(self, text):
        """Cosine similarity (as a percentage, 2 d.p.) between the cleaned
        resume text and the cleaned job-description text."""
        jd_txt = pd.Series(self.job_desc)
        jd_txt = hero.clean(jd_txt)
        jd_txt = jd_txt[0]
        # FIX: removed a stray no-op expression statement ([[1,24],[24,1]])
        # that sat here in the original.
        text_list = [text, jd_txt]
        cv = CountVectorizer()
        count_matrix = cv.fit_transform(text_list)
        match_percentage = cosine_similarity(count_matrix)[0][1] * 100
        return round(match_percentage, 2)

    def skills_match(self):
        """Run the full pipeline: extract resume text, score primary and
        secondary skill matches, and compute job-description similarity."""
        skills = self.skills
        pyres_text = self.extract_resume()
        self.text = pyres_text
        ocr_ser = pd.Series(pyres_text)
        cleaned_words = hero.clean(ocr_ser)
        main_df = pd.DataFrame(cleaned_words[0].split(), columns=['text'])
        self.clean_data = main_df

        pri_score, pri_match = self.find_match(main_df, pd.DataFrame(skills['primary']))
        sec_score, sec_match = self.find_match(main_df, pd.DataFrame(skills['secondary']))
        doc_sim = self.resume_cosine_score(cleaned_words[0])

        return {'primary_score': pri_score,
                'primary_match': pri_match,
                'secondary_score': sec_score,
                'secondary_match': sec_match,
                'similarity': int(doc_sim)}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment