Deleted Experience.txt, Final_Points.csv, Final_Prediction.py, Final_Report.xlsx, Final_Score.txt, Main.py, Output.xlsx, Programming_Languages.txt, TwitterID.txt, Twitter_Extravert.py, dummy data.csv, index.html, json to csv.py, preprocessor.py, points.json, resumes.csv, skl_point.txt, text_to_json.py files
parent 300baae8
Name,Alma Mater,Skill Points,Extraversion Points,Personality Type,Final Grade,CV Status
Annah,Ness Wadia College,62.02,58.5,INTJ,57.86,Satisfied
Bastein,Information Technology Xaviers College,48.04,41.92,INFP,38.35,Low
Mahela,SLIIT University,27.74,57.46,INFJ,32.95,Poor
Sangakara,Computer Science Mercy College,62.02,63.55,INFP,60.79,Satisfied
Virat,Computer Science Saviour College,55.47,54.87,INFJ,51.12,Satisfied
import pandas as pd
from sklearn import linear_model

# Fit a linear model that maps the two sub-scores to a combined score,
# using the synthetic training data in dummy data.csv.
df = pd.read_csv("dummy data.csv")
reg = linear_model.LinearRegression()
reg.fit(df[['Skills', 'Extraversion']], df.Points)

# Final_Score.txt holds whitespace-separated key/value lines
# (Name, Skills, Extraversion, Personality), so tokens 3 and 5 are the
# skill and extraversion scores written by the earlier pipeline stages.
with open("Final_Score.txt", 'r', encoding='utf-8') as f:
    wrds = f.read().split()
skills = wrds[3]
extraversion = wrds[5]

# The training targets run roughly 0-200, so the prediction is halved
# to put the final grade on a 0-100 scale.
grade = reg.predict([[float(skills), float(extraversion)]])[0]
score = round(float(grade) / 2, 2)
if score >= 100:
    score = 99

# Map the numeric grade to a CV status band.
if score >= 85:
    cv_stat = "Excellent"
elif score >= 70:
    cv_stat = "Good"
elif score >= 50:
    cv_stat = "Satisfied"
elif score >= 35:
    cv_stat = "Low"
else:
    cv_stat = "Poor"

with open('Final_Score.txt', 'a') as the_file:
    the_file.write('Grade ' + str(score) + '\n')
    the_file.write('CV_Stat ' + cv_stat + '\n')
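Note that the script refits the regression from dummy data.csv on every run. A minimal sketch of training once and reloading the fitted model per applicant, using pickle; the grade_model.pkl filename is hypothetical, and this persistence step is not part of the original pipeline:

import pickle
import pandas as pd
from sklearn import linear_model

# One-time training run: fit and persist the model (hypothetical file name).
df = pd.read_csv("dummy data.csv")
reg = linear_model.LinearRegression()
reg.fit(df[['Skills', 'Extraversion']], df.Points)
with open("grade_model.pkl", "wb") as f:
    pickle.dump(reg, f)

# Per-applicant runs reload instead of refitting.
with open("grade_model.pkl", "rb") as f:
    reg = pickle.load(f)
print(reg.predict([[55.47, 54.87]])[0] / 2)  # halved to the 0-100 grade scale

After a full pipeline run, the Final_Score.txt the scripts append to looks like this: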
Name Virat
Skills 55.47
Extraversion 54.87
Personality INFJ
Grade 51.12
CV_Stat Satisfied
machine learning
angular
data science
asp
python
ruby
c
java
swift
mysql
php
objective c
\ No newline at end of file
imVkohli
\ No newline at end of file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Compare the applicant's tweets against LIWC-style word lists for
# positive emotion, negative emotion, and social words.
tweet = open("Tweet.txt", 'r', encoding='utf-8').read()
pos = open("posemo.txt").read()
neg = open("negemo.txt").read()
soc = open("Social.txt").read()

pos_txt = [tweet, pos]
neg_txt = [tweet, neg]
soc_txt = [tweet, soc]

cv = CountVectorizer()
pos_count_matrix = cv.fit_transform(pos_txt)
neg_count_matrix = cv.fit_transform(neg_txt)
soc_count_matrix = cv.fit_transform(soc_txt)

# Cosine similarity between the tweet text and each word list,
# expressed as a percentage.
Pos_match = round(cosine_similarity(pos_count_matrix)[0][1] * 100, 2)
Neg_match = round(cosine_similarity(neg_count_matrix)[0][1] * 100, 2)
Soc_match = round(cosine_similarity(soc_count_matrix)[0][1] * 100, 2)

print("Frequency of positive emotion words : ", Pos_match, "%")
print("Frequency of negative emotion words : ", Neg_match, "%")
print("Frequency of social words : ", Soc_match, "%")

# Personality mining scheme
'''
S – Frequency of social words (friend, buddy, coworker)
P – Frequency of positive emotion words
N – Frequency of negative emotion words
'''
# Boost or damp each component once it clears a 5% floor,
# clamping the results to the 0-99 range.
if Pos_match >= 5:
    Pos_match = Pos_match + 20
if Pos_match > 100:
    Pos_match = 99
if Neg_match >= 5:
    Neg_match = Neg_match - 10
if Neg_match < 0:
    Neg_match = 0
if Soc_match >= 5:
    Soc_match = Soc_match + 10
if Soc_match > 100:
    Soc_match = 99

# E = S + 1.335*P - 2.20*N
Extraversion = round(Soc_match + (1.335 * Pos_match) - (2.20 * Neg_match), 2)
if Extraversion < 0:
    Extraversion = 0
if Extraversion > 100:
    Extraversion = 99

print("\n Extraversion Score : ", Extraversion, "%")
with open('Final_Score.txt', 'a') as the_file:
    the_file.write('Extraversion ' + str(Extraversion) + '\n')
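For concreteness, a worked pass through the scheme above, with illustrative similarity percentages rather than values from the repository:

# Illustrative inputs: raw cosine matches of P=12, N=16, S=20 (percent).
P, N, S = 12.0, 16.0, 20.0
P += 20  # P >= 5, so it is boosted to 32.0
N -= 10  # N >= 5, so it is damped to 6.0
S += 10  # S >= 5, so it is boosted to 30.0
E = round(S + 1.335 * P - 2.20 * N, 2)
print(E)  # 30 + 42.72 - 13.2 = 59.52, inside [0, 100] so no clamping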
Skills,Extraversion,Points
35.49650496,16.53193835,18.43283194
34.09319379,67.84475222,97.56636237
39.06635457,96.29683131,142.4197136
35.88817844,0.30537549,12.65175281
36.07248309,68.05381476,97.98993884
39.46023064,63.75568683,120.0401204
30.36688242,70.09586899,115.9404467
31.69514177,68.38363636,117.9878927
36.16691284,58.88954667,110.330662
53.00915486,65.02154591,138.6992751
57.10428818,83.09448983,146.9513575
59.63696194,30.5760654,91.51977408
51.55732264,80.8134331,138.048774
56.71280679,10.72767313,74.01713716
53.10147876,14.60566631,83.30807883
55.58251197,3.994231941,14.93147477
58.41496578,91.76711951,144.2623194
53.77243917,16.48021528,19.04230599
56.26607883,31.81203554,109.078199
82.41299525,35.81196415,119.2481782
81.69867529,7.727125314,87.50741226
82.40790038,47.44102142,129.3803442
82.03265161,73.00256001,172.390796
79.91135641,69.4356799,156.8046536
75.70945765,37.14354068,130.6594174
82.35083813,18.86061036,119.2826333
83.11897356,12.02756656,111.8892152
82.89156036,7.806530957,92.00286457
78.26241514,55.49372758,140.9567315
68.56546827,70.94274617,136.7463026
63.28931592,87.34783935,152.6639287
68.18762439,90.93495458,164.4863457
64.13039647,44.32309012,113.403142
63.39961438,84.60619424,159.4080057
65.44484808,63.14449164,129.23008
66.93380555,54.37869126,118.1546997
65.06627605,67.6984887,133.8642671
61.94231357,69.65653674,136.8528765
61.32939142,71.04815714,149.8200389
97.51359809,27.7913343,123.7124205
99.9332613,89.77275557,197.6313208
97.44158356,45.75347418,143.7623663
92.29737325,53.34431055,156.8984639
91.18916475,37.65572157,119.2727369
98.69659671,6.064712651,16.48082853
92.07700879,46.73846336,142.1010186
98.98544252,7.950353385,35.02100949
95.67127331,96.71016572,190.959736
93.68600347,36.47726869,128.4463405
26.90153158,42.44479163,35.05814541
29.73602244,51.43990863,58.31734764
27.5995731,98.2474321,63.51486091
26.32654439,37.19162968,27.24289604
26.00984018,3.264318075,12.47969631
22.28512437,48.54514323,57.4292278
18.251028,44.70997441,14.88847161
16.87701566,0.028879806,1.738353986
19.1715814,61.18097853,19.58053321
11.70742374,58.08195389,11.32035414
17.87929234,52.48126563,17.86415649
5.26270135,17.41946857,7.068775348
2.992352525,78.45031324,6.724577792
4.945628306,49.74635187,6.612753389
7.703564945,29.15005013,8.269380692
8.681936915,3.644740876,2.539613883
7.126290053,94.7308738,9.476206274
3.690640992,69.87277013,8.64028864
5.091161642,69.42935678,9.773533433
3.409807461,60.08849318,84.8279528
47.72384797,21.93207372,70.57359615
42.76034139,96.39989087,162.1994243
43.5368583,97.7481921,150.8549879
45.23315774,42.83887798,86.1365176
45.75231328,35.66689082,99.61338689
45.73547914,88.53712966,141.5625555
42.16843743,89.67150337,124.049367
<!DOCTYPE html>
<html lang="en" dir="ltr">
  <head>
    <meta charset="utf-8">
    <title>Applicant Ranking</title>
    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
    <link rel="stylesheet" type="text/css" href="mystyle.css">
    <script src="https://cdn.jsdelivr.net/npm/chart.js@2.8.0"></script>
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/2.2.0/jquery.min.js"></script>
  </head>
  <body>
    <div class="container">
      <div class="row">
        <div class="col-6 chart">
          <div class="column">
            <canvas id="myChart3" width="0" height="0"></canvas>
            <h5 id="demo"></h5>
            <script>
              // Show the applicant's name from the generated points.json.
              fetch("points.json")
                .then(response => response.json())
                .then(data => {
                  document.getElementById("demo").innerHTML = "Name : " + String(data.Name);
                })
            </script>
          </div>
          <div class="column">
            <canvas id="myChart7" width="0" height="0"></canvas>
            <h5 id="demo2"></h5>
            <script>
              fetch("points.json")
                .then(response => response.json())
                .then(data => {
                  document.getElementById("demo2").innerHTML = "Degree : " + String(data.Degree);
                })
            </script>
          </div>
          <div class="column">
            <canvas id="myChart8" width="0" height="0"></canvas>
            <h5 id="demo3"></h5>
            <script>
              fetch("points.json")
                .then(response => response.json())
                .then(data => {
                  document.getElementById("demo3").innerHTML = "CV Status : " + String(data.CV_Stat);
                })
            </script>
          </div>
        </div>
        <div class="col-6 chart">
          <canvas id="myChart4" width="500" height="400"></canvas>
          <script type="text/javascript">
            // Doughnut chart of the final grade against the 0-100 scale.
            fetch("points.json")
              .then(response => response.json())
              .then(data => {
                var myData = data;
                var myDoughnutChart = document.getElementById("myChart4").getContext('2d');
                var x = parseFloat(myData.Grade);
                let chart1 = new Chart(myDoughnutChart, {
                  type: 'doughnut',
                  data: {
                    labels: ['Grade'],
                    datasets: [{
                      data: [x, (100 - x)],
                      backgroundColor: ['#32a852', '#e3e3e3']
                    }]
                  },
                  options: {
                    title: {
                      text: "Final Grade : " + x,
                      display: true
                    }
                  }
                });
              })
          </script>
        </div>
      </div>
      <div class="row">
        <div class="col-6 chart">
          <canvas id="myChart" width="500" height="400"></canvas>
          <script type="text/javascript">
            // Doughnut chart of the skill points.
            fetch("points.json")
              .then(response => response.json())
              .then(data => {
                var myData = data;
                var myDoughnutChart = document.getElementById("myChart").getContext('2d');
                var x = parseFloat(myData.Skills);
                let chart1 = new Chart(myDoughnutChart, {
                  type: 'doughnut',
                  data: {
                    labels: ['Skills'],
                    datasets: [{
                      data: [x, (100 - x)],
                      backgroundColor: ['#49A9EA', '#e3e3e3']
                    }]
                  },
                  options: {
                    title: {
                      text: "Skills : " + x,
                      display: true
                    }
                  }
                });
              })
          </script>
        </div>
        <div class="col-6 chart">
          <canvas id="myChart2" width="500" height="400"></canvas>
          <script type="text/javascript">
            // Doughnut chart of the extraversion points.
            fetch("points.json")
              .then(response => response.json())
              .then(data => {
                var myData = data;
                var myDoughnutChart = document.getElementById("myChart2").getContext('2d');
                var x = parseFloat(myData.Extraversion);
                let chart1 = new Chart(myDoughnutChart, {
                  type: 'doughnut',
                  data: {
                    labels: ['Extraversion'],
                    datasets: [{
                      data: [x, (100 - x)],
                      backgroundColor: ['#e33afc', '#e3e3e3']
                    }]
                  },
                  options: {
                    title: {
                      text: "Extraversion : " + x,
                      display: true
                    }
                  }
                });
              })
          </script>
        </div>
      </div>
    </div>
  </body>
</html>
\ No newline at end of file
import json
import csv

# Load the consolidated applicant record produced by text_to_json.py.
with open('points.json') as f:
    data = json.load(f)

fieldnames = ['Name', 'Degree', 'Skill Points', 'Extraversion Points',
              'Personality Type', 'Final Grade', 'CV Status']

# Write the header and the single applicant row in one pass.
with open('Final_Points.csv', 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerow({'Name': data['Name'],
                     'Degree': data['Degree'],
                     'Skill Points': data['Skills'],
                     'Extraversion Points': data['Extraversion'],
                     'Personality Type': data['Personality'],
                     'Final Grade': data['Grade'],
                     'CV Status': data['CV_Stat']})
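The same one-row conversion can also be written with pandas, which this commit already depends on elsewhere; the key-to-header mapping mirrors the DictWriter version above:

import json
import pandas as pd

with open('points.json') as f:
    data = json.load(f)

# Rename the JSON keys to the CSV headers, then write a single-row frame.
row = {'Name': data['Name'], 'Degree': data['Degree'],
       'Skill Points': data['Skills'], 'Extraversion Points': data['Extraversion'],
       'Personality Type': data['Personality'], 'Final Grade': data['Grade'],
       'CV Status': data['CV_Stat']}
pd.DataFrame([row]).to_csv('Final_Points.csv', index=False)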
{
"Name": "Virat",
"Skills": "55.47",
"Extraversion": "54.87",
"Personality": "INFJ",
"Grade": "51.12",
"CV_Stat": "Satisfied",
"Degree": "Computer Science Saviour College"
}
\ No newline at end of file
# Code to preprocess the user's text posts
import re
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer

SAVE_MODEL = True
MODELS_DIR = "models"
DATA_DIR = "data"
DIMENSIONS = ["IE", "NS", "FT", "PJ"]

# data = pd.read_csv('data/mbti_personality.csv')

def get_types(row):
    # Encode each axis of the MBTI 'type' column as a binary flag.
    t = row['type']
    I = 0
    N = 0
    T = 0
    J = 0
    if t[0] == 'I':
        I = 1
    elif t[0] == 'E':
        I = 0
    else:
        print('I-E incorrect')
    if t[1] == 'N':
        N = 1
    elif t[1] == 'S':
        N = 0
    else:
        print('N-S incorrect')
    if t[2] == 'T':
        T = 1
    elif t[2] == 'F':
        T = 0
    else:
        print('T-F incorrect')
    if t[3] == 'J':
        J = 1
    elif t[3] == 'P':
        J = 0
    else:
        print('J-P incorrect')
    return pd.Series({'IE': I, 'NS': N, 'TF': T, 'JP': J})

# data = data.join(data.apply(lambda row: get_types(row), axis=1))
# print("Introversion (I) / Extroversion (E):\t", data['IE'].value_counts()[0], " / ", data['IE'].value_counts()[1])
# print("Intuition (N) – Sensing (S):\t\t", data['NS'].value_counts()[0], " / ", data['NS'].value_counts()[1])
# print("Thinking (T) – Feeling (F):\t\t", data['TF'].value_counts()[0], " / ", data['TF'].value_counts()[1])
# print("Judging (J) – Perceiving (P):\t\t", data['JP'].value_counts()[0], " / ", data['JP'].value_counts()[1])

b_Pers = {'I': 0, 'E': 1, 'N': 0, 'S': 1, 'F': 0, 'T': 1, 'J': 0, 'P': 1}
b_Pers_list = [{0: 'I', 1: 'E'}, {0: 'N', 1: 'S'}, {0: 'F', 1: 'T'}, {0: 'J', 1: 'P'}]

def translate_personality(personality):
    # transform mbti to binary vector
    return [b_Pers[l] for l in personality]

def translate_back(personality):
    # transform binary vector to mbti personality
    s = ""
    for i, l in enumerate(personality):
        s += b_Pers_list[i][l]
    return s

# To remove the personality type from the posts
unique_type_list = ['INFJ', 'ENTP', 'INTP', 'INTJ', 'ENTJ', 'ENFJ', 'INFP', 'ENFP',
                    'ISFP', 'ISTP', 'ISFJ', 'ISTJ', 'ESTP', 'ESFP', 'ESTJ', 'ESFJ']
unique_type_list = [x.lower() for x in unique_type_list]

# Lemmatize
stemmer = PorterStemmer()
lemmatiser = WordNetLemmatizer()

# Cache the stop words for speed
cachedStopWords = stopwords.words("english")

def pre_process_data(data, remove_stop_words=True, remove_mbti_profiles=True):
    list_personality = []
    list_posts = []
    len_data = len(data)
    i = 0
    for row in data.iterrows():
        i += 1
        if i % 500 == 0 or i == 1 or i == len_data:
            print("%s of %s rows" % (i, len_data))
        # Remove and clean posts
        posts = row[1].posts
        temp = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', posts)
        temp = re.sub("[^a-zA-Z]", " ", temp)
        temp = re.sub(' +', ' ', temp).lower()
        if remove_stop_words:
            temp = " ".join([lemmatiser.lemmatize(w) for w in temp.split(' ') if w not in cachedStopWords])
        else:
            temp = " ".join([lemmatiser.lemmatize(w) for w in temp.split(' ')])
        if remove_mbti_profiles:
            for t in unique_type_list:
                temp = temp.replace(t, "")
        type_labelized = translate_personality(row[1].type)
        list_personality.append(type_labelized)
        list_posts.append(temp)
    list_posts = np.array(list_posts)
    list_personality = np.array(list_personality)
    return list_posts, list_personality
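A minimal usage sketch for pre_process_data, assuming it runs in the same module and that the input follows the 'type'/'posts' layout of the mbti_personality.csv referenced in the comments (the NLTK stopwords and wordnet corpora must be downloaded first; the sample post text is invented):

import pandas as pd

# One toy row in the expected 'type'/'posts' layout.
sample = pd.DataFrame({
    'type': ['INFJ'],
    'posts': ["Loved catching up with my coworkers today! https://example.com"]
})
posts, personality = pre_process_data(sample)
print(posts[0])        # cleaned, lemmatized, stop-word-free text
print(personality[0])  # [0 0 0 0]: I, N, F, J each map to 0 in b_Pers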
55.47
\ No newline at end of file
import json

filename = 'Final_Score.txt'
filename2 = 'Degree.txt'
dict1 = {}

# Parse "key value" lines from the score file into a dict.
with open(filename) as fh:
    for line in fh:
        if not line.strip():
            continue  # skip blank lines
        command, description = line.strip().split(None, 1)
        dict1[command] = description.strip()

# Merge in the degree information from Degree.txt.
with open(filename2) as fh:
    for line in fh:
        if not line.strip():
            continue
        command, description = line.strip().split(None, 1)
        dict1[command] = description.strip()

# Write the combined applicant record to points.json.
with open("points.json", "w") as out_file:
    json.dump(dict1, out_file, indent=4, sort_keys=False)
\ No newline at end of file