Commit ae332288 authored by Ranodya M.J.C IT19987644

Merge branch 'Charmie_IT19987644' into 'master'

Charmie it19987644

See merge request !3
parents 06f73d55 314fd011
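# --- KGQnA/_complex.py (module name inferred from the "from KGQnA._complex import ComplexFunc" import later in this merge) ---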
import spacy
import requests
from transformers import pipeline
class ComplexFunc:
# """docstring for Tenses."""
def __init__(self):
self.ent_pairs = list()
self.nlp = spacy.load('en_core_web_sm')
self.nlp_ = pipeline("question-answering", model='distilbert-base-cased-distilled-squad')
def get_time_place_from_sent(self,sentence):
xdate =[]
xplace =[]
for i in sentence.ents:
if i.label_ == 'DATE':
xdate.append(str(i))
if i.label_ == 'GPE':
xplace.append(str(i))
return xdate, xplace
def find_obj(self, sentence, place, time):
object_list = []
buffer_obj = None  # returned even when no object token is found in the sentence
for word in sentence:
# """OBJECT FINDING loop"""
if word.dep_ in ('obj', 'dobj', 'pobj'):
buffer_obj = word
if str(word) in place and word.nbor(-1).dep_ in ('prep') and str(word.nbor(-1)) == "of":
pass
# """ INDIA should be in place list + "of" "India" is there then it will come here """
else:
if str(word) not in time and str(word) not in place:
# """ INDIA should not be in place list + INDIA should not be in time list """
# """ice-cream and mangoes"""
for child in word.subtree:
if child.dep_ in ('conj', 'dobj', 'pobj', 'obj') and (str(child) not in time) and (str(child) not in place):
if [i for i in child.lefts]:
if child.nbor(-1).dep_ in ('nummod') and child.dep_ in ('dobj', 'obj','pobj'):
child = str(child.nbor(-1)) + " " + str(child)
object_list.append(str(child))
elif child.nbor(-1).dep_ in ('punct'):
if child.nbor(-2).dep_ in ('compound'):
#ice-cream
child = str(child.nbor(-2)) + str(child.nbor(-1)) + str(child)
object_list.append(str(child))
elif child.nbor(-2).dep_ in ('amod'):
#social-distancing
child = str(child.nbor(-2)) + str(child.nbor(-1)) + str(child)
object_list.append(str(child))
elif child.nbor(-1).dep_ in ('compound'):
# print(child)
child_with_comp = ""
for i in child.subtree:
if i.dep_ in ('compound', 'nummod','quantmod'):
if child_with_comp == "":
child_with_comp = str(i)
else:
child_with_comp = child_with_comp +" "+ str(i)
elif i.dep_ in ('cc'):
break
child = child_with_comp + " " + str(child)
# ice cream
object_list.append(str(child))
elif child.nbor(-1).dep_ in ('det'):
# The Taj Mahal
object_list.append(str(child))
elif [i for i in child.rights]:
if str(child.text) not in object_list:
object_list.append(str(child.text))
for a in child.children:
if a.dep_ in ('conj'):
if a.nbor(-1).dep_ in ('punct'):
pass
else:
object_list.extend( [ str(a.text) ] )
else:
# icecream
if str(child) not in object_list:
object_list.append(str(child))
elif str(word) in place and str(word.nbor(-1)) != "of":
if object_list == []:
object_list.append(str(word))
else:
pass
else:
if str(word) in time and object_list == []:
object_list.append(str(word))
return object_list, buffer_obj
def find_subj(self, sentence):
subject_list = []
# """ SUBJECT FINDING loop"""
dep_word = [word.dep_ for word in sentence]
word_dep_count_subj = [dep_word.index(word) for word in dep_word if word in ('nsubj', 'subj', 'nsubjpass')]
if word_dep_count_subj:
word_dep_count_subj = word_dep_count_subj[0] + 1
else:
word_dep_count_subj = 1
subject_final = ""
for word in sentence:
# print(word.dep_, word)
if word_dep_count_subj > 0:
# in prime minister it gives compound and then nmod
if word.dep_ in ('compound') or word.dep_ in ('nmod') or word.dep_ in ('amod') or word.dep_ in ('poss') or word.dep_ in ('case') or word.dep_ in ('nummod'):
if subject_final == "":
subject_final = str(word)
word_dep_count_subj = word_dep_count_subj - 1
elif word.dep_ in ('case'):
subject_final = subject_final+ "" +str(word)
word_dep_count_subj = word_dep_count_subj - 1
else:
subject_final = subject_final+ " " +str(word)
word_dep_count_subj = word_dep_count_subj - 1
elif word.dep_ in ('nsubj', 'subj', 'nsubjpass'):
if subject_final == "":
subject_final = str(word)
subject_list.extend([str(a.text) for a in word.subtree if a.dep_ in ('conj')])
word_dep_count_subj = word_dep_count_subj - 1
break
else:
subject_final = subject_final+" "+str(word)
subject_list.extend([str(a.text) for a in word.subtree if a.dep_ in ('conj')])
word_dep_count_subj = word_dep_count_subj - 1
break
else:
pass
subject_list.append(subject_final)
return subject_list
def find_relation(self, buffer_obj):
aux_relation = ""
if buffer_obj is None:
# no object token was found, so there is no ROOT verb to anchor a relation on
return 'unknown', aux_relation
# RELATION FINDING loop
relation = [w for w in buffer_obj.ancestors if w.dep_ =='ROOT']
if relation:
relation = relation[0]
sp_relation = relation
if relation.nbor(1).pos_ in ('VERB'):
if relation.nbor(2).dep_ in ('xcomp'):
relation = ' '.join((str(relation), str(relation.nbor(1)), str(relation.nbor(2))))
else:
relation = str(relation)
if str(sp_relation.nbor(2)) != 'and':
if sp_relation.nbor(1).dep_ in ('xcomp'):
aux_relation = str(sp_relation.nbor(1))
else:
aux_relation = str(sp_relation.nbor(2))
elif relation.nbor(1).pos_ in ('ADP', 'PART') and relation.nbor(1).dep_ in ('aux') and str(relation.nbor(1)) == 'to':
# print(relation.nbor(1), relation.nbor(1).pos_ )
# print(relation)
relation = " ".join((str(relation), str(relation.nbor(1))))
if str(sp_relation.nbor(2)) != 'and':
aux_relation = str(sp_relation.nbor(2))
elif relation.nbor(1).dep_ in ('prep') and str(relation.nbor(1)) == 'to' and (relation.nbor(1)).dep_ not in ('obj','dobj','pobj','det'):
# print(relation.nbor(1), relation.nbor(1).pos_ )
# print(relation)
relation = " ".join((str(relation), str(relation.nbor(1))))
else:
relation = str(relation)
else:
relation = 'unknown'
return relation, aux_relation
def normal_sent(self, sentence):
time, place = self.get_time_place_from_sent(sentence)
subject_list, object_list = [], []
aux_relation, child_with_comp = "", ""
subject_list = self.find_subj(sentence)
object_list, buffer_obj = self.find_obj(sentence, place, time)
relation, aux_relation = self.find_relation(buffer_obj)
self.ent_pairs = []
if time:
time = time[0]
else:
time = ""
if place:
place = place[0]
else:
place = ""
pa, pb=[], []
for m in subject_list:
pa.append([m])
for n in object_list:
pb.append([n])
# print(pa, pb)
for m in range(0, len(pa)):
for n in range(0, len(pb)):
self.ent_pairs.append([str(pa[m][0]).lower(), str(relation).lower(),str(aux_relation).lower(), str(pb[n][0]).lower(), str(time), str(place)])
# print(self.ent_pairs)
return self.ent_pairs
def question_pairs(self, question__):
# questionList = question__.split(" ")
# print(questionList)
questionNLPed = self.nlp(question__)
maybe_object = ([i for i in questionNLPed if i.dep_ in ('obj', 'pobj', 'dobj')])
# print(maybe_object)
maybe_place, maybe_time = [], []
aux_relation = ""
maybe_time, maybe_place = self.get_time_place_from_sent(questionNLPed)
object_list = []
for obj in questionNLPed:
objectNEW = obj
# print(obj.dep_)
# FOR WHO
if obj.dep_ in ('obj', 'dobj', 'pobj', 'xcomp') and str(obj).lower() != "what":
buffer_obj = obj
if obj.dep_ in ('xcomp') and obj.nbor(-1).dep_ in ('aux') and obj.nbor(-2).dep_ in ('ROOT'):
# print("here")
continue
if str(obj) in maybe_place and obj.nbor(-1).dep_ in ('prep') and str(obj.nbor(-1)) == "of":
# """ INDIA should be in place list + "of" "India" is there then it will come here """
pass
else:
if str(obj) not in maybe_time and str(obj) not in maybe_place:
# INDIA should not be in place list + INDIA should not be in time list
# ice-cream and mangoes
for child in obj.subtree:
# print(child)
if child.dep_ in ('conj', 'dobj', 'pobj', 'obj'):
if [i for i in child.lefts]:
if child.nbor(-1).dep_ in ('punct') and child.nbor(-2).dep_ in ('compound'):
# """ice-cream"""
child = str(child.nbor(-2)) + str(child.nbor(-1)) + str(child)
object_list.append(str(child))
elif child.nbor(-1).dep_ in ('compound'):
# print(child)
child_with_comp = ""
for i in child.subtree:
if i.dep_ in ('compound', 'nummod','quantmod'):
if child_with_comp == "":
child_with_comp = str(i)
else:
child_with_comp = child_with_comp +" "+ str(i)
elif i.dep_ in ('cc'):
break
child = child_with_comp + " " + str(child)
# ice cream
# print(child)
object_list.append(str(child))
elif child.nbor(-1).dep_ in ('det'):
# The Taj Mahal
object_list.append(str(child))
elif [i for i in child.rights]:
if str(child.text) not in object_list:
object_list.append(str(child.text))
for a in child.children:
if a.dep_ in ('conj'):
if a.nbor(-1).dep_ in ('punct'):
pass
else:
object_list.extend( [ str(a.text) ] )
else:
# icecream
if str(child) not in object_list:
object_list.append(str(child))
elif obj.dep_ in ('xcomp'):
object_list.append(str(obj))
elif str(obj) in maybe_place and str(obj.nbor(-1)) != "of":
object_list.append(str(obj))
else:
if str(obj) in maybe_time and object_list == []:
object_list.append(str(obj))
# print(object_list)
obj = object_list[-1]
# # print(obj)
# # print(obj.nbor(1))
# try:
# if obj.nbor(-1).pos_ in ('PUNCT') and obj.nbor(-2).pos_ in ('NOUN'):
# obj = ' '.join((str(obj.nbor(-2)), str(obj)))
# elif obj.nbor(-1).pos_ in ('NOUN'):
# obj = ' '.join( (str(obj.nbor(-1)), str(obj) ))
# # elif obj.nbor(1).pos_ in ('ROOT'):
# # pass
# except IndexError:
# pass
# elif obj.nbor(1).pos_ in :
# print(obj.nbor(1).pos_)
# print(obj)
relation = [w for w in objectNEW.ancestors if w.dep_ =='ROOT']
if relation:
relation = relation[0]
sp_relation = relation
# print(sp_relation)
# print(relation)
if relation.nbor(1).pos_ in ('ADP', 'PART', 'VERB'):
if relation.nbor(2).dep_ in ('xcomp'):
aux_relation = str(relation.nbor(2))
relation = str(relation)+" "+str(relation.nbor(1))
else:# print(relation.nbor(2).dep_)
relation = str(relation)
# print(relation)
subject = [a for a in sp_relation.lefts if a.dep_ in ('subj', 'nsubj','nsubjpass')] # identify subject nodes
# print(subject)
if subject:
subject = subject[0]
# print(subject)
# subject, subject_type = self.prepro.refine_ent(subject, question__)
# print(subject)
else:
subject = 'unknown'
else:
relation = 'unknown'
# obj, object_type = self.prepro.refine_ent(obj, question__)
# print(subject, relation, obj)
self.ent_pairs = []
if maybe_time and maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str(maybe_time[0]).lower(), str(maybe_place[0]).lower()])
elif maybe_time:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str(maybe_time[0]).lower(), str("").lower()])
elif maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str("").lower(), str(maybe_place[0]).lower()])
else:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str("").lower(), str("").lower()])
# ent_pairs.append([str(subject), str(relation), str(obj)])
# print(self.ent_pairs)
return self.ent_pairs
elif str(obj).lower() == "what":
relation = [w for w in objectNEW.ancestors if w.dep_ =='ROOT']
if relation:
relation = relation[0]
sp_relation = relation
if relation.nbor(1).pos_ in ('ADP', 'PART', 'VERB'):
if relation.nbor(2).dep_ in ('xcomp'):
aux_relation = str(relation.nbor(2))
relation = str(relation)+" "+str(relation.nbor(1))
else:# print(relation.nbor(2).dep_)
relation = str(relation)
# print(relation)
subject = self.find_subj(questionNLPed)
# print(subject)
subject = subject[-1]
# subject = [a for a in sp_relation.lefts if a.dep_ in ('subj', 'nsubj','nsubjpass')] # identify subject nodes
# print(subject)
# if subject:
# subject = subject[0]
# print(subject)
# subject, subject_type = self.prepro.refine_ent(subject, question__)
# print(subject)
# else:
# subject = 'unknown'
else:
relation = 'unknown'
# obj, object_type = self.prepro.refine_ent(obj, question__)
# print(obj)
self.ent_pairs = []
# print(subject,relation,obj)
if maybe_time and maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str(maybe_time[0]).lower(), str(maybe_place[0]).lower()])
elif maybe_time:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str(maybe_time[0]).lower(), str("").lower()])
elif maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str("").lower(), str(maybe_place[0]).lower()])
else:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(obj).lower(), str("").lower(), str("").lower()])
# ent_pairs.append([str(subject), str(relation), str(obj)])
# print(self.ent_pairs)
return self.ent_pairs
elif obj.dep_ in ('advmod'):
# print(str(obj).lower())
if str(obj).lower() == 'where':
relation = [w for w in obj.ancestors if w.dep_ =='ROOT']
# print(relation)
if relation:
relation = relation[0]
sp_relation = relation
# print(relation)
if relation.nbor(1).pos_ in ('ADP', 'PART', 'VERB'):
if relation.nbor(2).dep_ in ('xcomp'):
aux_relation = str(relation.nbor(2))
relation = str(relation)+" "+str(relation.nbor(1))
else:# print(relation.nbor(2).dep_)
relation = str(relation)
# print(relation)
# for left_word in sp_relation.lefts:
# if left_word.dep_ in ('subj', 'nsubj','nsubjpass'):
# if [i for i in left_word.lefts]:
# for left_of_left_word in left_word.lefts:
# subject = str(left_of_left_word) + " " + str(left_word)
# else:
# subject = str(left_word)
subject = self.find_subj(questionNLPed)
# print(subject)
subject = subject[-1]
# subject = [a for a in sp_relation.lefts if a.dep_ in ('subj', 'nsubj','nsubjpass')] # identify subject nodes
# # print(subject)
# if subject:
# subject = subject[0]
# # print(subject)
# # subject, subject_type = self.prepro.refine_ent(subject, question__)
# # print(subject)
# else:
# subject = 'unknown'
else:
relation = 'unknown'
self.ent_pairs = []
# print(obj, subject, relation)
if maybe_object:
if maybe_time and maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str(maybe_time[0]).lower(), str("where").lower()])
elif maybe_time:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str(maybe_time[0]).lower(), str("where").lower()])
elif maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str("").lower(), str("where").lower()])
else:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str("").lower(), str("where").lower()])
else:
if maybe_time and maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str(maybe_time[0]).lower(), str("where").lower()])
elif maybe_time:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str(maybe_time[0]).lower(), str("where").lower()])
elif maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str("").lower(), str("where").lower()])
else:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str("").lower(), str("where").lower()])
# ent_pairs.append([str(subject), str(relation), str(obj)])
# print(self.ent_pairs)
return self.ent_pairs
elif str(obj).lower() == 'when':
# print(obj)
relation = [w for w in obj.ancestors if w.dep_ =='ROOT']
# print(relation)
if relation:
relation = relation[0]
sp_relation = relation
# print(relation)
if relation.nbor(1).pos_ in ('ADP', 'PART', 'VERB'):
# print(relation.nbor(1).pos_)
if relation.nbor(2).dep_ in ('xcomp'):
relation = ' '.join((str(relation), str(relation.nbor(1)), str(relation.nbor(2))))
else:# print(relation.nbor(2).dep_)
relation = ' '.join((str(relation), str(relation.nbor(1))))
# print(relation)
for left_word in sp_relation.lefts:
if left_word.dep_ in ('subj', 'nsubj','nsubjpass'):
if [i for i in left_word.lefts]:
for left_of_left_word in left_word.lefts:
subject = str(left_of_left_word) + " " + str(left_word)
else:
subject = str(left_word)
# subject = [a for a in sp_relation.lefts if a.dep_ in ('subj', 'nsubj','nsubjpass')] # identify subject nodes
# # print(subject)
# if subject:
# subject = subject[0]
# # print(subject)
# # subject, subject_type = self.prepro.refine_ent(subject, question__)
# # print(subject)
# else:
# subject = 'unknown'
else:
relation = 'unknown'
self.ent_pairs = []
# print(obj, subject, relation)
if maybe_object:
if maybe_time and maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str("when").lower(), str(maybe_place[0]).lower()])
elif maybe_time:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str("when").lower(), str("").lower()])
elif maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str("when").lower(), str(maybe_place[0]).lower()])
else:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str(maybe_object[-1]).lower(), str("when").lower(), str("").lower()])
else:
if maybe_time and maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str("when").lower(), str(maybe_place[0]).lower()])
elif maybe_time:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str("when").lower(), str("").lower()])
elif maybe_place:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str("when").lower(), str(maybe_place[0]).lower()])
else:
self.ent_pairs.append([str(subject).lower(), str(relation).lower(),str(aux_relation).lower(), str("").lower(), str("when").lower(), str("").lower()])
# ent_pairs.append([str(subject), str(relation), str(obj)])
# print(self.ent_pairs)
return self.ent_pairs
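# --- KGQnA/_exportPairs.py (module name inferred from the "from KGQnA._exportPairs import exportToJSON" import later in this merge) ---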
# import json
# import pandas
import os
class exportToJSON:
"""docstring for exportToJSON."""
def __init__(self):
super(exportToJSON, self).__init__()
def dumpdata(self, pairs):
os.makedirs('extra', exist_ok=True)
pairs.to_json('extra/database.json', orient='index')
class exportToCSV:
"""docstring for exportToJSON."""
def __init__(self):
super(exportToJSON, self).__init__()
def dumpdata(self, pairs):
df = pairs.to_csv(index=False)
# ff = pairs.to_csv('out.zip', index=False, compression=compression_opts)
# print(df)
# df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
# 'mask': ['red', 'purple'],
# 'weapon': ['sai', 'bo staff']})
#
# df.to_csv(index=False)
# 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
#
# Create ‘out.zip’ containing ‘out.csv’
# compression_opts = dict(method='zip',
# archive_name='out.csv')
# df.to_csv('out.zip', index=False,
# compression=compression_opts)
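# --- KGQnA/_getentitypair.py (module name inferred from the "from KGQnA._getentitypair import GetEntity" imports later in this merge) ---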
# import re
import pandas as pd
import spacy
from KGQnA._complex import ComplexFunc
from KGQnA._resolvedep import change_nouns
class GetEntity:
"""docstring for GetEntity."""
def __init__(self):
super(GetEntity, self).__init__()
self.complex = ComplexFunc()
self.nlp = spacy.load('en_core_web_sm')
self.change = change_nouns()
def preprocess_text(self, input_file):
text_strip = [text.strip() for text in input_file]
preprocessed_text = [text for text in text_strip if text not in ('', ' ')]
text = " ".join(preprocessed_text)
text = self.change.resolved(text)
text = self.nlp(text)
return text
def get_entity(self, text):
ent_pairs, final_entity_pairs = [],[]
sentences = [one_sentence.text.strip() for one_sentence in text.sents]
for one_sentence in sentences:
final_entity_pairs = []
one_sentence = self.nlp(one_sentence)
dep = [token.dep_ for token in one_sentence]
# print(dep)
# pos = [token.pos_ for token in one_sentence]
# label = [token.label_ for token in one_sentence.ents]
normal_sent_ = self.complex.normal_sent(one_sentence)
if normal_sent_:
for pair in normal_sent_:
ent_pairs.append(pair)
pairs = pd.DataFrame(ent_pairs, columns=['source', 'relation', 'aux_relation', 'target', 'time', 'place'])
number_of_ent_pairs = str(len(ent_pairs))
final_entity_pairs.append(pairs)
if final_entity_pairs:
return final_entity_pairs, number_of_ent_pairs
return None, None
if __name__ == '__main__':
test = GetEntity()
text = test.nlp("Vibhav ate chocolates. Vedant met Vibhav")
entities, numbers = test.get_entity(text)
# print(entities[0])
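# --- KGQnA/_graph.py (module name inferred from the "from KGQnA._graph import GraphEnt" import later in this merge) ---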
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from KGQnA._getentitypair import GetEntity
class GraphEnt:
"""docstring for graphEnt."""
def __init__(self):
super(GraphEnt, self).__init__()
self.x = GetEntity()
def createGraph(self, dataEntities):
entity_list = dataEntities.values.tolist()
source, relations, target = [],[],[]
for i in entity_list:
# if i[0] == "" or i[1] == "" or i[3] == "":
# pass
# else:
source.append(i[0])
relations.append(i[1])
# aux_relations = i[2]
target.append(i[3])
# time = i[4]
# place = i[5]
kg_df = pd.DataFrame({'source':source, 'target':target, 'edge':relations})
G=nx.from_pandas_edgelist(kg_df, "source", "target", edge_attr=True, create_using=nx.MultiDiGraph())
plt.figure(figsize=(12,12))
pos = nx.spring_layout(G, k = 2) # k regulates the distance between nodes
nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, edge_cmap=plt.cm.Blues, pos = pos)
# nx.draw_networkx_edge_labels(G,pos,edge_labels=labels,font_size=30)
plt.show()
if __name__ == '__main__':
test = GraphEnt()
print("Can't Test directly")
import re
import json
import spacy
import inflect
import requests
from KGQnA._getentitypair import GetEntity
from KGQnA._complex import *
class QuestionAnswer:
"""docstring for QuestionAnswer."""
def __init__(self):
super(QuestionAnswer, self).__init__()
self.complex = ComplexFunc()
self.nlp = spacy.load('en_core_web_sm')
self.p = inflect.engine()
def findanswer(self, question, c=None, con=None):
if con is None:
p = self.complex.question_pairs(question)
if p == [] or p is None:
return "Not Applicable"
pair = p[0]
# print(pair[5])
f = open("extra/database.json","r", encoding="utf8")
listData = f.readlines()
relQ = []
loaded = json.loads(listData[0])
relationQ = self.nlp(pair[1])
for i in relationQ:
relationQ = i.lemma_
relQ.append(relationQ)
objectQ = pair[3]
subList = []
timeQ = str(pair[4]).lower()
placeQ = str(pair[5]).lower()
# print(timeQ, placeQ)
relationQ = " ".join(relQ)
# print(relationQ)
if pair[0] in ('who'):
for i in loaded:
relationS = [relation for relation in self.nlp(loaded[str(i)]["relation"])]
relationSSS = " ".join([relation.lemma_ for relation in self.nlp(loaded[str(i)]["relation"])])
relationS = [i.lemma_ for i in relationS]
relationS = relationS[0]
# print(relationSSS)
if relationS == relationQ:
objectS = loaded[str(i)]["target"]
objectS = re.sub('-', ' ', objectS)
objectQ = re.sub('-', ' ', objectQ)
# print(objectQ, objectS)
if self.p.singular_noun(objectS):
objectS = self.p.singular_noun(objectS)
if self.p.singular_noun(objectQ):
objectQ = self.p.singular_noun(objectQ)
if objectS == objectQ:
if str(pair[4]) != "":
timeS = [str(loaded[str(i)]["time"]).lower()]
# print(timeQ, timeS)
if timeQ in timeS:
answer_subj = loaded[str(i)]["source"]
subList.append(answer_subj)
else:
answer_subj = loaded[str(i)]["source"]
subList.append(answer_subj)
elif str(relationSSS) == str(relationQ):
objectS = loaded[str(i)]["target"]
objectS = re.sub('-', ' ', objectS)
if objectS == objectQ:
if str(pair[4]) != "":
timeS = [str(loaded[str(i)]["time"]).lower()]
if timeQ in timeS:
answer_subj = loaded[str(i)]["source"]
subList.append(answer_subj)
else:
answer_subj = loaded[str(i)]["source"]
subList.append(answer_subj)
answer_subj = ",".join(subList)
if answer_subj == "":
return "None"
return answer_subj
elif pair[3] in ['what']:
subjectQ = pair[0]
subList = []
for i in loaded:
subjectS = loaded[str(i)]["source"]
# print(subjectQ, subjectS)
if subjectQ == subjectS:
relationS = [relation for relation in self.nlp(loaded[str(i)]["relation"])]
relationS = [i.lemma_ for i in relationS]
if len(relationS) > 1:
relationS = " ".join(relationS)
else:
relationS = relationS[0]
# print(relationQ, relationS)
if relationQ == relationS:
if str(pair[5]) != "":
placeS = [str(place).lower() for place in self.nlp(loaded[str(i)]["place"])]
# print(placeQ, placeS)
if placeQ in placeS:
if str(pair[4]) != "":
timeS = [str(time).lower() for time in self.nlp(loaded[str(i)]["time"])]
if timeQ in timeS:
answer_subj = loaded[str(i)]["target"]
subList.append(answer_subj)
else:
answer_subj = loaded[str(i)]["target"]
subList.append(answer_subj)
else:
if str(pair[4]) != "":
timeS = [str(time).lower() for time in self.nlp(loaded[str(i)]["time"])]
if timeQ in timeS:
answer_subj = loaded[str(i)]["target"]
subList.append(answer_subj)
else:
answer_subj = loaded[str(i)]["target"]
subList.append(answer_subj)
answer_obj = ",".join(subList)
if answer_obj == "":
return "None"
return answer_obj
elif pair[4] in ['when']:
subjectQ = pair[0]
# print(relationQ, subjectQ)
# print(pair[2])
for i in loaded:
# if i.dep_ in ('obj'):
# print(loaded[str(i)], "HERE we go")
subjectS = loaded[str(i)]["source"]
# print(type(subjectQ), type(subjectS), numberOfPairs)
if subjectQ == subjectS:
relationS = [relation for relation in self.nlp(loaded[str(i)]["relation"])]
# print(relationS)
relationS = [i.lemma_ for i in relationS]
relBuffer = relationS
# print(relationS[0], relationS[1])
# print(relBuffer[1])
if len(relBuffer) < 2:
relationS = relBuffer[0]
else:
if str(relBuffer[1]).lower() == 'to':
relationS = " ".join(relationS)
else:
relationS = relationS[0]
extraIN = relBuffer[1].lower()
# print(relationQ, relationS)
if relationQ == relationS:
if str(pair[5]) != "":
placeS = [str(place).lower() for place in self.nlp(loaded[str(i)]["place"])]
# print(placeQ, placeS)
if placeQ in placeS:
if loaded[str(i)]["time"] != '':
answer_obj = loaded[str(i)]["time"]
# elif extraIN == "in" or extraIN == "on":
# answer_obj = loaded[str(i)]["target"]
return answer_obj
return None
else:
if loaded[str(i)]["time"] != '':
answer_obj = loaded[str(i)]["time"]
return answer_obj
return None
elif pair[5] in ['where']:
subjectQ = pair[0]
for i in loaded:
subjectS = loaded[str(i)]["source"]
if subjectQ == subjectS:
relationS = [relation for relation in self.nlp(loaded[str(i)]["relation"])]
relationS = [i.lemma_ for i in relationS]
relationS = relationS[0]
if relationQ == relationS:
if str(pair[4]) != "":
timeS = [str(time).lower() for time in self.nlp(loaded[str(i)]["time"])]
if timeQ in timeS:
answer_obj = loaded[str(i)]["place"]
if answer_obj in (" ",""):
if int(i)<int(len(loaded)-1):
pass
return None
return answer_obj
return None
answer_obj = loaded[str(i)]["place"]
if answer_obj in (" ",""):
if int(i)<int(len(loaded)-1):
pass
return None
return answer_obj
else:
output = self.complex.nlp_(question=question, context=con)
return output
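# --- KGQnA/_resolvedep.py (module name inferred from the "from KGQnA._resolvedep import change_nouns" import above) ---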
import spacy
class change_nouns:
"""docstring for change_nouns."""
def __init__(self):
super(change_nouns, self).__init__()
self.nlp = spacy.load('en_core_web_sm')
def resolved(self, text):
flag = True
official_subject = "Unknown"
sentences = []
prev_subjs = []
temp_text = text
# print([i for i, j in enumerate(temp_text) if j in ("(",")")])
pos_of_brackets = {pos:char for pos, char in enumerate(temp_text) if str(char) in ("(",")")}
# print(pos_of_brackets)
# if pos_of_brackets:
# for key, val in pos_of_brackets:
# if val in ["("]:
# # string_with_brackets = str(temp_text[pos_of_brackets[0]:pos_of_brackets[-1]+2])
# # last_pos = pos_of_brackets[0]-1
# text = temp_text[:pos_of_brackets[0]] + temp_text[pos_of_brackets[-1]+2:]
text = self.nlp(text)
# checked_for_and , depend , pos_of_and_= self.check_for_multi_and_(sent)
# print(checked_for_and)
# sent1, sent2 = self.diff_sent_return(sent, depend, pos_of_and_)
for sent in text.sents:
prev_subj, compound_is, last_word = "", "", ""
dep_word = [word.dep_ for word in sent]
# print(dep_word)
word_dep_count_subj = [dep_word.index(word) for word in dep_word if word in ('nsubj', 'subj', 'nsubjpass')]
# print(word_dep_count_subj)
try:
word_dep_count_subj = word_dep_count_subj[-1] + 1
except IndexError:
word_dep_count_subj = 1
more_subjs = [word for word in dep_word if word in ('nsubj', 'subj', 'nsubjpass')]
for word in sent:
if len(more_subjs) > 1:
if word.dep_ in more_subjs:
if word.dep_ in ['nsubjpass']:
# print("HELLO", word.dep_)
break
elif word.dep_ in ('nsubj','subj'):
if word_dep_count_subj > 0:
# """ IN prime minister it gives compound and then nmod """
if word.dep_ in ('compound') or word.dep_ in ('nmod', 'amod'):
if compound_is == "":
compound_is = str(word)
word_dep_count_subj = word_dep_count_subj - 1
else:
compound_is = compound_is+ " " +str(word)
word_dep_count_subj = word_dep_count_subj - 1
elif word.dep_ in ('nsubj', 'subj', 'nsubjpass'):
pronoun = [i for i in word.subtree]
if compound_is == "":
if str(word) not in ('he','HE', 'He','she','SHE', 'She','it','IT', 'It'):
prev_subj = str(word)
if str(pronoun[0]) not in ('his','His', 'her','Her', 'its', 'Its'):
prev_subjs = [prev_subj]
official_subject = prev_subjs[0]
word_dep_count_subj = word_dep_count_subj - 1
else:
if str('poss') in [str(i.dep_) for i in word.subtree]:
prev_subj = compound_is
word_dep_count_subj = word_dep_count_subj - 1
prev_subjs = [prev_subj]
# official_subject = prev_subjs[0]
else:
prev_subj = compound_is+" "+str(word)
word_dep_count_subj = word_dep_count_subj - 1
prev_subjs = [prev_subj]
official_subject = prev_subjs[0]
# if str(word) in ('they'):
# subject_list.extend([str(a.text) for a in word.subtree if a.dep_ in ('conj')])
if str(word) in ('he','HE', 'He','she','SHE', 'She','it','IT', 'It'):
# print(prev_subjs)
new_word = prev_subjs[-1]
# print(new_word)
sentences.append(str(sent).replace(str(word), str(new_word)))
flag = False
if pronoun:
if len(pronoun) <= 2 and str(pronoun[0]) in ('his','His', 'her','Her', 'its', 'Its'):
# print(official_subject)
new_word = str(official_subject)+"\'s"
# print(new_word)
sentences.append(str(sent).replace((str(pronoun[0])), str(new_word)))
flag = False
elif len(pronoun)>2 and str(pronoun[0]) in ('his','His', 'her','Her', 'its', 'Its'):
new_word = str(official_subject)+"\'s"
sentences.append(str(sent).replace(str(pronoun[0]), str(new_word)))
flag = False
elif word.dep_ in ('nsubj','subj','nsubjpass') and str(word) not in ('he','HE', 'He','she','SHE', 'She','it','IT', 'It'):
last_word = word
else:
pass
else:
if word_dep_count_subj > 0:
# """ IN prime minister it gives compound and then nmod """
if word.dep_ in ('compound') or word.dep_ in ('nmod', 'amod'):
if compound_is == "":
compound_is = str(word)
word_dep_count_subj = word_dep_count_subj - 1
else:
compound_is = compound_is+ " " +str(word)
word_dep_count_subj = word_dep_count_subj - 1
elif word.dep_ in ('nsubj', 'subj', 'nsubjpass'):
pronoun = [i for i in word.subtree]
if compound_is == "":
if str(word) not in ('he','HE', 'He','she','SHE', 'She','it','IT', 'It'):
prev_subj = str(word)
if str(pronoun[0]) not in ('his','His', 'her','Her', 'its', 'Its'):
prev_subjs = [prev_subj]
official_subject = prev_subjs[0]
word_dep_count_subj = word_dep_count_subj - 1
else:
if str('poss') in [str(i.dep_) for i in word.subtree]:
prev_subj = compound_is
word_dep_count_subj = word_dep_count_subj - 1
prev_subjs = [prev_subj]
# official_subject = prev_subjs[0]
else:
prev_subj = compound_is+" "+str(word)
word_dep_count_subj = word_dep_count_subj - 1
prev_subjs = [prev_subj]
official_subject = prev_subjs[0]
# if str(word) in ('they'):
# subject_list.extend([str(a.text) for a in word.subtree if a.dep_ in ('conj')])
if str(word) in ('he','HE', 'He','she','SHE', 'She','it','IT', 'It'):
# print(prev_subjs)
new_word = prev_subjs[-1]
# print(new_word)
sentences.append(str(sent).replace(str(word), str(new_word)))
flag = False
if pronoun:
if len(pronoun) <= 2 and str(pronoun[0]) in ('his','His', 'her','Her', 'its', 'Its'):
# print(official_subject)
new_word = str(official_subject)+"\'s"
# print(new_word)
sentences.append(str(sent).replace((str(pronoun[0])), str(new_word)))
flag = False
elif len(pronoun)>2 and str(pronoun[0]) in ('his','His', 'her','Her', 'its', 'Its'):
new_word = str(official_subject)+"\'s"
sentences.append(str(sent).replace(str(pronoun[0]), str(new_word)))
flag = False
elif word.dep_ in ('nsubj','subj','nsubjpass') and str(word) not in ('he','HE', 'He','she','SHE', 'She','it','IT', 'It'):
last_word = word
else:
pass
if flag:
sentences.append(str(sent))
else:
flag = True
resolved_text = " ".join(sentences)
# print(resolved_text)
return resolved_text
def check_for_multi_and_(self, sentence):
x = []
count = 0
for word in sentence:
# print([i for i in word.subtree])
count += 1
if word.dep_ in ('cc'):
x.append(count-1)
# print([i for i in word.head.rights if i.dep_ in ('obj', 'dobj', 'pobj')])
# print([i for i in word.head.rights if i.dep_ in ('nsubj', 'nsubjpass', 'subj')])
# print([i for i in word.head.rights if i.dep_ in ('conj')])
# print(x)
depen = []
for i in x:
depen.append([word.dep_ for word in sentence[:i]])
senten1, senten2 = "", ""
list2 = ["nsubj", "ROOT", "dobj"]
# , ["subj", "ROOT", "dobj"], ["subj", "ROOT", "pobj"], ["nsubj", "ROOT", "obj"], ["nsubj", "ROOT", "dobj"], ["nsubj", "ROOT", "pobj"], ["nsubjpass", "ROOT", "obj"], ["nsubjpass", "ROOT", "dobj"], ["nsubjpass", "ROOT", "pobj"]]
for list1 in depen:
check = all(item in list1 for item in list2)
#
# print(list1)
if check:
# print(depen, x)
return True, depen, x
return False, [], 0
def diff_sent_return(self, sentence, depen, pos_of_and):
newcount = -1
senten1, senten2 = "", ""
# , ["subj", "ROOT", "dobj"], ["subj", "ROOT", "pobj"], ["nsubj", "ROOT", "obj"], ["nsubj", "ROOT", "dobj"], ["nsubj", "ROOT", "pobj"], ["nsubjpass", "ROOT", "obj"], ["nsubjpass", "ROOT", "dobj"], ["nsubjpass", "ROOT", "pobj"]]
list2 = ["nsubj", "ROOT", "dobj"]
for i in depen:
newcount += 1
list1 = i
check = all(item in list1 for item in list2)
if check:
lista = [str(w) for w in sentence]
p1 = lista[:pos_of_and[newcount]]
p2 = lista[pos_of_and[newcount]+1:]
# print(p1, p2)
senten1 = " ".join(p1)
senten2 = " ".join(p2)
senten1 = self.nlp(senten1)
senten2 = self.nlp(senten2)
return str(senten1), str(senten2)
if __name__ == "__main__":
test = change_nouns()
sentences = test.resolved("The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.")
print(sentences)
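# --- Flask API server (file name not shown in this merge): loads the summarization, QnA and violation-detection models and exposes the /pvd, /qna and /summary endpoints ---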
import numpy as np
import pandas as pd
import tensorflow as tf
import os, PyPDF2, re, pickle
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from KGQnA._exportPairs import exportToJSON
from KGQnA._getentitypair import GetEntity
from KGQnA._graph import GraphEnt
from KGQnA._qna import QuestionAnswer
# import secure_filename
from flask_cors import CORS
from werkzeug.utils import secure_filename
from flask import Flask, request, jsonify, render_template
tokenizer_facts_path_cases = 'weights/TOKENIZER_FACTS_MODEL_CASES.pkl'
tokenizer_facts_path_facts = 'weights/TOKENIZER_FACTS_MODEL_FACTS.pkl'
summarization_model_facts_path = 'weights/FACTS_SUMMARIZATION_MODEL.h5'
tokenizer_judgements_path_cases = 'weights/TOKENIZER_JUDGEMENTS_MODEL_CASES.pkl'
tokenizer_judgements_path_facts = 'weights/TOKENIZER_JUDGEMENTS_MODEL_FACTS.pkl'
summarization_model_judgements_path = 'weights/JUDGEMENTS_SUMMARIZATION_MODEL.h5'
with open(tokenizer_facts_path_cases, 'rb') as handle:
tokenizer_facts_cases = pickle.load(handle)
with open(tokenizer_facts_path_facts, 'rb') as handle:
tokenizer_facts_summarize = pickle.load(handle)
with open(tokenizer_judgements_path_cases, 'rb') as handle:
tokenizer_judgements_cases = pickle.load(handle)
with open(tokenizer_judgements_path_facts, 'rb') as handle:
tokenizer_judgements_summarize = pickle.load(handle)
def encoder(max_x_len, x_voc_size):
encoder_inputs = tf.keras.layers.Input(shape=(max_x_len,))
enc_emb = tf.keras.layers.Embedding(x_voc_size, 300, mask_zero=True)(encoder_inputs)
encoder_lstm = tf.keras.layers.LSTM(300, return_sequences=True, return_state=True)
_, state_h, state_c = encoder_lstm(enc_emb)
encoder_states = [state_h, state_c]
return encoder_inputs, encoder_states
def decoder(y_voc_size, encoder_states):
decoder_inputs = tf.keras.layers.Input(shape=(None,))
dec_emb_layer = tf.keras.layers.Embedding(y_voc_size, 300, mask_zero=True)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = tf.keras.layers.LSTM(300, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(dec_emb, initial_state=encoder_states)
decoder_dense = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(y_voc_size, activation='softmax'))
decoder_outputs = decoder_dense(decoder_outputs)
return decoder_inputs, decoder_outputs
encoder_inputs, encoder_states = encoder(5500, len(tokenizer_facts_cases.word_index) + 1)
decoder_inputs, decoder_outputs = decoder(len(tokenizer_facts_summarize.word_index) + 1, encoder_states)
inference_model_facts = tf.keras.models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
inference_model_facts.load_weights(summarization_model_facts_path)
encoder_inputs, encoder_states = encoder(5500, len(tokenizer_judgements_cases.word_index) + 1)
decoder_inputs, decoder_outputs = decoder(len(tokenizer_judgements_summarize.word_index) + 1, encoder_states)
inference_model_judgements = tf.keras.models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
inference_model_judgements.load_weights(summarization_model_judgements_path)
def decontracted(phrase):
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
def clean_case(case):
case=re.sub(r'\s+',' ', case)
case=re.sub(r'\n',' ', case)
case=re.sub(r"([?!¿])", r" \1 ", case)
case=decontracted(case)
case = re.sub('[^A-Za-z0-9.,]+', ' ', case)
case = case.lower()
return case
def inference_summarization(
input_text,
tokenizer_cases,
tokenizer_summarize,
inference_model,
max_x_len = 5500,
max_y_len = 600
):
input_text = clean_case(input_text)
input_text = tokenizer_cases.texts_to_sequences([input_text])
input_text = tf.keras.preprocessing.sequence.pad_sequences(input_text, maxlen=max_x_len, padding='post')
summary = np.zeros((1, max_y_len))
summary[0,0] = tokenizer_summarize.word_index['sostok']
stop_condition = False
i = 1
while not stop_condition:
preds = inference_model.predict([input_text, summary], verbose=0)
pred = np.argmax(preds[0,i-1])
summary[0,i] = pred
i += 1
if pred == tokenizer_summarize.word_index['eostok'] or i >= max_y_len:
stop_condition = True
summary = summary[0]
new_summary = []
for i in summary:
if i != 0:
new_summary.append(i)
summary = ' '.join([tokenizer_summarize.index_word[i] for i in new_summary])
summary = summary.replace('eostok', '').replace('sostok', '').strip()
return summary
class QnA(object):
def __init__(self):
super(QnA, self).__init__()
self.qna = QuestionAnswer()
self.getEntity = GetEntity()
self.export = exportToJSON()
self.graph = GraphEnt()
self.pdf_dir = 'data/references/'
def read_pdf_data(self, pdf_file):
pdf_path = self.pdf_dir + pdf_file + '.pdf' if pdf_file[-1] != '.' else self.pdf_dir + pdf_file + 'pdf'
pdf_file = open(pdf_path, 'rb')
pdf_reader = PyPDF2.PdfFileReader(pdf_file)
num_pages = pdf_reader.getNumPages()
whole_text = ''
for page in range(num_pages):
page_obj = pdf_reader.getPage(page)
text = page_obj.extractText()
whole_text += f" {text}"
pdf_file.close()
whole_text = whole_text.replace('\n', ' ')
whole_text = re.sub(' +', ' ', whole_text)
whole_text = whole_text.strip().lower()
return whole_text
def extract_answers(self, question):
all_files = os.listdir(self.pdf_dir)
all_files = [file[:-3] for file in all_files if file[-3:] == 'pdf']
all_outputs = []
for idx, file in enumerate(all_files):
context = self.read_pdf_data(file)
refined_context = self.getEntity.preprocess_text(context)
try:
outputs = self.qna.findanswer(question, con=context)
except Exception:
_, numberOfPairs = self.getEntity.get_entity(refined_context)
outputs = self.qna.findanswer(question, numberOfPairs)
if not isinstance(outputs, dict):
# the KG fallback can return a plain string; wrap it so the ranking below can index 'answer'/'score'
outputs = {'answer': outputs, 'score': 0.0}
all_outputs.append(outputs)
print("Processing file {} of {}".format(idx + 1, len(all_files)))
answers = [output['answer'] for output in all_outputs]
scores = [output['score'] for output in all_outputs]
# get the best answer
best_answer = answers[scores.index(max(scores))]
reference = all_files[scores.index(max(scores))]
return best_answer, reference
lemmatizer = WordNetLemmatizer()
re_tokenizer = RegexpTokenizer(r'\w+')
stopwords_list = stopwords.words('english')
tokenizer_pvd_path = 'weights/TOKENIZER_PVD.pkl'
model_pvd_weights = 'weights/MODEL_PVD.h5'
data_path = 'data/judgments/public-stories.xlsx'
class_dict_violation_flag = {
'yes': 1,
'no': 0
}
class_dict_violation_type = {
'article 11. of the constitution' : 4,
'article 12. (1) of the constitution' : 3,
'article 13. (1) of the constitution' : 2,
'article 17. of the constitution' : 1,
'no-violation': 0
}
class_dict_violation_flag_rev = {v: k for k, v in class_dict_violation_flag.items()}
class_dict_violation_type_rev = {v: k for k, v in class_dict_violation_type.items()}
with open(tokenizer_pvd_path, 'rb') as fp:
tokenizer_pvd = pickle.load(fp)
model_pvd = tf.keras.models.load_model(model_pvd_weights)
def extract_violation_data(violationType):
df_ = pd.read_excel(data_path)
df_.ViolationType = df_.ViolationType.str.lower().str.strip()
df_ = df_[df_.ViolationType == violationType]
df_ = df_.iloc[0]
Lawyers = df_.Lawyers.replace('\n', ' ')
Court = df_.Court.replace('\n', ' ')
DocumentShouldBring = df_.DocumentShouldBring.replace('\n', ' ')
Suggetion = df_.Suggetion.replace('\n', ' ')
return {
"Lawyers" : f"{Lawyers}",
"Court" : f"{Court}",
"DocumentShouldBring" : f"{DocumentShouldBring}",
"Suggetion" : f"{Suggetion}"
}
def read_pdf_data(
pdf_file
):
pdf_file = open(pdf_file, 'rb')
pdf_reader = PyPDF2.PdfFileReader(pdf_file)
num_pages = pdf_reader.getNumPages()
whole_text = ''
for page in range(num_pages):
page_obj = pdf_reader.getPage(page)
text = page_obj.extractText()
whole_text += f" {text}"
pdf_file.close()
whole_text = whole_text.replace('\n', ' ')
whole_text = re.sub(' +', ' ', whole_text)
whole_text = whole_text.strip().lower()
return whole_text
def lemmatization(lemmatizer,sentence):
lem = [lemmatizer.lemmatize(k) for k in sentence]
return [k for k in lem if k]
def remove_stop_words(stopwords_list,sentence):
return [k for k in sentence if k not in stopwords_list]
def preprocess_one(description):
description = description.lower()
remove_punc = re_tokenizer.tokenize(description) # Remove puntuations
remove_num = [re.sub('[0-9]', '', i) for i in remove_punc] # Remove Numbers
remove_num = [i for i in remove_num if len(i)>0] # Remove empty strings
lemmatized = lemmatization(lemmatizer,remove_num) # Word Lemmatization
remove_stop = remove_stop_words(stopwords_list,lemmatized) # remove stop words
updated_description = ' '.join(remove_stop)
return updated_description
def inference_pvd(description):
description = preprocess_one(description)
description = tokenizer_pvd.texts_to_sequences([description])
description = tf.keras.preprocessing.sequence.pad_sequences(
description,
maxlen=500,
padding='pre'
)
prediction = model_pvd.predict(description)
p1, p2 = prediction
p1 = np.argmax(p1.squeeze())
p2 = np.argmax(p2.squeeze())
violationFlag, violationType = class_dict_violation_flag_rev[p1], class_dict_violation_type_rev[p2]
if (violationFlag == 'no') or (violationType == 'no-violation'):
violationType, violationData = 'no-violation', None
else:
violationData = extract_violation_data(violationType)
return {
"violationType" : f"{violationType}",
"violationData" : violationData
}
app = Flask(__name__)
CORS(app)
qna_ = QnA()
app.config['UPLOAD_FOLDER'] = 'uploads'
@app.route('/pvd', methods=['POST'])
def pvd():
# data = request.files
# file = data['file']
# file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
# file.save(file_path)
data = request.get_json()
story = data['story']
return jsonify(inference_pvd(story))
@app.route('/qna', methods=['POST'])
def qna():
data = request.get_json()
question = data['question']
answer, reference = qna_.extract_answers(question)
return jsonify({
"answer" : f"{answer}",
"reference" : f"{reference}"
})
@app.route('/summary', methods=['POST'])
def summary():
data = request.files
file = data['file']
file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(file_path)
text = read_pdf_data(file_path)
summary_facts = inference_summarization(
text,
tokenizer_facts_cases,
tokenizer_facts_summarize,
inference_model_facts,
)
summary_judgements = inference_summarization(
text,
tokenizer_judgements_cases,
tokenizer_judgements_summarize,
inference_model_judgements,
)
return jsonify({
"summary_facts" : f"{summary_facts}",
"summary_judgements" : f"{summary_judgements}"
})
if __name__ == '__main__':
app.run(
debug=True,
host='0.0.0.0',
port=5003
)
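/* --- Front-end stylesheet fragment (responsive overrides; file name not shown in this merge) --- */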
@media screen and (max-width: 400px) {
#features {
padding: 20px;
width: 111%;
}
#about,
#services,
#testimonials,
#team,
#contact,
#footer {
width: 111%;
}
#portfolio {
width: 110%;
}
}
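// --- React front end (file paths not shown in this merge): top-level App component with routing ---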
import React, { useState, useEffect } from "react";
import JsonData from "./data/data.json";
import SmoothScroll from "smooth-scroll";
import { BrowserRouter as Router, Route, Routes } from "react-router-dom";
import Login from "./components/login";
import Home from "./components/home";
import {Testimonials} from "./components/testimonials";
import {Header} from "./components/header";
import Register from "./components/register";
import "./App.css";
export const scroll = new SmoothScroll('a[href*="#"]', {
speed: 1000,
speedAsDuration: true,
});
const App = () => {
const [landingPageData, setLandingPageData] = useState({});
useEffect(() => {
setLandingPageData(JsonData);
}, []);
return (
<Router>
<Routes>
<Route exact path="/" element={<Login />} />
<Route exact path="/home" element={<Home />} />
<Route exact path="/register" element={<Register />} />
<Route exact path="/register" element={<Register />} />
<Route exact path="/summarizing" element={<Header data={landingPageData.Testimonials} />} />
<Route exact path="/support" element={<Testimonials data={landingPageData.Testimonials} />} />
</Routes>
</Router>
);
};
export default App;
import React from "react";
import image from '../images/image.png'
import { Link } from "react-router-dom";
export const About = (props) => {
return (
<div id="about">
<div className="">
<div className="row">
<div className="col-xs-12 col-md-12">
{" "}
<img src={image} style={{ width: '100vw', objectFit: 'contain' }} alt="" />{" "}
</div>
<div className="col-xs-12 col-md-7" style={{ paddingInline: '5vw' }}>
<div>
<h2>Intelligent Support Services</h2>
<p style={{color:'#BBBBBB'}}>Which Intelligent Support Services Do We Provide?</p>
<p style={{fontWeight:'initial'}}>Bringing AI to the courtroom in order to make Sri Lanka a more just nation by increasing the availability
of justice for the general public, by allowing judges and lawyers to handle more cases with comparatively
reduced effort and time.</p>
</div>
</div>
<div className="col-xs-12 col-md-5">
<div className="row" style={{ display: 'flex', justifyContent: 'center' }}>
<Link to="/support" className="page-scroll">
<div style={{ marginTop: '10px', width: '180px', textAlign: 'center' }} className="col-xs-3 col-md-4">
<p style={{ textAlign: 'center' }}>Q and A <br />Support</p>
<i style={{ textAlign: 'center' }} className="fa fa-group"></i>
</div>
</Link>
<Link to="/Summarizing" className="page-scroll">
<div style={{ marginTop: '10px', width: '180px', textAlign: 'center' }} className="col-xs-3 col-md-4">
<p style={{ textAlign: 'center' }}>Content Summarizing Support</p>
<i className="fa fa-book"></i>
</div>
</Link>
</div>
</div>
</div>
</div>
</div >
);
};
import React from "react";
export const Features = (props) => {
return (
<div id="features" className="text-center">
<div className="container">
<div className="col-md-12">
<h2 className="small-text">By analysing:</h2>
</div>
<div className="row" style={{display:'flex',justifyContent:'center'}}>
{props.data
? props.data.map((d, i) => (
<div key={`${d.title}-${i}`} style={{marginInline:'30px', marginTop:'10px'}} className="col-xs-3 col-md-1">
{" "}
<i className={d.icon}></i>
<p>{d.title}</p>
{/* <p>{d.text}</p> */}
</div>
))
: "Loading..."}
</div>
</div>
</div>
);
};
import React from "react";
import axios from 'axios';
import BarLoader from "react-spinners/ClipLoader";
import { Navigation } from "../components/navigation";
import image from '../images/image.png'
const options = ["facts of the case", "judicial reasoning"];
const override= {
display: "block",
margin: "0 auto",
borderColor: "red",
};
export const Header = (props) => {
const [selectedFile, setSelectedFile] = React.useState(null);
const [isLoading, setIsLoading] = React.useState(true);
const [selected, setSelected] = React.useState(options[0]);
const [response, setResponse] = React.useState({
"violationData": {
"Court": "the Supreme Court",
"DocumentShouldBring": "medical evidence Documents",
"Lawyers": "Shantha Jayawardena with Niranjan Arulpragasam , Upul Kumarapperuma , Ms. Nayomi Wickramasekera",
"Suggetion": "Considering all these things, we hold that the Petitioners have not presented their case to the satisfaction of this Court. We therefore can’t rely on the complaint of both Petitioners. For the above reasons, we dismiss the Petition of the Petitioner."
},
"violationType": "article 11. of the constitution"
});
const handleSubmit = async(event) => {
event.preventDefault()
setIsLoading(false)
const formData = new FormData();
formData.append("file", selectedFile);
try {
const resp = await axios({
method: "post",
url: "http://ec2-13-229-183-94.ap-southeast-1.compute.amazonaws.com:5006/summary",
data: formData,
headers: { "Content-Type": "multipart/form-data" },
});
console.log("🚀 ~ file: header.jsx:16 ~ handleSubmit ~ response:", resp)
setResponse(resp)
setIsLoading(true)
} catch(error) {
console.log(error)
}
}
const handleFileSelect = (event) => {
setSelectedFile(event.target.files[0])
}
return (
<div><Navigation />
<header id="header" style={{marginTop:'20vh'}}>
<img src={image} style={{ width: '100vw', objectFit: 'contain',height:'30vh' }} alt="" />{" "}
<div className="intro">
<div className="overlay">
<div className="container">
<div className="row">
<div className="col-md-12 ">
<h1 className="large-text">
Case Summarizing Support For A Better Decision
<span></span>
</h1>
<p style={{ textTransform: 'capitalize' }} className="small-text">To Get Started, Upload A Case File</p>
<div className="col-md-2">
<input type="file" onChange={handleFileSelect}/>
</div><div className="col-md-2">
<select value={selected}
onChange={e => setSelected(e.target.value)}>
{options.map((value) => (
<option value={value} key={value}>
{value}
</option>
))}
</select>
</div>
<div className="col-md-12">
<button
type="submit"
form="myForm"
className="btn btn-custom btn-lg page-scroll"
alt="submit Checkout"
style={{marginBlock:'20px'}}
onClick={handleSubmit}
>
submit
</button>
<BarLoader loading={!isLoading} height={1} width={1} color="#36d7b7" />
</div>
{/* <a
href="#features"
style={{ textTransform: 'capitalize' }}
className="btn btn-custom btn-lg page-scroll"
>
Build a graph and predict the decision
</a>{" "} */}
</div>
</div>
{response&&<div style={{ backgroundColor: "#F6ECE8" }}>
<div className="testimonial">
<div className="testimonial-content">
<div>
<div className="col-md-3">
<button
type="submit"
form="myForm"
className="btn btn-custom btn-lg page-scroll"
alt="submit Checkout"
onClick={handleSubmit}
>
save
</button></div> <div className="col-md-3">
<button
type="submit"
form="myForm"
className="btn btn-custom btn-lg page-scroll"
alt="submit Checkout"
onClick={handleSubmit}
>
print
</button></div> <div className="col-md-6">
<button
type="submit"
form="myForm"
className="btn btn-custom btn-lg page-scroll"
alt="submit Checkout"
onClick={handleSubmit}
>
share
</button></div></div>
<div className="testimonial-meta" style={{marginTop:'20px',color:'black'}}>Court: {response?.violationData?.Court}</div>
<p style={{marginTop:'10px',color:'black'}}> DocumentShouldBring: {response?.violationData?.DocumentShouldBring}</p>
<p style={{marginTop:'10px',color:'black'}}> Lawyers: {response?.violationData?.Lawyers}</p>
<p style={{marginTop:'10px',color:'black'}}> Suggestion: {response?.violationData?.Suggetion}</p>
<p style={{marginTop:'10px',color:'black'}}> violationType: {response?.violationType}</p>
</div>
</div>
</div>}
</div>
</div>
</div>
</header>
</div>
);
};
import React, { useState, useEffect } from "react";
import { Navigation } from "../components/navigation";
import { Header } from "../components/header";
import { Features } from "../components/features";
import { About } from "../components/about";
import { Testimonials } from "../components/testimonials";
import JsonData from "../data/data.json";
import SmoothScroll from "smooth-scroll";
import "../App.css";
export const scroll = new SmoothScroll('a[href*="#"]', {
speed: 1000,
speedAsDuration: true,
});
const App = () => {
const [landingPageData, setLandingPageData] = useState({});
useEffect(() => {
setLandingPageData(JsonData);
}, []);
return (
<div>
<Navigation />
<About data={landingPageData.About} />
{/* <Header data={landingPageData.Header} /> */}
<Features data={landingPageData.Features} />
{/* <Testimonials data={landingPageData.Testimonials} /> */}
</div>
);
};
export default App;
import React from "react";
export const Image = ({ title, largeImage, smallImage }) => {
return (
<div className="portfolio-item">
<div className="hover-bg">
{" "}
<a href={largeImage} title={title} data-lightbox-gallery="gallery1">
<div className="hover-text">
<h4>{title}</h4>
</div>
<img src={smallImage} className="img-responsive" alt={title} />{" "}
</a>{" "}
</div>
</div>
);
};
.login {
height: 100vh;
width: 100vw;
display: flex;
align-items: center;
justify-content: center;
}
.login__container {
display: flex;
flex-direction: column;
text-align: center;
/* background-color: #dcdcdc; */
padding: 30px;
width: 30%;
margin-left: 40%;
margin-top: -1%;
}
.login__textBox {
padding: 10px;
font-size: 18px;
margin-bottom: 10px;
}
.login__btn {
padding: 10px;
font-size: 18px;
margin-bottom: 10px;
border: none;
color: white;
background-color: #BBBBBB;
}
.login__google {
background-color: #4285f4;
}
.login div {
margin-top: 7px;
}
import React, { useEffect, useState } from "react";
import { Link, useNavigate } from "react-router-dom";
import { auth, logInWithEmailAndPassword, signInWithGoogle } from "../firebase";
import { useAuthState } from "react-firebase-hooks/auth";
import "./login.css";
import image from '../images/image.png'
function Login() {
const [email, setEmail] = useState("");
const [password, setPassword] = useState("");
const [user, loading, error] = useAuthState(auth);
const navigate = useNavigate();
useEffect(() => {
if (loading) {
// maybe trigger a loading screen
return;
}
// if (user) navigate("/home");
}, [user, loading]);
const userLogin=async(email, password)=>{
console.log('email, password',email, password)
// await logInWithEmailAndPassword(email, password)
if(await logInWithEmailAndPassword(email, password)){
navigate("/home")
}
}
return (
<div className="">
<img src={image} style={{ width: '100vw', objectFit: 'contain' }} alt="" />{" "}
<div className="login__container" >
<input
type="text"
className="login__textBox"
value={email}
onChange={(e) => setEmail(e.target.value)}
placeholder="E-mail Address"
/>
<input
type="password"
className="login__textBox"
value={password}
onChange={(e) => setPassword(e.target.value)}
placeholder="Password"
/>
<button
className="login__btn"
onClick={() => userLogin(email, password)}
>
Login
</button>
<button className="login__btn login__google" onClick={signInWithGoogle}>
Login with Google
</button>
<div>
<Link to="/reset">Forgot Password</Link>
</div>
<div>
Don't have an account? <Link to="/register">Register</Link> now.
</div>
</div>
</div>
);
}
export default Login;
import React from "react";
import logo from "../images/logo.png"
import { Link } from "react-router-dom";
import {
auth,
registerWithEmailAndPassword,
logout,
} from "../firebase";
export const Navigation = (props) => {
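// Fixed top navigation bar with the site branding and page-scroll links; the "Log in" entry routes back to the root login screen.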
return (
<nav id="menu" className="navbar navbar-default navbar-fixed-top">
<div className="">
<div
className="collapse navbar-collapse"
id="bs-example-navbar-collapse-1"
>
<ul className="nav navbar-nav navbar-left">
<li>
<img style={{ width: '170px', height: '150px' }} src={logo} alt="" />{" "}
</li>
<li>
<p className="navbar-brand page-scroll" href="#page-top">
Ceylon LawMate<span className="navbar-brand-text" style={{}}><br /><br/>Bringing Data into the Sri lankan Courtroom</span>
</p>
</li>
</ul>
<ul className="nav navbar-nav navbar-right" style={{marginTop:'40px'}}>
<li>
<a href="/home" className="page-scroll">
Home
</a>
</li>
<li>
<a href="#about" className="page-scroll">
Services
</a>
</li>
<li>
<a href="#services" className="page-scroll">
About
</a>
</li>
<li>
<Link to="/" className="page-scroll">
Log in
</Link>
</li>
<li>
<a href="#contact" className="page-scroll">
Contact
</a>
</li>
</ul>
</div>
</div>
</nav>
);
};
.register {
height: 100vh;
width: 100vw;
display: flex;
align-items: center;
justify-content: center;
}
.register__container {
display: flex;
flex-direction: column;
text-align: center;
/* background-color: #dcdcdc; */
padding: 30px;
width: 30%;
margin-left: 40%;
margin-top: -1%;
}
.register__textBox {
padding: 10px;
font-size: 18px;
margin-bottom: 10px;
}
.register__btn {
padding: 10px;
font-size: 18px;
margin-bottom: 10px;
border: none;
color: white;
background-color: black;
}
.register__google {
background-color: #4285f4;
}
.register div {
margin-top: 7px;
}
import React, { useEffect, useState } from "react";
import { useAuthState } from "react-firebase-hooks/auth";
import { Link, useNavigate } from "react-router-dom";
import {
auth,
registerWithEmailAndPassword,
signInWithGoogle,
} from "../firebase";
import "../components/register.css";
import image from '../images/image.png'
function Register() {
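// Registration screen: collects name, e-mail and password, creates the account through Firebase
// and redirects to /home on success.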
const [email, setEmail] = useState("");
const [password, setPassword] = useState("");
const [name, setName] = useState("");
const [user, loading, error] = useAuthState(auth);
// const history = useHistory();
const navigate = useNavigate();
const register = async () => {
// Guard against an empty name before creating the account.
if (!name) {
alert("Please enter name");
return;
}
if (await registerWithEmailAndPassword(name, email, password)) {
navigate("/home");
}
};
useEffect(() => {
if (loading) return;
// if (user) navigate('/home');;
}, [user, loading]);
return (
<div className="">
<img src={image} style={{ width: '100vw', objectFit: 'contain' }} alt="" />{" "}
<div className="register__container">
<input
type="text"
className="register__textBox"
value={name}
onChange={(e) => setName(e.target.value)}
placeholder="Full Name"
/>
<input
type="text"
className="register__textBox"
value={email}
onChange={(e) => setEmail(e.target.value)}
placeholder="E-mail Address"
/>
<input
type="password"
className="register__textBox"
value={password}
onChange={(e) => setPassword(e.target.value)}
placeholder="Password"
/>
<button className="register__btn" onClick={register}>
Register
</button>
<button
className="register__btn register__google"
onClick={signInWithGoogle}
>
Register with Google
</button>
<div>
Already have an account? <Link to="/">Login</Link> now.
</div>
</div>
</div>
);
}
export default Register;
import React, { useState } from "react";
import myGif from "../images/gif.gif";
import axios from "axios";
import BarLoader from "react-spinners/ClipLoader";
import { Navigation } from "../components/navigation";
export const Testimonials = (props) => {
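// QnA panel: posts the typed question to the remote question-answering service and renders
// the returned answer and reference.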
const [commentText, setCommentText] = useState("");
// isLoading === true means no request is in flight; the spinner renders while it is false.
const [isLoading, setIsLoading] = useState(true);
const [response, setResponse] = useState({
"answer": "-",
"reference": "-"
});
const handleOnSubmit = (event) => {
event.preventDefault();
setIsLoading(false)
console.log(commentText);
axios
.post(
"http://ec2-13-229-183-94.ap-southeast-1.compute.amazonaws.com:5006/qna",
{
question:
commentText,
}
)
.then((response) => {
console.log(
"🚀 ~ file: testimonials.jsx:15 ~ .then ~ response:",
response
);
setIsLoading(true);
setResponse(response.data);
})
// Reset the loading state if the request fails so the spinner does not spin forever.
.catch(() => setIsLoading(true));
};
return (<div><Navigation />
<div id="testimonials" style={{marginTop:'20vh'}}>
<div className="container-fluid">
<div className="section-title text-center">
{/* <h2>What our clients say</h2> */}
</div>
<div
className="col-md-12"
style={{
border: "solid #F6ECE8 1px",
height: "80vh",
marginLeft: "30px",
}}
>
<textarea
name="commentTextArea"
type="text"
style={{width:'90vw',height:'60%'}}
id="CommentsOrAdditionalInformation"
value={commentText}
onChange={(e) => setCommentText(e.target.value)}
/>
<button
type="submit"
form="myForm"
className="btn btn-custom btn-lg page-scroll"
alt="submit Checkout"
style={{marginBlock:'20px'}}
onClick={handleOnSubmit}
>
submit
</button>
<BarLoader loading={!isLoading} height={1} width={1} color="#36d7b7" />
{response&&<div style={{ backgroundColor: "#F6ECE8" }}>
<div className="testimonial">
<div className="testimonial-content">
<div className="testimonial-meta">Answer: {response?.answer}</div>
<p style={{marginTop:'10px'}}> Reference: {response?.reference}</p>
</div>
</div>
</div>}
</div>
{/* <div
className="col-md-4"
style={{
border: "solid #F6ECE8 1px",
height: "80vh",
marginLeft: "30px",
}}
>
<img
src={myGif}
alt="my-gif"
style={{ height: "70%", width: "100%", objectFit: "cover" }}
/>
<div>
<div
className="testimonial"
style={{ border: "solid #F6ECE8 1px" }}
>
<div className="testimonial-content">
<div className="testimonial-meta">i- Prediction</div>
<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Praesent ac risus nisi. Duis al blandit eros. Pellentesque
pretiumLorem ipsum dolor sit amet, consectetur adipiscing
elit. Praesent ac risus nisi. Duis al blandit eros.
Pellentesque pretium
</p>
</div>
</div> */}
{/* </div> */}
{/* </div> */}
</div>
</div>
</div>
);
};
{
"Header": {
"title": "Case summarizing support for a better decision To get started, upload the case file",
"paragraph": "To get started, upload a case file"
},
"About": {
"paragraph": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
"Why": [
"Lorem ipsum dolor",
"Tempor incididunt",
"Lorem ipsum dolor",
"Incididunt ut labore"
],
"Why2": [
"Aliquip ex ea commodo",
"Lorem ipsum dolor",
"Exercitation ullamco",
"Lorem ipsum dolor"
]
},
"Gallery": [
{
"title": "Project Title",
"largeImage": "img/portfolio/01-large.jpg",
"smallImage": "img/portfolio/01-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/02-large.jpg",
"smallImage": "img/portfolio/02-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/03-large.jpg",
"smallImage": "img/portfolio/03-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/04-large.jpg",
"smallImage": "img/portfolio/04-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/05-large.jpg",
"smallImage": "img/portfolio/05-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/06-large.jpg",
"smallImage": "img/portfolio/06-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/07-large.jpg",
"smallImage": "img/portfolio/07-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/08-large.jpg",
"smallImage": "img/portfolio/08-small.jpg"
},
{
"title": "Project Title",
"largeImage": "img/portfolio/09-large.jpg",
"smallImage": "img/portfolio/09-small.jpg"
}
],
"Services": [
{
"icon": "fa fa-wordpress",
"name": "Lorem ipsum dolor",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis sed dapibus leo nec ornare diam sedasd commodo nibh ante facilisis bibendum dolor feugiat at."
},
{
"icon": "fa fa-cart-arrow-down",
"name": "Consectetur adipiscing",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis sed dapibus leo nec ornare diam sedasd commodo nibh ante facilisis bibendum dolor feugiat at."
},
{
"icon": "fa fa-cloud-download",
"name": "Lorem ipsum dolor",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis sed dapibus leo nec ornare diam sedasd commodo nibh ante facilisis bibendum dolor feugiat at."
},
{
"icon": "fa fa-language",
"name": "Consectetur adipiscing",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis sed dapibus leo nec ornare diam sedasd commodo nibh ante facilisis bibendum dolor feugiat at."
},
{
"icon": "fa fa-plane",
"name": "Lorem ipsum dolor",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis sed dapibus leo nec ornare diam sedasd commodo nibh ante facilisis bibendum dolor feugiat at."
},
{
"icon": "fa fa-pie-chart",
"name": "Consectetur adipiscing",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis sed dapibus leo nec ornare diam sedasd commodo nibh ante facilisis bibendum dolor feugiat at."
}
],
"Testimonials": [
{
"img": "#FFFFFF",
"text": "This prediction Is supported by the following sources",
"name": "The sources that are used to predict"
},
{
"img": "#C0BEBD",
"text": "Title and / or reference number of the case file",
"name": "Current case file"
},
{
"img": "#FFFFFF",
"text": "Title and / or reference number of the source",
"name": "01 source to the prediction"
},
{
"img": "#F6ECE8",
"text": "Title and / or reference number of the source",
"name": "02 source to the prediction"
},
{
"img": "#FFFFFF",
"text": "Title and / or reference number of the source",
"name": "03 source to the prediction"
},
{
"img": "#FFFFFF",
"text": "Title and / or reference number of the source",
"name": "04 source to the prediction"
}
],
"Team": [
{
"img": "img/team/01.jpg",
"name": "John Doe",
"job": "Director"
},
{
"img": "img/team/02.jpg",
"name": "Mike Doe",
"job": "Senior Designer"
},
{
"img": "img/team/03.jpg",
"name": "Jane Doe",
"job": "Senior Designer"
},
{
"img": "img/team/04.jpg",
"name": "Karen Doe",
"job": "Project Manager"
}
],
"Contact": {
"address": "4321 California St, San Francisco, CA 12345 ",
"phone": "+1 123 456 1234",
"email": "info@company.com",
"facebook": "fb.com",
"twitter": "twitter.com",
"youtube": "youtube.com"
},
"Features": [
{
"icon": "fa fa-book",
"title": "Constitution of Sri Lanka",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
},
{
"icon": "fa fa-lastfm",
"title": "Penal Code of Sri Lanka",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
},
{
"icon": "fa fa-calendar-o",
"title": "Acts",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
},
{
"icon": "fa fa-drupal",
"title": "Local Previous Cases",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
}, {
"icon": "fa fa-bullhorn",
"title": "Internationa Cases",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
},
{
"icon": "fa fa-group",
"title": "International Treaties",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
},
{
"icon": "fa fa-institution",
"title": "Reference Books",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
},{
"icon": "fa fa-newspaper-o",
"title": "Research Articles",
"text": "Lorem ipsum dolor sit amet placerat facilisis felis mi in tempus eleifend pellentesque natoque etiam."
}
]
}
import { initializeApp } from "firebase/app";
import {GoogleAuthProvider,getAuth,signInWithPopup, signInWithEmailAndPassword,createUserWithEmailAndPassword, sendPasswordResetEmail ,signOut} from "firebase/auth";
import {getFirestore,query,getDocs,collection,where, addDoc} from "firebase/firestore";
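// Firebase client configuration for the ceylon-law project; these keys are exposed to the
// browser by design, and access is enforced by Firebase security rules.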
const firebaseConfig = {
apiKey: "AIzaSyAoPfOqaCqV9TbOdcbWCGYdX9cU8HDFFgU",
authDomain: "ceylon-law.firebaseapp.com",
projectId: "ceylon-law",
storageBucket: "ceylon-law.appspot.com",
messagingSenderId: "1012913036810",
appId: "1:1012913036810:web:b38d082586ad095343c9f3"
};
const app = initializeApp(firebaseConfig);
const auth = getAuth(app);
const db = getFirestore(app);
const googleProvider = new GoogleAuthProvider();
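// Google sign-in via popup; first-time users get a matching document in the Firestore "users" collection.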
const signInWithGoogle = async () => {
try {
const res = await signInWithPopup(auth, googleProvider);
const user = res.user;
const q = query(collection(db, "users"), where("uid", "==", user.uid));
const docs = await getDocs(q);
if (docs.docs.length === 0) {
await addDoc(collection(db, "users"), {
uid: user.uid,
name: user.displayName,
authProvider: "google",
email: user.email,
});
}
} catch (err) {
console.error(err);
alert(err.message);
}
};
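// Email/password sign-in. Returns true on success and false on failure so callers
// (e.g. the Login page) can decide whether to navigate.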
const logInWithEmailAndPassword = async (email, password) => {
try {
await signInWithEmailAndPassword(auth, email, password);
return true;
} catch (err) {
console.error(err);
alert(err.message);
return false;
}
};
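// Create a Firebase Auth account and persist a matching profile document in Firestore;
// returns true/false so the caller can act on the result.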
const registerWithEmailAndPassword = async (name, email, password) => {
try {
const res = await createUserWithEmailAndPassword(auth, email, password);
const user = res.user;
await addDoc(collection(db, "users"), {
uid: user.uid,
name,
authProvider: "local",
email,
});
return true
} catch (err) {
console.error(err);
alert(err.message);
return false
}
};
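// Send a Firebase password-reset email to the given address.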
const sendPasswordReset = async (email) => {
try {
await sendPasswordResetEmail(auth, email);
alert("Password reset link sent!");
} catch (err) {
console.error(err);
alert(err.message);
}
};
const logout = () => {
signOut(auth);
};
export {
auth,
db,
signInWithGoogle,
logInWithEmailAndPassword,
registerWithEmailAndPassword,
sendPasswordReset,
logout,
};
body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
code {
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
monospace;
}
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
import App from './App';
import * as serviceWorker from './serviceWorker';
ReactDOM.render(
<React.StrictMode>
<App />
</React.StrictMode>,
document.getElementById('root')
);
// If you want your app to work offline and load faster, you can change
// unregister() to register() below. Note this comes with some pitfalls.
// Learn more about service workers: https://bit.ly/CRA-PWA
serviceWorker.unregister();
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 841.9 595.3">
<g fill="#61DAFB">
<path d="M666.3 296.5c0-32.5-40.7-63.3-103.1-82.4 14.4-63.6 8-114.2-20.2-130.4-6.5-3.8-14.1-5.6-22.4-5.6v22.3c4.6 0 8.3.9 11.4 2.6 13.6 7.8 19.5 37.5 14.9 75.7-1.1 9.4-2.9 19.3-5.1 29.4-19.6-4.8-41-8.5-63.5-10.9-13.5-18.5-27.5-35.3-41.6-50 32.6-30.3 63.2-46.9 84-46.9V78c-27.5 0-63.5 19.6-99.9 53.6-36.4-33.8-72.4-53.2-99.9-53.2v22.3c20.7 0 51.4 16.5 84 46.6-14 14.7-28 31.4-41.3 49.9-22.6 2.4-44 6.1-63.6 11-2.3-10-4-19.7-5.2-29-4.7-38.2 1.1-67.9 14.6-75.8 3-1.8 6.9-2.6 11.5-2.6V78.5c-8.4 0-16 1.8-22.6 5.6-28.1 16.2-34.4 66.7-19.9 130.1-62.2 19.2-102.7 49.9-102.7 82.3 0 32.5 40.7 63.3 103.1 82.4-14.4 63.6-8 114.2 20.2 130.4 6.5 3.8 14.1 5.6 22.5 5.6 27.5 0 63.5-19.6 99.9-53.6 36.4 33.8 72.4 53.2 99.9 53.2 8.4 0 16-1.8 22.6-5.6 28.1-16.2 34.4-66.7 19.9-130.1 62-19.1 102.5-49.9 102.5-82.3zm-130.2-66.7c-3.7 12.9-8.3 26.2-13.5 39.5-4.1-8-8.4-16-13.1-24-4.6-8-9.5-15.8-14.4-23.4 14.2 2.1 27.9 4.7 41 7.9zm-45.8 106.5c-7.8 13.5-15.8 26.3-24.1 38.2-14.9 1.3-30 2-45.2 2-15.1 0-30.2-.7-45-1.9-8.3-11.9-16.4-24.6-24.2-38-7.6-13.1-14.5-26.4-20.8-39.8 6.2-13.4 13.2-26.8 20.7-39.9 7.8-13.5 15.8-26.3 24.1-38.2 14.9-1.3 30-2 45.2-2 15.1 0 30.2.7 45 1.9 8.3 11.9 16.4 24.6 24.2 38 7.6 13.1 14.5 26.4 20.8 39.8-6.3 13.4-13.2 26.8-20.7 39.9zm32.3-13c5.4 13.4 10 26.8 13.8 39.8-13.1 3.2-26.9 5.9-41.2 8 4.9-7.7 9.8-15.6 14.4-23.7 4.6-8 8.9-16.1 13-24.1zM421.2 430c-9.3-9.6-18.6-20.3-27.8-32 9 .4 18.2.7 27.5.7 9.4 0 18.7-.2 27.8-.7-9 11.7-18.3 22.4-27.5 32zm-74.4-58.9c-14.2-2.1-27.9-4.7-41-7.9 3.7-12.9 8.3-26.2 13.5-39.5 4.1 8 8.4 16 13.1 24 4.7 8 9.5 15.8 14.4 23.4zM420.7 163c9.3 9.6 18.6 20.3 27.8 32-9-.4-18.2-.7-27.5-.7-9.4 0-18.7.2-27.8.7 9-11.7 18.3-22.4 27.5-32zm-74 58.9c-4.9 7.7-9.8 15.6-14.4 23.7-4.6 8-8.9 16-13 24-5.4-13.4-10-26.8-13.8-39.8 13.1-3.1 26.9-5.8 41.2-7.9zm-90.5 125.2c-35.4-15.1-58.3-34.9-58.3-50.6 0-15.7 22.9-35.6 58.3-50.6 8.6-3.7 18-7 27.7-10.1 5.7 19.6 13.2 40 22.5 60.9-9.2 20.8-16.6 41.1-22.2 60.6-9.9-3.1-19.3-6.5-28-10.2zM310 490c-13.6-7.8-19.5-37.5-14.9-75.7 1.1-9.4 2.9-19.3 5.1-29.4 19.6 4.8 41 8.5 63.5 10.9 13.5 18.5 27.5 35.3 41.6 50-32.6 30.3-63.2 46.9-84 46.9-4.5-.1-8.3-1-11.3-2.7zm237.2-76.2c4.7 38.2-1.1 67.9-14.6 75.8-3 1.8-6.9 2.6-11.5 2.6-20.7 0-51.4-16.5-84-46.6 14-14.7 28-31.4 41.3-49.9 22.6-2.4 44-6.1 63.6-11 2.3 10.1 4.1 19.8 5.2 29.1zm38.5-66.7c-8.6 3.7-18 7-27.7 10.1-5.7-19.6-13.2-40-22.5-60.9 9.2-20.8 16.6-41.1 22.2-60.6 9.9 3.1 19.3 6.5 28.1 10.2 35.4 15.1 58.3 34.9 58.3 50.6-.1 15.7-23 35.6-58.4 50.6zM320.8 78.4z"/>
<circle cx="420.9" cy="296.5" r="45.7"/>
<path d="M520.5 78.1z"/>
</g>
</svg>
// This optional code is used to register a service worker.
// register() is not called by default.
// This lets the app load faster on subsequent visits in production, and gives
// it offline capabilities. However, it also means that developers (and users)
// will only see deployed updates on subsequent visits to a page, after all the
// existing tabs open on the page have been closed, since previously cached
// resources are updated in the background.
// To learn more about the benefits of this model and instructions on how to
// opt-in, read https://bit.ly/CRA-PWA
const isLocalhost = Boolean(
window.location.hostname === 'localhost' ||
// [::1] is the IPv6 localhost address.
window.location.hostname === '[::1]' ||
// 127.0.0.0/8 are considered localhost for IPv4.
window.location.hostname.match(
/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
)
);
export function register(config) {
if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
// The URL constructor is available in all browsers that support SW.
const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href);
if (publicUrl.origin !== window.location.origin) {
// Our service worker won't work if PUBLIC_URL is on a different origin
// from what our page is served on. This might happen if a CDN is used to
// serve assets; see https://github.com/facebook/create-react-app/issues/2374
return;
}
window.addEventListener('load', () => {
const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;
if (isLocalhost) {
// This is running on localhost. Let's check if a service worker still exists or not.
checkValidServiceWorker(swUrl, config);
// Add some additional logging to localhost, pointing developers to the
// service worker/PWA documentation.
navigator.serviceWorker.ready.then(() => {
console.log(
'This web app is being served cache-first by a service ' +
'worker. To learn more, visit https://bit.ly/CRA-PWA'
);
});
} else {
// Is not localhost. Just register service worker
registerValidSW(swUrl, config);
}
});
}
}
function registerValidSW(swUrl, config) {
navigator.serviceWorker
.register(swUrl)
.then(registration => {
registration.onupdatefound = () => {
const installingWorker = registration.installing;
if (installingWorker == null) {
return;
}
installingWorker.onstatechange = () => {
if (installingWorker.state === 'installed') {
if (navigator.serviceWorker.controller) {
// At this point, the updated precached content has been fetched,
// but the previous service worker will still serve the older
// content until all client tabs are closed.
console.log(
'New content is available and will be used when all ' +
'tabs for this page are closed. See https://bit.ly/CRA-PWA.'
);
// Execute callback
if (config && config.onUpdate) {
config.onUpdate(registration);
}
} else {
// At this point, everything has been precached.
// It's the perfect time to display a
// "Content is cached for offline use." message.
console.log('Content is cached for offline use.');
// Execute callback
if (config && config.onSuccess) {
config.onSuccess(registration);
}
}
}
};
};
})
.catch(error => {
console.error('Error during service worker registration:', error);
});
}
function checkValidServiceWorker(swUrl, config) {
// Check if the service worker can be found. If it can't reload the page.
fetch(swUrl, {
headers: { 'Service-Worker': 'script' },
})
.then(response => {
// Ensure service worker exists, and that we really are getting a JS file.
const contentType = response.headers.get('content-type');
if (
response.status === 404 ||
(contentType != null && contentType.indexOf('javascript') === -1)
) {
// No service worker found. Probably a different app. Reload the page.
navigator.serviceWorker.ready.then(registration => {
registration.unregister().then(() => {
window.location.reload();
});
});
} else {
// Service worker found. Proceed as normal.
registerValidSW(swUrl, config);
}
})
.catch(() => {
console.log(
'No internet connection found. App is running in offline mode.'
);
});
}
export function unregister() {
if ('serviceWorker' in navigator) {
navigator.serviceWorker.ready
.then(registration => {
registration.unregister();
})
.catch(error => {
console.error(error.message);
});
}
}
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom/extend-expect';