Commit 7d67b62f authored by Jayasith H.B.C

Merge branch 'master' into 'IT19079264'

Master

See merge request !105
parents 79d4d5bd 7bcede41
@@ -53,7 +53,7 @@ def generate_diagram(filename):
         print(Exception)

-# generate python file for the class
+# generate python file for the class (pyreverse)
 def generate_class(actors, data):
     res = create_class_methods(data)
     class_string_arr = generate_class_string_array(actors, res)
...
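The updated comment points to pyreverse, the UML generator bundled with pylint. The body of generate_class is not part of this diff, so the following is only a minimal sketch of the usual pattern, assuming the generated class definitions are written out as a module and then handed to pyreverse; the function name render_class_diagram, the output directory and the project name are placeholders, not code from this repository.

import subprocess
from pathlib import Path

def render_class_diagram(python_source: str, out_dir: str = "generated") -> str:
    # write the generated class definitions to a throwaway module
    Path(out_dir).mkdir(exist_ok=True)
    module_path = Path(out_dir) / "generated_classes.py"
    module_path.write_text(python_source)
    # pyreverse -o png -p <project> <module> emits classes_<project>.png
    # (png output requires Graphviz to be installed)
    subprocess.run(
        ["pyreverse", "-o", "png", "-p", "Generated", module_path.name],
        check=True,
        cwd=out_dir,
    )
    return str(Path(out_dir) / "classes_Generated.png")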
@@ -11,15 +11,10 @@ def remove_unwanted_values(data):
     return data

-# removing duplicates
-def remove_duplicates(data):
-    return list(set(data))
-
-# punctuation removal
+# punctuation removing
 def remove_punctuation(sentence):
-    text_no_punct = [token for token in sentence if not token.is_punct]
-    cleaned_sentence = ' '.join(token.text for token in text_no_punct)
+    text_without_punctuation = [token for token in sentence if not token.is_punct]
+    cleaned_sentence = ' '.join(token.text for token in text_without_punctuation)
     return cleaned_sentence
...
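remove_punctuation reads token.is_punct and token.text, so the sentence it receives is expected to be a spaCy Span or Doc rather than a plain string. A self-contained usage sketch, assuming spaCy with the en_core_web_sm model installed (the sample sentence is made up):

import spacy

nlp = spacy.load("en_core_web_sm")

def remove_punctuation(sentence):
    # keep only non-punctuation tokens and rejoin their text
    text_without_punctuation = [token for token in sentence if not token.is_punct]
    return ' '.join(token.text for token in text_without_punctuation)

doc = nlp("The customer should be able to search the catalogue, or place an order.")
for sent in doc.sents:
    print(remove_punctuation(sent))
# -> "The customer should be able to search the catalogue or place an order"
# Note: '|' is a math symbol rather than punctuation in Unicode, so it would
# survive this cleanup, which is what the later ' | ' split relies on.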
@@ -37,44 +32,45 @@ def main(scenario, assignment_type):
     del sentences[-1]

     # creating required lists
-    nc = []
+    nouns_pronouns = []
     cleaned_extracted_actions = []
     cleaned_sentences = []
-    splitted_actions_array = []
+    splitted_actions_and_actor_array = []

     # looping through each sentence
     for sentence in sentences:
+        # getting actors using nouns pronouns
         res = get_nouns_pronouns(sentence)
-        nc.append(str(res))
+        nouns_pronouns.append(str(res))
         cleaned_sentence = remove_punctuation(sentence)
         cleaned_sentences.append(cleaned_sentence)
-        splitted_actions = split_actions(str(cleaned_sentence))
-        splitted_actions_array.append(splitted_actions)
-        extracted_actions = get_actions(splitted_actions)
+        splitted_actions_and_actor = split_actions(str(cleaned_sentence))
+        splitted_actions_and_actor_array.append(splitted_actions_and_actor)
+        extracted_actions = get_actions(splitted_actions_and_actor)
         if extracted_actions is not None:
             cleaned_extracted_actions.append(extracted_actions)

     # remove duplicates of the actors
-    nc = list(dict.fromkeys(nc))
-    data = remove_unwanted_values(nc)
+    nouns_pronouns = list(dict.fromkeys(nouns_pronouns))
+    actors = remove_unwanted_values(nouns_pronouns)

-    extracted_relationships = get_include_extend_relationships(splitted_actions_array)
+    extracted_relationships = get_include_extend_relationships(splitted_actions_and_actor_array)
     actors_and_use_cases_array = identify_use_cases(cleaned_extracted_actions)

     if assignment_type == 1:
-        generated_usecase_diagram_path = generate_use_case_diagram(data, extracted_relationships,
+        generated_usecase_diagram_path = generate_use_case_diagram(actors, extracted_relationships,
                                                                     actors_and_use_cases_array)
         return generated_usecase_diagram_path
     elif assignment_type == 2:
-        generated_class_diagram_path = generate_class(data, cleaned_extracted_actions)
+        generated_class_diagram_path = generate_class(actors, cleaned_extracted_actions)
         return generated_class_diagram_path
     elif assignment_type == 3:
-        generated_class_diagram_path = generate_class(data, cleaned_extracted_actions)
-        generated_usecase_diagram_path = generate_use_case_diagram(data, extracted_relationships,
+        generated_class_diagram_path = generate_class(actors, cleaned_extracted_actions)
+        generated_usecase_diagram_path = generate_use_case_diagram(actors, extracted_relationships,
                                                                     actors_and_use_cases_array)
         return generated_class_diagram_path, generated_usecase_diagram_path
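One effect of this change: the removed remove_duplicates helper used list(set(...)), which discards the order in which actors first appear, whereas list(dict.fromkeys(...)) deduplicates while keeping first-seen order (dict preserves insertion order in Python 3.7+). A small illustration with made-up actor strings:

nouns_pronouns = ["customer", "admin", "customer", "librarian", "admin"]

# order-preserving deduplication, as now done in main()
print(list(dict.fromkeys(nouns_pronouns)))   # ['customer', 'admin', 'librarian']

# the dropped set-based helper returned the same items in arbitrary order
print(list(set(nouns_pronouns)))             # e.g. ['librarian', 'customer', 'admin']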
@@ -24,12 +24,14 @@ def get_nouns_pronouns(sentence):
             return token

+# removing punctuations
 def remove_punctuation(sentence):
     text_no_punct = [token for token in sentence if not token.is_punct]
     cleaned_sentence = ' '.join(token.text for token in text_no_punct)
     return cleaned_sentence

+# get actions and actors
 def split_actions(sentence):
     split_string = "should be able to "
     if split_string in sentence:
...
@@ -37,16 +39,18 @@ def split_actions(sentence):
         return extracted_string

-def get_actions(splitted_action):
+# get
+def get_actions(splitted_action_and_actor):
     temp_array = []
-    if splitted_action is not None and '|' in splitted_action[1]:
-        res = splitted_action[1].split(' | ')
-        temp_array.append(splitted_action[0])
+    if splitted_action_and_actor is not None and '|' in splitted_action_and_actor[1]:
+        res = splitted_action_and_actor[1].split(' | ')
+        # print('res',res)
+        temp_array.append(splitted_action_and_actor[0])
         temp_array.append(res[0])
+        print(temp_array)
         return temp_array
     else:
-        return splitted_action
+        return splitted_action_and_actor

 def get_sentences(text):
...
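The body of split_actions is mostly collapsed in this diff, but the visible lines suggest it splits a requirement sentence around "should be able to " into an actor part and an action part, and get_actions then keeps only the first of several ' | '-separated actions. A runnable sketch under that assumption (the split_actions body and the exact return shape are inferred, not taken from the file):

def split_actions(sentence):
    # split "<actor> should be able to <actions>" into [actor, actions]
    split_string = "should be able to "
    if split_string in sentence:
        actor, actions = sentence.split(split_string, 1)
        return [actor.strip(), actions.strip()]
    return None

def get_actions(splitted_action_and_actor):
    temp_array = []
    if splitted_action_and_actor is not None and '|' in splitted_action_and_actor[1]:
        # several actions were joined with ' | '; keep the actor and the first action
        res = splitted_action_and_actor[1].split(' | ')
        temp_array.append(splitted_action_and_actor[0])
        temp_array.append(res[0])
        return temp_array
    return splitted_action_and_actor

print(get_actions(split_actions("The customer should be able to search the catalogue | place an order")))
# -> ['The customer', 'search the catalogue']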