Commit c0fdadde authored by P.R.K Peramuna

Integrated project

parent b8526f4d
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Warming up PyWSD (takes ~10 secs)... took 4.7420501708984375 secs.\n",
"c:\\Users\\Legion\\.conda\\envs\\tf210\\lib\\site-packages\\transformers\\models\\t5\\tokenization_t5.py:199: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
"- Be aware that you SHOULD NOT rely on t5-base automatically truncating your input to 512 when padding/encoding.\n",
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
" warnings.warn(\n",
"You are using the legacy behaviour of the <class 'transformers.models.t5.tokenization_t5.T5Tokenizer'>. This means that tokens that come after special tokens will not be properly handled. We recommend you to read the related pull request available at https://github.com/huggingface/transformers/pull/24565\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"All models loaded successfully!\n"
]
}
],
"source": [
"import pprint\n",
"import pathlib\n",
"import cv2 as cv\n",
"import numpy as np \n",
"import pandas as pd\n",
"import PyPDF2, re, os\n",
"import datetime as dt\n",
"import cv2, dlib, math\n",
"import mediapipe as mp\n",
"from fastai.vision.all import *\n",
"from transformers import pipeline, T5ForConditionalGeneration,T5Tokenizer\n",
"\n",
"\n",
"import pke\n",
"import nltk\n",
"import torch\n",
"import random\n",
"import string\n",
"import warnings\n",
"import requests\n",
"import traceback\n",
"import numpy as np\n",
"from textwrap3 import wrap\n",
"from nltk.corpus import stopwords\n",
"from pywsd.lesk import simple_lesk\n",
"from pywsd.lesk import cosine_lesk\n",
"from pywsd.lesk import adapted_lesk\n",
"from nltk.corpus import wordnet as wn\n",
"from flashtext import KeywordProcessor\n",
"from nltk.tokenize import sent_tokenize\n",
"from pywsd.similarity import max_similarity\n",
"\n",
"temp = pathlib.PosixPath\n",
"pathlib.PosixPath = pathlib.WindowsPath\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"mp_face_mesh = mp.solutions.face_mesh\n",
"face_mesh = mp_face_mesh.FaceMesh(\n",
" min_detection_confidence=0.5, \n",
" min_tracking_confidence=0.5\n",
" )\n",
"\n",
"face_detector = dlib.get_frontal_face_detector()\n",
"shape_predictor = dlib.shape_predictor(\"weights/shape_predictor_68_face_landmarks.dat\")\n",
"\n",
"learn_emotion = load_learner('weights/emotions_rf.pkl')\n",
"learn_emotion_labels = learn_emotion.dls.vocab\n",
"\n",
"pipeline_lesson = pipeline(\n",
" task=\"summarization\",\n",
" model=\"./weights/lesson-summarization\",\n",
" device=0 if torch.cuda.is_available() else -1,\n",
" )\n",
"\n",
"summary_model = T5ForConditionalGeneration.from_pretrained('t5-base')\n",
"summary_tokenizer = T5Tokenizer.from_pretrained('t5-base')\n",
"\n",
"question_model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_squad_v1')\n",
"question_tokenizer = T5Tokenizer.from_pretrained('ramsrigouthamg/t5_squad_v1')\n",
"\n",
"question_model = question_model.to(device)\n",
"summary_model = summary_model.to(device)\n",
"\n",
"print(\"All models loaded successfully!\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"head_pose_dict = {\n",
" \"Looking Left\" : 0,\n",
" \"Looking Right\" : 1,\n",
" \"Looking Up\" : 2,\n",
" \"Looking Down\" : 3,\n",
" \"Looking Forward\" : 4\n",
" } \n",
"\n",
"drowsiness_dict = {\n",
" \"Sleepy\" : 0,\n",
" \"Not Sleepy\" : 1\n",
" } \n",
"\n",
"emotion_dict = {\n",
" \"angry\" : 0, \n",
" \"disgust\" : 1,\n",
" \"fear\" : 2,\n",
" \"happy\" : 3,\n",
" \"neutral\" : 4,\n",
" \"sad\" : 5,\n",
" \"surprise\" : 6\n",
" }"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to\n",
"[nltk_data] C:\\Users\\Legion\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n",
"[nltk_data] Downloading package brown to\n",
"[nltk_data] C:\\Users\\Legion\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package brown is already up-to-date!\n",
"[nltk_data] Downloading package wordnet to\n",
"[nltk_data] C:\\Users\\Legion\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n",
"[nltk_data] Downloading package stopwords to\n",
"[nltk_data] C:\\Users\\Legion\\AppData\\Roaming\\nltk_data...\n",
"[nltk_data] Package stopwords is already up-to-date!\n"
]
}
],
"source": [
"def set_seed(seed: int):\n",
" random.seed(seed)\n",
" np.random.seed(seed)\n",
" torch.manual_seed(seed)\n",
" torch.cuda.manual_seed_all(seed)\n",
"\n",
"nltk.download('punkt')\n",
"nltk.download('brown')\n",
"nltk.download('wordnet')\n",
"nltk.download('stopwords')\n",
"warnings.filterwarnings('ignore')\n",
"set_seed(42)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def mid(p1 ,p2):\n",
" return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)\n",
"\n",
"def eye_aspect_ratio(eye_landmark, face_roi_landmark):\n",
" left_point = (face_roi_landmark.part(eye_landmark[0]).x, face_roi_landmark.part(eye_landmark[0]).y)\n",
" right_point = (face_roi_landmark.part(eye_landmark[3]).x, face_roi_landmark.part(eye_landmark[3]).y)\n",
"\n",
" center_top = mid(face_roi_landmark.part(eye_landmark[1]), face_roi_landmark.part(eye_landmark[2]))\n",
" center_bottom = mid(face_roi_landmark.part(eye_landmark[5]), face_roi_landmark.part(eye_landmark[4]))\n",
"\n",
" hor_line_length = math.hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))\n",
" ver_line_length = math.hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))\n",
"\n",
" ratio = hor_line_length / ver_line_length\n",
" return ratio\n",
"\n",
"def mouth_aspect_ratio(lips_landmark, face_roi_landmark):\n",
" left_point = (face_roi_landmark.part(lips_landmark[0]).x, face_roi_landmark.part(lips_landmark[0]).y)\n",
" right_point = (face_roi_landmark.part(lips_landmark[2]).x, face_roi_landmark.part(lips_landmark[2]).y)\n",
"\n",
" center_top = (face_roi_landmark.part(lips_landmark[1]).x, face_roi_landmark.part(lips_landmark[1]).y)\n",
" center_bottom = (face_roi_landmark.part(lips_landmark[3]).x, face_roi_landmark.part(lips_landmark[3]).y)\n",
"\n",
" hor_line_length = math.hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))\n",
" ver_line_length = math.hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))\n",
" if hor_line_length == 0:\n",
" return ver_line_length\n",
" ratio = ver_line_length / hor_line_length\n",
" return ratio\n",
"\n",
"def predict_emotion(img_path):\n",
" img = cv.imread(img_path)\n",
" img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n",
" img = cv.resize(img, (48, 48))\n",
" img = PILImage.create(img)\n",
" probs_emotion = learn_emotion.predict(img)[-1]\n",
" emotions = {learn_emotion_labels[i]: float(probs_emotion[i]) for i in range(len(learn_emotion_labels))}\n",
" return emotions\n",
"\n",
"def head_pose_estimation(\n",
" video_path=None,\n",
" is_visualize=False\n",
" ): \n",
" if video_path is not None:\n",
" cap = cv2.VideoCapture(video_path)\n",
" else: \n",
" cap = cv2.VideoCapture(0)\n",
"\n",
" head_pose_state = []\n",
"\n",
" while cap.isOpened():\n",
" _, image = cap.read()\n",
" if image is None:\n",
" break\n",
" \n",
" image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n",
" image.flags.writeable = False\n",
" results = face_mesh.process(image)\n",
" image.flags.writeable = True\n",
" \n",
" image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n",
" img_h, img_w, _ = image.shape\n",
" face_3d = []\n",
" face_2d = []\n",
"\n",
" if results.multi_face_landmarks:\n",
" for face_landmarks in results.multi_face_landmarks:\n",
" for idx, lm in enumerate(face_landmarks.landmark):\n",
" if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:\n",
" if idx == 1:\n",
" nose_2d = (lm.x * img_w, lm.y * img_h)\n",
" nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 8000)\n",
"\n",
" x, y = int(lm.x * img_w), int(lm.y * img_h)\n",
"\n",
" face_2d.append([x, y])\n",
" face_3d.append([x, y, lm.z]) \n",
" \n",
" face_2d = np.array(face_2d, dtype=np.float64)\n",
" face_3d = np.array(face_3d, dtype=np.float64)\n",
" focal_length = 1 * img_w\n",
"\n",
" cam_matrix = np.array([ [focal_length, 0, img_h / 2],\n",
" [0, focal_length, img_w / 2],\n",
" [0, 0, 1]])\n",
"\n",
"\n",
" dist_matrix = np.zeros((4, 1), dtype=np.float64)\n",
" success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)\n",
"\n",
" rmat, jac = cv2.Rodrigues(rot_vec)\n",
" angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)\n",
"\n",
" x = angles[0] * 360\n",
" y = angles[1] * 360\n",
"\n",
" if y < -10:\n",
" text = \"Looking Left\"\n",
" elif y > 10:\n",
" text = \"Looking Right\"\n",
" elif x < -10:\n",
" text = \"Looking Down\"\n",
" elif x > 10:\n",
" text = \"Looking Up\"\n",
" else:\n",
" text = \"Looking Forward\"\n",
" head_pose_state.append(head_pose_dict[text])\n",
"\n",
" if is_visualize:\n",
" nose_3d_projection, _ = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)\n",
"\n",
" p1 = (int(nose_2d[0]), int(nose_2d[1]))\n",
" p2 = (int(nose_3d_projection[0][0][0]), int(nose_3d_projection[0][0][1]))\n",
" cv2.line(image, p1, p2, (255, 0, 0), 2)\n",
" cv2.putText(image, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n",
"\n",
" if is_visualize:\n",
" cv2.imshow('Head Pose Estimation', image)\n",
" if cv2.waitKey(30) & 0xFF == 27:\n",
" break\n",
"\n",
" cap.release()\n",
" return head_pose_state\n",
"\n",
"\n",
"def drowsiness_detection(\n",
" video_path=None,\n",
" is_visualize=False,\n",
" font = cv2.FONT_HERSHEY_TRIPLEX\n",
" ): \n",
" if video_path is not None:\n",
" cap = cv2.VideoCapture(video_path)\n",
" else:\n",
" cap = cv2.VideoCapture(0)\n",
"\n",
" count = 0\n",
" drowsiness_state = []\n",
" while True:\n",
" _, img = cap.read()\n",
" if img is None:\n",
" break\n",
"\n",
" img = cv2.flip(img,1)\n",
" gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
"\n",
" faces = face_detector(gray)\n",
"\n",
" for face_roi in faces:\n",
"\n",
" landmark_list = shape_predictor(gray, face_roi)\n",
"\n",
" left_eye_ratio = eye_aspect_ratio([36, 37, 38, 39, 40, 41], landmark_list)\n",
" right_eye_ratio = eye_aspect_ratio([42, 43, 44, 45, 46, 47], landmark_list)\n",
" eye_open_ratio = (left_eye_ratio + right_eye_ratio) / 2\n",
" if is_visualize:\n",
" cv2.putText(img, str(eye_open_ratio), (0, 13), font, 0.5, (100, 100, 100))\n",
"\n",
" inner_lip_ratio = mouth_aspect_ratio([60,62,64,66], landmark_list)\n",
" outter_lip_ratio = mouth_aspect_ratio([48,51,54,57], landmark_list)\n",
" mouth_open_ratio = (inner_lip_ratio + outter_lip_ratio) / 2;\n",
" if is_visualize:\n",
" cv2.putText(img, str(mouth_open_ratio), (448, 13), font, 0.5, (100, 100, 100))\n",
"\n",
" if mouth_open_ratio > 0.380 and eye_open_ratio > 4.0 or eye_open_ratio > 4.30:\n",
" count +=1\n",
" else:\n",
" count = 0\n",
" x,y = face_roi.left(), face_roi.top()\n",
" x1,y1 = face_roi.right(), face_roi.bottom()\n",
" if count>10:\n",
" if is_visualize:\n",
" cv2.rectangle(img, (x,y), (x1,y1), (0, 0, 255), 2)\n",
" cv2.putText(img, \"Sleepy\", (x, y-5), font, 0.5, (0, 0, 255))\n",
" drowsiness_state.append(drowsiness_dict[\"Sleepy\"])\n",
" \n",
" else:\n",
" if is_visualize:\n",
" cv2.rectangle(img, (x,y), (x1,y1), (0, 255, 0), 2)\n",
" drowsiness_state.append(drowsiness_dict[\"Not Sleepy\"])\n",
"\n",
" if is_visualize:\n",
" cv2.imshow(\"Drowsiness Detection\", img)\n",
" if cv2.waitKey(30) & 0xFF == 27:\n",
" break\n",
"\n",
" cap.release()\n",
" cv2.destroyAllWindows()\n",
" return drowsiness_state\n",
"\n",
"def emotion_detection(\n",
" video_path=None,\n",
" is_visualize=False\n",
" ):\n",
" if video_path is not None:\n",
" cap = cv2.VideoCapture(video_path)\n",
" else:\n",
" cap = cv2.VideoCapture(0)\n",
"\n",
" emotion_state = []\n",
" while True:\n",
" _, img = cap.read()\n",
" if img is None:\n",
" break\n",
"\n",
" img = cv2.flip(img,1)\n",
" gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
"\n",
" faces = face_detector(gray)\n",
"\n",
" for face_roi in faces:\n",
" x,y = face_roi.left(), face_roi.top()\n",
" x1,y1 = face_roi.right(), face_roi.bottom()\n",
" cv2.rectangle(img, (x,y), (x1,y1), (0, 255, 0), 2)\n",
" roi_gray = gray[y:y1,x:x1]\n",
" roi_color = img[y:y1,x:x1]\n",
" roi_gray = cv2.resize(roi_gray, (48, 48))\n",
" roi_gray = PILImage.create(roi_gray)\n",
" probs_emotion = learn_emotion.predict(roi_gray)[-1]\n",
" emotions = {learn_emotion_labels[i]: float(probs_emotion[i]) for i in range(len(learn_emotion_labels))}\n",
" emotion = max(emotions, key=emotions.get)\n",
" if is_visualize:\n",
" cv2.putText(img, emotion, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n",
" emotion_state.append(emotion_dict[emotion])\n",
"\n",
" if is_visualize:\n",
" cv2.imshow(\"Emotion Detection\", img)\n",
" if cv2.waitKey(1) & 0xFF == 27:\n",
" break\n",
"\n",
" cap.release()\n",
" cv2.destroyAllWindows()\n",
" return emotion_state"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def find_available_hours_per_week(available_time_dict):\n",
" available_hours = {}\n",
" for day in available_time_dict.keys():\n",
" available_hour_per_day = 0\n",
" for time in available_time_dict[day]:\n",
" start_time = time.split('-')[0]\n",
" end_time = time.split('-')[1]\n",
" start_time = dt.datetime.strptime(start_time, '%H:%M')\n",
" end_time = dt.datetime.strptime(end_time, '%H:%M')\n",
" available_hour_per_day += (end_time - start_time).seconds/3600\n",
"\n",
" available_hours[day] = available_hour_per_day\n",
" return available_hours\n",
"\n",
"def map_time_slot(duration):\n",
" # map each half hour to a number, so a day should have 0 - 48 \n",
" # eg : duration - '06:00-18:00'\n",
"\n",
" start_time = duration.split('-')[0]\n",
" end_time = duration.split('-')[1]\n",
"\n",
" start_time = dt.datetime.strptime(start_time, '%H:%M')\n",
" end_time = dt.datetime.strptime(end_time, '%H:%M')\n",
"\n",
" time_starter = dt.datetime.strptime('00:00', '%H:%M')\n",
" idx_tracker = []\n",
"\n",
" idx = 0\n",
" while True:\n",
" if time_starter >= start_time and time_starter < end_time:\n",
" idx_tracker.append(idx)\n",
" time_starter += dt.timedelta(minutes=30)\n",
" idx += 1\n",
" if time_starter >= dt.datetime.strptime('23:59', '%H:%M'):\n",
" break\n",
"\n",
" return idx_tracker\n",
"\n",
"def map_idx_to_time(idx):\n",
" time_starter = dt.datetime.strptime('00:00', '%H:%M')\n",
" for i in range(idx):\n",
" time_starter += dt.timedelta(minutes=30)\n",
" start_time = time_starter.strftime('%H:%M')\n",
" end_time = (time_starter + dt.timedelta(minutes=30)).strftime('%H:%M')\n",
" return f\"{start_time}-{end_time}\"\n",
"\n",
"def find_available_slots_per_week(available_time_dict):\n",
" available_slots = {}\n",
" for day in available_time_dict.keys():\n",
" available_slots_per_day = []\n",
" for time in available_time_dict[day]:\n",
" available_slots_per_day += map_time_slot(time)\n",
" available_slots[day] = available_slots_per_day\n",
" return available_slots\n",
"\n",
"def extract_all_slots(available_times):\n",
" slot_dict = find_available_slots_per_week(available_times)\n",
" all_slots = []\n",
" for dat, slots in slot_dict.items():\n",
" for slot in slots:\n",
" all_slots.append(f\"{dat}_{slot}\")\n",
" return all_slots\n",
"\n",
"def reward_per_module(\n",
" student_details,\n",
" level_mapping = {\n",
" 'Easy': 0,\n",
" 'Average': 1,\n",
" 'Hard': 2\n",
" }):\n",
" df = pd.DataFrame(student_details)\n",
" df['Student Level'] = df['Student Level'].map(level_mapping)\n",
"\n",
" CreditPoints = df['Credit points'].values\n",
" StudentLevel = df['Student Level'].values\n",
" StudentLevel = [int(i) + 1 for i in StudentLevel]\n",
"\n",
" TotalChapters = df['Total Chapters'].values\n",
" StudiedAlready = df['Studied Already'].values\n",
"\n",
" ChapterScore = StudiedAlready / TotalChapters\n",
"\n",
" reward = (CreditPoints * StudentLevel) * ChapterScore\n",
" reward = reward / np.sum(reward)\n",
"\n",
" modules = df['Subject name & PDF'].values\n",
" return dict(zip(modules, reward))\n",
"\n",
"def q_learning():\n",
" Q = np.zeros([7, 48])\n",
"\n",
" gamma = 0.8\n",
" alpha = 0.9\n",
"\n",
" num_episodes = 1000\n",
"\n",
" for i in range(num_episodes):\n",
" s = 0\n",
"\n",
" while s < 6:\n",
" a = np.argmax(Q[s, :] + np.random.randn(1, 48) * (1. / (i + 1)))\n",
"\n",
" s_prime = s + 1\n",
" r = 1\n",
"\n",
" Q[s, a] = Q[s, a] + alpha * (r + gamma * np.max(Q[s_prime, :]) - Q[s, a])\n",
" s = s_prime\n",
"\n",
" return Q\n",
"\n",
"def assign_slots_per_week(available_times, student_details):\n",
" module_weights = reward_per_module(student_details)\n",
" all_slots = extract_all_slots(available_times)\n",
"\n",
" module_weights = {k : int(v * len(all_slots)) for k, v in module_weights.items()}\n",
" # sort the modules based on weights\n",
" module_weights = {k: v for k, v in sorted(module_weights.items(), key=lambda item: item[1], reverse=True)}\n",
" module_weights[list(module_weights.keys())[-1]] = (len(all_slots) - sum(module_weights.values())) + module_weights[list(module_weights.keys())[-1]]\n",
"\n",
" all_slots_cp = all_slots.copy()\n",
" all_slots_cp = np.array(all_slots_cp)\n",
"\n",
" assign_slots = {}\n",
" for module, weight in module_weights.items():\n",
" Q = q_learning()\n",
" rand_idxs = np.random.choice(len(all_slots_cp), weight, replace=False)\n",
" module_json = {}\n",
" for idx in rand_idxs:\n",
" slopt_details = all_slots_cp[idx]\n",
" day, splot_id = slopt_details.split('_')\n",
" if day not in module_json.keys():\n",
" module_json[day] = []\n",
" module_json[day].append(int(splot_id))\n",
" \n",
" all_slots_cp = np.delete(all_slots_cp, rand_idxs)\n",
" assign_slots[module] = module_json\n",
"\n",
" for module, slots in assign_slots.items():\n",
" for day, slot_ids in slots.items():\n",
" # sort the slot ids in ascending order\n",
" slot_ids = sorted(slot_ids)\n",
" assign_slots[module][day] = slot_ids\n",
" assign_slots[module][day] = [map_idx_to_time(i) for i in slot_ids]\n",
"\n",
" return assign_slots"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def read_pdf_data(pdf_path):\n",
" pdf_file = open(pdf_path, 'rb')\n",
" pdf_reader = PyPDF2.PdfFileReader(pdf_file)\n",
" num_pages = pdf_reader.getNumPages()\n",
"\n",
" whole_text = ''\n",
" for page in range(num_pages):\n",
" page_obj = pdf_reader.getPage(page)\n",
" text = page_obj.extractText()\n",
" whole_text += f\" {text}\"\n",
" pdf_file.close()\n",
"\n",
" # split this text into paragraphs\n",
" sentences = whole_text.split('\\n')\n",
" sen_lengths = [len(sen) for sen in sentences]\n",
" avg_sen_length = np.mean(sen_lengths)\n",
" avg_sen_length = 80\n",
"\n",
" # split into paragraphs\n",
" paragraphs = []\n",
" paragraph = ''\n",
" for sentence in sentences:\n",
" if len(sentence) > avg_sen_length:\n",
" paragraph += f\" {sentence}\"\n",
" else:\n",
" paragraphs.append(paragraph)\n",
" paragraph = ''\n",
" return paragraphs\n",
"\n",
"def inference_lesson_summarizer(pdf_path):\n",
" paragraphs = read_pdf_data(pdf_path)\n",
" summarized_text = ''\n",
" for paragraph in paragraphs:\n",
" summary_paragraph = pipeline_lesson(paragraph)[0]['summary_text']\n",
" summarized_text += f\"{summary_paragraph}\\n\\n\"\n",
"\n",
" return summarized_text"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def postprocesstext (content):\n",
" final=\"\"\n",
" for sent in sent_tokenize(content):\n",
" sent = sent.capitalize()\n",
" final = final +\" \"+sent\n",
" return final\n",
"\n",
"\n",
"def summarizer(\n",
" text,\n",
" model,\n",
" tokenizer,\n",
" max_len = 512\n",
" ):\n",
" text = text.strip().replace(\"\\n\",\" \")\n",
" text = \"summarize: \"+text\n",
"\n",
" encoding = tokenizer.encode_plus(\n",
" text,\n",
" max_length=max_len, \n",
" pad_to_max_length=False,\n",
" return_tensors=\"pt\",\n",
" truncation=True\n",
" ).to(device)\n",
" input_ids, attention_mask = encoding[\"input_ids\"], encoding[\"attention_mask\"]\n",
"\n",
" outs = model.generate(\n",
" input_ids=input_ids,\n",
" attention_mask=attention_mask,\n",
" early_stopping=True,\n",
" num_beams=3,\n",
" num_return_sequences=1,\n",
" no_repeat_ngram_size=2,\n",
" min_length = 75,\n",
" max_length=300\n",
" )\n",
"\n",
"\n",
" dec = [tokenizer.decode(ids,skip_special_tokens=True) for ids in outs]\n",
" summary = dec[0]\n",
" summary = postprocesstext(summary)\n",
" summary= summary.strip()\n",
"\n",
" return summary\n",
"\n",
"def get_nouns_multipartite(content):\n",
" out=[]\n",
" try:\n",
" extractor = pke.unsupervised.MultipartiteRank()\n",
" extractor.load_document(input=content,language='en')\n",
"\n",
" pos = {'PROPN','NOUN'}\n",
" stoplist = list(string.punctuation)\n",
" stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']\n",
" stoplist += stopwords.words('english')\n",
"\n",
" extractor.candidate_selection(pos=pos)\n",
" extractor.candidate_weighting(alpha=1.1,\n",
" threshold=0.75,\n",
" method='average')\n",
" keyphrases = extractor.get_n_best(n=15)\n",
"\n",
"\n",
" for val in keyphrases:\n",
" out.append(val[0])\n",
" except:\n",
" out = []\n",
" traceback.print_exc()\n",
"\n",
" return out\n",
"\n",
"def get_keywords(\n",
" originaltext,\n",
" summarytext,\n",
" n_questions = 8\n",
" ):\n",
" keywords = get_nouns_multipartite(originaltext)\n",
" # print (\"keywords unsummarized: \",keywords)\n",
" keyword_processor = KeywordProcessor()\n",
" for keyword in keywords:\n",
" keyword_processor.add_keyword(keyword)\n",
"\n",
" keywords_found = keyword_processor.extract_keywords(summarytext)\n",
" keywords_found = list(set(keywords_found))\n",
" # print (\"keywords_found in summarized: \",keywords_found)\n",
"\n",
" important_keywords =[]\n",
" for keyword in keywords:\n",
" if keyword in keywords_found:\n",
" important_keywords.append(keyword)\n",
"\n",
" return important_keywords[:n_questions] if len(important_keywords) > n_questions else important_keywords\n",
"\n",
"def get_question(context,answer,model,tokenizer):\n",
" text = \"context: {} answer: {}\".format(context,answer)\n",
" encoding = tokenizer.encode_plus(text,max_length=384, pad_to_max_length=False,truncation=True, return_tensors=\"pt\").to(device)\n",
" input_ids, attention_mask = encoding[\"input_ids\"], encoding[\"attention_mask\"]\n",
"\n",
" outs = model.generate(\n",
" input_ids=input_ids,\n",
" attention_mask=attention_mask,\n",
" early_stopping=True,\n",
" num_beams=5,\n",
" num_return_sequences=1,\n",
" no_repeat_ngram_size=2,\n",
" max_length=72\n",
" )\n",
"\n",
"\n",
" dec = [tokenizer.decode(ids,skip_special_tokens=True) for ids in outs]\n",
"\n",
"\n",
" Question = dec[0].replace(\"question:\",\"\")\n",
" Question= Question.strip()\n",
" return Question\n",
"\n",
"def get_distractors_wordnet(syn,word):\n",
" distractors=[]\n",
" word= word.lower()\n",
" orig_word = word\n",
" if len(word.split())>0:\n",
" word = word.replace(\" \",\"_\")\n",
"\n",
" hypernym = syn.hypernyms()\n",
" if len(hypernym) == 0: \n",
" return distractors\n",
" \n",
" for item in hypernym[0].hyponyms():\n",
" name = item.lemmas()[0].name()\n",
" if name == orig_word:\n",
" continue\n",
" name = name.replace(\"_\",\" \")\n",
" name = \" \".join(w.capitalize() for w in name.split())\n",
" if name is not None and name not in distractors:\n",
" distractors.append(name)\n",
" return distractors\n",
"\n",
"def get_wordsense(sent,word):\n",
" word= word.lower()\n",
" \n",
" if len(word.split())>0:\n",
" word = word.replace(\" \",\"_\")\n",
" \n",
" \n",
" synsets = wn.synsets(word,'n')\n",
" if synsets:\n",
" wup = max_similarity(sent, word, 'wup', pos='n')\n",
" adapted_lesk_output = adapted_lesk(sent, word, pos='n')\n",
" lowest_index = min (synsets.index(wup),synsets.index(adapted_lesk_output))\n",
" return synsets[lowest_index]\n",
" else:\n",
" return None\n",
"\n",
"def get_distractors_conceptnet(word):\n",
" word = word.lower()\n",
" original_word= word\n",
" if (len(word.split())>0):\n",
" word = word.replace(\" \",\"_\")\n",
"\n",
" distractor_list = [] \n",
" url = \"http://api.conceptnet.io/query?node=/c/en/%s/n&rel=/r/PartOf&start=/c/en/%s&limit=20\"%(word,word)\n",
" obj = requests.get(url).json()\n",
"\n",
" for edge in obj['edges']:\n",
" link = edge['end']['term'] \n",
"\n",
" url2 = \"http://api.conceptnet.io/query?node=%s&rel=/r/PartOf&end=%s&limit=20\"%(link,link)\n",
" obj2 = requests.get(url2).json()\n",
"\n",
" for edge in obj2['edges']:\n",
" word2 = edge['start']['label']\n",
"\n",
" if word2 not in distractor_list and original_word.lower() not in word2.lower():\n",
" distractor_list.append(word2)\n",
" \n",
" return distractor_list\n",
"\n",
"def get_distractors_ensemble(q, a):\n",
" try:\n",
" wordsense = get_wordsense(q,a)\n",
" if wordsense:\n",
" distractors = get_distractors_wordnet(wordsense,a)\n",
" if len(distractors) ==0:\n",
" distractors = get_distractors_conceptnet(a)\n",
" if len(distractors) != 0:\n",
" distractors =[dis.capitalize() for dis in distractors if dis.lower() not in a.lower()]\n",
" return distractors\n",
" else:\n",
" distractors = get_distractors_conceptnet(a)\n",
" if len(distractors) != 0:\n",
" distractors =[dis.capitalize() for dis in distractors if dis.lower() not in a.lower()]\n",
" return distractors\n",
" except:\n",
" return []\n",
" \n",
"def qna_generation_pipeline(pdf_path, n_questions):\n",
" pdf_file = open(pdf_path, 'rb')\n",
" pdf_reader = PyPDF2.PdfFileReader(pdf_file)\n",
" num_pages = pdf_reader.getNumPages()\n",
"\n",
" whole_text = ''\n",
" for page in range(num_pages):\n",
" page_obj = pdf_reader.getPage(page)\n",
" text = page_obj.extractText()\n",
" whole_text += f\" {text}\"\n",
" pdf_file.close()\n",
"\n",
" summarized_text = summarizer(whole_text,summary_model,summary_tokenizer)\n",
" imp_keywords = get_keywords(whole_text,summarized_text, n_questions = n_questions)\n",
"\n",
" data = []\n",
" for answer in imp_keywords:\n",
" q_json = {}\n",
" ques = get_question(summarized_text,answer,question_model,question_tokenizer)\n",
" distractors = get_distractors_ensemble(ques, answer)\n",
" answer = answer.capitalize()\n",
" choices = [answer] + distractors[:3]\n",
" random.shuffle(choices)\n",
"\n",
" q_json[\"question\"] = ques\n",
" q_json[\"answer\"] = answer\n",
" q_json[\"choices\"] = choices\n",
"\n",
" data.append(q_json)\n",
"\n",
" return data"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def inference_attention_analyzer(video_path):\n",
" head_pose_state = head_pose_estimation(video_path)\n",
" drowsiness_state = drowsiness_detection(video_path)\n",
" emotion_state = emotion_detection(video_path)\n",
"\n",
" head_pose_distribution = np.bincount(head_pose_state)\n",
" drowsiness_distribution = np.bincount(drowsiness_state)\n",
" emotion_distribution = np.bincount(emotion_state)\n",
"\n",
" head_pose_percentage = head_pose_distribution / len(head_pose_state) * 100\n",
" drowsiness_percentage = drowsiness_distribution / len(drowsiness_state) * 100\n",
" emotion_percentage = emotion_distribution / len(emotion_state) * 100\n",
"\n",
" head_pose_response = {key: f\"{round(value, 2)} %\" for key, value in zip(head_pose_dict.keys(), head_pose_percentage)}\n",
" drowsiness_response = {key: f\"{round(value, 2)} %\" for key, value in zip(drowsiness_dict.keys(), drowsiness_percentage)}\n",
" emotion_response = {key: f\"{round(value, 2)} %\" for key, value in zip(emotion_dict.keys(), emotion_percentage)}\n",
"\n",
" return {\n",
" \"head_pose\": head_pose_response,\n",
" \"drowsiness\": drowsiness_response,\n",
" \"emotion\": emotion_response\n",
" }"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Your max_length is set to 200, but your input_length is only 110. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=55)\n",
"Your max_length is set to 200, but your input_length is only 130. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=65)\n",
"Your max_length is set to 200, but your input_length is only 173. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=86)\n",
"Your max_length is set to 200, but your input_length is only 81. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=40)\n",
"Your max_length is set to 200, but your input_length is only 128. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=64)\n",
"Your max_length is set to 200, but your input_length is only 137. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=68)\n",
"Your max_length is set to 200, but your input_length is only 99. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=49)\n",
"Your max_length is set to 200, but your input_length is only 139. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=69)\n"
]
},
{
"data": {
"text/plain": [
"\"Computer memory is represented as a sequence of bytes, grouped into words. Bytes have unique addresses or indices. The size of a word and byte determines the amount of memory that can be accessed. In C, a pointer is a variable that stores the memory address of another variable, which can also be a pointeder.\\n\\nThe value pointed to by a pointer can be retrieved using the unary * operator, for example: int *p, and int x = *p;. The memory address of a variable can be obtained using the Unary ampersand (& ) operator, such as int*p = &x; to retrieve the value points to. In C, pointers use consecutive memory addresses without being able to identify them in the code\\n\\nPointer arithmetic can be used to adjust w here a pointer pointer. For example, if pc points to the first element of an array, after executing pC+=3; then c points towards the fourth element. A pointer can even be dereferenced using array notation. For an array c, for example, the value of c[2] represents the value that the array el ement represents which is two elements beyond the array element currently pointed to\\n\\nIn the second lecture, we defined functions that took an array as an argument. For instance, void reverse(char s[] affects the values of the array. It affects both the local value and the array itself.\\n\\nC allows the creation of arrays of pointers, such as int *a[5]. These arrays are particularly useful in strings. For instance, in C's support for command line arguments, main(int argc, char *argv[]), argv is an array of character pointers representing the command line argument.\\n\\nIn C, to define an instance of a structure called circle, we write struct circle c;. Structures can also be initialized with values using the syntax 'struct circles c = 12, 23, 5;'.An automatic or local structure variable can be initialised by a function call. For instance, structure ci rcle c = circle_init(); initializes the structure variable c with values specified by the C function.A structure can be declared and int.Int y.\\n\\nIn C, programmers can use pointers to functions, enabling functions to be passed as arguments to other functions. This allows for increased flexibility and parameterization of algorithms. For instance, a sorting algorithm can be designed to accept a pointer to a comparison function.\\n\\nA structure membe r can be accessed using the notation '. notation: 'structname.member; for example: pt.x', while comparisons between structures (pt1 > Pt2) are not defined, meaning points to structures can be defined using the 'structure circle *pc' operator, but it can look cumbersome, such as (*pc).x).\\n\\n\""
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inference_lesson_summarizer('data/TextsummarizationData/Data/PDF1_Data.pdf')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'question': 'What is an example of a pointer being defined using an asterisk?',\n",
" 'answer': 'Example',\n",
" 'choices': ['Antitype', 'Abstractionism', 'Appearance', 'Example']},\n",
" {'question': 'Each byte has a unique what?',\n",
" 'answer': 'Address',\n",
" 'choices': ['Prescription', 'Misdirection', 'Address', 'Markup']},\n",
" {'question': 'Computer memory is often abstracted as a sequence of what?',\n",
" 'answer': 'Bytes',\n",
" 'choices': ['Allocation unit', 'Exabit', 'Block', 'Bytes']},\n",
" {'question': 'How can pointers be dereferenced?',\n",
" 'answer': 'Operator',\n",
" 'choices': ['Hedger', 'Scalper', 'Operator', 'Venture capitalist']},\n",
" {'question': 'Computer memory is often abstracted as what?',\n",
" 'answer': 'Sequence',\n",
" 'choices': ['Film clip', 'Footage', 'Microfilm', 'Sequence']}]"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"qna_generation_pipeline('data/TextsummarizationData/Data/PDF1_Data.pdf', 5)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'Software process modeling (SPM)': {'thursday': ['04:00-04:30',\n",
" '15:00-15:30',\n",
" '16:30-17:00',\n",
" '17:30-18:00',\n",
" '18:30-19:00',\n",
" '20:00-20:30',\n",
" '20:30-21:00'],\n",
" 'monday': ['04:30-05:00', '17:30-18:00', '19:30-20:00', '20:00-20:30'],\n",
" 'wednesday': ['04:00-04:30', '04:30-05:00', '18:30-19:00', '19:00-19:30'],\n",
" 'sunday': ['07:00-07:30',\n",
" '07:30-08:00',\n",
" '08:30-09:00',\n",
" '12:30-13:00',\n",
" '13:00-13:30',\n",
" '15:30-16:00',\n",
" '17:30-18:00'],\n",
" 'friday': ['04:30-05:00', '18:30-19:00', '19:30-20:00'],\n",
" 'tuesday': ['05:30-06:00', '19:30-20:00', '20:00-20:30'],\n",
" 'saturday': ['06:30-07:00',\n",
" '07:30-08:00',\n",
" '09:00-09:30',\n",
" '11:30-12:00',\n",
" '13:00-13:30',\n",
" '16:00-16:30',\n",
" '17:00-17:30']},\n",
" 'Internet & web technologies (IWT)': {'sunday': ['06:00-06:30',\n",
" '09:00-09:30',\n",
" '11:00-11:30',\n",
" '12:00-12:30',\n",
" '14:30-15:00'],\n",
" 'thursday': ['14:30-15:00', '15:30-16:00', '19:30-20:00'],\n",
" 'friday': ['04:00-04:30',\n",
" '17:00-17:30',\n",
" '17:30-18:00',\n",
" '18:00-18:30',\n",
" '19:00-19:30'],\n",
" 'monday': ['05:30-06:00',\n",
" '17:00-17:30',\n",
" '18:00-18:30',\n",
" '18:30-19:00',\n",
" '20:30-21:00'],\n",
" 'saturday': ['07:00-07:30',\n",
" '09:30-10:00',\n",
" '11:00-11:30',\n",
" '12:00-12:30',\n",
" '17:30-18:00'],\n",
" 'wednesday': ['19:30-20:00', '20:00-20:30'],\n",
" 'tuesday': ['05:00-05:30', '17:00-17:30', '18:00-18:30', '20:30-21:00']},\n",
" 'Object oriented concepts (OOC)': {'tuesday': ['04:00-04:30',\n",
" '16:30-17:00',\n",
" '18:30-19:00'],\n",
" 'saturday': ['06:00-06:30', '14:30-15:00', '15:30-16:00'],\n",
" 'friday': ['05:30-06:00', '16:00-16:30', '20:00-20:30'],\n",
" 'monday': ['05:00-05:30', '19:00-19:30'],\n",
" 'sunday': ['06:30-07:00',\n",
" '09:30-10:00',\n",
" '10:00-10:30',\n",
" '10:30-11:00',\n",
" '14:00-14:30',\n",
" '15:00-15:30',\n",
" '17:00-17:30'],\n",
" 'wednesday': ['05:00-05:30', '05:30-06:00'],\n",
" 'thursday': ['05:00-05:30', '05:30-06:00']},\n",
" 'Infromation system & data modeling (ISDM)': {'thursday': ['04:30-05:00',\n",
" '16:00-16:30',\n",
" '19:00-19:30'],\n",
" 'friday': ['20:30-21:00'],\n",
" 'sunday': ['08:00-08:30', '11:30-12:00', '13:30-14:00'],\n",
" 'monday': ['04:00-04:30', '16:00-16:30', '16:30-17:00'],\n",
" 'wednesday': ['20:30-21:00'],\n",
" 'tuesday': ['04:30-05:00', '16:00-16:30', '17:30-18:00'],\n",
" 'saturday': ['10:00-10:30', '14:00-14:30']},\n",
" 'English for academic purpose (EAP)': {'tuesday': ['19:00-19:30'],\n",
" 'thursday': ['14:00-14:30', '17:00-17:30', '18:00-18:30'],\n",
" 'saturday': ['08:00-08:30',\n",
" '08:30-09:00',\n",
" '10:30-11:00',\n",
" '12:30-13:00',\n",
" '13:30-14:00',\n",
" '15:00-15:30',\n",
" '16:30-17:00'],\n",
" 'wednesday': ['18:00-18:30'],\n",
" 'friday': ['05:00-05:30', '16:30-17:00'],\n",
" 'sunday': ['16:00-16:30', '16:30-17:00']}}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"available_times = {\n",
" \"monday\":[\n",
" \"04:00-6:00\",\n",
" \"16:00-21:00\"\n",
" ],\n",
" \"tuesday\":[\n",
" \"04:00-6:00\",\n",
" \"16:00-21:00\"\n",
" ],\n",
" \"wednesday\":[\n",
" \"04:00-6:00\",\n",
" \"18:00-21:00\"\n",
" ],\n",
" \"thursday\":[\n",
" \"04:00-6:00\",\n",
" \"14:00-21:00\"\n",
" ],\n",
" \"friday\":[\n",
" \"04:00-6:00\",\n",
" \"16:00-21:00\"\n",
" ],\n",
" \"saturday\":[\n",
" \"06:00-18:00\"\n",
" ],\n",
" \"sunday\":[\n",
" \"06:00-18:00\"\n",
" ]\n",
" }\n",
"\n",
"\n",
"student_details = [\n",
" {\n",
" \"Subject name & PDF\":\"English for academic purpose (EAP)\",\n",
" \"Credit points\":3,\n",
" \"Student Level\":\"Easy\",\n",
" \"Total Chapters\":6,\n",
" \"Studied Already\":4\n",
" },\n",
" {\n",
" \"Subject name & PDF\":\"Infromation system & data modeling (ISDM)\",\n",
" \"Credit points\":4,\n",
" \"Student Level\":\"Easy\",\n",
" \"Total Chapters\":8,\n",
" \"Studied Already\":5\n",
" },\n",
" {\n",
" \"Subject name & PDF\":\"Internet & web technologies (IWT)\",\n",
" \"Credit points\":4,\n",
" \"Student Level\":\"Average\",\n",
" \"Total Chapters\":9,\n",
" \"Studied Already\":5\n",
" },\n",
" {\n",
" \"Subject name & PDF\":\"Object oriented concepts (OOC)\",\n",
" \"Credit points\":4,\n",
" \"Student Level\":\"Hard\",\n",
" \"Total Chapters\":7,\n",
" \"Studied Already\":2\n",
" },\n",
" {\n",
" \"Subject name & PDF\":\"Software process modeling (SPM)\",\n",
" \"Credit points\":3,\n",
" \"Student Level\":\"Hard\",\n",
" \"Total Chapters\":5,\n",
" \"Studied Already\":3\n",
" }\n",
" ]\n",
"\n",
"assign_slots_per_week(available_times, student_details)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
"<style>\n",
" /* Turns off some styling */\n",
" progress {\n",
" /* gets rid of default border in Firefox and Opera. */\n",
" border: none;\n",
" /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
" background-size: auto;\n",
" }\n",
" progress:not([value]), progress:not([value])::-webkit-progress-bar {\n",
" background: repeating-linear-gradient(45deg, #7e7e7e, #7e7e7e 10px, #5c5c5c 10px, #5c5c5c 20px);\n",
" }\n",
" .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
" background: #F44336;\n",
" }\n",
"</style>\n"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"{'head_pose': {'Looking Left': '3.38 %',\n",
" 'Looking Right': '5.84 %',\n",
" 'Looking Up': '24.28 %',\n",
" 'Looking Down': '0.0 %',\n",
" 'Looking Forward': '66.5 %'},\n",
" 'drowsiness': {'Sleepy': '13.11 %', 'Not Sleepy': '86.89 %'},\n",
" 'emotion': {'angry': '16.6 %',\n",
" 'disgust': '1.12 %',\n",
" 'fear': '2.93 %',\n",
" 'happy': '0.28 %',\n",
" 'neutral': '60.95 %',\n",
" 'sad': '17.99 %',\n",
" 'surprise': '0.14 %'}}"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inference_attention_analyzer('videos/111.mp4')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "tf210",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}