Commit 97674ac5 authored by I.K Seneviratne

Committing some implementations in graphical representation of lecture student behavior summary in the Lecturer Home page.
parent c9c450da
@@ -110,7 +110,7 @@ class LectureVideo(models.Model):

    def __str__(self):
        return self.lecture_video_id

# ACTIVITY section

# lecture activity table
class LectureActivity(models.Model):
    lecture_activity_id = models.CharField(max_length=10)
@@ -124,6 +124,30 @@ class LectureActivity(models.Model):

        return self.lecture_activity_id

# this abstract class will define the details for an activity frame group
class LectureActivityFrameGroupDetails(models.Model):
    frame_group = models.CharField(max_length=10)
    phone_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
    listening_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
    writing_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)

    def __str__(self):
        return self.frame_group

    class Meta:
        abstract = True


# this class will contain the activity frame groupings
class LectureActivityFrameGroupings(models.Model):
    name = models.CharField(max_length=15, default="")
    lecture_activity_id = models.ForeignKey(LectureActivity, on_delete=models.CASCADE)
    frame_group_details = models.ArrayField(model_container=LectureActivityFrameGroupDetails)

    def __str__(self):
        return self.name
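
For orientation, a minimal sketch of how a groupings document might be created through this model (the identifiers and percentage values are hypothetical, and djongo's ArrayField is assumed to accept a list of embedded-document dicts):

    # hypothetical usage sketch; values are illustrative only
    activity = LectureActivity.objects.get(lecture_activity_id="AC0001")
    grouping = LectureActivityFrameGroupings(
        name="AC0001_groups",
        lecture_activity_id=activity,
        frame_group_details=[
            {'frame_group': '0-62', 'phone_perct': 12.5,
             'listening_perct': 70.0, 'writing_perct': 17.5},
        ],
    )
    grouping.save()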

# EMOTIONS section

# Lecture emotion report
class LectureEmotionReport(models.Model):
@@ -776,4 +776,19 @@ class GetLectureVideoSummaryTimeLandmarks(APIView):

        return Response({
            "response": time_landmarks
        })


# this API will retrieve lecture activity summary
class GetLectureActivitySummary(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')

        frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
        frame_group_percentages = ar.activity_frame_groupings(video_name, frame_landmarks, frame_group_dict)

        return Response({
            "frame_landmarks": frame_landmarks,
            "frame_group_dict": frame_group_dict,
            "frame_group_percentages": frame_group_percentages
        })
\ No newline at end of file
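
Assuming the route registered in urls.py below, the new endpoint can be exercised with a plain HTTP client; a sketch using requests (the video name is hypothetical):

    import requests

    # query the activity summary endpoint for one lecture video
    res = requests.get(
        "http://127.0.0.1:8000/get-lecture-activity-summary/",
        params={"video_name": "lecture01.mp4"},  # hypothetical video name
    )
    body = res.json()
    print(body["frame_landmarks"])          # e.g. [0, 62, 124, 186, 248, 310, 313]
    print(body["frame_group_percentages"])  # per-group phone/listen/note percentages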
@@ -614,4 +614,128 @@ def get_student_activity_summary_for_period(activities):

    percentages["listening_perct"] = listening_average_perct
    percentages["writing_perct"] = note_taking_average_perct

    return percentages, individual_lec_activities, activity_labels
\ No newline at end of file
    return percentages, individual_lec_activities, activity_labels


# this method will retrieve the activity frame groupings for a lecture
def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")

    np.set_printoptions(suppress=True)

    # load the model (using the `tf` alias consistently)
    model = tf.keras.models.load_model(CLASSIFIER_DIR)
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    # input buffer for a single 224x224 RGB image
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    size = (224, 224)

    # initializing the frame counter
    frame_count = 0

    # class labels
    class_labels = ['Phone checking', 'Listening', 'Note taking']

    # the frame difference (number of frames) for each frame group
    frame_group_diff = {}

    # loop through the frame group dictionary
    for key in frame_group_dict.keys():
        split_values = key.split("-")
        value1 = int(split_values[0])
        value2 = int(split_values[1])
        diff = value2 - value1

        # assign the difference (guard against zero-length groups)
        frame_group_diff[key] = diff if diff > 0 else 1

    # looping through the frames
    for frame in os.listdir(EXTRACTED_DIR):
        # getting the frame folder
        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)

        # initializing the per-frame counters
        phone_count = 0
        note_count = 0
        listen_count = 0

        # looping through the detections in each frame
        for detections in os.listdir(FRAME_FOLDER):

            # checking whether the image contains only one person
            if "frame" not in detections:
                # get the label for this image
                IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
                image = cv2.imread(IMAGE_PATH)
                image = cv2.resize(image, size)

                image_array = np.asarray(image)
                normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

                # load the image into the array
                data[0] = normalized_image_array

                # run the inference
                prediction = model.predict(data)

                # get the predicted label
                label = class_labels[prediction.argmax()]

                # increment the count based on the label
                if label == class_labels[0]:
                    phone_count += 1
                elif label == class_labels[1]:
                    listen_count += 1
                elif label == class_labels[2]:
                    note_count += 1

        # finding the time landmark range that the current frame falls in
        # (enumerate avoids list.index(), which returns only the first match
        # and misbehaves when landmark values repeat)
        for index, i in enumerate(frame_landmarks):
            j = index + 1

            # checking whether the next index is within the range
            if j < len(frame_landmarks):
                next_value = frame_landmarks[j]

                # checking the correct time landmark range
                if (frame_count >= i) and (frame_count < next_value):
                    frame_name = "{}-{}".format(i, next_value)
                    frame_group_dict[frame_name]['phone_count'] += phone_count
                    frame_group_dict[frame_name]['listen_count'] += listen_count
                    frame_group_dict[frame_name]['note_count'] += note_count

        # increment the frame count
        frame_count += 1

    # calculate the percentage values
    for key in frame_group_dict.keys():
        frame_group_details = frame_group_dict[key]
        frame_group_phone_count = frame_group_details['phone_count']
        frame_group_listen_count = frame_group_details['listen_count']
        frame_group_note_count = frame_group_details['note_count']

        frame_diff = int(frame_group_diff[key])

        frame_group_phone_perct = float(frame_group_phone_count / frame_diff) * 100
        frame_group_listen_perct = float(frame_group_listen_count / frame_diff) * 100
        frame_group_note_perct = float(frame_group_note_count / frame_diff) * 100

        # assign the values to the same dictionary
        frame_group_dict[key]['phone_perct'] = frame_group_phone_perct
        frame_group_dict[key]['listen_perct'] = frame_group_listen_perct
        frame_group_dict[key]['note_perct'] = frame_group_note_perct

    # return the dictionary
    return frame_group_dict
\ No newline at end of file
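
To make the percentage step concrete, a worked example under assumed counts: if a hypothetical group '0-62' spans 62 frames (frame_group_diff['0-62'] = 62) and accumulates phone_count = 31, listen_count = 93 and note_count = 62 over all detections in those frames, then:

    phone_perct  = (31 / 62) * 100   # 50.0
    listen_perct = (93 / 62) * 100   # 150.0
    note_perct   = (62 / 62) * 100   # 100.0

Counts accumulate over every detected student in every frame of the group while the divisor is only the frame span, so these "percentages" can exceed 100 whenever frames contain more than one detection each.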
@@ -115,4 +115,67 @@ def getTimeLandmarks(video_name):

        time_landmarks.append(str(real_duration))
        time_landmarks_values.append(duration)

    return time_landmarks
\ No newline at end of file
    return time_landmarks


# this method will retrieve the frame landmarks for a lecture video
def getFrameLandmarks(video_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))

    # open the video for iteration
    video = cv2.VideoCapture(VIDEO_PATH)

    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
    int_no_of_frames = int(no_of_frames)
    fps = int(video.get(cv2.CAP_PROP_FPS))

    # calculating the duration in seconds
    duration = int(no_of_frames / fps)

    # define the number of time gaps required
    THRESHOLD_GAP = 5

    # define a frame gap
    frame_gap = int(int_no_of_frames / THRESHOLD_GAP)

    initial_frame_landmark = 0

    # define frame landmarks
    frame_landmarks = [0]

    # loop through the threshold gap limit to define the frame landmarks
    for i in range(THRESHOLD_GAP):
        initial_frame_landmark += frame_gap
        frame_landmarks.append(initial_frame_landmark)

    # append the final frame
    frame_landmarks.append(int_no_of_frames)

    # defining the frame group list
    frame_group_list = []

    # creating frame group names from consecutive landmark pairs
    for index, landmark in enumerate(frame_landmarks):
        j = index + 1

        # if the next index is within the range of the list
        if j < len(frame_landmarks):
            next_value = frame_landmarks[j]
            group_name = "{}-{}".format(landmark, next_value)

            # append to the list
            frame_group_list.append(group_name)

    # define a dictionary to hold the frame groups
    frame_group_dict = {}

    # loop through the group names to create a dictionary
    for name in frame_group_list:
        frame_group_dict[name] = {'phone_count': 0, 'listen_count': 0, 'note_count': 0}

    return frame_landmarks, frame_group_dict
\ No newline at end of file
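
A worked example under assumed inputs: for a hypothetical 313-frame video with THRESHOLD_GAP = 5,

    frame_gap        = int(313 / 5)   # 62
    frame_landmarks  = [0, 62, 124, 186, 248, 310, 313]
    frame_group_dict = {
        '0-62':    {'phone_count': 0, 'listen_count': 0, 'note_count': 0},
        '62-124':  {'phone_count': 0, 'listen_count': 0, 'note_count': 0},
        # ... '124-186', '186-248', '248-310' ...
        '310-313': {'phone_count': 0, 'listen_count': 0, 'note_count': 0},
    }

When the frame count divides evenly by THRESHOLD_GAP the final landmark repeats (e.g. 310 followed by 310), yielding a zero-length group; the `diff if diff > 0 else 1` guard in activity_frame_groupings above exists to keep that case from dividing by zero.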

# Generated by Django 2.2.11 on 2020-10-07 12:29

import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields


class Migration(migrations.Migration):

    dependencies = [
        ('FirstApp', '0008_auto_20200825_1821'),
    ]

    operations = [
        migrations.CreateModel(
            name='LectureActivityFrameGroupDetails',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('frame_group', models.CharField(max_length=10)),
                ('phone_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
                ('listening_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
                ('writing_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
            ],
        ),
        migrations.CreateModel(
            name='LectureActivityFrameGroupings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=15)),
                ('frame_group_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureActivityFrameGroupDetails)),
                ('lecture_activity_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureActivity')),
            ],
        ),
    ]
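
Since djongo maps ArrayField to an embedded array rather than a separate join table, the persisted grouping document should look roughly like the following (an assumed shape with illustrative values, written as a Python dict):

    # assumed MongoDB document shape for a LectureActivityFrameGroupings row
    {
        "name": "AC0001_groups",
        "lecture_activity_id_id": 1,   # Django stores the FK under <field_name>_id
        "frame_group_details": [
            {"frame_group": "0-62", "phone_perct": 12.5,
             "listening_perct": 70.0, "writing_perct": 17.5},
        ],
    }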
@@ -249,12 +249,37 @@

        //this function will handle the 'summary' button
        $('#summary_btn').click(function () {

        //this function will handle the activity 'summary' button
        $('#activity_summary_btn').click(function () {

            //fetch the activity summary details
            fetch('http://127.0.0.1:8000/get-lecture-activity-summary/?video_name=' + global_video_name)
                .then((res) => res.json())
                .then((out) => alert(out.frame_landmarks))
                .catch((err) => alert('error: ' + err));

            //open the modal
            $('#summaryModal').modal();

            let test_date = new Date(0, 0, 0, 0, 12, 30, 0);

            //render the chart onto the modal body
            renderChart();
        });

        //this function will handle the emotion 'summary' button
        $('#emotion_summary_btn').click(function () {

            //open the modal
            $('#summaryModal').modal();

            //render the chart onto the modal body
            renderChart();
        });

        //this function will handle the gaze 'summary' button
        $('#gaze_summary_btn').click(function () {

            //open the modal
            $('#summaryModal').modal();

            //render the chart onto the modal body
@@ -1109,6 +1134,12 @@

                </div>
                <!-- end of progress area (activity) -->

                <hr>

                <!-- button to view activity summary -->
                <button type="button" class="btn btn-primary float-right" id="activity_summary_btn">
                    Summary
                </button>
                <!-- end of button to view activity summary -->

            </li>
            <!-- end of the activity list item -->
@@ -1172,6 +1203,12 @@

                </div>
                <!-- end of emotion progress bars -->

                <hr>

                <!-- button to view emotion summary -->
                <button type="button" class="btn btn-primary float-right" id="emotion_summary_btn">
                    Summary
                </button>
                <!-- end of button to view emotion summary -->

            </li>
            <!-- end of the emotion list item -->
@@ -1240,16 +1277,20 @@

                </div>
                <!-- end of progress area (gaze) -->

                <hr>

            </li>
            <!-- end of the gaze list item -->

            <!-- button to view a summary -->
            <li class="list-group-item">
                <button type="button" class="btn btn-primary float-right" id="summary_btn">

                <!-- button to view gaze summary -->
                <button type="button" class="btn btn-primary float-right" id="gaze_summary_btn">
                    Summary
                </button>
                <!-- end of button to view gaze summary -->

            </li>
            <!-- end of the gaze list item -->

        </ul>
@@ -178,6 +178,10 @@ urlpatterns = [

    # retrieves lecture video summary time landmarks
    url(r'^get-lecture-video-summary-time-landmarks/$', api.GetLectureVideoSummaryTimeLandmarks.as_view()),

    # retrieves lecture activity summary
    url(r'^get-lecture-activity-summary/$', api.GetLectureActivitySummary.as_view()),

    # routers
    # path('', include(router.urls)),

    path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))