Commit 257d1d7f authored by kulvinu

KnowledgeIQ Implementation

parent 897d94ae
......@@ -14,6 +14,7 @@ import numpy as np
from trainAnn import headPose, headDistence, facialExperssions, blinkCount, detector
from keras.models import load_model
from sklearn.preprocessing import OneHotEncoder
from mlengine import transform_audio
# Attention classes
attClass = {0: 'Low Attention', 1: 'Mid Attention', 2: 'High Attention'}
......@@ -323,6 +324,32 @@ def predictShapePattern():
# print(prediction2)
return jsonify(response)
# Knowledge IQ evaluation
@app.route('/predictKnowledgeIq', methods = ['POST','GET'])
@cross_origin()
def predictKnowledgeIq():
    if request.method == 'POST':
        file = request.files.get('file')
        questionIndex = request.form.get('questionIndex')
        print(questionIndex)
        if file is None or file.filename == "":
            return jsonify({'error': 'no file'})
        try:
            file.save("./" + file.filename)
            prediction = transform_audio(file.filename)
            data = {'prediction': prediction}
            db.db['knowledgeIQScore'].insert_one({
                "activityName": "Colour Numbers",
                "questionIndex": questionIndex,
                "transcription": prediction,
            })
            return jsonify(data)
        except Exception:
            return jsonify({'error': 'Error during pipeline execution'})
    return jsonify({'result': 'test'})
# Running app
if __name__ == "__main__":
......
import wave
import struct
import numpy as np
import librosa
import librosa.display
import IPython.display as ipd
import matplotlib.pyplot as plt
def read_file(file_name, sample_rate):
    wav_file = wave.open(file_name, mode="rb")
    channels = wav_file.getnchannels()
    num_frames = wav_file.getnframes()
    if wav_file.getframerate() != sample_rate:
        raise ValueError("Audio file should have a sample rate of %d. got %d" % (sample_rate, wav_file.getframerate()))
    samples = wav_file.readframes(num_frames)
    wav_file.close()
    frames = struct.unpack('h' * num_frames * channels, samples)
    if channels == 2:
        print("Picovoice processes single-channel audio but stereo file is provided. Processing left channel only.")
    return frames[::channels]
## Loading audio
dataset_dir = '/datasets/live_recordings/'
audio_name = 'one.wav'
y, sample_rate = librosa.load(dataset_dir + audio_name, res_type='kaiser_fast')
# Play the original audio
print("Original audio - downsampled by librosa")
ipd.Audio(y, rate=sample_rate)
#------------------------------------------------------------------------------------
## Trim the beginning and ending silence
y_trimmed, _ = librosa.effects.trim(y)
print("Original duration: ", librosa.get_duration(y))
print("Trimmed duration: ", librosa.get_duration(y_trimmed))
figure = plt.figure()
# Trimmed audio - without silence
trimmed = figure.add_subplot(2, 1, 2)
librosa.display.waveplot(y_trimmed, sr=sample_rate, color='r')
plt.title('Trimmed')
# Original audio - with silence at the end
original = figure.add_subplot(2, 1, 1,sharex=trimmed)
librosa.display.waveplot(y, sr=sample_rate)
plt.title('Original')
plt.tight_layout()
plt.show()
# Play the original audio
print("Trimmed audio")
ipd.Audio(y_trimmed, rate=sample_rate)
###Audio Segmentation into windows
from pydub import AudioSegment
from pydub.silence import split_on_silence
sound_file = AudioSegment.from_wav("one.wav")
audio_chunks = split_on_silence(sound_file, min_silence_len=500, silence_thresh=-40)
print ("AudioChunks", audio_chunks)
for i, chunk in enumerate(audio_chunks):
    out_file = "./a/{0}.wav".format(i)
    print("exporting", out_file)
    chunk.export(out_file, format="wav")
import difflib
colourNumbers = ["5 4", "5 6 7", "5 6 7 8"]
digitSpan = ["5 4", "5 6 7", "5 6 7 8"]

def calKnowledgeIqActivityScore(activity, questionIndex, prediction):
    prediction = prediction.replace(".", "")
    cal_score = []
    if activity == "Colour Numbers":
        if colourNumbers[questionIndex] == prediction:
            score = 100
        else:
            score = 0
        temp = difflib.SequenceMatcher(None, colourNumbers[questionIndex], prediction)
        print(temp.get_matching_blocks())
        print('Similarity Score: ', temp.ratio())
        cal_score = [score, temp.ratio() * 100]
    elif activity == "Digit Span":
        if digitSpan[questionIndex] == prediction:
            score = 100
        else:
            score = 0
        temp = difflib.SequenceMatcher(None, digitSpan[questionIndex], prediction)
        print(temp.get_matching_blocks())
        print('Similarity Score: ', temp.ratio())
        cal_score = [score, temp.ratio() * 100]
    else:
        cal_score = [0, 0]
        print('Score can only be calculated for an activity registered in the system!')
    return cal_score
\ No newline at end of file
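For reference, a minimal sketch of calling this scoring helper directly, assuming an integer question index and a transcription string from the digit model; an exact match returns [100, 100.0], a partial match returns 0 plus difflib's similarity ratio scaled to 100.

from knowledgeIqScoreCalculation import calKnowledgeIqActivityScore

# exact match against colourNumbers[1] == "5 6 7": accuracy 100, similarity 100.0
print(calKnowledgeIqActivityScore("Colour Numbers", 1, "5 6 7"))

# partial match against digitSpan[2] == "5 6 7 8": accuracy 0, similarity ~83.3
print(calKnowledgeIqActivityScore("Digit Span", 2, "5 6 7"))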
import io
import tensorflow as tf
from tensorflow import keras
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import pickle
import numpy as np
from scipy.io import wavfile as wav
import scipy
import scipy.signal as sps
from python_speech_features import mfcc
from python_speech_features import logfbank
from tensorflow.keras.models import Sequential, save_model, load_model
from audioPreprocessing import readFile
modelfilepath = './saved_model'
datafilepath = './data'
size = 48
DIGITS = ["0", "1", "2", "3","4","5","6","7","8","9"]
model = load_model(modelfilepath, compile=True)
def transform_audio(file):
    # read the .wav file and resample it to 16 kHz
    (rate, sig) = readFile(file)
    number_of_samples = round(len(sig) * float(16000) / rate)
    sig = sps.resample(sig, number_of_samples)
    # compute MFCC features and encode the signal as a 48x13 matrix
    mfcc_feat = mfcc(sig, rate, nfft=2048)
    # return a new numpy array with the specified shape
    mfcc_feat = np.resize(mfcc_feat, (size, 13))
    # run the trained model on the feature matrix
    pred = get_prediction(mfcc_feat)
    return pred

def get_prediction(X):
    pred = model.predict(X.reshape(-1, size, 13, 1))
    prediction = DIGITS[np.argmax(pred)]
    print("\n\033[1mPredicted digit sound: %.0f" % pred.argmax(), "\033[0m \n ")
    print("Predicted probability array:")
    print(pred)
    return prediction
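A quick manual check of this pipeline, as a sketch only: the server imports the module as preProcessor, and the file name below is a hypothetical recording assumed to sit next to the script.

from preProcessor import transform_audio

# hypothetical single-digit recording; transform_audio resamples it,
# builds the 48x13 MFCC matrix and returns the predicted digit as a string
digit = transform_audio("five.wav")
print("Transcribed digit:", digit)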
......@@ -11,6 +11,7 @@ import datetime
import base64
import io
from preProcessor import transform_audio
from knowledgeIqScoreCalculation import calKnowledgeIqActivityScore
# Libraries required for model utilization
import cv2 as cv
import numpy as np
......@@ -20,6 +21,8 @@ from controler import captureImage
from keras.models import load_model
from sklearn.preprocessing import OneHotEncoder
import logging
# Attention classes
attClass = {0: 'Low Attention', 1: 'Mid Attention', 2: 'High Attention'}
......@@ -35,6 +38,8 @@ app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
logging.basicConfig(level=logging.INFO)
logging.getLogger('flask_cors').level = logging.DEBUG
# for prediction
......@@ -410,9 +415,9 @@ def mentalChromScores():
})
# Knowledge IQ evaluation
@app.route('/predictKnowledgeIq', methods = ['POST','GET'])
@cross_origin()
def predictKnowledgeIq():
@app.route('/predictColourNumbers', methods = ['POST','GET'])
@cross_origin(origins="localhost")
def predictColourNames():
    if request.method == 'POST':
        file = request.files.get('file')
        questionIndex = request.form.get('questionIndex')
......@@ -428,9 +433,38 @@ def predictKnowledgeIq():
            db.db['knowledgeIQScore'].insert_one({
                "activityName": "Colour Numbers",
                "questionIndex": questionIndex,
                "transcription": prediction
            })
            return jsonify({'success': 'Data added to the database successfully'})
        except Exception:
            return jsonify({'error': 'Error during pipeline execution'})
    return jsonify({'result': 'test'})
@app.route('/predictDigitSpan', methods = ['POST','GET'])
@cross_origin()
def predictDigitSpan():
    if request.method == 'POST':
        file = request.files.get('file')
        questionIndex = request.form.get('questionIndex')
        print(questionIndex)
        activity = "Digit Span"
        # score = calKnowledgeIqActivityScore(activity, questionIndex, prediction)
        if file is None or file.filename == "":
            return jsonify({'error': 'no file'})
        try:
            file.save("./" + file.filename)
            prediction = transform_audio(file.filename)
            data = {'prediction': prediction}
            db.db['knowledgeIQScore'].insert_one({
                "activityName": "Digit Span",
                "questionIndex": questionIndex,
                "transcription": prediction,
                # "accuracyScore": score[0],
                # "similarityScore": score[1]
            })
            return jsonify(data)
        except Exception:
            return jsonify({'error': 'Error during pipeline execution'})
    return jsonify({'result': 'test'})
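To exercise the new endpoints during development, a sketch using the requests library; the host/port and the DS.wav file name are assumptions, not values taken from the project configuration.

import requests

# assumption: the Flask app is running locally on port 5000
url = "http://localhost:5000/predictDigitSpan"
with open("DS.wav", "rb") as f:
    files = {"file": ("DS.wav", f, "audio/wav")}
    data = {"questionIndex": "1"}
    response = requests.post(url, files=files, data=data)
print(response.json())  # e.g. {"prediction": "5"}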
......
......@@ -5,6 +5,7 @@ import {
Arithmetic,
ColourNumbers,
DigitSpan,
ColourNumbersController,
DigitSpanController,
VideoPlayerScreen,
} from "../index";
......@@ -96,7 +97,7 @@ const ActivityContainer = () => {
className="activity-container-navigator-item"
onClick={() => {
setCurrentActivityNo(6);
setIsGuideVideoClicked(true);
// setIsGuideVideoClicked(true);
changeColor();
}}
>
......@@ -107,7 +108,7 @@ const ActivityContainer = () => {
className="activity-container-navigator-item"
onClick={() => {
setCurrentActivityNo(7);
setIsGuideVideoClicked(true);
// setIsGuideVideoClicked(true);
changeColor();
}}
>
......@@ -138,7 +139,7 @@ const ActivityContainer = () => {
<PairCancerlation nextActivity={nextActivityHandler} />
)}
{currentActivityNo === 6 && (
<ColourNumbers nextActivity={nextActivityHandler} />
<ColourNumbersController nextActivity={nextActivityHandler} />
)}
{currentActivityNo === 7 && (
<DigitSpanController nextActivity={nextActivityHandler} />
......
......@@ -5,7 +5,7 @@ import Box from "@mui/material/Box";
import "./ColourNumbers.css";
import { lightBlue, yellow } from "@mui/material/colors";
import { createTheme, ThemeProvider, styled } from "@mui/material/styles";
import { RecordingHandler } from "../../reasoningIqEval/recorder/Recorder";
import { RecordingHandler } from "../recorder/Recorder";
import Timer from "../../reasoningIqEval/timer/Timer";
const Item = styled(Paper)(({ theme }) => ({
textAlign: "center",
......@@ -18,7 +18,7 @@ const Item = styled(Paper)(({ theme }) => ({
const ColourNumbers = ({ nextActivity }) => {
const ColourNumbers = ({ GoNext, isAllCompleted }) => {
const [allCompleted, setAllCompleted] = useState(false);
const [activityIndex, setActivityIndex] = useState(1);
......@@ -26,13 +26,15 @@ const switchActivityHandler = () => {
let activityNo = activityIndex + 1;
setActivityIndex(activityNo);
// switch question
console.log('switch');
console.log('switch');
};
useEffect(() => {
setTimeout(() => {
}, 5000);
console.log('rec');
RecordingHandler(`CN.wav`, activityIndex);
RecordingHandler(`CN.wav`, activityIndex, 'CN');
}, [activityIndex]);
return (
......@@ -66,10 +68,11 @@ useEffect(() => {
</Box>
</Grid>
</Grid>
{!allCompleted && <Timer switchActivity={switchActivityHandler} />}
</div>
);
};
export default ColourNumbers;
export default React.memo(ColourNumbers);
import React, { useState, useEffect } from "react";
import { ColourNumbers } from "../../..";
import VideoPlayerScreen from "./videoPlayer";
const ColourNumbersController = () => {
const activityCount = 4;
const [currentActivityNo, setCurrentActivityNo] = useState(1);
const [isAudioCompleted, setIsAudioCompleted] = useState(false);
const [isAllCompleted, setIsAllCompleted] = useState(false);
const nextActivityHandler = () => {
if (currentActivityNo < activityCount - 1) {
setCurrentActivityNo(currentActivityNo + 1);
} else {
setIsAllCompleted(true);
}
setIsAudioCompleted(false);
};
const StartRecording = (state) => {
setTimeout(() => {
setIsAudioCompleted(true);
}, 1000);
};
return (
<>
<div className="w-full h-full">
<div>
{!isAudioCompleted ? (
<VideoPlayerScreen
currentActivtyIndex={currentActivityNo}
changeScreen={StartRecording}
/>
) : (
<>
<ColourNumbers
GoNext={nextActivityHandler}
isAllCompleted={isAllCompleted}
currentActivityNo={currentActivityNo}
/>
</>
)}
</div>
</div>
</>
);
};
export default ColourNumbersController;
.activity-container-angry-grid {
display: grid;
grid-template-rows: 1fr 1fr 1fr;
grid-template-columns: 1fr 1fr 1fr 1fr 1fr;
gap: 0px;
height: 100%;
padding: 20px;
}
#activity-container-item-0 {
grid-row-start: 1;
grid-column-start: 1;
grid-row-end: 4;
grid-column-end: 2;
margin-right: 10px;
border: solid gainsboro;
border-radius: 5px;
padding: 5px;
}
\ No newline at end of file
import React, { useState, useEffect } from "react";
import toast, { Toaster } from "react-hot-toast";
import { ContainerCard } from "../../../../components/index";
import Video from "./videos";
const VideoPlayerScreen = ({ currentActivtyIndex, changeScreen }) => {
return (
<>
<div className="student-page-angry-grid">
<div id="student-page-item-0">
<ContainerCard>
<div className="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8">
<div className="lg:text-center">
<h2 className="text-base text-[#3d59c1] font-semibold tracking-wide uppercase mt-4">
ICAAT
</h2>
</div>
<div className="mt-10 sm:mt-0">
<Video currentActivtyIndex={currentActivtyIndex} changeScreen={changeScreen} />
</div>
</div>
</ContainerCard>
</div>
</div>
</>
);
};
export default VideoPlayerScreen;
import React, { useState, useEffect, useRef } from "react";
import Vid4 from "../../../../assets/video/vid4.mp4";
const Video = ({ currentActivtyIndex, changeScreen }) => {
const [videoLink, setVideoLink] = useState("");
const vidRef = useRef();
useEffect(() => {
switch (currentActivtyIndex) {
case 1:
setVideoLink(Vid4);
break;
case 2:
setVideoLink(Vid4);
break;
case 3:
setVideoLink(Vid4);
break;
case 4:
setVideoLink(Vid4);
break;
case 5:
setVideoLink("/Videos/vid1.mp4");
break;
default:
}
const timer = setTimeout(() => {
vidRef.current.play();
}, 3000);
return () => clearTimeout(timer);
}, []);
const myCallback = () => {
changeScreen(false);
}
return (
<div className="flex items-center">
{videoLink && (
<>
<video ref={vidRef} className="w-full h-full p-2" controls onEnded={() => myCallback()}>
<source src={videoLink} type="video/mp4" />
</video>
</>
)}
</div>
);
};
export default Video;
......@@ -8,82 +8,22 @@ import VolumeUpIcon from "@mui/icons-material/VolumeUp";
import VolumeOffIcon from "@mui/icons-material/VolumeOff";
import Picture from "../../../../assets/digitspan.jpg";
import Button from "@mui/material/Button";
// import Instruction1 from '../../../../assets/audio/audio.aac';
// import Instruction2 from '../../../../assets/audio/audio2.aac';
import Timer from "../../reasoningIqEval/timer/Timer";
import { RecordingHandler } from "../recorder/Recorder";
import "./DigitSpan.css";
const DigitSpan = ({ GoNext, isAllCompleted }) => {
const [buttonClicked, setButtonClicked] = useState(1);
const [audio, setAudio] = useState("Instruction1");
const [allCompleted, setAllCompleted] = useState(false);
const DigitSpan = ({ GoNext, isAllCompleted, currentActivityNo }) => {
const [activityIndex, setActivityIndex] = useState(1);
// const switchActivityHandler = ({switchActivity}) => {
// let activityNo = activityIndex + 1;
// setActivityIndex(activityNo);
// // switch question
// console.log("switch");
// // if(activityNo < 4){
// // }
// // else{
// // setAllCompleted(true);
// // }
// };
// const audioFiles =[{source: Instruction1},
// {source: Instruction2}];
// const audioFiles2 =[Instruction1,Instruction2]
// const playAudio = () => {
// const audioPromise = this.audio.play()
// if (audioPromise !== undefined) {
// audioPromise
// .then(_ => {
// // autoplay started
// console.log('Audio Playing')
// })
// .catch(err => {
// // catch dom exception
// console.info(err)
// })
// }
// }
useEffect(() => {
console.log("rec");
RecordingHandler(`DS.wav`, activityIndex);
}, [activityIndex]);
// const switchAudio = () => {
RecordingHandler(`DS.wav`, currentActivityNo, 'DS');
}, [currentActivityNo]);
// if (audio < audioFiles.length - 1) {
// setActivityIndex(activityIndex+1)
// this.setState({
// audio: audioFiles2[activityIndex]
// });
// //restart playlist
// } else {
// console.log('ERROR');
// }
// }
// useEffect(() => {
// console.log('rec');
// RecordingHandler(`CN.wav`, activityIndex);
// }, [activityIndex]);
return (
<div className="container">
{/* <div class="flex flex-wrap justify-center">
<img
alt=""
class="max-w-sm h-auto shadow-lg"
src={Picture}
/>
</div> */}
{isAllCompleted && (
<div className="w-4/6 h-4/6 m-auto">
{" "}
......@@ -100,30 +40,6 @@ const DigitSpan = ({ GoNext, isAllCompleted }) => {
justifyContent="center"
alignItems="center"
>
{/* {buttonClicked === 1 && (
<Avatar sx={{ bgcolor: green[500], width: 100, height: 100}}
onClick={()=> {setButtonClicked(2)}}>
<MicSharpIcon sx={{ fontSize: 60 }}/>
</Avatar>
)}
{buttonClicked === 2 && (
<Avatar sx={{ bgcolor: red[500], width: 100, height: 100 }}
onClick={()=> {setButtonClicked(1)}}>
<MicOffSharpIcon sx={{ fontSize: 60 }}/>
</Avatar>
)} */}
{/* {buttonClicked === 1 && (
<Avatar sx={{ bgcolor: green[500], width: 100, height: 100}}
onClick={()=> {}}>
<VolumeUpIcon sx={{ fontSize: 60 }}/>
</Avatar>
)}
{buttonClicked === 2 && (
<Avatar sx={{ bgcolor: red[500], width: 100, height: 100 }}
onClick={()=> {setButtonClicked(1)}}>
<VolumeOffIcon sx={{ fontSize: 60 }}/>
</Avatar>
)} */}
{!isAllCompleted && <Timer switchActivity={GoNext} />}
</Stack>
{/* <Button
......
......@@ -39,6 +39,7 @@ const DigitSpanController = () => {
<DigitSpan
GoNext={nextActivityHandler}
isAllCompleted={isAllCompleted}
currentActivityNo={currentActivityNo}
/>
</>
)}
......
......@@ -3,11 +3,16 @@ import axios from "axios";
import baseURL from "../../../../config/api";
import API from "../../../../config/api";
export const RecordingHandler = async (fileName, questionIndex) => {
export const RecordingHandler = async (
fileName,
questionIndex,
activityName
) => {
let stream = await navigator.mediaDevices.getUserMedia({
video: false,
audio: true,
});
let recorder = new RecordRTC.StereoAudioRecorder(stream, {
type: "audio",
mimeType: "audio/wav",
......@@ -21,23 +26,28 @@ export const RecordingHandler = async (fileName, questionIndex) => {
await recorder.stop(function () {
let blob = recorder.blob;
processRecording(blob, fileName, questionIndex);
processRecording(blob, fileName, activityName, questionIndex);
});
stream.getTracks().forEach(function (track) {
track.stop();
});
};
const processRecording = (blob, fileName, questionIndex) => {
const processRecording = (blob, fileName, activityName, questionIndex) => {
let recordedFile = new File([blob], fileName);
uploadRecording(recordedFile, fileName, questionIndex);
uploadRecording(recordedFile, fileName, activityName, questionIndex);
};
const uploadRecording = async (file, fileName, questionIndex) => {
const uploadRecording = async (file, fileName, activityName, questionIndex) => {
let data = new FormData();
let candidateID = 0;
if (localStorage) {
candidateID = localStorage.getItem("candidateID");
}
console.log(questionIndex);
data.append("file", file, fileName);
data.append("questionIndex", questionIndex);
data.append("candidateID", candidateID);
const config = {
headers: {
......@@ -46,11 +56,32 @@ const uploadRecording = async (file, fileName, questionIndex) => {
},
};
await API.post(`predictKnowledgeIq`, data, config)
.then((res) => {
console.log(res, "DONE" + new Date().toISOString);
})
.catch((err) => {
console.log(err, "ERROR" + new Date().toISOString);
});
// if (activityName === 'DS'){
// await API.post(`predictDigitSpan`, data, config)
// .then((res) => {
// console.log(res, "DONE" + new Date().toISOString);
// })
// .catch((err) => {
// console.log(err, "ERROR" + new Date().toISOString);
// });
// }
// if (activityName === 'CN'){
// await API.post(`predictColourNumbers`, data, config)
// .then((res) => {
// console.log(res, "DONE" + new Date().toISOString);
// })
// .catch((err) => {
// console.log(err, "ERROR" + new Date().toISOString);
// });
// }
await API.post(`predictColourNumbers`, data, config)
.then((res) => {
console.log(res, "DONE " + new Date().toISOString());
})
.catch((err) => {
console.log(err, "ERROR " + new Date().toISOString());
});
};
......@@ -7,6 +7,7 @@ export { default as PictureConcept } from "./activities/reasoningIqEval/pictureC
export { default as Arithmetic } from "./activities/reasoningIqEval/arithmetic/Arithmetic";
export { default as ImageRow } from "./activities/reasoningIqEval/pictureConcept/ImageRow";
export { default as ColourNumbers } from "./activities/knowledgeIqEval/colourNumbers/ColourNumbers";
export { default as ColourNumbersController } from "./activities/knowledgeIqEval/colourNumbers/colourNumbersController";
export { default as DigitSpan } from "./activities/knowledgeIqEval/digitSpan/DigitSpan";
export { default as EthicalClearenceScreen } from "./activities/ethicalClearence";
export { default as VideoPlayerScreen } from "./activities/videoPlayer";
......
import React, { useState, useEffect, useRef } from "react";
import A1 from "../Audio/A1.mp3";
import A2 from "../Audio/A2.mp3";
import A1 from "../assets/Audio/DS1.mp3";
import A2 from "../assets/Audio/DS2.mp3";
import A3 from "../assets/Audio/DS3.mp3";
import Image from "../assets/digitspan.jpg";
const Audio = ({ currentActivtyIndex, changeScreen }) => {
const myAudio = useRef();
......@@ -16,14 +18,14 @@ const Audio = ({ currentActivtyIndex, changeScreen }) => {
setAudio(A2);
break;
case 3:
setAudio(A1);
break;
case 4:
setAudio(A2);
break;
case 5:
setAudio(A1);
setAudio(A3);
break;
// case 4:
// setAudio(A2);
// break;
// case 5:
// setAudio(A1);
// break;
default:
}
const timer = setTimeout(() => {
......@@ -54,9 +56,17 @@ const Audio = ({ currentActivtyIndex, changeScreen }) => {
onEnded={() => myCallback()}
/>
<button className="text-black" onClick={handleBeep}>
{/* <button className="text-black" onClick={handleBeep}>
Start
</button>
</button> */}
<img
className="items-center justify-center"
src={Image}
style={{width:'200px'}}
alt=""
/>
</>
)}
</div>
......