Commit 47720cbe authored by janithgamage1.ed

Merge branch 'master' into feature/UI-API-Connect

parents 262e550c e873fad7
@@ -37,7 +37,7 @@ async def upload_video(video: UploadFile = File(...)):
         with open(file_location, "wb") as file:
             file.write(video.file.read())
-        return {"text": "OK2"}
+        return {"text": "Video Upload Successfully"}
     except Exception as e:
         logger.info(f"Failed to upload file. {e}")
         raise HTTPException(
...
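Only the body of the handler is visible in this hunk. A minimal self-contained sketch of how the fragment plausibly fits together; the route path, the `files/` destination, and the `HTTPException` detail are assumptions, not part of the diff:

```python
import logging

from fastapi import FastAPI, File, HTTPException, UploadFile

app = FastAPI()
logger = logging.getLogger(__name__)

@app.post("/upload/video")  # assumed route; the decorator is outside the hunk
async def upload_video(video: UploadFile = File(...)):
    try:
        file_location = f"files/{video.filename}"  # assumed destination directory
        with open(file_location, "wb") as file:
            file.write(video.file.read())
        return {"text": "Video Upload Successfully"}
    except Exception as e:
        logger.info(f"Failed to upload file. {e}")
        raise HTTPException(status_code=500, detail="Failed to upload the video")
```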
@@ -84,7 +84,7 @@ async def uploaded_video(file: UploadFile = File(...)):
         return JSONResponse(content={"error": str(e)}, status_code=500)

-unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4}
+unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4, "ගුඩ්": 5, "මෝනිං": 6, "උඹ": 7, "ආවේ": 8, "ඇයි": 9}

 def translate_text(text, target_language):
...
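The widened mapping suggests the service encodes a tokenized Sinhala sentence word by word. A minimal sketch of that lookup, assuming whitespace tokenization and a `0` fallback for out-of-vocabulary words (neither detail is shown in the diff):

```python
unicode_to_int_mapping = {
    "මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4,
    "ගුඩ්": 5, "මෝනිං": 6, "උඹ": 7, "ආවේ": 8, "ඇයි": 9,
}

def encode_sentence(sentence: str) -> list[int]:
    # Map each whitespace-separated token to its integer id; 0 = unknown word.
    return [unicode_to_int_mapping.get(token, 0) for token in sentence.split()]

print(encode_sentence("මම හෙට යනවා"))  # -> [1, 2, 3]
```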
@@ -61,7 +61,8 @@ origins = [
     "http://localhost:51373",
     "http://localhost:51489",
     "https://v6p9d9t4.ssl.hwcdn.net",
-    "https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app"
+    # "https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app",
+    "https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app"
 ]

 app.add_middleware(
...
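The hunk cuts off at `app.add_middleware(`. In a standard FastAPI setup that call registers `CORSMiddleware` against the `origins` list above; a sketch of the conventional wiring (the exact flags used in this repo are not visible in the diff):

```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

origins = [
    "http://localhost:51373",
    "https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,       # only these origins may call the API cross-site
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```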
@@ -24,6 +24,7 @@ emotion_model.load_weights("../ML_Models/Emotion_Detection_Model/emotion_model.h
 class EmotionPredictionService:
     def __init__(self, model):
         self.model = model
+        self.current_emotion = None

     def predict_emotion_detection_video(video_request: UploadFile) -> Dict[str, str]:
         try:
@@ -85,7 +86,26 @@ class EmotionPredictionService:
                 break
             emotions = predict_emotion_from_frame(frame)
-            predicted_emotions.extend(emotions)
+            if emotions:
+                new_emotion = emotions[0]  # Assuming you only process one face at a time
+                cv2.putText(frame, f"Emotion: {new_emotion}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+                if new_emotion != self.current_emotion:
+                    self.current_emotion = new_emotion
+                    predicted_emotions.append(new_emotion)
+
+            # Display the frame with emotion prediction
+            cv2.imshow('Emotion Detection', frame)
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break
+
+        # while True:
+        #     ret, frame = cap.read()
+        #     if not ret:
+        #         break
+        #     emotions = predict_emotion_from_frame(frame)
+        #     predicted_emotions.extend(emotions)

         cap.release()
         os.remove(video_location)
...
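Note that `cv2.imshow`/`cv2.waitKey` open a desktop window and will fail on a headless API server; the change-detection logic itself needs no display. A headless sketch of the same "append only when the emotion changes" idea, with the frame predictor passed in as a parameter (the committed code calls the module-level `predict_emotion_from_frame`, which returns a list of labels per frame):

```python
from typing import Callable, List, Optional

import cv2

def collect_emotion_changes(video_location: str,
                            predict_emotion_from_frame: Callable) -> List[str]:
    """Return each emotion label at the moment it replaces the previous one."""
    cap = cv2.VideoCapture(video_location)
    predicted_emotions: List[str] = []
    current_emotion: Optional[str] = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        emotions = predict_emotion_from_frame(frame)  # labels for this frame
        if emotions and emotions[0] != current_emotion:
            # Record a label only when it differs from the previous frame's.
            current_emotion = emotions[0]
            predicted_emotions.append(current_emotion)
    cap.release()
    return predicted_emotions
```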
@@ -6,11 +6,37 @@ import Button from '@mui/material/Button';
 import UploadOutlined from '@ant-design/icons/lib/icons/UploadOutlined';
 import AudioOutlined from '@ant-design/icons/lib/icons/AudioOutlined';
+import { Link } from 'react-router-dom';
+import { Box, Stack } from '@mui/material';
+import { APP_DEFAULT_PATH } from 'config';
+import construction from 'assets/images/maintenance/under-construction.svg';
+import { CardContent, IconButton, InputAdornment, Paper, TextField, Typography } from '@mui/material';
+import CopyOutlined from '@ant-design/icons/lib/icons/CopyOutlined';
+import AudioEmotionDetectService from '../../../../services/AudioEmotionDetection.js';
+import { MuiFileInput } from 'mui-file-input';
+import { useSnackbar } from 'notistack';

 const List = () => {
   const [audioBlob, setAudioBlob] = useState<Blob | undefined>(undefined);
   const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | undefined>(undefined);
   const [isRecording, setIsRecording] = useState<boolean>(false);
   const [audioUrl, setAudioUrl] = useState<string | undefined>(undefined);
+  const [value, setValue] = useState('');
+  const [file, setFile] = useState<File | string | null>(null);
+  const [loading, setLoading] = useState(false);
+  const [isUploadFile, setIsUploadFile] = useState<boolean | string | null>(true);
+
+  const handleDropSingleFile = (files: any) => {
+    if (files) {
+      setFile(
+        Object.assign(files, {
+          preview: URL.createObjectURL(files)
+        })
+      );
+      setAudioUrl(URL.createObjectURL(files));
+    }
+  };

   const handleRecordStart = async () => {
     // Clear the uploaded audio state when recording starts
@@ -54,6 +80,53 @@ const List = () => {
       // Handle case where uploaded file is not an audio file
     }
   };

+  const { enqueueSnackbar } = useSnackbar();
+
+  const handleChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
+    setValue(event.target.value);
+  };
+
+  const onCopy = (text: string) => {
+    if (text) {
+      navigator.clipboard.writeText(text);
+      enqueueSnackbar('Copied!', { variant: 'success' });
+    }
+  };
+
+  // Audio upload -> emotion prediction
+  const predictEmotionFromAudio = async () => {
+    if (file) {
+      setLoading(true);
+      const formData = new FormData();
+      // @ts-ignore
+      formData.append('audio_request', file, file.name);
+      try {
+        const response = await AudioEmotionDetectService.predictEmotionAudio(formData);
+        if (response.status === 200) {
+          setValue(response.data.predicted_emotion);
+        } else {
+          enqueueSnackbar('Something went wrong!', { variant: 'error' });
+        }
+        setLoading(false);
+      } catch (error) {
+        console.log(error);
+        setLoading(false);
+        enqueueSnackbar('Something went wrong!', { variant: 'error' });
+      }
+    } else {
+      enqueueSnackbar('Please select a file.', { variant: 'warning' });
+    }
+  };
+
+  // Upload button is filled while file upload is the active input mode
+  const checkEmotionUpload = () => {
+    return isUploadFile ? 'contained' : 'outlined';
+  };

   return (
     <MainCard content={false}>
@@ -65,17 +138,20 @@ const List = () => {
             <div style={{ textAlign: 'center' }}>
               <input
                 type="file"
+                accept="audio/*"
                 onChange={handleUpload}
                 style={{ display: 'none' }}
                 id="audio-upload"
               />
               <label htmlFor="audio-upload">
                 <Button
-                  variant="contained"
+                  variant={checkEmotionUpload()}
                   color="primary"
                   component="span"
                   startIcon={<UploadOutlined />}
+                  onClick={() => {
+                    setIsUploadFile(true);
+                  }}
                 >
                   Upload
                 </Button>
@@ -88,6 +164,49 @@ const List = () => {
                 >
                   {isRecording ? 'Stop Recording' : 'Record'}
                 </Button>
+                <Button
+                  variant="contained"
+                  disabled={loading}
+                  onClick={() => {
+                    predictEmotionFromAudio();
+                  }}
+                >
+                  Prediction
+                </Button>
+                <div>
+                  <Typography variant="overline" sx={{ color: 'text.secondary' }}>
+                    Predict Emotion
+                  </Typography>
+                  <TextField
+                    fullWidth
+                    value={value}
+                    onChange={handleChange}
+                    InputProps={{
+                      endAdornment: (
+                        <InputAdornment position="end">
+                          <IconButton onClick={() => onCopy(value)}>
+                            <CopyOutlined />
+                          </IconButton>
+                        </InputAdornment>
+                      )
+                    }}
+                  />
+                </div>
+                <CardContent>
+                  {/* ! Important */}
+                  {/* @ts-ignore */}
+                  <MuiFileInput value={file} onChange={handleDropSingleFile} inputProps={{ accept: 'audio/*' }} />
+                  <Paper style={{ padding: '20px', marginTop: '15px' }}>
+                    <Typography variant="h5" align="center" gutterBottom>
+                      Preview
+                    </Typography>
+                    <div style={{ marginTop: '20px', textAlign: 'center' }}>
+                      {file ? <audio src={audioUrl} controls /> : <p>No Audio Selected ...</p>}
+                    </div>
+                  </Paper>
+                </CardContent>
                 {audioBlob && (
                   <audio controls>
                     <source src={URL.createObjectURL(audioBlob)} type="audio/wav" />
@@ -104,12 +223,30 @@ const List = () => {
           </MainCard>
         </Grid>
         <Grid item xs={12} md={6}>
           <h2>3D Avatar</h2>
           <MainCard>
-            {/* Content of the second card */}
-            {/* You can put your 3D avatar components here */}
+            <Grid container spacing={4} direction="column" alignItems="center" justifyContent="center" sx={{ minHeight: '100vh', py: 2 }}>
+              <Grid item xs={12}>
+                <Box sx={{ width: { xs: 300, sm: 480 } }}>
+                  <img src={construction} alt="under construction" style={{ width: '100%', height: 'auto' }} />
+                </Box>
+              </Grid>
+              <Grid item xs={12}>
+                <Stack spacing={2} justifyContent="center" alignItems="center">
+                  <Typography align="center" variant="h1">
+                    Under Construction
+                  </Typography>
+                  <Typography color="textSecondary" align="center" sx={{ width: '85%' }}>
+                    Hey! Please check out this site later. We are doing some maintenance on it right now.
+                  </Typography>
+                  <Button component={Link} to={APP_DEFAULT_PATH} variant="contained">
+                    Back To Home
+                  </Button>
+                </Stack>
+              </Grid>
+            </Grid>
           </MainCard>
         </Grid>
       </Grid>
     </ScrollX>
   </MainCard>
...
import axios from 'axios';

class SignLanguageToTextService {
  predictSignLanguageVideo(speed, data) {
    return axios.post(
      `http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
      data
    );
  }
}

export default new SignLanguageToTextService();
@@ -271,7 +271,8 @@ const VideoTranslate = () => {
           {/* Conditionally render the Unity WebGL build */}
           {showUnityWebGL && (
             <iframe
-              src="https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app/"
+              // src="https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app/"
+              src="https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app/"
               width="700px"
               height="700px" // Adjust the height as needed
               title="Unity WebGL"
...
import axios from 'axios';

class AudioEmotionDetectService {
  predictEmotionAudio(data) {
    return axios.post(
      `http://127.0.0.1:8000/predict_emotion/audio/`,
      data
    );
  }
}

export default new AudioEmotionDetectService();
\ No newline at end of file
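Since the component above posts the file under the multipart field `audio_request`, the endpoint can also be exercised without the frontend. A sketch using Python's `requests` package (the package and the local `sample.wav` path are assumptions):

```python
import requests

with open("sample.wav", "rb") as f:
    response = requests.post(
        "http://127.0.0.1:8000/predict_emotion/audio/",
        files={"audio_request": ("sample.wav", f, "audio/wav")},
    )

response.raise_for_status()
# The frontend reads this same field from the JSON payload.
print(response.json()["predicted_emotion"])
```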
import axios from 'axios';

class VideoEmotionDetectService {
  predictEmotionVideo(data) {
    return axios.post(
      `http://127.0.0.1:8000/predict_emotion/video/`,
      data
    );
  }
}

export default new VideoEmotionDetectService();
\ No newline at end of file
@@ -3,10 +3,8 @@ import axios from 'axios';

 class VideoToSignLanguage {
   videoTranslation(data) {
     return axios.post(
-      // @ts-ignore
-      `http://127.0.0.1:8000/translated_items/`,
-      data
-    );
+      `http://127.0.0.1:8000/translated_items/`, data
+    );
   }
 }
...