Commit c726599a authored by Thathsarani R.P.H.S.R's avatar Thathsarani R.P.H.S.R

Import Flask folder with Python files.

parent 5c1b091e
from voice_assistant import ChatBot
from flask import Flask, render_template, redirect
app = Flask(__name__)
ai = ChatBot(name="AIC")
@app.route("/")
def index():
return render_template("index.html", emotion_percentages={})
@app.route("/process")
def process():
ai.run()
sentiment_analysis = ai.sentiment_analysis
emotion_percentages = calculate_emotion_percentages(sentiment_analysis)
stress_level = calculate_stress_level(sentiment_analysis)
return render_template("index.html", emotion_percentages=emotion_percentages, stress_level=stress_level)
def calculate_emotion_percentages(sentiment_analysis):
    total_emotions = len(sentiment_analysis)
    if total_emotions == 0:
        return {}  # Avoid division by zero when no emotions were recorded
    emotion_count = {}
    for emotion in sentiment_analysis:
        emotion_count[emotion] = emotion_count.get(emotion, 0) + 1
    emotion_percentages = {}
    for emotion, count in emotion_count.items():
        percentage = (count / total_emotions) * 100
        emotion_percentages[emotion] = round(percentage, 2)
    return emotion_percentages
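# Illustrative example (hypothetical input): for
# sentiment_analysis = ["Happy", "Happy", "Sad"], this returns
# {"Happy": 66.67, "Sad": 33.33}.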
def calculate_stress_level(sentiment_analysis):
    # Determine stress level based on detected emotions
    if any(emotion in sentiment_analysis for emotion in ["Angry", "Fear", "Sad", "Disgust"]):
        return "High Stress"
    elif any(emotion in sentiment_analysis for emotion in ["Happy", "Surprised"]):
        return "Low Stress"
    else:
        return "Neutral Stress"
@app.route("/conversation", methods=["POST"])
def conversation():
# Existing code for the conversation logic
# Redirect back to the home page
return redirect("/")
@app.route("/reset", methods=["POST"])
def reset():
ai.sentiment_analysis = [] # Clear the emotions list
return redirect("/")
if __name__ == "__main__":
    app.run()
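# To try the app locally (assuming Flask's default development server settings):
#   python chatbot_web.py   # serves on http://127.0.0.1:5000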
import keras.models
import numpy as np
# Load the saved emotion identification model
loaded_model = keras.models.load_model('C:/Users/malithg/PycharmProjects/AI_Virtual_C/my_model.h5')
classLabels = ('Angry', 'Fear', 'Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral')
from chatbot_web import app
if __name__ == "__main__":
    app.run()
import os
import sys
import sounddevice as sd
import soundfile as sf
import numpy as np
import librosa
import keras.models
# Load the saved emotion identification model
loaded_model = keras.models.load_model('C:/Users/dell/Desktop/AI_Virtual_C/my_model.h5')
classLabels = ('Angry', 'Fear', 'Disgust', 'Happy', 'Sad', 'Surprised', 'Neutral')
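# The absolute path above is machine-specific. A more portable sketch, assuming
# my_model.h5 sits next to this file:
#   model_path = os.path.join(os.path.dirname(__file__), "my_model.h5")
#   loaded_model = keras.models.load_model(model_path)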
def record_and_analyze_user_input(chatbot):
    duration = 2.5  # Set the duration of the recording (in seconds)
    sample_rate = 22050 * 2  # Set the sample rate of the recording
    # Redirect standard output to a null device
    sys.stdout = open(os.devnull, 'w')
    # Record the live audio
    recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1)
    sd.wait()  # Wait until the recording is finished
    # Restore standard output
    sys.stdout = sys.__stdout__
    # Preprocess the recorded audio
    mfccs = librosa.feature.mfcc(y=recording.flatten(), sr=sample_rate, n_mfcc=39)
    input_data = mfccs[np.newaxis, ..., np.newaxis]
    # Perform emotion prediction
    with np.printoptions(suppress=True):
        predictions = loaded_model.predict(input_data)
    # Get the predicted label
    predicted_label = classLabels[np.argmax(predictions)]
    # Store the predicted emotion in the array
    chatbot.sentiment_analysis.append(predicted_label)
    return predicted_label
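# Usage sketch (works with any object exposing a sentiment_analysis list, e.g.
# the ChatBot from voice_assistant.py):
#   bot = ChatBot(name="AIC")
#   label = record_and_analyze_user_input(bot)
#   print(label, bot.sentiment_analysis)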
import speech_recognition as sr
def speech_to_text():
    recognizer = sr.Recognizer()
    with sr.Microphone() as mic:
        print("Listening...")
        audio = recognizer.listen(mic)
    text = "ERROR"
    try:
        text = recognizer.recognize_google(audio)
        print("Me -->", text)
    except (sr.UnknownValueError, sr.RequestError):
        print("Me --> ERROR")
    return text
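# Usage sketch: needs a working microphone and network access, since
# recognize_google() calls the Google Web Speech API:
#   spoken = speech_to_text()
#   if spoken != "ERROR":
#       print("Recognized:", spoken)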
body {
background-color: rgba(0, 0, 255, 0.5);
margin: 0;
padding: 0;
}
.container {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100vh;
}
.start-button {
background-color: rgba(255, 255, 255, 0.5);
border: none;
border-radius: 50px;
padding: 15px 30px;
color: #ffffff;
font-size: 18px;
text-align: center;
text-decoration: none;
display: inline-block;
transition-duration: 0.4s;
cursor: pointer;
box-shadow: 0px 8px 15px rgba(0, 0, 0, 0.1);
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 2;
}
.table-wrapper {
background-color: lightgrey;
border-radius: 20px;
padding: 20px;
position: relative;
z-index: 1;
}
table {
width: 100%;
}
th, td {
padding: 10px;
text-align: left;
color: #333333;
}
th {
background-color: rgba(255, 255, 255, 0.3);
}
form {
text-align: center;
margin-top: 20px;
}
.btn-danger {
background-color: #ff0000;
color: #ffffff;
border: none;
border-radius: 50px;
padding: 10px 20px;
font-size: 16px;
transition-duration: 0.4s;
cursor: pointer;
}
.btn-danger:hover {
background-color: #cc0000;
}
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>zenBot</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-4bw+/aepP/YC94hEpVNVgiZdgIC5+VKNBQNGCHeKRQN+PtmoHDEXuppvnDJzQIu9" crossorigin="anonymous">
<!------ display shortcut icon ------>
<link rel = " shortcut icon " href = "../static/logo.png">
</head>
<body style="background-color: black;">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.1/dist/js/bootstrap.bundle.min.js" integrity="sha384-HwwvtgBNo3bZJJLYd8oVXjrBZt8cqVSpeBNS5n7C8IVInixGAoxmnlMuBnhbgrkm" crossorigin="anonymous"></script>
<div class = "container mt-5" style="background-color: whitesmoke; width : 40%;">
<br>
<center>
<img src="../static/chatbot.png" alt="bot">
<div> <h1> zenBot Emotion Level Check </h1> </div>
<br>
<button type="button" class="btn btn-outline-danger btn-lg" onclick="window.location.href='/process'">Start</button> <br> <br>
<img src="../static/mic.png" alt="mic"> </center>
<br>
<table class="table table-danger table-striped table-hover" style="text-align: center;">
<thead>
<tr>
<th scope="col">Emotion</th>
<th scope="col">Precentage</th>
</tr>
</thead>
<tbody>
{% for emotion, percentage in emotion_percentages.items() %}
<tr>
<td>{{ emotion }}</td>
<td>{{ percentage }}%</td>
</tr>
{% endfor %}
</tbody>
</table>
<br>
<!--
<h5> {{ emotion }} </h5>
<div class="progress" role="progressbar" aria-label="Danger example" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100">
<div class="progress-bar text-bg-danger" style="width: '{{ percentage }}%'">{{ percentage }}%</div>
</div>
<br>
-->
<center> <div> <h1> zenBot Stress Level Check </h1> </div>
<br>
<h2 style="color: crimson;">{{ stress_level }}</h2>
<br>
</center>
<form method="POST" action="/reset" id="reset-form">
<center> <button type="submit" class="btn btn-outline-dark btn-lg">Reset</button> </center>
</form>
<br>
</div>
<br>
</body>
</html>
<!DOCTYPE html>
<html>
<head>
<title>AI Counsellor</title>
<style>
body {
background-color: rgba(0, 0, 128, 0.7);
margin: 0;
padding: 0;
}
.container {
position: relative;
height: 100vh;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
}
.table-wrapper {
background-color: rgba(0, 0, 0, 0.3);
border-radius: 20px;
padding: 20px;
margin-bottom: 30px;
box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.4);
}
table {
width: 100%;
}
th, td {
padding: 10px;
text-align: left;
color: #ffffff;
}
th {
background-color: rgba(255, 255, 255, 0.3);
}
.start-button {
background-color: rgba(255, 255, 255, 0.5);
border: none;
border-radius: 50px;
padding: 15px 30px;
color: #ffffff;
font-size: 18px;
text-align: center;
text-decoration: none;
display: inline-block;
transition-duration: 0.4s;
cursor: pointer;
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 2;
box-shadow: 0px 8px 15px rgba(0, 0, 0, 0.4);
}
.start-button:hover {
background-color: rgba(255, 255, 255, 0.7);
box-shadow: 0px 8px 15px rgba(0, 0, 0, 0.6);
}
form {
text-align: center;
margin-top: 20px;
}
.btn-danger {
background-color: #ff0000;
color: #ffffff;
border: none;
border-radius: 50px;
padding: 10px 20px;
font-size: 16px;
transition-duration: 0.4s;
}
.btn-danger:hover {
background-color: #cc0000;
}
h1 {
text-align: center;
color: #333333;
}
</style>
</head>
<body>
<h1>AI Counsellor</h1>
<button onclick="window.location.href='/process'" class="start-button">Start</button>
<div class="container">
<div class="table-wrapper">
<table id="emotion-table">
<thead>
<tr>
<th>Emotion</th>
<th>Percentage</th>
</tr>
</thead>
<tbody>
{% for emotion, percentage in emotion_percentages.items() %}
<tr>
<td>{{ emotion }}</td>
<td>{{ percentage }}%</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
<form method="POST" action="/reset" id="reset-form">
<button type="submit" class="btn btn-danger">Reset</button>
</form>
</div>
</body>
</html>
import pyttsx3
def text_to_speech(text):
    print("Dev -->", text)
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()
import datetime
def wake_up(text, name):
    # Case-insensitive check so a name like "AIC" is found in lower-cased speech text
    return name.lower() in text.lower()
def action_time():
    return datetime.datetime.now().time().strftime('%H:%M')
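# Illustrative examples: wake_up("hey aic, are you there?", "AIC") -> True;
# action_time() -> e.g. "14:05" (current local time, HH:MM).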
import os
import threading
import numpy as np
import speech_recognition as sr
import transformers
from utils import wake_up, action_time
from record_analysis import record_and_analyze_user_input
from text_to_speech import text_to_speech
class ChatBot:
    def __init__(self, name):
        self.text = None
        print("----- Starting up", name, "-----")
        self.name = name
        self.user_input_counter = 0
        self.user_inputs = []
        self.sentiment_analysis = []
        self.record_thread = None

    def speech_to_text(self):
        recognizer = sr.Recognizer()
        with sr.Microphone() as mic:
            print("Listening...")
            audio = recognizer.listen(mic)
        self.text = "ERROR"
        try:
            self.text = recognizer.recognize_google(audio)
            print("Me -->", self.text)
        except (sr.UnknownValueError, sr.RequestError):
            print("Me --> ERROR")
    def run(self):
        nlp = transformers.pipeline("conversational", model="microsoft/DialoGPT-medium")
        os.environ["TOKENIZERS_PARALLELISM"] = "true"
        res1 = "Hello, I am zenBot. What can I do for you?"
        text_to_speech(res1)
        while True:
            self.speech_to_text()
            if wake_up(self.text, self.name):
                res = "Hello, I am ZenBot the AI. What can I do for you?"
            elif "time" in self.text:
                res = action_time()
            elif any(i in self.text for i in ["thank", "thanks"]):
                res = np.random.choice(
                    ["You're welcome!", "Anytime!", "No problem!", "Cool!", "I'm here if you need me!"]
                )
            elif any(i in self.text for i in ["exit", "close"]):
                res = np.random.choice(
                    ["Tata", "Have a good day", "Bye", "Goodbye", "Hope to meet soon", "Peace out!"]
                )
                text_to_speech(res)
                break
            else:
                if self.text == "ERROR":
                    res = "Sorry, come again?"
                else:
                    if self.record_thread and self.record_thread.is_alive():
                        self.record_thread.join()  # Wait for the previous thread to finish if it's still running
                    # Start a new thread to run record_and_analyze_user_input()
                    self.record_thread = threading.Thread(target=record_and_analyze_user_input, args=(self,))
                    self.record_thread.start()
                    chat = nlp(transformers.Conversation(self.text), pad_token_id=50256)
                    res = str(chat)
                    res = res[res.find("bot >> ") + 7:].strip()
            text_to_speech(res)
        overall_emotion = self.sentiment_analysis
        print("Overall Emotion of the conversation:", overall_emotion)
        print("----- Closing down", self.name, "-----")