Commit 9bd0d528 authored by nivebaby

With transfer learning

parent 30964d7f
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "a47d2356",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: pydub in c:\\users\\singer\\anaconda3\\lib\\site-packages (0.25.1)\n",
"Requirement already satisfied: noisereduce in c:\\users\\singer\\anaconda3\\lib\\site-packages (2.0.0)\n",
"Requirement already satisfied: numpy in c:\\users\\singer\\anaconda3\\lib\\site-packages (from noisereduce) (1.20.3)\n",
"Requirement already satisfied: scipy in c:\\users\\singer\\anaconda3\\lib\\site-packages (from noisereduce) (1.7.1)\n",
"Requirement already satisfied: tqdm in c:\\users\\singer\\anaconda3\\lib\\site-packages (from noisereduce) (4.62.3)\n",
"Requirement already satisfied: matplotlib in c:\\users\\singer\\anaconda3\\lib\\site-packages (from noisereduce) (3.4.3)\n",
"Requirement already satisfied: librosa in c:\\users\\singer\\anaconda3\\lib\\site-packages (from noisereduce) (0.9.1)\n",
"Requirement already satisfied: joblib in c:\\users\\singer\\anaconda3\\lib\\site-packages (from noisereduce) (1.1.0)\n",
"Requirement already satisfied: packaging>=20.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (21.0)\n",
"Requirement already satisfied: numba>=0.45.1 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (0.54.1)\n",
"Requirement already satisfied: soundfile>=0.10.2 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (0.10.3.post1)\n",
"Requirement already satisfied: audioread>=2.1.5 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (2.1.9)\n",
"Requirement already satisfied: scikit-learn>=0.19.1 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (0.24.2)\n",
"Requirement already satisfied: decorator>=4.0.10 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (5.1.0)\n",
"Requirement already satisfied: pooch>=1.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (1.6.0)\n",
"Requirement already satisfied: resampy>=0.2.2 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from librosa->noisereduce) (0.2.2)\n",
"Requirement already satisfied: llvmlite<0.38,>=0.37.0rc1 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from numba>=0.45.1->librosa->noisereduce) (0.37.0)\n",
"Requirement already satisfied: setuptools in c:\\users\\singer\\anaconda3\\lib\\site-packages (from numba>=0.45.1->librosa->noisereduce) (58.0.4)\n",
"Requirement already satisfied: pyparsing>=2.0.2 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from packaging>=20.0->librosa->noisereduce) (3.0.4)\n",
"Requirement already satisfied: requests>=2.19.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from pooch>=1.0->librosa->noisereduce) (2.26.0)\n",
"Requirement already satisfied: appdirs>=1.3.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from pooch>=1.0->librosa->noisereduce) (1.4.4)\n",
"Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from requests>=2.19.0->pooch>=1.0->librosa->noisereduce) (2021.10.8)\n",
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from requests>=2.19.0->pooch>=1.0->librosa->noisereduce) (1.26.7)\n",
"Requirement already satisfied: charset-normalizer~=2.0.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from requests>=2.19.0->pooch>=1.0->librosa->noisereduce) (2.0.4)\n",
"Requirement already satisfied: idna<4,>=2.5 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from requests>=2.19.0->pooch>=1.0->librosa->noisereduce) (3.2)\n",
"Requirement already satisfied: six>=1.3 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from resampy>=0.2.2->librosa->noisereduce) (1.16.0)\n",
"Requirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from scikit-learn>=0.19.1->librosa->noisereduce) (2.2.0)\n",
"Requirement already satisfied: cffi>=1.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from soundfile>=0.10.2->librosa->noisereduce) (1.14.6)\n",
"Requirement already satisfied: pycparser in c:\\users\\singer\\anaconda3\\lib\\site-packages (from cffi>=1.0->soundfile>=0.10.2->librosa->noisereduce) (2.20)\n",
"Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from matplotlib->noisereduce) (1.3.1)\n",
"Requirement already satisfied: python-dateutil>=2.7 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from matplotlib->noisereduce) (2.8.2)\n",
"Requirement already satisfied: cycler>=0.10 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from matplotlib->noisereduce) (0.10.0)\n",
"Requirement already satisfied: pillow>=6.2.0 in c:\\users\\singer\\anaconda3\\lib\\site-packages (from matplotlib->noisereduce) (8.4.0)\n",
"Requirement already satisfied: colorama in c:\\users\\singer\\anaconda3\\lib\\site-packages (from tqdm->noisereduce) (0.4.4)\n",
"Requirement already satisfied: pyaudio in c:\\users\\singer\\anaconda3\\lib\\site-packages (0.2.11)\n",
"Requirement already satisfied: json-tricks in c:\\users\\singer\\anaconda3\\lib\\site-packages (3.15.5)\n"
]
}
],
"source": [
"!pip install pydub\n",
"!pip install noisereduce\n",
"!pip install pyaudio\n",
"!pip install json-tricks"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9ac37580",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from json_tricks import load\n",
"\n",
"import numpy as np\n",
"\n",
"import librosa\n",
"from pydub import AudioSegment, effects\n",
"import noisereduce as nr\n",
"\n",
"import tensorflow as tf\n",
"import keras\n",
"from keras.models import model_from_json\n",
"from keras.models import load_model\n",
"\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "2bc2b32c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" lstm (LSTM) (None, 257, 64) 20480 \n",
" \n",
" lstm_1 (LSTM) (None, 64) 33024 \n",
" \n",
" dense (Dense) (None, 8) 520 \n",
" \n",
"=================================================================\n",
"Total params: 54,024\n",
"Trainable params: 54,024\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"None\n"
]
}
],
"source": [
"saved_model_path = 'model8723.json'\n",
"saved_weights_path = 'model8723_weights.h5'\n",
"\n",
"#Reading the model from JSON file\n",
"with open(saved_model_path, 'r') as json_file:\n",
" json_savedModel = json_file.read()\n",
" \n",
"# Loading the model architecture, weights\n",
"model = tf.keras.models.model_from_json(json_savedModel)\n",
"model.load_weights(saved_weights_path)\n",
"\n",
"# Compiling the model with similar parameters as the original model.\n",
"model.compile(loss='categorical_crossentropy', \n",
" optimizer='RMSProp', \n",
" metrics=['categorical_accuracy'])\n",
"\n",
"print(model.summary())"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "785ce747",
"metadata": {},
"outputs": [],
"source": [
"def preprocess(file_path, frame_length = 2048, hop_length = 512):\n",
" '''\n",
" A process to an audio .wav file before execcuting a prediction.\n",
" Arguments:\n",
" - file_path - The system path to the audio file.\n",
" - frame_length - Length of the frame over which to compute the speech features. default: 2048\n",
" - hop_length - Number of samples to advance for each frame. default: 512\n",
"\n",
" Return:\n",
" 'X_3D' variable, containing a shape of: (batch, timesteps, feature) for a single file (batch = 1).\n",
" ''' \n",
" # Fetch sample rate.\n",
" _, sr = librosa.load(path = file_path, sr = None)\n",
" # Load audio file\n",
" rawsound = AudioSegment.from_file(file_path, duration = None) \n",
" # Normalize to 5 dBFS \n",
" normalizedsound = effects.normalize(rawsound, headroom = 5.0) \n",
" # Transform the audio file to np.array of samples\n",
" normal_x = np.array(normalizedsound.get_array_of_samples(), dtype = 'float32') \n",
" # Noise reduction \n",
" final_x = nr.reduce_noise(normal_x, sr=sr)\n",
" \n",
" \n",
" \n",
" f1 = librosa.feature.rms(final_x, frame_length=frame_length, hop_length=hop_length, center=True, pad_mode='reflect').T # Energy - Root Mean Square\n",
" f2 = librosa.feature.zero_crossing_rate(final_x, frame_length=frame_length, hop_length=hop_length,center=True).T # ZCR\n",
" f3 = librosa.feature.mfcc(final_x, sr=sr, S=None, n_mfcc=13, hop_length = hop_length).T # MFCC \n",
" X = np.concatenate((f1, f2, f3), axis = 1)\n",
" \n",
" X_3D = np.expand_dims(X, axis=0)\n",
" \n",
" return X_3D"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "c8b5cab2",
"metadata": {},
"outputs": [],
"source": [
"# Emotions list is created for a readable form of the model prediction.\n",
"\n",
"emotions = {\n",
" 0 : 'neutral',\n",
" 1 : 'calm',\n",
" 2 : 'happy',\n",
" 3 : 'sad',\n",
" 4 : 'angry',\n",
" 5 : 'fearful',\n",
" 6 : 'disgust',\n",
" 7 : 'suprised' \n",
"}\n",
"emo_list = list(emotions.values())\n",
"\n",
"def is_silent(data):\n",
" # Returns 'True' if below the 'silent' threshold\n",
" return max(data) < 100"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c6d8d5d7",
"metadata": {},
"outputs": [],
"source": [
"import pyaudio\n",
"import wave\n",
"from array import array\n",
"import struct\n",
"import time\n",
"\n",
"\n",
"def play(file):\n",
" CHUNK=1024\n",
" \n",
" wf=wave.open(file,'rb')\n",
" \n",
" p=pyaudio.PyAudio()\n",
" \n",
" stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),\n",
" channels=wf.getnchannels(),\n",
" rate=wf.getframerate(),\n",
" output=True)\n",
" \n",
" data = wf.readframes(CHUNK)\n",
" \n",
" while len(data)>0:\n",
" stream.write(data)\n",
" data=wf.readframes(CHUNK)\n",
" \n",
" stream.stop_stream()\n",
" stream.close()\n",
" \n",
" p.terminate()\n",
" \n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1479bc5",
"metadata": {},
"outputs": [],
"source": [
"import pyaudio\n",
"import wave\n",
"from array import array\n",
"import struct\n",
"import time\n",
"\n",
"# Initialize variables\n",
"RATE = 24414\n",
"CHUNK = 512\n",
"RECORD_SECONDS = 7.1\n",
"\n",
"FORMAT = pyaudio.paInt32\n",
"CHANNELS = 1\n",
"WAVE_OUTPUT_FILE = \"output.wav\"\n",
"\n",
"# Open an input channel\n",
"p = pyaudio.PyAudio()\n",
"stream = p.open(format=FORMAT,\n",
" channels=CHANNELS,\n",
" rate=RATE,\n",
" input=True,\n",
" frames_per_buffer=CHUNK)\n",
"\n",
"\n",
"# Initialize a non-silent signals array to state \"True\" in the first 'while' iteration.\n",
"data = array('h', np.random.randint(size = 512, low = 0, high = 500))\n",
"\n",
"\n",
"# SESSION START\n",
"print(\"** session started\")\n",
"total_predictions = [] # A list for all predictions in the session.\n",
"tic = time.perf_counter()\n",
"\n",
"while is_silent(data) == False:\n",
" print(\"* recording...\")\n",
" frames = [] \n",
" data = np.nan # Reset 'data' variable.\n",
"\n",
" timesteps = int(RATE / CHUNK * RECORD_SECONDS) # => 339\n",
"\n",
"# # Insert frames to 'output.wav'.\n",
"# for i in range(0, timesteps):\n",
"# data = array('l', stream.read(CHUNK)) \n",
"# frames.append(data)\n",
"\n",
"# wf = wave.open(WAVE_OUTPUT_FILE, 'wb')\n",
"# wf.setnchannels(CHANNELS)\n",
"# wf.setsampwidth(p.get_sample_size(FORMAT))\n",
"# wf.setframerate(RATE)\n",
"# wf.writeframes(b''.join(frames))\n",
"\n",
" print(\"* done recording\")\n",
" \n",
" \n",
"\n",
" x = preprocess(WAVE_OUTPUT_FILE) # 'output.wav' file preprocessing.\n",
" # Model's prediction => an 8 emotion probabilities array.\n",
" predictions = model.predict(x, use_multiprocessing=True)\n",
" pred_list = list(predictions)\n",
" pred_np = np.squeeze(np.array(pred_list).tolist(), axis=0) # Get rid of 'array' & 'dtype' statments.\n",
" total_predictions.append(pred_np)\n",
" \n",
" # Present emotion distribution for a sequence (7.1 secs).\n",
" fig = plt.figure(figsize = (10, 2))\n",
" plt.bar(emo_list, pred_np, color = 'darkturquoise')\n",
" plt.ylabel(\"Probabilty (%)\")\n",
" plt.show()\n",
" \n",
" play('output.wav')\n",
" print('hello')\n",
"\n",
" \n",
" max_emo = np.argmax(predictions)\n",
" print('max emotion:', emotions.get(max_emo,-1))\n",
" \n",
" print(100*'-')\n",
" \n",
" # Define the last 2 seconds sequence.\n",
" last_frames = np.array(struct.unpack(str(96 * CHUNK) + 'B' , np.stack(( frames[-1], frames[-2], frames[-3], frames[-4],\n",
" frames[-5], frames[-6], frames[-7], frames[-8],\n",
" frames[-9], frames[-10], frames[-11], frames[-12],\n",
" frames[-13], frames[-14], frames[-15], frames[-16],\n",
" frames[-17], frames[-18], frames[-19], frames[-20],\n",
" frames[-21], frames[-22], frames[-23], frames[-24]),\n",
" axis =0)) , dtype = 'b')\n",
" if is_silent(last_frames): # If the last 2 seconds are silent, end the session.\n",
" break\n",
" \n",
"# SESSION END \n",
"toc = time.perf_counter()\n",
"stream.stop_stream()\n",
"stream.close()\n",
"p.terminate()\n",
"wf.close()\n",
"print('** session ended')\n",
"\n",
"\n",
"\n",
"# Present emotion distribution for the whole session.\n",
"total_predictions_np = np.mean(np.array(total_predictions).tolist(), axis=0)\n",
"fig = plt.figure(figsize = (10, 5))\n",
"plt.bar(emo_list, total_predictions_np, color = 'indigo')\n",
"plt.ylabel(\"Mean probabilty (%)\")\n",
"plt.title(\"Session Summary\")\n",
"plt.show()\n",
"\n",
"print(f\"Emotions analyzed for: {(toc - tic):0.4f} seconds\") \n",
" \n",
" \n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10150863",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "0fada8ab",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.0 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.0"
},
"vscode": {
"interpreter": {
"hash": "7a7e265e5e544617e1d00adfc1176a2008b9fc3cc653be133465b17d6ab0f4ba"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}
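A quick way to sanity-check the restored model outside the live-microphone loop is to classify a single pre-recorded clip. A minimal sketch, assuming the notebook's preprocess() and emotions dict are in scope and that 'sample.wav' is a hypothetical clip of roughly the length the model expects:

import numpy as np
import tensorflow as tf

# Load the architecture and weights saved by the training notebook.
with open('model8723.json', 'r') as json_file:
    model = tf.keras.models.model_from_json(json_file.read())
model.load_weights('model8723_weights.h5')

x = preprocess('sample.wav')        # (1, timesteps, 15) feature tensor
probs = model.predict(x)[0]         # probabilities over the 8 emotions
print('predicted emotion:', emotions[int(np.argmax(probs))])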
"""
ASGI config for audiocheck project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'audiocheck.settings')
application = get_asgi_application()
from django import forms
from audiocheck.models import Audio_store

class AudioForm(forms.ModelForm):
    class Meta:
        model = Audio_store
        fields = ['record']
\ No newline at end of file
from django.db import models

class Audio_store(models.Model):
    record = models.FileField(upload_to='')

    class Meta:
        db_table = 'Audio_store'
\ No newline at end of file
"""
Django settings for audiocheck project.
Generated by 'django-admin startproject' using Django 4.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PRODUCT_MODEL = 'Audio_store'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-i!zl!#ao+%h^w*8rua7fd#*tc52yn4ak$#etwu%k7)p3khqwuu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'audiocheck',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'audiocheck.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'audiocheck.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<form action="audio" method = "post" enctype="multipart/form-data">
{% csrf_token %}
{{ form }}
<button type="submit">Upload</button>
</form>
</body>
</html>
\ No newline at end of file
from django.contrib import admin
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    path('audio', views.Audio_store),
    path('show', views.check),
    path('result/', views.display_emotion, name='display_emotion'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import sqlite3

import numpy as np
import librosa
import soundfile

from django.http import HttpResponse
from django.shortcuts import render

from .forms import AudioForm
def extract_feature(file_name, mfcc, chroma, mel):
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype='float32')
        sample_rate = sound_file.samplerate
        if chroma:
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma))
        if mel:
            mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel))
    return result
def display_emotion(request):
    # The selected file name arrives as the 'color' radio value from show.html.
    audio_file = request.GET.get('color', '')
    return render(request, 'show.html', {'one': audio_file})
def Audio_store(request):
    if request.method == 'POST':
        form = AudioForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return HttpResponse('successfully uploaded')
    else:
        form = AudioForm()
    return render(request, 'aud.htm', {'form': form})
def check(request):
    # Read the stored file names from the SQLite database.
    con = sqlite3.connect("db.sqlite3")
    cur = con.cursor()
    cur.execute('SELECT record FROM Audio_store;')
    rows = cur.fetchall()
    con.close()
    result_1d = [row[0] for row in rows]
    return render(request, 'show.html', {'row': result_1d})
\ No newline at end of file
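extract_feature() above reduces a clip to one flat vector (40 MFCC means, plus 12 chroma and 128 mel values when those flags are set), which is the shape a classical classifier expects. A minimal training sketch around scikit-learn's MLPClassifier; the training_data/ layout and the dash-separated label scheme are hypothetical:

import glob
import os

from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

features, labels = [], []
for path in glob.glob('training_data/*.wav'):
    features.append(extract_feature(path, mfcc=True, chroma=True, mel=True))
    labels.append(os.path.basename(path).split('-')[0])  # hypothetical naming scheme

x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.25)
clf = MLPClassifier(hidden_layer_sizes=(300,), max_iter=500)
clf.fit(x_train, y_train)
print('held-out accuracy:', clf.score(x_test, y_test))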
"""
WSGI config for audiocheck project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'audiocheck.settings')
application = get_wsgi_application()
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'audiocheck.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<form action="audio" method = "post" enctype="multipart/form-data">
{% csrf_token %}
{{ form }}
<button type="submit">Upload</button>
</form>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
{{one}}
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
<script type="text/javascript">
function display() {
    var audiodisplay = document.querySelector('input[name=color]:checked').value;
    document.querySelector("#selecteraudioname").textContent = 'The checked radio value is ' + audiodisplay;
}
</script>
</head>
<body>
<form action="{% url 'display_emotion' %}" enctype="multipart/form-data">
{% csrf_token %}
{% for student in row %}
<input type="radio" value="{{student}}" id="css" name="color" >
{{student}}
<br>
{% endfor %}
<input type="submit" value="Click" onclick="display()" />
<b style="color:green" id="selecteraudioname"></b>
{{one}}
</form>
</body>
</html>
\ No newline at end of file
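Note that nothing in views.py runs the emotion model yet; show.html only echoes the selected file name back. A hypothetical bridge view, reusing the notebook's preprocess() and the saved model8723 files; predict_emotion and the media-path handling are assumptions, not code from this commit:

import os

import numpy as np
import tensorflow as tf
from django.conf import settings
from django.http import HttpResponse

# Load the serialized architecture and weights once, at import time.
with open('model8723.json') as json_file:
    _model = tf.keras.models.model_from_json(json_file.read())
_model.load_weights('model8723_weights.h5')

_EMOTIONS = ['neutral', 'calm', 'happy', 'sad',
             'angry', 'fearful', 'disgust', 'surprised']

def predict_emotion(request):
    name = request.GET.get('color', '')                      # file picked in show.html
    x = preprocess(os.path.join(settings.MEDIA_ROOT, name))  # the notebook's preprocess()
    probs = _model.predict(x)[0]                             # 8 emotion probabilities
    return HttpResponse(_EMOTIONS[int(np.argmax(probs))])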
"""
ASGI config for audiocheck project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'audiocheck.settings')
application = get_asgi_application()
from django import forms
from django.apps import AppConfig
from django import forms
from audiocheck.models import Audio_store
class AudioForm(forms.ModelForm):
class Meta:
model=Audio_store
fields=['record']
\ No newline at end of file
from django.db import models
from django.apps import AppConfig
class Audio_store(models.Model):
record=models.FileField(upload_to='')
class Meta:
db_table='Audio_store'
\ No newline at end of file
"""
Django settings for audiocheck project.
Generated by 'django-admin startproject' using Django 4.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PRODUCT_MODEL = 'Audio_store'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-i!zl!#ao+%h^w*8rua7fd#*tc52yn4ak$#etwu%k7)p3khqwuu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'audiocheck',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'audiocheck.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'audiocheck.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_URL = '/static/'
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<form action="audio" method = "post" enctype="multipart/form-data">
{% csrf_token %}
{{ form }}
<button type="submit">Upload</button>
</form>
</body>
</html>
\ No newline at end of file
from django.contrib import admin
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('audio',views.Audio_store),
path('show',views.check),
path('result/',views.Audio_store)
]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
from django.http import HttpResponse
from django.shortcuts import render
import wave as wv
from email.mime import audio
from typing import IO, List
from django.db import models
from unicodedata import numeric
from django.http import HttpResponse
from django.shortcuts import render
import joblib
import numpy as np
import pickle
from django.db import models
import librosa
from pyparsing import replaceWith
from requests import request
import soundfile
import os,glob,pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
import sqlite3
from .forms import AudioForm
def extract_feature(file_name,mfcc,chroma,mel):
with soundfile.SoundFile(file_name) as sound_file:
X=sound_file.read(dtype='float32')
sample_rate=sound_file.samplerate
if chroma:
stft=np.abs(librosa.stft(X))
result=np.array([])
if mfcc:
mfccs=np.mean(librosa.feature.mfcc(y=X,sr=sample_rate,n_mfcc=40).T,axis=0)
result=np.hstack((result,mfccs))
if chroma:
chroma=np.mean(librosa.feature.chroma_stft(S=stft,sr=sample_rate).T,axis=0)
result=np.hstack((result,chroma))
if mel:
mel=np.mean(librosa.feature.melspectrogram(X,sr=sample_rate).T,axis=0)
resutl=np.hstack((result,mel))
return result
def display_emotion(request):
# print(request)
print("hello")
audio_file=request.GET.get('color','')
print(audio_file)
return render(request, 'show.html', {'one' : audio_file})
def Audio_store(request):
if request.method == 'POST':
form = AudioForm(request.POST,request.FILES or None)
if form.is_valid():
form.save()
return HttpResponse('successfully uploaded')
else:
form =AudioForm()
return render(request, 'aud.htm', {'form' : form})
def check(request):
# Create a SQL connection to our SQLite database
con = sqlite3.connect("db.sqlite3")
cur = con.cursor()
cur.execute('SELECT record FROM Audio_store;')
# The result of a "cursor.execute" can be iterated over by row
rows=cur.fetchall()
result_1d = [row[0] for row in rows]
return render(request, 'show.html', {'row' : result_1d})
# lis=[]
# for row in rows:
# lis.append(row)
# return render(request, 'show.htm', {'row' : rows})
# Be sure to close the connection
con.close()
\ No newline at end of file
"""
WSGI config for audiocheck project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'audiocheck.settings')
application = get_wsgi_application()
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'audiocheck.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 257, 15], "dtype": "float32", "sparse": false, "ragged": false, "name": "lstm_input"}}, {"class_name": "LSTM", "config": {"name": "lstm", "trainable": true, "batch_input_shape": [null, 257, 15], "dtype": "float32", "return_sequences": true, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 64, "activation": "tanh", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}}, {"class_name": "LSTM", "config": {"name": "lstm_1", "trainable": true, "dtype": "float32", "return_sequences": false, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 64, "activation": "tanh", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 8, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.8.0", "backend": "tensorflow"}
\ No newline at end of file
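Decoded, the config above is a small stacked-LSTM classifier: 257 frames of 15 features (RMS + ZCR + 13 MFCCs) feed two 64-unit LSTMs and a softmax over the 8 emotions. A sketch of the equivalent builder code; initializers and regularizers are the Keras defaults recorded in the JSON:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, return_sequences=True, input_shape=(257, 15)),
    tf.keras.layers.LSTM(64),
    tf.keras.layers.Dense(8, activation='softmax'),
])
model.summary()  # 20,480 + 33,024 + 520 = 54,024 parameters, matching the notebook summary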