U.D.C.S.WIJESOORIYA / 240 / Commits

Commit eb14f11a, authored Apr 26, 2022 by Malsha Rathnasiri
Parent: 40d2aa63

    final model

Showing 12 changed files with 287 additions and 58 deletions (+287, -58).
Changed files (12):

backend/backend/cms/apps.py                      +1    -1
backend/backend/cms/migrations/0001_initial.py   +23   -0
backend/backend/cms/model/predict.py             +46   -0
backend/backend/cms/model/train.py               +165  -55
backend/backend/cms/models.py                    +5    -0
backend/backend/cms/serializers.py               +9    -1
backend/backend/cms/views.py                     +36   -1
backend/backend/settings.py                      +1    -0
backend/backend/urls.py                          +1    -0
backend/best_model_final.hdf5                    +0    -0  (binary)
backend/db.sqlite3                               +0    -0  (binary)
requirements.txt                                 +0    -0
backend/backend/cms/apps.py (+1 -1)

@@ -3,4 +3,4 @@ from django.apps import AppConfig

 class CmsConfig(AppConfig):
     default_auto_field = 'django.db.models.BigAutoField'
-    name = 'cms'
+    name = 'backend.cms'
backend/backend/cms/migrations/0001_initial.py (new file, +23 -0)

# Generated by Django 4.0.4 on 2022-04-25 19:51

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='MlModel',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('description', models.TextField(blank=True)),
                ('timestamp', models.DateTimeField(auto_now=True)),
                ('details', models.JSONField()),
            ],
        ),
    ]
backend/backend/cms/model/predict.py (new file, +46 -0)

import pickle
from keras.models import load_model
import numpy as np
import IPython.display as ipd
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder


def predict():
    model = load_model(r'./best_model_final.hdf5')

    f1 = open('all_label.txt', 'rb')
    all_label = pickle.load(f1)
    print('loaded labels')

    f2 = open('all_waves_file.txt', 'rb')
    all_wave = pickle.load(f2)
    print('loaded waves')

    le = LabelEncoder()
    y = le.fit_transform(all_label)
    classes = list(le.classes_)

    train_data_file = open("train_data_file.txt", 'rb')
    [x_tr, x_val, y_tr, y_val] = np.load(train_data_file, allow_pickle=True)
    train_data_file.close()

    def predictSamples(audio):
        prob = model.predict(audio.reshape(1, 8000, 1))
        index = np.argmax(prob[0])
        return classes[index]

    index = random.randint(0, len(x_val) - 1)
    samples = x_val[index].ravel()
    print("Audio:", classes[np.argmax(y_val[index])])
    ipd.Audio(samples, rate=8000)

    print("Text:", predictSamples(samples))
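The new predict() takes no arguments and reads everything it needs from disk. A minimal invocation sketch follows; it assumes the pickled label/wave files, train_data_file.txt, and best_model_final.hdf5 all sit in the process's working directory, and the import path is inferred from the package layout used in views.py rather than stated in the commit.

# Hypothetical smoke test for the new prediction helper (not part of the commit).
# Assumes the working directory contains best_model_final.hdf5, all_label.txt,
# all_waves_file.txt and train_data_file.txt, exactly as predict() expects.
from backend.cms.model.predict import predict

predict()  # prints the true label ("Audio: ...") and the model's guess ("Text: ...")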
backend/backend/cms/model/train.py (+165 -55; inline diff, removed and added lines shown interleaved)

import pickle
from matplotlib import pyplot
import os
import librosa
import IPython.display as ipd
...
@@ -6,84 +8,192 @@ import numpy as np
from scipy.io import wavfile
import warnings
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Flatten, Conv1D, Input, MaxPooling1D
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K

K.clear_session()
warnings.filterwarnings("ignore")

os.listdir('../input/')
# os.listdir('../../../data/')

classes = ['down', 'go', 'left', 'no', 'off', 'on', 'right', 'stop', 'up', 'yes']


def train():
    print('1')
    train_audio_path = r'./backend/data/train/train/audio/'
    samples, sample_rate = librosa.load(train_audio_path + 'yes/0a7c2a8d_nohash_0.wav', sr=16000)
    # fig = plt.figure(figsize=(14, 8))
    # ax1 = fig.add_subplot(211)
    # ax1.set_title('Raw wave of ' + r'../input/train/audio/yes/0a7c2a8d_nohash_0.wav')
    # ax1.set_xlabel('time')
    # ax1.set_ylabel('Amplitude')
    # ax1.plot(np.linspace(0, sample_rate/len(samples), sample_rate), samples)
    ipd.Audio(samples, rate=sample_rate)
    print(sample_rate)
    samples = librosa.resample(samples, sample_rate, 8000)
    ipd.Audio(samples, rate=8000)
    labels = os.listdir(train_audio_path)

    # find count of each label and plot bar graph
    no_of_recordings = []
    for label in labels:
        waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
        no_of_recordings.append(len(waves))

    # plot
    # plt.figure(figsize=(30,5))
    index = np.arange(len(labels))
    # plt.bar(index, no_of_recordings)
    # plt.xlabel('Commands', fontsize=12)
    # plt.ylabel('No of recordings', fontsize=12)
    # plt.xticks(index, labels, fontsize=15, rotation=60)
    # plt.title('No. of recordings for each command')
    # plt.show()
    print('2')

    labels = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go"]

    # labels_file = open('./labels_file.bin', 'wb+')
    # pickle.dump(obj=labels, file=labels_file)
    # labels_file.close()
    # # file = open('./labels_file.bin', 'rb')
    # # dict = pickle.load(file)
    # # print('loaded')
    # # print(dict)
    # # print('fdnasf')

    duration_of_recordings = []
    for label in labels:
        print('2.1', label)
        waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
        for wav in waves:
            sample_rate, samples = wavfile.read(train_audio_path + '/' + label + '/' + wav)
            duration_of_recordings.append(float(len(samples) / sample_rate))

    plt.hist(np.array(duration_of_recordings))

    train_audio_path = r'./backend/data/train/train/audio/'

    # all_wave = []
    # all_label = []
    # for label in labels:
    #     print(label)
    #     waves = [f for f in os.listdir(
    #         train_audio_path + '/' + label) if f.endswith('.wav')]
    #     for wav in waves:
    #         samples, sample_rate = librosa.load(
    #             train_audio_path + '/' + label + '/' + wav, sr=16000)
    #         samples = librosa.resample(samples, sample_rate, 8000)
    #         if(len(samples) == 8000):
    #             all_wave.append(samples)
    #             all_label.append(label)
    # print('3')

    f1 = open('all_label.txt', 'rb')
    all_label = pickle.load(f1)
    print('loaded labels')

    f2 = open('all_waves_file.txt', 'rb')
    all_wave = pickle.load(f2)
    print('loaded waves')

    train_audio_path = '../input/tensorflow-speech-recognition-challenge/train/audio/'
    samples, sample_rate = librosa.load(train_audio_path + 'yes/0a7c2a8d_nohash_0.wav', sr=16000)
    fig = plt.figure(figsize=(14, 8))
    ax1 = fig.add_subplot(211)
    ax1.set_title('Raw wave of ' + '../input/train/audio/yes/0a7c2a8d_nohash_0.wav')
    ax1.set_xlabel('time')
    ax1.set_ylabel('Amplitude')
    ax1.plot(np.linspace(0, sample_rate / len(samples), sample_rate), samples)

    le = LabelEncoder()
    y = le.fit_transform(all_label)
    classes = list(le.classes_)
    print(classes)
    print(all_wave)

    ipd.Audio(samples, rate=sample_rate)
    print('4')

    y = np_utils.to_categorical(y, num_classes=len(labels))
    all_wave = np.array(all_wave).reshape(-1, 8000, 1)

    x_tr, x_val, y_tr, y_val = train_test_split(np.array(all_wave), np.array(y), stratify=y,
                                                test_size=0.2, random_state=777, shuffle=True)
    print(sample_rate)

    train_data_file = open('train_data_file.txt', 'wb+')
    np.save(file=train_data_file, arr=np.array([x_tr, x_val, y_tr, y_val]))
    train_data_file.close()

    inputs = Input(shape=(8000, 1))

    samples = librosa.resample(samples, sample_rate, 8000)
    ipd.Audio(samples, rate=8000)

    # First Conv1D layer
    conv = Conv1D(8, 13, padding='valid', activation='relu', strides=1)(inputs)
    conv = MaxPooling1D(3)(conv)
    conv = Dropout(0.3)(conv)

    # Second Conv1D layer
    conv = Conv1D(16, 11, padding='valid', activation='relu', strides=1)(conv)
    conv = MaxPooling1D(3)(conv)
    conv = Dropout(0.3)(conv)

    # Third Conv1D layer
    conv = Conv1D(32, 9, padding='valid', activation='relu', strides=1)(conv)
    conv = MaxPooling1D(3)(conv)
    conv = Dropout(0.3)(conv)

    labels = os.listdir(train_audio_path)

    # Fourth Conv1D layer
    conv = Conv1D(64, 7, padding='valid', activation='relu', strides=1)(conv)
    conv = MaxPooling1D(3)(conv)
    conv = Dropout(0.3)(conv)

    # find count of each label and plot bar graph
    no_of_recordings = []
    for label in labels:
        waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
        no_of_recordings.append(len(waves))

    # plot
    plt.figure(figsize=(30, 5))
    index = np.arange(len(labels))
    plt.bar(index, no_of_recordings)
    plt.xlabel('Commands', fontsize=12)
    plt.ylabel('No of recordings', fontsize=12)
    plt.xticks(index, labels, fontsize=15, rotation=60)
    plt.title('No. of recordings for each command')
    plt.show()

    # Flatten layer
    conv = Flatten()(conv)

    labels = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go"]

    # Dense Layer 1
    conv = Dense(256, activation='relu')(conv)
    conv = Dropout(0.3)(conv)

    duration_of_recordings = []
    for label in labels:
        waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
        for wav in waves:
            sample_rate, samples = wavfile.read(train_audio_path + '/' + label + '/' + wav)
            duration_of_recordings.append(float(len(samples) / sample_rate))

    plt.hist(np.array(duration_of_recordings))

    # Dense Layer 2
    conv = Dense(128, activation='relu')(conv)
    conv = Dropout(0.3)(conv)

    train_audio_path = '../input/tensorflow-speech-recognition-challenge/train/audio/'

    outputs = Dense(len(labels), activation='softmax')(conv)

    all_wave = []
    all_label = []
    for label in labels:
        print(label)
        waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
        for wav in waves:
            samples, sample_rate = librosa.load(train_audio_path + '/' + label + '/' + wav, sr=16000)
            samples = librosa.resample(samples, sample_rate, 8000)
            if (len(samples) == 8000):
                all_wave.append(samples)
                all_label.append(label)

    model = Model(inputs, outputs)
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, min_delta=0.0001)
    mc = ModelCheckpoint('best_model.hdf5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')

    history = model.fit(x_tr, y_tr, epochs=100, callbacks=[es, mc], batch_size=32,
                        validation_data=(x_val, y_val))

    le = LabelEncoder()
    y = le.fit_transform(all_label)
    classes = list(le.classes_)

    # pyplot.plot(history.history['loss'], label='train')
    # pyplot.plot(history.history['val_loss'], label='test')
    # pyplot.legend()
    # pyplot.show()

    y = np_utils.to_categorical(y, num_classes=len(labels))

    return history
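Because the inline diff above mixes data-exploration code with the model definition, a condensed sketch of the training path the new train() appears to implement is given below: load the pickled waveforms and labels, one-hot encode, split, and fit a four-block 1D CNN with early stopping and checkpointing. The file names (all_waves_file.txt, all_label.txt, best_model.hdf5) and hyperparameters come from the diff; the arrangement is this editor's reading of the interleaved view, not the committed file.

# Condensed sketch of the new training path (assumes Keras 2.x with np_utils,
# and that the pickled waveform/label files exist in the working directory).
import pickle
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.layers import Dense, Dropout, Flatten, Conv1D, Input, MaxPooling1D
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint

with open('all_waves_file.txt', 'rb') as f:
    all_wave = pickle.load(f)          # list of 8000-sample waveforms
with open('all_label.txt', 'rb') as f:
    all_label = pickle.load(f)         # matching command labels

le = LabelEncoder()
y = np_utils.to_categorical(le.fit_transform(all_label),
                            num_classes=len(le.classes_))
x = np.array(all_wave).reshape(-1, 8000, 1)
x_tr, x_val, y_tr, y_val = train_test_split(
    x, y, stratify=y, test_size=0.2, random_state=777, shuffle=True)

inputs = Input(shape=(8000, 1))
conv = inputs
# Four Conv1D blocks with widening filters and shrinking kernels, as in the diff.
for filters, kernel in [(8, 13), (16, 11), (32, 9), (64, 7)]:
    conv = Conv1D(filters, kernel, padding='valid', activation='relu', strides=1)(conv)
    conv = MaxPooling1D(3)(conv)
    conv = Dropout(0.3)(conv)
conv = Flatten()(conv)
conv = Dropout(0.3)(Dense(256, activation='relu')(conv))
conv = Dropout(0.3)(Dense(128, activation='relu')(conv))
outputs = Dense(y.shape[1], activation='softmax')(conv)

model = Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, min_delta=0.0001)
mc = ModelCheckpoint('best_model.hdf5', monitor='val_accuracy', verbose=1,
                     save_best_only=True, mode='max')
history = model.fit(x_tr, y_tr, epochs=100, batch_size=32,
                    callbacks=[es, mc], validation_data=(x_val, y_val))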
backend/backend/cms/models.py (+5 -0)

from django.db import models


# Create your models here.
class MlModel(models.Model):
    id = models.AutoField(primary_key=True)
    description = models.TextField(blank=True)
    timestamp = models.DateTimeField(blank=True, auto_now=True)
    details = models.JSONField()
backend/backend/cms/serializers.py (+9 -1)

from django.contrib.auth.models import User, Group
from .models import MlModel
from rest_framework import serializers

...
@@ -11,4 +12,11 @@ class UserSerializer(serializers.HyperlinkedModelSerializer):

class GroupSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Group
        fields = ['url', 'name']


class MlModelSerializer(serializers.ModelSerializer):
    class Meta:
        model = MlModel
        fields = ('__all__')
backend/backend/cms/views.py (+36 -1)

...
@@ -2,10 +2,17 @@ from http.client import HTTPResponse

from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework import permissions
from backend.cms.serializers import MlModelSerializer
from backend.cms.serializers import UserSerializer, GroupSerializer
from rest_framework.decorators import action
from rest_framework.response import Response
from .models import MlModel
from .model.train import train
from .model.predict import predict


class UserViewSet(viewsets.ModelViewSet):
    """
...
@@ -20,6 +27,9 @@ class UserViewSet(viewsets.ModelViewSet):
        print('Function ran')
        results = train()
        print(results)
        return Response({'success': True})
...
@@ -29,4 +39,29 @@ class GroupViewSet(viewsets.ModelViewSet):
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
    permission_classes = [permissions.IsAuthenticated]


class MlModelViewSet(viewsets.ViewSet):
    queryset = MlModel.objects.all()
    serializer_class = MlModelSerializer
    permission_classes = [permissions.IsAuthenticated]

    @action(detail=False)
    def train(*args, **kwargs):
        print('Function ran')
        results = train()
        print(results)
        return Response({'success': True})

    @action(detail=False)
    def predict(*args, **kwargs):
        print('Function ran')
        results = predict()
        print(results)
        return Response({'success': True})
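With the MlModelViewSet registered under r'mlmodels' in urls.py (shown further down), DRF's DefaultRouter exposes the two @action(detail=False) methods as list-level GET routes. A client sketch follows; the host, URL prefix, and basic-auth credentials are assumptions (the include() line in urls.py is elided in this diff), not values from the commit.

# Hypothetical client calls for the new endpoints (prefix and auth are assumed).
import requests

BASE = 'http://localhost:8000'      # assumed dev-server address, router at site root
AUTH = ('admin', 'password')        # any account that satisfies IsAuthenticated

# @action(detail=False) on MlModelViewSet.train -> GET <prefix>/mlmodels/train/
r = requests.get(f'{BASE}/mlmodels/train/', auth=AUTH)
print(r.json())                     # {'success': True} once training finishes

# @action(detail=False) on MlModelViewSet.predict -> GET <prefix>/mlmodels/predict/
r = requests.get(f'{BASE}/mlmodels/predict/', auth=AUTH)
print(r.json())

Note that train() runs synchronously inside the request, so the first call blocks until model.fit completes.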
backend/backend/settings.py (+1 -0)

...
@@ -38,6 +38,7 @@ INSTALLED_APPS = [
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'backend.cms'
]

MIDDLEWARE = [
...
backend/backend/urls.py (+1 -0)

...
@@ -5,6 +5,7 @@ from backend.cms import views

router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
router.register(r'mlmodels', views.MlModelViewSet)

# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
...
backend/best_model_final.hdf5 (new file): binary file added.
backend/db.sqlite3: binary file changed (no preview for this file type).
requirements.txt (new file)

absl-py==1.0.0