Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
2021-156
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Alahendra A.M.A.T.N.
2021-156
Commits
29aa6fa7
Commit
29aa6fa7
authored
Nov 21, 2021
by
Ranathunge R. A. D. O
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Added app folder
parent
32428019
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
274 additions
and
0 deletions
+274
-0
stress-detection/app/model.joblib
stress-detection/app/model.joblib
+1
-0
stress-detection/app/stress_detection.py
stress-detection/app/stress_detection.py
+273
-0
No files found.
stress-detection/app/model.joblib
0 → 100644
View file @
29aa6fa7
\ No newline at end of file
stress-detection/app/stress_detection.py
0 → 100644
View file @
29aa6fa7
import
os
import
random
import
sys
import
glob
import
keras
import
IPython.display
as
ipd
import
librosa
import
librosa.display
import
matplotlib.pyplot
as
plt
import
numpy
as
np
import
pandas
as
pd
import
plotly.graph_objs
as
go
import
plotly.offline
as
py
import
plotly.tools
as
tls
import
seaborn
as
sns
import
scipy.io.wavfile
import
tensorflow
# Configure plotly for inline (notebook) rendering.
# NOTE(review): notebook-only setup; harmless but unnecessary when run as a script.
py.init_notebook_mode(connected=True)
from
tensorflow.keras
import
regularizers
from
tensorflow.keras.callbacks
import
ModelCheckpoint
,
LearningRateScheduler
,
EarlyStopping
from
tensorflow.keras.callbacks
import
History
,
ReduceLROnPlateau
,
CSVLogger
from
tensorflow.keras.models
import
Model
,
Sequential
from
tensorflow.keras.layers
import
Dense
,
Embedding
,
LSTM
from
tensorflow.keras.layers
import
Input
,
Flatten
,
Dropout
,
Activation
,
BatchNormalization
from
tensorflow.keras.layers
import
Conv1D
,
MaxPooling1D
,
AveragePooling1D
from
tensorflow.keras.preprocessing
import
sequence
from
tensorflow.keras.preprocessing.sequence
import
pad_sequences
from
tensorflow.keras.preprocessing.text
import
Tokenizer
from
keras.utils
import
np_utils
from
tensorflow.keras.utils
import
to_categorical
from
sklearn.metrics
import
confusion_matrix
from
sklearn.preprocessing
import
LabelEncoder
from
scipy.fftpack
import
fft
from
scipy
import
signal
from
scipy.io
import
wavfile
from
tqdm
import
tqdm
# --- Dataset discovery -------------------------------------------------------
# Walk the actor sub-folders and build parallel lists of audio file paths and
# their integer emotion labels, parsed from the dash-separated file names.
input_duration = 3  # seconds of audio loaded per clip (used by librosa.load below)
data_path = r'Audio_Speech_Actors_01-24'
dir_list = os.listdir(data_path)
print(dir_list)
file_paths = []  # paths of the kept audio files
emotions = []    # emotion code for each kept file, parallel to file_paths
for actor in dir_list:
    #print(actor)
    actor_path = os.path.join(data_path, actor)
    #print(actor_path)
    file_names = os.listdir(actor_path)
    #print(file_names)
    for file_name in file_names:
        file_path = os.path.join(actor_path, file_name)
        #print(file_path)
        # Strip the 4-char extension (".wav") and split the dash-separated
        # metadata fields, e.g. "03-01-05-01-02-01-12.wav".
        nm = file_name[:-4].split('-')
        emotion = int(nm[2])      # third field is the emotion code
        # Last field is the actor id; parity presumably encodes gender
        # (odd/even) — TODO confirm against the dataset's naming convention.
        gender = int(nm[-1]) % 2
        # NOTE(review): rebinds the *outer* loop variable `actor` to the
        # numeric actor id. Harmless only because the outer loop reassigns
        # it each iteration — worth renaming.
        actor = int(nm[-1])
        # Exclude actors 21-24 (presumably held out — verify).
        if (actor != 21 and actor != 22 and actor != 23 and actor != 24):
            file_paths.append(file_path)
            emotions.append(emotion)
            print(gender, actor)
# Spot-check; assumes more than 100 files were found (raises IndexError otherwise).
print(file_paths[100], emotions[100])
def noise(data):
    """
    Return *data* (promoted to float64) with white noise added.

    The noise amplitude is a random fraction — at most 0.5% — of the
    signal's peak value, so louder clips receive proportionally louder
    noise. Draws from NumPy's global random state.
    """
    # you can take any distribution from https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html
    amplitude = 0.005 * np.random.uniform() * np.amax(data)
    white = np.random.normal(size=data.shape[0])
    return data.astype('float64') + amplitude * white
def pitch(data, sample_rate):
    """
    Return *data* pitch-shifted upward by a random fractional amount.

    The shift is drawn uniformly from [0, 4) steps on a 12-bins-per-octave
    scale, i.e. up to roughly four semitones. Draws from NumPy's global
    random state.
    """
    octave_bins = 12
    max_shift = 2
    shift_steps = max_shift * 2 * (np.random.uniform())
    shifted = librosa.effects.pitch_shift(
        data.astype('float64'),
        sample_rate,
        n_steps=shift_steps,
        bins_per_octave=octave_bins,
    )
    return shifted
def log_specgram(audio, sample_rate, window_size=20, step_size=10, eps=1e-10):
    """
    Compute a log-power spectrogram of *audio*.

    ``window_size`` and ``step_size`` are in milliseconds and are converted
    to sample counts before calling scipy. Returns ``(freqs, times,
    log_spec)`` where ``log_spec`` is time-major (times x freqs) float32;
    ``eps`` guards the logarithm against zero power.

    NOTE(review): ``step_size`` is passed to scipy as *noverlap* (segment
    overlap), not as the hop length — confirm this is intended.
    """
    samples_per_ms = sample_rate / 1e3
    segment_len = int(round(window_size * samples_per_ms))
    overlap_len = int(round(step_size * samples_per_ms))
    freqs, times, spec = signal.spectrogram(
        audio,
        fs=sample_rate,
        window='hann',
        nperseg=segment_len,
        noverlap=overlap_len,
        detrend=False,
    )
    log_spec = np.log(spec.T.astype(np.float32) + eps)
    return freqs, times, log_spec
# --- Feature set 1: MFCCs of the original (unaugmented) audio ----------------
data1 = []
for i in tqdm(range(len(file_paths))):
    #print(i,file_paths[i])
    # Load up to `input_duration` seconds at 44.1 kHz (22050*2), skipping the
    # first 0.5 s — presumably leading silence; confirm against the recordings.
    X, sample_rate = librosa.load(file_paths[i], res_type='kaiser_fast', duration=input_duration, sr=22050*2, offset=0.5)
    sample_rate = np.array(sample_rate)
    # 13 MFCCs averaged over axis=0. NOTE(review): axis=0 collapses the
    # *coefficient* axis, giving one value per frame rather than one per
    # coefficient — confirm this is the intended feature.
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
    feature = mfccs
    data1.append(feature)
# --- Feature set 2: MFCCs of noise-augmented audio ---------------------------
data2 = []
for i in tqdm(range(len(file_paths))):
    X, sample_rate = librosa.load(file_paths[i], res_type='kaiser_fast', duration=input_duration, sr=22050*2, offset=0.5)
    # White-noise augmentation (see noise() above).
    X = noise(X)
    sample_rate = np.array(sample_rate)
    # Same MFCC feature as the unaugmented pass.
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
    feature = mfccs
    # NOTE(review): `a` is never used — dead code.
    a = random.uniform(0, 1)
    data2.append(feature)
# --- Feature set 3: MFCCs of pitch-shifted audio -----------------------------
data3 = []
for i in tqdm(range(len(file_paths))):
    X, sample_rate = librosa.load(file_paths[i], res_type='kaiser_fast', duration=input_duration, sr=22050*2, offset=0.5)
    # Random pitch-shift augmentation (see pitch() above).
    X = pitch(X, sample_rate)
    sample_rate = np.array(sample_rate)
    # Same MFCC feature as the unaugmented pass.
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
    feature = mfccs
    # NOTE(review): `a` is never used — dead code.
    a = random.uniform(0, 1)
    data3.append(feature)
# --- Assemble feature matrices and binary labels -----------------------------
data1_new = pd.DataFrame(data1)
data2_new = pd.DataFrame(data2)
data3_new = pd.DataFrame(data3)
# Clips of different lengths yield different numbers of MFCC frames; pad the
# ragged rows with zeros so the matrices are rectangular.
data1_new = data1_new.fillna(0)
data2_new = data2_new.fillna(0)
data3_new = data3_new.fillna(0)
# Map emotion codes to a binary target:
#   1, 2, 3, 8 -> 0   and   4, 5, 6, 7 -> 1
# (presumably "not stressed" vs "stressed" — confirm the code meanings).
# NOTE(review): if an emotion code outside 1-8 ever appears, no label is
# appended and `target` silently falls out of sync with the feature rows.
target = []
for emotion in emotions:
    if (emotion == 1 or emotion == 2 or emotion == 3 or emotion == 8):
        target.append(0)
    elif (emotion == 4 or emotion == 5 or emotion == 6 or emotion == 7):
        target.append(1)
# --- Stack the three feature sets and one-hot encode the labels --------------
# Rows are ordered [original | noise-augmented | pitch-augmented], so the
# label vector is repeated three times to stay aligned with the rows.
data_new = pd.concat([data1_new, data2_new, data3_new]).values
#data_new=data1_new.values
target_new = np.concatenate([target, target, target])
# One-hot encode with the tensorflow.keras utility already imported at the
# top of the file. (The original called keras.utils.np_utils.to_categorical;
# np_utils was removed from Keras, and the intermediate
# `target_new = np.array(target)` it computed was immediately overwritten —
# both dropped here.)
target_new = to_categorical(target_new)
print(data_new.shape)
# --- Scale, reshape for Conv1D, and split train/test -------------------------
from sklearn.preprocessing import MinMaxScaler
import joblib
scaler = MinMaxScaler()
# Fit on the FULL dataset before splitting.
# NOTE(review): scaling before train_test_split leaks test-set statistics
# into the scaler — fit on the training split only if that matters here.
data_new = scaler.fit_transform(data_new)
# Conv1D expects (samples, timesteps, channels); add a single channel axis.
data_new = data_new.reshape(data_new.shape[0], data_new.shape[1], 1)
# Persist the fitted scaler so inference can apply the identical transform.
joblib.dump(scaler, 'scaler.sav')
from sklearn.model_selection import train_test_split
# 80/20 split. NOTE(review): no random_state, so the split is not reproducible.
train_data, test_data, train_target, test_target = train_test_split(data_new, target_new, test_size=0.2)
# Set up Keras util functions
from
keras
import
backend
as
K
def precision(y_true, y_pred):
    """
    Batch-wise precision metric for Keras: TP / (predicted positives).

    Rounds the (clipped) predictions to 0/1 before counting; K.epsilon()
    guards against division by zero when nothing is predicted positive.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_pos + K.epsilon())
def recall(y_true, y_pred):
    """
    Batch-wise recall metric for Keras: TP / (actual positives).

    Rounds the (clipped) predictions to 0/1 before counting; K.epsilon()
    guards against division by zero when no positives exist in the batch.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def fscore(y_true, y_pred):
    """
    Batch-wise F1 score built from the precision/recall metrics above.

    Returns the integer 0 when the batch contains no positive ground-truth
    labels. NOTE(review): comparing a backend tensor to 0 with `==` only
    behaves as a Python boolean in eager mode — confirm this metric is not
    used inside a compiled graph.
    """
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    prec = precision(y_true, y_pred)
    rec = recall(y_true, y_pred)
    return 2 * (prec * rec) / (prec + rec + K.epsilon())
def get_lr_metric(optimizer):
    """
    Return a Keras-style metric that reports *optimizer*'s learning rate.

    The returned callable has the standard (y_true, y_pred) metric
    signature but ignores both arguments and simply exposes optimizer.lr.
    """
    def lr(y_true, y_pred):
        return optimizer.lr

    return lr
# New model
# --- 1-D CNN over the MFCC feature vector ------------------------------------
# Stacked Conv1D blocks with ReLU, two BatchNorm+Dropout+MaxPool stages, then
# a flattened softmax head for the 2 classes.
model = Sequential()
# First conv uses 'valid' padding and declares the input shape
# (timesteps = feature length, 1 channel — matches the reshape above).
model.add(Conv1D(256, 8, padding='valid', input_shape=(train_data.shape[1], 1)))
model.add(Activation('relu'))
model.add(Conv1D(256, 8, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(64, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(64, 8, padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
# Edit according to target class no.
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Model Training
# Decay the learning rate (x0.9) whenever validation loss plateaus for
# 20 epochs, down to a floor of 1e-6.
lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=20, min_lr=0.000001)
# Please change the model name accordingly.
# NOTE(review): hard-coded absolute Windows path — breaks on any other
# machine; consider a relative path or configuration value.
mcp_save = ModelCheckpoint('C:\\Users\\acer\\OneDrive\\3.1 Stress detector Model\\Data_noiseNshift.h5', save_best_only=True, monitor='val_loss', mode='min')
# Train for 100 epochs, checkpointing only the best model by validation loss.
# NOTE(review): the test split doubles as the validation set here, so the
# final evaluation below is not on truly unseen data.
cnnhistory = model.fit(train_data, train_target, batch_size=16, epochs=100, validation_data=(test_data, test_target), callbacks=[mcp_save, lr_reduce])
# --- Training curves: train vs validation loss per epoch ---------------------
plt.plot(cnnhistory.history['loss'])
plt.plot(cnnhistory.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Saving the model.json
# Serialize the architecture only; the weights live in the .h5 checkpoint
# written during training.
import json
# NOTE(review): the `json` import is unused — to_json() already returns a
# string. Hard-coded absolute Windows path, same portability issue as above.
model_json = model.to_json()
with open("C:\\Users\\acer\\OneDrive\\3.1 Stress detector Model\\model.json", "w") as json_file:
    json_file.write(model_json)
# --- Reload the best checkpoint and evaluate it ------------------------------
import tensorflow as tf
from tensorflow.keras.initializers import glorot_uniform
# custom_objects maps the serialized initializer name back to a constructed
# instance — presumably required by the TF version used when saving; verify.
loaded_model = tf.keras.models.load_model("C:\\Users\\acer\\OneDrive\\3.1 Stress detector Model\\Data_noiseNshift.h5", custom_objects={'GlorotUniform': glorot_uniform()})
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# NOTE(review): test_data was also the validation set during training (and
# the checkpoint was selected on it), so this score is optimistic.
score = loaded_model.evaluate(test_data, test_target, verbose=0)
print("%s: %.4f%%" % (loaded_model.metrics_names[1], score[1]*100))
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment