Commit dd96ab05 authored by Lihinikaduwa D.N.R.

Merge branch 'it18257632' into 'master'

Generate accuracy & loss graphs

See merge request !85
parents 51b6109d 2fce640d
import json
import os

import librosa

DATASET_PATH = "dataset"
JSON_PATH = "data.json"
SAMPLES_TO_CONSIDER = 22050  # 1 second of audio at librosa's default 22050 Hz sample rate


def prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048):
    # data dictionary
    data = {
        "mappings": [],
        "labels": [],
        "MFCCs": [],
        "files": []
    }

    # loop through all sub-dirs
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
        if dirpath != dataset_path:
            # update mapping: the sub-directory name is the category label
            category = dirpath.split("/")[-1]
            data["mappings"].append(category)
            print(f"Processing {category}")

            # loop through all the file names and extract MFCCs
            for f in filenames:
                # get file path
                file_path = os.path.join(dirpath, f)

                # load audio file
                signal, sr = librosa.load(file_path)

                # ensure the audio file is at least 1 sec
                if len(signal) >= SAMPLES_TO_CONSIDER:
                    # enforce 1 sec signal
                    signal = signal[:SAMPLES_TO_CONSIDER]

                    # extract the MFCCs
                    MFCCs = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=n_mfcc,
                                                 hop_length=hop_length, n_fft=n_fft)

                    # store data
                    data["labels"].append(i - 1)
                    data["MFCCs"].append(MFCCs.T.tolist())
                    data["files"].append(file_path)

    # store in json file
    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)


if __name__ == "__main__":
    prepare_dataset(DATASET_PATH, JSON_PATH)
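As a quick sanity check after running the script above, the generated data.json can be loaded back and inspected. A minimal sketch, not part of the merge request; the expected shape follows from the constants above, where 1 s of audio at 22050 Hz with hop_length=512 yields 44 MFCC frames:

import json

import numpy as np

# load the extracted features and report basic statistics
with open("data.json", "r") as fp:
    data = json.load(fp)

print(f"categories: {data['mappings']}")
print(f"total samples: {len(data['MFCCs'])}")

# each stored sample is transposed to (frames, n_mfcc); for 1 s of audio
# at 22050 Hz with hop_length=512 this should be (44, 13)
mfcc = np.array(data["MFCCs"][0])
print(f"MFCC shape per sample: {mfcc.shape}")

A revised copy of the same preparation script follows; it additionally logs each processed file with its numeric label.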
import json
import os

import librosa

DATASET_PATH = "dataset"
JSON_PATH = "data.json"
SAMPLES_TO_CONSIDER = 22050  # 1 second of audio at librosa's default 22050 Hz sample rate


def prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048):
    # data dictionary
    data = {
        "mappings": [],
        "labels": [],
        "MFCCs": [],
        "files": []
    }

    # loop through all sub-dirs
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
        if dirpath != dataset_path:
            # update mapping: the sub-directory name is the category label
            category = dirpath.split("/")[-1]
            data["mappings"].append(category)
            print(f"Processing {category}")

            # loop through all the file names and extract MFCCs
            for f in filenames:
                # get file path
                file_path = os.path.join(dirpath, f)

                # load audio file
                signal, sr = librosa.load(file_path)

                # ensure the audio file is at least 1 sec
                if len(signal) >= SAMPLES_TO_CONSIDER:
                    # enforce 1 sec signal
                    signal = signal[:SAMPLES_TO_CONSIDER]

                    # extract the MFCCs
                    MFCCs = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=n_mfcc,
                                                 hop_length=hop_length, n_fft=n_fft)

                    # store data
                    data["labels"].append(i - 1)
                    data["MFCCs"].append(MFCCs.T.tolist())
                    data["files"].append(file_path)
                    print(f"{file_path}: {i-1}")

    # store in json file
    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)


if __name__ == "__main__":
    prepare_dataset(DATASET_PATH, JSON_PATH)
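The training script below consumes these arrays through get_data_splits, whose body is elided from the diff. For orientation, here is a hedged sketch of what such a loader typically looks like: the function name and the data.json layout come from this merge request, but the split sizes and the added channel axis are assumptions (a 2D-convolutional model would need the extra axis):

import json

import numpy as np
from sklearn.model_selection import train_test_split


def get_data_splits(data_path, test_size=0.1, validation_size=0.1):
    # illustrative re-implementation; the real get_data_splits is not shown in the diff
    with open(data_path, "r") as fp:
        data = json.load(fp)

    X = np.array(data["MFCCs"])   # (samples, frames, n_mfcc)
    y = np.array(data["labels"])

    # carve out test and validation sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    X_train, X_validation, y_train, y_validation = train_test_split(
        X_train, y_train, test_size=validation_size)

    # add a channel axis for a 2D CNN: (samples, frames, n_mfcc, 1)
    X_train = X_train[..., np.newaxis]
    X_validation = X_validation[..., np.newaxis]
    X_test = X_test[..., np.newaxis]

    return X_train, X_validation, X_test, y_train, y_validation, y_test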
import json
import numpy as np
+import matplotlib.pyplot as pyplot
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
@@ -7,10 +8,10 @@ DATA_PATH = "data.json"
SAVE_MODEL_PATH = "model.h5"
LEARNING_RATE = 0.0001
-EPOCHS = 40
+EPOCHS = 100
BATCH_SIZE = 32
-NUM_KEYWORDS = 15
+NUM_KEYWORDS = 13

def load_dataset(data_path):
@@ -78,6 +79,23 @@ def build_model(input_shape, learning_rate, error="sparse_categorical_crossentropy"):
    return model

+def generate_graph(history):
+    # plot training and validation loss over the epochs
+    pyplot.plot(history.history['loss'], 'r', label='train loss')
+    pyplot.plot(history.history['val_loss'], 'g', label='validation loss')
+    pyplot.legend()
+    pyplot.xlabel('Epochs')
+    pyplot.ylabel('Loss')
+    pyplot.show()
+
+    # plot training and validation accuracy over the epochs
+    pyplot.plot(history.history['accuracy'], label='Training Accuracy')
+    pyplot.plot(history.history['val_accuracy'], label='Validation Accuracy')
+    pyplot.legend()
+    pyplot.xlabel('Epochs')
+    pyplot.ylabel('Accuracy')
+    pyplot.show()

def main():
    # load train/validation/test data splits
    X_train, X_validation, X_test, y_train, y_validation, y_test = get_data_splits(DATA_PATH)
@@ -87,7 +105,8 @@ def main():
    model = build_model(input_shape, LEARNING_RATE)

    # train the model, keeping the history for plotting
-    model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_validation, y_validation))
+    history = model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE,
+                        validation_data=(X_validation, y_validation))

    # evaluate the model
    test_error, test_accuracy = model.evaluate(X_test, y_test)
@@ -96,6 +115,8 @@ def main():
    # save the model
    model.save(SAVE_MODEL_PATH)

+    # plot the accuracy/loss curves from the training history
+    generate_graph(history)

if __name__ == "__main__":
    main()
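Once the model is saved, it can be reloaded for keyword classification. A hypothetical inference sketch: the file path is made up, the clip is assumed to be at least 1 s long, and the batch/channel axes assume the 2D-CNN input layout discussed above, which the diff does not show; preprocessing must mirror prepare_dataset exactly:

import librosa
import numpy as np
import tensorflow.keras as keras

# reload the trained model saved by the training script
model = keras.models.load_model("model.h5")

# hypothetical input clip; preprocessing mirrors prepare_dataset
signal, sr = librosa.load("dataset/some_keyword/example.wav")
signal = signal[:22050]  # enforce 1 sec, as in prepare_dataset

MFCCs = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=13, hop_length=512, n_fft=2048)

# add batch and channel axes: (1, frames, n_mfcc, 1); the channel axis is an
# assumption based on a 2D-CNN input shape
X = MFCCs.T[np.newaxis, ..., np.newaxis]

prediction = model.predict(X)
print(f"predicted label index: {np.argmax(prediction)}")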