Commit cff90d92 authored by Linisha Siriwardana's avatar Linisha Siriwardana

Merge branch 'Lecture_Summarizing' into 'important_test_LEC_SUMM_01'

# Conflicts:
#   Test_text-summarizer/text-summarizer.py
parents ca56a46a d1264422
@@ -9,7 +9,39 @@ This project will be created under the guidelines provided by the Research Proje
The main objectives targeted in this Research Project are listed as follows.
* Tracking student attendance using facial detection and facial recognition, and notifying and collecting feedback from students who are absent or who leave the lecture during a specified time period (Attendance Register).
* Monitoring student behavior within the classroom during lecture periods to identify potential problem areas in retaining student attention and to adapt lecturing styles to suit student needs (Monitoring Student Behavior).
* Summarizing the converted text, presenting it as a document, and identifying and highlighting the important points of the lecture (Lecture Summarizing); a minimal sketch follows this list.
* Monitoring lecturers’ behavior by analyzing their teaching style in the lecture hall during lecture hours (Monitor Lecturer Performance).
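
As a rough illustration of the Lecture Summarizing objective, here is a minimal, hypothetical sketch of frequency-based extractive summarization. It is not the project's implementation (see `Test_text-summarizer/text-summarizer.py` in this merge); the `summarize` helper and its scoring rule are assumptions made purely for illustration.

```python
import re
from collections import Counter

def summarize(text, max_sentences=3):
    """Score sentences by average word frequency and return the top ones as 'highlighted' points."""
    sentences = re.split(r'(?<=[.!?])\s+', text.strip())
    freq = Counter(re.findall(r'[a-z]+', text.lower()))
    scored = []
    for s in sentences:
        tokens = re.findall(r'[a-z]+', s.lower())
        if tokens:
            # a sentence scores higher when it contains words that recur across the lecture
            scored.append((sum(freq[t] for t in tokens) / len(tokens), s))
    top = {s for _, s in sorted(scored, reverse=True)[:max_sentences]}
    # keep the selected sentences in their original order
    return [s for s in sentences if s in top]

print(summarize("Replace this string with a lecture transcript to try the sketch."))
```
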
## Main Research Questions
This Research Project is conducted to answer the following four research questions.
* “What is the most efficient way of tracking student attendance, and how can the lecturer analyze the reasons for student absenteeism?” (Q1).
* “Does a correlation exist between lecturing style and student behavior in the classroom, and how can Computer Vision and Artificial Intelligence be incorporated in determining this relationship?” (Q2).
* “How can the lecture content be summarized so that students can pay more attention to the lecture and reduce the time spent taking notes?” (Q3).
* “How can lecturer performance be evaluated by tracking the lecturer's behavior during a lecture and analyzing the quality of the delivered lecture content?” (Q4).
## Individual Research Questions
### Attendance Register
### Monitoring Student Behavior
### Lecture Summarizing
### Monitoring Lecturer Performance
## Individual Objectives
### Attendance Register
### Monitoring Student Behavior
### Lecture Summarizing
### Monitoring Lecturer Performance
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JavaScriptSettings">
<option name="languageLevel" value="ES6" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/noise_reduction-master.iml" filepath="$PROJECT_DIR$/.idea/noise_reduction-master.iml" />
</modules>
</component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="pytest" />
</component>
</module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="d9c47fae-72ad-44d8-8c0a-264cd11b339a" name="Default Changelist" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="ProjectId" id="1aJLI7MFjDDgyF04AeP3LzmByl7" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showExcludedFiles" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" />
<property name="WebServerToolWindowFactoryState" value="false" />
<property name="last_opened_file_path" value="$PROJECT_DIR$/../../2020-101-QA_RELEASE" />
<property name="node.js.detected.package.eslint" value="true" />
<property name="node.js.detected.package.tslint" value="true" />
<property name="node.js.path.for.package.eslint" value="project" />
<property name="node.js.path.for.package.tslint" value="project" />
<property name="node.js.selected.package.eslint" value="(autodetect)" />
<property name="node.js.selected.package.tslint" value="(autodetect)" />
</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="D:\#SLIIT\4th Year\Research\noise_reduction-master\noise_reduction-master\01_samples_trimmed_noise_reduced" />
</key>
</component>
<component name="RunManager" selected="Python.noise">
<configuration name="noise" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="noise_reduction-master" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/noise.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="true" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="trim" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="noise_reduction-master" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/trim.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="true" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<recent_temporary>
<list>
<item itemvalue="Python.noise" />
<item itemvalue="Python.noise" />
<item itemvalue="Python.noise" />
<item itemvalue="Python.noise" />
<item itemvalue="Python.noise" />
</list>
</recent_temporary>
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="d9c47fae-72ad-44d8-8c0a-264cd11b339a" name="Default Changelist" comment="" />
<created>1586448666396</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1586448666396</updated>
<workItem from="1586448684134" duration="3268000" />
<workItem from="1586492221866" duration="2610000" />
<workItem from="1586594655750" duration="579000" />
<workItem from="1587028762539" duration="1504000" />
</task>
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="1" />
</component>
<component name="WindowStateProjectService">
<state x="504" y="216" key="#com.intellij.fileTypes.FileTypeChooser" timestamp="1586499944992">
<screen x="0" y="0" width="1536" height="824" />
</state>
<state x="504" y="216" key="#com.intellij.fileTypes.FileTypeChooser/0.0.1536.824@0.0.1536.824" timestamp="1586499944992" />
<state x="549" y="168" key="FileChooserDialogImpl" timestamp="1587029076432">
<screen x="0" y="0" width="1536" height="824" />
</state>
<state x="549" y="168" key="FileChooserDialogImpl/0.0.1536.824@0.0.1536.824" timestamp="1587029076432" />
<state x="465" y="238" key="com.intellij.ide.util.TipDialog" timestamp="1587028837925">
<screen x="0" y="0" width="1536" height="824" />
</state>
<state x="465" y="238" key="com.intellij.ide.util.TipDialog/0.0.1536.824@0.0.1536.824" timestamp="1587028837925" />
</component>
</project>
\ No newline at end of file
# noise_reduction
> See test results on: [https://dodiku.github.io/noise_reduction/](https://dodiku.github.io/noise_reduction/)
## Audio enhancements feature tests in Python3
#### Installation
To install:
1. ``$ brew install sox``
1. ``$ brew install vorbis-tools``
1. Create a virtualenv
1. Install dependencies in one of two options:
- manually *(recommended)*:
``$ pip3 install librosa``
``$ pip3 install pysndfx``
- or automatically using pip:
``$ pip3 install -r requirements.txt``
To run:
``$ python3 noise.py``
#### Interesting resources:
- LibROSA ([documentation](http://librosa.github.io/librosa/index.html) + [repository](https://github.com/librosa/librosa) + [paper](https://bmcfee.github.io/papers/scipy2015_librosa.pdf))
- Think DSP ([book](http://greenteapress.com/wp/think-dsp/) + [repository](https://github.com/AllenDowney/ThinkDSP/))
- Pyo ([blog post](http://www.matthieuamiguet.ch/blog/diy-guitar-effects-python) + [repository](https://github.com/belangeo/pyo))
- pysndfx ([repository](https://github.com/carlthome/python-audio-effects/tree/04dbee6063b0537b63346bb1e55deb03406e1170/pysndfx))
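
For orientation, here is a condensed sketch of the `pysndfx` `AudioEffectsChain` pattern that `noise.py` (included further down in this diff) is built around. The shelving cut-off frequencies below (100 Hz / 4000 Hz) are illustrative placeholders only; the actual scripts derive their thresholds from spectral centroids or MFCCs.

```python
import librosa
from pysndfx import AudioEffectsChain

# load one of the repository's sample clips: audio time series (y) and sampling rate (sr)
y, sr = librosa.load('00_samples/01_counting.m4a')

# build a chain of shelving filters and apply it to the raw samples
fx = (AudioEffectsChain()
      .lowshelf(gain=-30.0, frequency=100, slope=0.8)     # attenuate low-frequency rumble
      .highshelf(gain=-12.0, frequency=4000, slope=0.5))  # attenuate high-frequency hiss
y_clean = fx(y)
```
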
#### A bit less relevant papers:
- Noise Cancellation Method for Robust Speech Recognition ([PDF](http://research.ijcaonline.org/volume45/number11/pxc3879438.pdf))
- Robust Features for Noisy Speech Recognition using MFCC Computation from Magnitude Spectrum of Higher Order Autocorrelation Coefficients
([PDF](https://pdfs.semanticscholar.org/a483/5f28c02f07e6bef04ff9db948505dc990af7.pdf))
- Improving the Noise-Robustness of Mel-Frequency Cepstral Coefficients for Speech Processing
([PDF](http://www.sapaworkshops.org/2006/2006/papers/131.pdf))
Folder which includes the clips to reduce noise: 00_samples
Folder which includes the noise-reduced files: 01_samples_trimmed_noise_reduced
\ No newline at end of file
import librosa
from pysndfx import AudioEffectsChain
import numpy as np
import math
import python_speech_features
import scipy as sp
from scipy import signal
import soundfile
# http://python-speech-features.readthedocs.io/en/latest/
# https://github.com/jameslyons/python_speech_features
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# http://dsp.stackexchange.com/search?q=noise+reduction/
'''------------------------------------
FILE READER:
receives filename,
returns audio time series (y) and sampling rate of y (sr)
------------------------------------'''
def read_file(file_name):
sample_file = file_name
sample_directory = '00_samples/'
sample_path = sample_directory + sample_file
# generating audio time series and a sampling rate (int)
y, sr = librosa.load(sample_path)
return y, sr
'''------------------------------------
NOISE REDUCTION USING POWER:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_power(y, sr):
cent = librosa.feature.spectral_centroid(y=y, sr=sr)
threshold_h = round(np.median(cent))*1.5
threshold_l = round(np.median(cent))*0.1
less_noise = AudioEffectsChain().lowshelf(gain=-30.0, frequency=threshold_l, slope=0.8).highshelf(gain=-12.0, frequency=threshold_h, slope=0.5)#.limiter(gain=6.0)
y_clean = less_noise(y)
return y_clean
'''------------------------------------
NOISE REDUCTION USING CENTROID ANALYSIS:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_centroid_s(y, sr):
cent = librosa.feature.spectral_centroid(y=y, sr=sr)
threshold_h = np.max(cent)
threshold_l = np.min(cent)
less_noise = AudioEffectsChain().lowshelf(gain=-12.0, frequency=threshold_l, slope=0.5).highshelf(gain=-12.0, frequency=threshold_h, slope=0.5).limiter(gain=6.0)
y_cleaned = less_noise(y)
return y_cleaned
def reduce_noise_centroid_mb(y, sr):
cent = librosa.feature.spectral_centroid(y=y, sr=sr)
threshold_h = np.max(cent)
threshold_l = np.min(cent)
less_noise = AudioEffectsChain().lowshelf(gain=-30.0, frequency=threshold_l, slope=0.5).highshelf(gain=-30.0, frequency=threshold_h, slope=0.5).limiter(gain=10.0)
# less_noise = AudioEffectsChain().lowpass(frequency=threshold_h).highpass(frequency=threshold_l)
y_cleaned = less_noise(y)
cent_cleaned = librosa.feature.spectral_centroid(y=y_cleaned, sr=sr)
columns, rows = cent_cleaned.shape
boost_h = math.floor(rows/3*2)
boost_l = math.floor(rows/6)
boost = math.floor(rows/3)
# boost_bass = AudioEffectsChain().lowshelf(gain=20.0, frequency=boost, slope=0.8)
boost_bass = AudioEffectsChain().lowshelf(gain=16.0, frequency=boost_h, slope=0.5)#.lowshelf(gain=-20.0, frequency=boost_l, slope=0.8)
y_clean_boosted = boost_bass(y_cleaned)
return y_clean_boosted
'''------------------------------------
NOISE REDUCTION USING MFCC:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_mfcc_down(y, sr):
hop_length = 512
## librosa
# mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
# librosa.mel_to_hz(mfcc)
## mfcc
mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.lifter(mfcc)
sum_of_squares = []
index = -1
for r in mfcc:
sum_of_squares.append(0)
index = index + 1
for n in r:
sum_of_squares[index] = sum_of_squares[index] + n**2
strongest_frame = sum_of_squares.index(max(sum_of_squares))
hz = python_speech_features.base.mel2hz(mfcc[strongest_frame])
max_hz = max(hz)
min_hz = min(hz)
speech_booster = AudioEffectsChain().highshelf(frequency=min_hz*(-1)*1.2, gain=-12.0, slope=0.6).limiter(gain=8.0)
y_speach_boosted = speech_booster(y)
return (y_speach_boosted)
def reduce_noise_mfcc_up(y, sr):
hop_length = 512
## librosa
# mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
# librosa.mel_to_hz(mfcc)
## mfcc
mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.lifter(mfcc)
sum_of_squares = []
index = -1
for r in mfcc:
sum_of_squares.append(0)
index = index + 1
for n in r:
sum_of_squares[index] = sum_of_squares[index] + n**2
strongest_frame = sum_of_squares.index(max(sum_of_squares))
hz = python_speech_features.base.mel2hz(mfcc[strongest_frame])
max_hz = max(hz)
min_hz = min(hz)
speech_booster = AudioEffectsChain().lowshelf(frequency=min_hz*(-1), gain=12.0, slope=0.5)#.highshelf(frequency=min_hz*(-1)*1.2, gain=-12.0, slope=0.5)#.limiter(gain=8.0)
y_speach_boosted = speech_booster(y)
return (y_speach_boosted)
'''------------------------------------
NOISE REDUCTION USING MEDIAN:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_median(y, sr):
y = sp.signal.medfilt(y,3)
return (y)
'''------------------------------------
SILENCE TRIMMER:
receives an audio matrix,
returns an audio matrix with less silence and the amount of time that was trimmed
------------------------------------'''
def trim_silence(y):
y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500)
trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)
return y_trimmed, trimmed_length
'''------------------------------------
AUDIO ENHANCER:
receives an audio matrix,
returns the same matrix after audio manipulation
------------------------------------'''
def enhance(y):
apply_audio_effects = AudioEffectsChain().lowshelf(gain=10.0, frequency=260, slope=0.1).reverb(reverberance=25, hf_damping=5, room_scale=5, stereo_depth=50, pre_delay=20, wet_gain=0, wet_only=False)#.normalize()
y_enhanced = apply_audio_effects(y)
return y_enhanced
'''------------------------------------
OUTPUT GENERATOR:
receives a destination path, file name, audio matrix, and sample rate,
generates a wav file based on input
------------------------------------'''
def output_file(destination ,filename, y, sr, ext=""):
destination = destination + filename[:-4] + ext + '.wav'
librosa.output.write_wav(destination, y, sr)
'''------------------------------------
LOGIC:
[1] load file
[2] reduce noise
[3] trim silence
[4] output file
sample files:
01_counting.m4a
02_wind_and_cars.m4a
03_truck.m4a
04_voices.m4a
05_ambeint.m4a
06_office.m4a
------------------------------------'''
samples = ['01_counting.m4a','02_wind_and_cars.m4a','03_truck.m4a','04_voices.m4a','05_ambeint.m4a','06_office.m4a']
for s in samples:
# reading a file
filename = s
y, sr = read_file(filename)
# reducing noise using db power
y_reduced_power = reduce_noise_power(y, sr)
y_reduced_centroid_s = reduce_noise_centroid_s(y, sr)
y_reduced_centroid_mb = reduce_noise_centroid_mb(y, sr)
y_reduced_mfcc_up = reduce_noise_mfcc_up(y, sr)
y_reduced_mfcc_down = reduce_noise_mfcc_down(y, sr)
y_reduced_median = reduce_noise_median(y, sr)
# trimming silences
y_reduced_power, time_trimmed = trim_silence(y_reduced_power)
# print (time_trimmed)
y_reduced_centroid_s, time_trimmed = trim_silence(y_reduced_centroid_s)
# print (time_trimmed)
y_reduced_power, time_trimmed = trim_silence(y_reduced_power)
# print (time_trimmed)
y_reduced_centroid_mb, time_trimmed = trim_silence(y_reduced_centroid_mb)
# print (time_trimmed)
y_reduced_mfcc_up, time_trimmed = trim_silence(y_reduced_mfcc_up)
# print (time_trimmed)
y_reduced_mfcc_down, time_trimmed = trim_silence(y_reduced_mfcc_down)
# print (time_trimmed)
y_reduced_median, time_trimmed = trim_silence(y_reduced_median)
# generating output file [1]
# output_file('01_samples_trimmed_noise_reduced/', filename, y_reduced_power, sr, '_pwr')
output_file('01_samples_trimmed_noise_reduced/' ,filename, y_reduced_centroid_s, sr, '_ctr_s')
output_file('01_samples_trimmed_noise_reduced/' ,filename, y_reduced_centroid_mb, sr, '_ctr_mb')
output_file('01_samples_trimmed_noise_reduced/' ,filename, y_reduced_mfcc_up, sr, '_mfcc_up')
output_file('01_samples_trimmed_noise_reduced/' ,filename, y_reduced_mfcc_down, sr, '_mfcc_down')
output_file('01_samples_trimmed_noise_reduced/' ,filename, y_reduced_median, sr, '_median')
# output_file('01_samples_trimmed_noise_reduced/', filename, y, sr, '_org')
appdirs==1.4.3
audioread==2.1.4
Cython==0.25.2
decorator==4.0.11
joblib==0.11
librosa==0.5.0
numpy==1.12.1
packaging==16.8
pyparsing==2.2.0
pysndfx==0.1.0
python-speech-features==0.5
resampy==0.1.5
scikit-learn==0.18.1
scipy==0.19.0
six==1.10.0
html {
background-color: LightSeaGreen;
}
body {
font-family: "Courier New", Courier, monospace;
color: #222;
font-size: 0.9em;
}
h1 {
font-size: 4em;
text-align: center;
margin-bottom: 0.5em;
margin-top: 2em;
}
h2 {
font-size: 2.2em;
font-weight:normal;
text-align: center;
margin-bottom: 48px;
margin-top: 0px;
}
.text_container {
margin: auto;
margin-top: 76px;
max-width: 600px;
line-height: 2em;
margin-bottom: 100px;
}
h3 {
font-size: 1.6em;
font-weight: bold;
/* text-align: center; */
margin-bottom: 0.5em;
margin-top: 4em;
}
h4 {
font-size: 1.2em;
/*font-weight: bold;*/
/* text-align: center; */
margin-bottom: 0.5em;
margin-top: 4em;
}
.code{
border: 0.5px solid lightgray;
background-color: GhostWhite;
border-radius: 3px;
padding-top: 4px;
padding-bottom: 4px;
padding-right: 12px;
padding-left: 12px;
color: gray;
}
.duration{
color: rgba(34,34,34,0.6);
font-style: italic;
/*margin-top: -20px;*/
}
ul {
line-height: 36px;
}
footer {
text-align: center;
font-size: 14px;
margin-top: 128px;
margin-bottom: 64px;
color: rgba(255, 255, 255, 0.4);
}
import librosa
'''
01_counting.m4a
02_wind_and_cars.m4a
03_truck.m4a
04_voices.m4a
05_ambeint.m4a
06_office.m4a
'''
sample_file = '06_office.m4a'
sample_directory = '00_samples/'
sample_path = sample_directory + sample_file
trimmed_destination = 'samples_trimmed/'
silenced_destination = 'samples_silence_reduced/'
y, sr = librosa.load(sample_path)
y_trimmed, index = librosa.effects.trim(y, top_db=12, frame_length=2)
print(librosa.get_duration(y), librosa.get_duration(y_trimmed))
destination = trimmed_destination + sample_file[:-4] + '.wav'
librosa.output.write_wav(destination, y_trimmed, sr)