Commit 766293e4 authored by LiniEisha's avatar LiniEisha

small changes

parent 0f32dfe3
...@@ -7,18 +7,7 @@ import scipy as sp ...@@ -7,18 +7,7 @@ import scipy as sp
from scipy import signal from scipy import signal
import soundfile import soundfile
# http://python-speech-features.readthedocs.io/en/latest/
# https://github.com/jameslyons/python_speech_features
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# http://dsp.stackexchange.com/search?q=noise+reduction/
'''------------------------------------
FILE READER:
receives filename,
returns audio time series (y) and sampling rate of y (sr)
------------------------------------'''
def read_file(file_name): def read_file(file_name):
sample_file = file_name sample_file = file_name
sample_directory = '00_samples/' sample_directory = '00_samples/'
...@@ -29,82 +18,25 @@ def read_file(file_name): ...@@ -29,82 +18,25 @@ def read_file(file_name):
return y, sr return y, sr
'''------------------------------------
NOISE REDUCTION USING POWER:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_power(y, sr):
    """Attenuate noise with shelving filters keyed to the spectral centroid.

    The rounded median spectral centroid of ``y`` defines a low cutoff
    (10% of the median) and a high cutoff (150% of the median); energy
    below/above those points is cut with low-/high-shelf filters.

    y  -- audio time series
    sr -- sampling rate of ``y``
    Returns the filtered audio signal.
    """
    centroid = librosa.feature.spectral_centroid(y=y, sr=sr)
    median_cent = round(np.median(centroid))
    hi_cutoff = median_cent * 1.5
    lo_cutoff = median_cent * 0.1
    # shelving cuts outside the [lo_cutoff, hi_cutoff] band
    noise_cut = (AudioEffectsChain()
                 .lowshelf(gain=-30.0, frequency=lo_cutoff, slope=0.8)
                 .highshelf(gain=-12.0, frequency=hi_cutoff, slope=0.5))
    return noise_cut(y)
'''------------------------------------
NOISE REDUCTION USING CENTROID ANALYSIS:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_centroid_s(y, sr):
    """Shelve out energy beyond the observed spectral-centroid range.

    The min/max of the spectral centroid bound the band that is kept;
    a -12 dB shelf is applied on each side, followed by a limiter.

    y  -- audio time series
    sr -- sampling rate of ``y``
    Returns the filtered audio signal.
    """
    centroid = librosa.feature.spectral_centroid(y=y, sr=sr)
    hi_cutoff = np.max(centroid)
    lo_cutoff = np.min(centroid)
    noise_cut = (AudioEffectsChain()
                 .lowshelf(gain=-12.0, frequency=lo_cutoff, slope=0.5)
                 .highshelf(gain=-12.0, frequency=hi_cutoff, slope=0.5)
                 .limiter(gain=6.0))
    return noise_cut(y)
def reduce_noise_centroid_mb(y, sr):
    """Aggressive centroid-bounded noise cut followed by a bass boost.

    Like reduce_noise_centroid_s but with -30 dB shelves and a stronger
    limiter, then a +16 dB low shelf applied to the cleaned signal.

    y  -- audio time series
    sr -- sampling rate of ``y``
    Returns the cleaned, bass-boosted audio signal.
    """
    centroid = librosa.feature.spectral_centroid(y=y, sr=sr)
    noise_cut = (AudioEffectsChain()
                 .lowshelf(gain=-30.0, frequency=np.min(centroid), slope=0.5)
                 .highshelf(gain=-30.0, frequency=np.max(centroid), slope=0.5)
                 .limiter(gain=10.0))
    cleaned = noise_cut(y)
    # NOTE(review): the boost "frequency" below is 2/3 of the number of
    # centroid *frames*, not a value in Hz — this looks unintentional in
    # the original but is preserved to keep behavior identical.
    _, frame_count = librosa.feature.spectral_centroid(y=cleaned, sr=sr).shape
    bass_boost = AudioEffectsChain().lowshelf(
        gain=16.0, frequency=math.floor(frame_count / 3 * 2), slope=0.5)
    return bass_boost(cleaned)
'''MFCC'''
'''------------------------------------ def mffc_highshelf(y, sr):
NOISE REDUCTION USING MFCC:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_mfcc_down(y, sr):
hop_length = 512
## librosa
# mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
# librosa.mel_to_hz(mfcc)
## mfcc
mfcc = python_speech_features.base.mfcc(y) mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y) mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.lifter(mfcc) mfcc = python_speech_features.base.lifter(mfcc)
...@@ -128,15 +60,8 @@ def reduce_noise_mfcc_down(y, sr): ...@@ -128,15 +60,8 @@ def reduce_noise_mfcc_down(y, sr):
return (y_speach_boosted) return (y_speach_boosted)
def reduce_noise_mfcc_up(y, sr): def mfcc_lowshelf(y, sr):
hop_length = 512
## librosa
# mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
# librosa.mel_to_hz(mfcc)
## mfcc
mfcc = python_speech_features.base.mfcc(y) mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y) mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.lifter(mfcc) mfcc = python_speech_features.base.lifter(mfcc)
...@@ -155,27 +80,12 @@ def reduce_noise_mfcc_up(y, sr): ...@@ -155,27 +80,12 @@ def reduce_noise_mfcc_up(y, sr):
max_hz = max(hz) max_hz = max(hz)
min_hz = min(hz) min_hz = min(hz)
speech_booster = AudioEffectsChain().lowshelf(frequency=min_hz*(-1), gain=12.0, slope=0.5)#.highshelf(frequency=min_hz*(-1)*1.2, gain=-12.0, slope=0.5)#.limiter(gain=8.0) speech_booster = AudioEffectsChain().lowshelf(frequency=min_hz*(-1), gain=12.0, slope=0.5)
y_speach_boosted = speech_booster(y) y_speach_boosted = speech_booster(y)
return (y_speach_boosted) return (y_speach_boosted)
'''------------------------------------
NOISE REDUCTION USING MEDIAN:
receives an audio matrix,
returns the matrix after gain reduction on noise
------------------------------------'''
def reduce_noise_median(y, sr):
    """Suppress impulsive (click/pop) noise with a 3-tap median filter.

    y  -- audio time series
    sr -- accepted for signature parity with the other reducers; unused.
    Returns the median-filtered signal (edges are zero-padded by medfilt).
    """
    return sp.signal.medfilt(y, kernel_size=3)
'''------------------------------------
SILENCE TRIMMER:
receives an audio matrix,
returns an audio matrix with less silence and the amout of time that was trimmed
------------------------------------'''
def trim_silence(y): def trim_silence(y):
y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500) y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500)
trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed) trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)
...@@ -183,83 +93,37 @@ def trim_silence(y): ...@@ -183,83 +93,37 @@ def trim_silence(y):
return y_trimmed, trimmed_length return y_trimmed, trimmed_length
'''------------------------------------
AUDIO ENHANCER:
receives an audio matrix,
returns the same matrix after audio manipulation
------------------------------------'''
def enhance(y):
    """Make speech fuller: +10 dB bass shelf at 260 Hz plus light reverb.

    y -- audio time series
    Returns the processed audio signal.
    """
    effects = (AudioEffectsChain()
               .lowshelf(gain=10.0, frequency=260, slope=0.1)
               .reverb(reverberance=25, hf_damping=5, room_scale=5,
                       stereo_depth=50, pre_delay=20, wet_gain=0,
                       wet_only=False))
    return effects(y)
'''------------------------------------
OUTPUT GENERATOR:
receives a destination path, file name, audio matrix, and sample rate,
generates a wav file based on input
------------------------------------'''
def output_file(destination, filename, y, sr, ext=""):
    """Write an audio time series to a WAV file.

    destination -- output directory path (must end with a path separator)
    filename    -- source file name; its last 4 characters (the ".m4a"
                   extension) are dropped before appending ``ext`` + ".wav"
    y           -- audio time series to write
    sr          -- sampling rate of ``y``
    ext         -- optional tag inserted before the ".wav" suffix
    """
    destination = destination + filename[:-4] + ext + '.wav'
    # FIX: librosa.output.write_wav was deprecated in librosa 0.7 and
    # removed in 0.8; soundfile (already imported at the top of this
    # file) is the supported replacement.
    soundfile.write(destination, y, sr)
'''------------------------------------
LOGIC:
    [1] load file
    [2] reduce noise
    [3] trim silence
    [4] output file
------------------------------------'''
samples = ['01_counting.m4a']

for s in samples:
    # [1] load file
    filename = s
    y, sr = read_file(filename)

    # [2] noise-reduction passes
    y_reduced_centroid_s = reduce_noise_centroid_s(y, sr)
    y_reduced_mfcc_lowshelf = mfcc_lowshelf(y, sr)
    y_reduced_mfcc_highshelf = mffc_highshelf(y, sr)

    # [3] silence trimming
    # FIX: the original passed the *function objects* mfcc_lowshelf /
    # mffc_highshelf to trim_silence instead of the processed signals
    # computed above, which would raise at runtime.
    y_reduced_centroid_s, time_trimmed = trim_silence(y_reduced_centroid_s)
    y_reduced_mfcc_up, time_trimmed = trim_silence(y_reduced_mfcc_lowshelf)
    y_reduced_mfcc_down, time_trimmed = trim_silence(y_reduced_mfcc_highshelf)

    # [4] output files (one per reduction pass, plus the original)
    output_file('01_samples_trimmed_noise_reduced/', filename, y_reduced_centroid_s, sr, '_ctr_s')
    output_file('01_samples_trimmed_noise_reduced/', filename, y_reduced_mfcc_up, sr, '_mfcc_up')
    output_file('01_samples_trimmed_noise_reduced/', filename, y_reduced_mfcc_down, sr, '_mfcc_down')
    output_file('01_samples_trimmed_noise_reduced/', filename, y, sr, '_org')
import sumy
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
In an attempt to build an AI-ready workforce, Microsoft announced Intelligent Cloud Hub which has been launched to empower the next generation of students with AI-ready skills. Envisioned as a three-year collaborative program, Intelligent Cloud Hub will support around 100 institutions with AI infrastructure, course content and curriculum, developer support, development tools and give students access to cloud and AI services. As part of the program, the Redmond giant which wants to expand its reach and is planning to build a strong developer ecosystem in India with the program will set up the core AI infrastructure and IoT Hub for the selected campuses. The company will provide AI development tools and Azure AI services such as Microsoft Cognitive Services, Bot Services and Azure Machine Learning. According to Manish Prakash, Country General Manager-PS, Health and Education, Microsoft India, "With AI being the defining technology of our time, it is transforming lives and industry and the jobs of tomorrow will require a different skillset. This will require more collaborations and training and working with AI. That’s why it has become more critical than ever for educational institutions to integrate new cloud and AI technologies. The program is an attempt to ramp up the institutional set-up and build capabilities among the educators to educate the workforce of tomorrow." The program aims to build up the cognitive skills and in-depth understanding of developing intelligent cloud connected solutions for applications across industry. Earlier in April this year, the company announced Microsoft Professional Program In AI as a learning track open to the public. The program was developed to provide job-ready skills to programmers who wanted to hone their skills in AI and data science with a series of online courses which featured hands-on labs and expert instructors as well.
This program also included developer-focused AI school that provided a bunch of assets to help build AI skills.
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment