{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"1oggeLJuRhywdk0U3oLtSn4RI4OuqY3C8","timestamp":1685676269137}]},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":752},"id":"O3mwjxmyn81o","executionInfo":{"status":"ok","timestamp":1685677082601,"user_tz":-330,"elapsed":137429,"user":{"displayName":"Sulakshana Geeth","userId":"14781868669328079827"}},"outputId":"97f652a7-a9b4-4bd6-9682-a2317482ba68"},"outputs":[{"output_type":"stream","name":"stdout","text":["Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n","Requirement already satisfied: librosa in /usr/local/lib/python3.10/dist-packages (0.10.0.post2)\n","Requirement already satisfied: audioread>=2.1.9 in /usr/local/lib/python3.10/dist-packages (from librosa) (3.0.0)\n","Requirement already satisfied: numpy!=1.22.0,!=1.22.1,!=1.22.2,>=1.20.3 in /usr/local/lib/python3.10/dist-packages (from librosa) (1.22.4)\n","Requirement already satisfied: scipy>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from librosa) (1.10.1)\n","Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.10/dist-packages (from librosa) (1.2.2)\n","Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.10/dist-packages (from librosa) (1.2.0)\n","Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from librosa) (4.4.2)\n","Requirement already satisfied: numba>=0.51.0 in /usr/local/lib/python3.10/dist-packages (from librosa) (0.56.4)\n","Requirement already satisfied: soundfile>=0.12.1 in /usr/local/lib/python3.10/dist-packages (from librosa) (0.12.1)\n","Requirement already satisfied: pooch<1.7,>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa) (1.6.0)\n","Requirement already satisfied: soxr>=0.3.2 in /usr/local/lib/python3.10/dist-packages (from librosa) (0.3.5)\n","Requirement already satisfied: typing-extensions>=4.1.1 in /usr/local/lib/python3.10/dist-packages (from librosa) (4.5.0)\n","Requirement already satisfied: lazy-loader>=0.1 in /usr/local/lib/python3.10/dist-packages (from librosa) (0.2)\n","Requirement already satisfied: msgpack>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa) (1.0.5)\n","Requirement already satisfied: llvmlite<0.40,>=0.39.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba>=0.51.0->librosa) (0.39.1)\n","Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from numba>=0.51.0->librosa) (67.7.2)\n","Requirement already satisfied: appdirs>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from pooch<1.7,>=1.0->librosa) (1.4.4)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from pooch<1.7,>=1.0->librosa) (23.1)\n","Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from pooch<1.7,>=1.0->librosa) (2.27.1)\n","Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.20.0->librosa) (3.1.0)\n","Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.10/dist-packages (from soundfile>=0.12.1->librosa) (1.15.1)\n","Requirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from cffi>=1.0->soundfile>=0.12.1->librosa) (2.21)\n","Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch<1.7,>=1.0->librosa) (1.26.15)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch<1.7,>=1.0->librosa) (2022.12.7)\n","Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch<1.7,>=1.0->librosa) (2.0.12)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->pooch<1.7,>=1.0->librosa) (3.4)\n","Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.10/dist-packages (1.2.2)\n","Requirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (1.22.4)\n","Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (1.10.1)\n","Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (1.2.0)\n","Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (3.1.0)\n","Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n","Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (1.2.0)\n","Accuracy: 1.0\n","Model saved as audio_classifier_model.pkl\n"]},{"output_type":"display_data","data":{"text/plain":["<IPython.core.display.HTML object>"],"text/html":["\n"," <input type=\"file\" id=\"files-be4855f8-e3d2-40f8-bb2f-b95b611cedd6\" name=\"files[]\" multiple disabled\n"," style=\"border:none\" />\n"," <output id=\"result-be4855f8-e3d2-40f8-bb2f-b95b611cedd6\">\n"," Upload widget is only available when the cell has been executed in the\n"," current browser session. Please rerun this cell to enable.\n"," </output>\n"," <script>// Copyright 2017 Google LLC\n","//\n","// Licensed under the Apache License, Version 2.0 (the \"License\");\n","// you may not use this file except in compliance with the License.\n","// You may obtain a copy of the License at\n","//\n","// http://www.apache.org/licenses/LICENSE-2.0\n","//\n","// Unless required by applicable law or agreed to in writing, software\n","// distributed under the License is distributed on an \"AS IS\" BASIS,\n","// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n","// See the License for the specific language governing permissions and\n","// limitations under the License.\n","\n","/**\n"," * @fileoverview Helpers for google.colab Python module.\n"," */\n","(function(scope) {\n","function span(text, styleAttributes = {}) {\n"," const element = document.createElement('span');\n"," element.textContent = text;\n"," for (const key of Object.keys(styleAttributes)) {\n"," element.style[key] = styleAttributes[key];\n"," }\n"," return element;\n","}\n","\n","// Max number of bytes which will be uploaded at a time.\n","const MAX_PAYLOAD_SIZE = 100 * 1024;\n","\n","function _uploadFiles(inputId, outputId) {\n"," const steps = uploadFilesStep(inputId, outputId);\n"," const outputElement = document.getElementById(outputId);\n"," // Cache steps on the outputElement to make it available for the next call\n"," // to uploadFilesContinue from Python.\n"," outputElement.steps = steps;\n","\n"," return _uploadFilesContinue(outputId);\n","}\n","\n","// This is roughly an async generator (not supported in the browser yet),\n","// where there are multiple asynchronous steps and the Python side is going\n","// to poll for completion of each step.\n","// This uses a Promise to block the python side on completion of each step,\n","// then passes the result of the previous step as the input to the next step.\n","function _uploadFilesContinue(outputId) {\n"," const outputElement = document.getElementById(outputId);\n"," const steps = outputElement.steps;\n","\n"," const next = steps.next(outputElement.lastPromiseValue);\n"," return Promise.resolve(next.value.promise).then((value) => {\n"," // Cache the last promise value to make it available to the next\n"," // step of the generator.\n"," outputElement.lastPromiseValue = value;\n"," return next.value.response;\n"," });\n","}\n","\n","/**\n"," * Generator function which is called between each async step of the upload\n"," * process.\n"," * @param {string} inputId Element ID of the input file picker element.\n"," * @param {string} outputId Element ID of the output display.\n"," * @return {!Iterable<!Object>} Iterable of next steps.\n"," */\n","function* uploadFilesStep(inputId, outputId) {\n"," const inputElement = document.getElementById(inputId);\n"," inputElement.disabled = false;\n","\n"," const outputElement = document.getElementById(outputId);\n"," outputElement.innerHTML = '';\n","\n"," const pickedPromise = new Promise((resolve) => {\n"," inputElement.addEventListener('change', (e) => {\n"," resolve(e.target.files);\n"," });\n"," });\n","\n"," const cancel = document.createElement('button');\n"," inputElement.parentElement.appendChild(cancel);\n"," cancel.textContent = 'Cancel upload';\n"," const cancelPromise = new Promise((resolve) => {\n"," cancel.onclick = () => {\n"," resolve(null);\n"," };\n"," });\n","\n"," // Wait for the user to pick the files.\n"," const files = yield {\n"," promise: Promise.race([pickedPromise, cancelPromise]),\n"," response: {\n"," action: 'starting',\n"," }\n"," };\n","\n"," cancel.remove();\n","\n"," // Disable the input element since further picks are not allowed.\n"," inputElement.disabled = true;\n","\n"," if (!files) {\n"," return {\n"," response: {\n"," action: 'complete',\n"," }\n"," };\n"," }\n","\n"," for (const file of files) {\n"," const li = document.createElement('li');\n"," li.append(span(file.name, {fontWeight: 'bold'}));\n"," li.append(span(\n"," `(${file.type || 'n/a'}) - ${file.size} bytes, ` +\n"," `last modified: ${\n"," file.lastModifiedDate ? file.lastModifiedDate.toLocaleDateString() :\n"," 'n/a'} - `));\n"," const percent = span('0% done');\n"," li.appendChild(percent);\n","\n"," outputElement.appendChild(li);\n","\n"," const fileDataPromise = new Promise((resolve) => {\n"," const reader = new FileReader();\n"," reader.onload = (e) => {\n"," resolve(e.target.result);\n"," };\n"," reader.readAsArrayBuffer(file);\n"," });\n"," // Wait for the data to be ready.\n"," let fileData = yield {\n"," promise: fileDataPromise,\n"," response: {\n"," action: 'continue',\n"," }\n"," };\n","\n"," // Use a chunked sending to avoid message size limits. See b/62115660.\n"," let position = 0;\n"," do {\n"," const length = Math.min(fileData.byteLength - position, MAX_PAYLOAD_SIZE);\n"," const chunk = new Uint8Array(fileData, position, length);\n"," position += length;\n","\n"," const base64 = btoa(String.fromCharCode.apply(null, chunk));\n"," yield {\n"," response: {\n"," action: 'append',\n"," file: file.name,\n"," data: base64,\n"," },\n"," };\n","\n"," let percentDone = fileData.byteLength === 0 ?\n"," 100 :\n"," Math.round((position / fileData.byteLength) * 100);\n"," percent.textContent = `${percentDone}% done`;\n","\n"," } while (position < fileData.byteLength);\n"," }\n","\n"," // All done.\n"," yield {\n"," response: {\n"," action: 'complete',\n"," }\n"," };\n","}\n","\n","scope.google = scope.google || {};\n","scope.google.colab = scope.google.colab || {};\n","scope.google.colab._files = {\n"," _uploadFiles,\n"," _uploadFilesContinue,\n","};\n","})(self);\n","</script> "]},"metadata":{}},{"output_type":"stream","name":"stdout","text":["Saving futuristic-beat-146661.mp3 to futuristic-beat-146661.mp3\n","Predicted Label: autism\n"]}],"source":["!pip install librosa\n","!pip install scikit-learn\n","!pip install joblib\n","\n","\n","\n","\n","import os\n","import numpy as np\n","import librosa\n","from sklearn.model_selection import train_test_split\n","from sklearn.preprocessing import LabelEncoder\n","from sklearn.svm import SVC\n","from sklearn.metrics import accuracy_score\n","import joblib\n","\n","# Set the paths to your audio datasets\n","autism_folder = '/content/audio/autism'\n","non_autism_folder = '/content/audio/non autism'\n","\n","# Function to extract audio features using Librosa\n","def extract_features(file_path):\n"," audio, sr = librosa.load(file_path, sr=22050) # Load audio file\n"," features = librosa.feature.mfcc(y=audio, sr=sr) # Extract MFCC features\n"," features = np.mean(features.T, axis=0) # Take the mean of MFCC features\n"," return features\n","\n","# Load audio files and extract features\n","autism_files = os.listdir(autism_folder)\n","non_autism_files = os.listdir(non_autism_folder)\n","\n","X = [] # Feature vectors\n","y = [] # Labels\n","\n","for file in autism_files:\n"," file_path = os.path.join(autism_folder, file)\n"," features = extract_features(file_path)\n"," X.append(features)\n"," y.append('autism')\n","\n","for file in non_autism_files:\n"," file_path = os.path.join(non_autism_folder, file)\n"," features = extract_features(file_path)\n"," X.append(features)\n"," y.append('non autism')\n","\n","# Encode labels\n","label_encoder = LabelEncoder()\n","y = label_encoder.fit_transform(y)\n","\n","# Split the dataset into training and testing sets\n","X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n","\n","# Train a support vector machine (SVM) classifier\n","classifier = SVC(kernel='rbf', C=1.0, gamma='scale', random_state=42)\n","classifier.fit(X_train, y_train)\n","\n","# Evaluate the classifier\n","y_pred = classifier.predict(X_test)\n","accuracy = accuracy_score(y_test, y_pred)\n","print(\"Accuracy:\", accuracy)\n","\n","# Save the model\n","model_file = \"audio_classifier_model.pkl\"\n","joblib.dump(classifier, model_file)\n","print(\"Model saved as\", model_file)\n","\n","# Function to predict the label for a given audio file\n","def classify_audio(file_path):\n"," features = extract_features(file_path)\n"," features = np.array([features]) # Reshape for prediction\n"," label = label_encoder.inverse_transform(classifier.predict(features))[0]\n"," return label\n","\n","# Load the saved model\n","loaded_model = joblib.load(model_file)\n","\n","# Upload an audio file and classify it\n","from google.colab import files\n","\n","uploaded_file = files.upload()\n","audio_file_path = list(uploaded_file.keys())[0]\n","\n","predicted_label = classify_audio(audio_file_path)\n","print(\"Predicted Label:\", predicted_label)\n"]},{"cell_type":"code","source":[],"metadata":{"id":"Y8QtswiTvfgn"},"execution_count":null,"outputs":[]}]}