R24-102 / Commits

Commit 2ed1dd31 authored Sep 07, 2024 by Vihangi Yasuththara
Carlini & Wagner Attack Defense Adversarial Training
parent 8fe57739
Pipeline #7315 failed with stages
Changes 1, Pipelines 1

Showing 1 changed file with 384 additions and 0 deletions (+384, -0)

Carlini_Wagner_Attack_Defense_Adversarial_training.ipynb (new file, 0 → 100644)
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "V28"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "TPU"
},
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "QVtAGXS9ATS2"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"%matplotlib inline\n",
"\n",
"import tensorflow as tf\n",
"from tensorflow.keras.models import Sequential, load_model\n",
"from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout\n",
"from tensorflow.keras.optimizers import Adam\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from sklearn.metrics import accuracy_score\n",
"import time\n",
"\n",
"from imblearn.under_sampling import NearMiss\n",
"from imblearn.over_sampling import SMOTE\n",
"from imblearn.over_sampling import ADASYN\n",
"from imblearn.combine import SMOTEENN"
]
},
{
"cell_type": "code",
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ivq0p4LvBj7R",
"outputId": "7b61c4a0-2a20-4fe5-f290-3f916ebfc992"
},
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"import pandas as pd\n",
"data = pd.read_csv('/content/drive/MyDrive/disease_preprocess4 (1).csv')\n",
"X = data.drop('HeartDisease', axis=1).values # Ensure .values to get numpy array\n",
"y = data['HeartDisease'].values"
],
"metadata": {
"id": "JuXH2HD6CLug"
},
"execution_count": 3,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Import the necessary function\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"# Split the data into training and testing sets\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)"
],
"metadata": {
"id": "vK3ebIp0CtkC"
},
"execution_count": 4,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Import the necessary class\n",
"from sklearn.preprocessing import StandardScaler\n",
"\n",
"# Standardize the data\n",
"scaler = StandardScaler()\n",
"X_train_scaled = scaler.fit_transform(X_train)\n",
"X_test_scaled = scaler.transform(X_test)"
],
"metadata": {
"id": "SJW-CYqEC06X"
},
"execution_count": 5,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Import the necessary function\n",
"from tensorflow.keras.models import load_model\n",
"\n",
"# Load your model\n",
"model = load_model('/content/drive/MyDrive/1D_CNN_model_Final_1.h5')"
],
"metadata": {
"id": "mDYlKbq5DEbn"
},
"execution_count": 6,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import tensorflow as tf\n",
"\n",
"def carlini_attack_binary_combined(model, X, y, batch_size=100, epsilon=0.1, max_iterations=50, learning_rate=0.01, targeted=False):\n",
" num_batches = int(np.ceil(len(X) / batch_size))\n",
" perturbed_Xs = []\n",
"\n",
" for i in range(num_batches):\n",
" start = i * batch_size\n",
" end = min((i + 1) * batch_size, len(X))\n",
" X_batch = tf.identity(X[start:end])\n",
" y_batch = tf.convert_to_tensor(y[start:end], dtype=tf.float32)\n",
" y_batch = tf.reshape(y_batch, (-1, 1)) # Ensure y_batch shape matches prediction shape\n",
"\n",
" for _ in range(max_iterations):\n",
" with tf.GradientTape() as tape:\n",
" tape.watch(X_batch)\n",
" prediction = model(X_batch)\n",
" if targeted:\n",
" target_labels = 1 - y_batch\n",
" loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target_labels, logits=prediction))\n",
" else:\n",
" loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_batch, logits=prediction))\n",
"\n",
" gradients = tape.gradient(loss, X_batch)\n",
" X_batch -= learning_rate * gradients\n",
" # Constraint perturbations within the epsilon-ball around the original image and within the valid range [0, 1]\n",
" X_batch = tf.clip_by_value(X_batch, X[start:end] - epsilon, X[start:end] + epsilon)\n",
" X_batch = tf.clip_by_value(X_batch, 0, 1)\n",
"\n",
" perturbed_Xs.append(X_batch)\n",
"\n",
" # Concatenate all batch results to get the final set of adversarial examples\n",
" perturbed_X = tf.concat(perturbed_Xs, axis=0)\n",
" return perturbed_X\n",
"\n"
],
"metadata": {
"id": "sexMLI5Qd9F2"
},
"execution_count": 7,
"outputs": []
},
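{
"cell_type": "code",
"source": [
"# Reference: each attack iteration above applies\n",
"#     x <- clip_[0,1]( clip_[x0-eps, x0+eps]( x - learning_rate * dL/dx ) )\n",
"# with L the sigmoid cross-entropy between model(x) and the chosen labels.\n",
"#\n",
"# Minimal usage sketch, assuming the cells above have run; the slice size below is\n",
"# illustrative only, and the keyword values just spell out the function's defaults.\n",
"X_demo_adv = carlini_attack_binary_combined(model, X_test_scaled[:200], y_test[:200],\n",
"                                            batch_size=100, epsilon=0.1,\n",
"                                            max_iterations=50, learning_rate=0.01)\n",
"print(X_demo_adv.shape)\n"
],
"metadata": {},
"execution_count": null,
"outputs": []
},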
{
"cell_type": "code",
"source": [
"# Evaluate the clean model on original test data\n",
"original_accuracy = accuracy_score(y_test, (model.predict(X_test_scaled) > 0.5).astype(int))\n",
"print(\"Original Model Accuracy: \", original_accuracy)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "FXZkXiQhJp4M",
"outputId": "ac25b1c2-c2ed-4519-b45d-e1c0f258c7f6"
},
"execution_count": 8,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"1931/1931 [==============================] - 17s 8ms/step\n",
"Original Model Accuracy: 0.8338378850917098\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# Generate adversarial examples\n",
"X_test_adv = carlini_attack_binary_combined(model, X_test_scaled, y_test)"
],
"metadata": {
"id": "h8M2uNAxKVPJ"
},
"execution_count": 9,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Evaluate the model on adversarial examples\n",
"y_pred_adv = (model.predict(X_test_adv) > 0.5).astype(int)\n",
"adv_accuracy = accuracy_score(y_test, y_pred_adv.flatten()) # Flatten if needed depending on the shape of y_test\n",
"print(\"Adversarial Model Accuracy: \", adv_accuracy)\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "21APqxQucaF9",
"outputId": "c1bbeac0-0192-458f-bb39-db5ff1188ec6"
},
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"1931/1931 [==============================] - 8s 4ms/step\n",
"Adversarial Model Accuracy: 0.08136504184811642\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# Function to generate adversarial examples during training\n",
"def generate_adversarial_training_data(model, X_train, y_train, epsilon=0.2, iterations=50, alpha=0.01):\n",
" X_adv = carlini_attack_binary_combined(model, X_train, y_train, epsilon, iterations, alpha)\n",
" return np.vstack((X_train, X_adv)), np.hstack((y_train, y_train))\n",
"\n"
],
"metadata": {
"id": "V4jvGKARcq4C"
},
"execution_count": 13,
"outputs": []
},
{
"cell_type": "code",
"source": [
"def generate_adversarial_training_data_cw(model, X_train, y_train, epsilon=0.2, max_iterations=50, learning_rate=0.01):\n",
" X_adv = carlini_attack_binary_combined(model, X_train, y_train, epsilon=epsilon, max_iterations=max_iterations, learning_rate=learning_rate)\n",
" return np.vstack((X_train, X_adv)), np.hstack((y_train, y_train))"
],
"metadata": {
"id": "f4HnyAJqloFt"
},
"execution_count": 14,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Function to generate adversarial examples using Carlini & Wagner attack during training\n",
"def generate_adversarial_training_data_cw(model, X_train, y_train, epsilon=0.2, max_iterations=50, learning_rate=0.01):\n",
" X_adv = carlini_attack_binary_combined(model, X_train, y_train, epsilon=epsilon, max_iterations=max_iterations, learning_rate=learning_rate)\n",
" return np.vstack((X_train, X_adv)), np.hstack((y_train, y_train))\n",
"\n",
"# Generate adversarial training data using C&W attack\n",
"X_train_adv_cw, y_train_adv_cw = generate_adversarial_training_data_cw(model, X_train_scaled, y_train)\n",
"\n",
"# Create a new model for adversarial training\n",
"def create_model(input_shape):\n",
" model = Sequential([\n",
" Conv1D(64, kernel_size=3, activation='relu', input_shape=input_shape),\n",
" MaxPooling1D(pool_size=2),\n",
" Flatten(),\n",
" Dense(100, activation='relu'),\n",
" Dropout(0.5),\n",
" Dense(1, activation='sigmoid')\n",
" ])\n",
" model.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy', metrics=['accuracy'])\n",
" return model\n",
"\n",
"# Create and train the new model on adversarially augmented data\n",
"input_shape = (X_train_scaled.shape[1], 1)\n",
"X_train_adv_cw = X_train_adv_cw.reshape(-1, X_train_adv_cw.shape[1], 1)\n",
"X_test_scaled = X_test_scaled.reshape(-1, X_test_scaled.shape[1], 1)\n",
"\n",
"adv_model_cw = create_model(input_shape)\n",
"adv_model_cw.fit(X_train_adv_cw, y_train_adv_cw, epochs=20, batch_size=64, validation_split=0.1)\n",
"\n",
"# Evaluate the adversarially trained model on original and adversarial examples\n",
"original_accuracy_cw = accuracy_score(y_test, (adv_model_cw.predict(X_test_scaled) > 0.5).astype(int))\n",
"X_test_adv_cw = carlini_attack_binary_combined(adv_model_cw, X_test_scaled, y_test)\n",
"adv_accuracy_cw = accuracy_score(y_test, (adv_model_cw.predict(X_test_adv_cw) > 0.5).astype(int))\n",
"\n",
"print(\"Adversarially Trained Model Accuracy on Original Data (C&W): \", original_accuracy_cw)\n",
"print(\"Adversarially Trained Model Accuracy on Adversarial Data (C&W): \", adv_accuracy_cw)\n",
"\n",
"# Comparing accuracies pre and post attack, and post-defense\n",
"adTrain_data_metrics_cw = adv_model_cw.evaluate(X_test_scaled, y_test)\n",
"print(\"Comparing Accuracies (C&W)\")\n",
"print(\"Pre Attack: \", original_accuracy)\n",
"print(\"Post Attack: \", adv_accuracy)\n",
"print(\"Post Defense - Adversarial Training (C&W): \", adTrain_data_metrics_cw[1])\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wRVZSMlZmlcU",
"outputId": "6700bfda-6bfb-4038-ab34-876b4f89d961"
},
"execution_count": 15,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Epoch 1/20\n",
"6950/6950 [==============================] - 29s 4ms/step - loss: 0.2519 - accuracy: 0.9187 - val_loss: 0.2387 - val_accuracy: 0.9196\n",
"Epoch 2/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2362 - accuracy: 0.9191 - val_loss: 0.2362 - val_accuracy: 0.9196\n",
"Epoch 3/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2343 - accuracy: 0.9192 - val_loss: 0.2352 - val_accuracy: 0.9196\n",
"Epoch 4/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2331 - accuracy: 0.9192 - val_loss: 0.2348 - val_accuracy: 0.9196\n",
"Epoch 5/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2323 - accuracy: 0.9192 - val_loss: 0.2343 - val_accuracy: 0.9196\n",
"Epoch 6/20\n",
"6950/6950 [==============================] - 27s 4ms/step - loss: 0.2318 - accuracy: 0.9192 - val_loss: 0.2343 - val_accuracy: 0.9196\n",
"Epoch 7/20\n",
"6950/6950 [==============================] - 26s 4ms/step - loss: 0.2311 - accuracy: 0.9193 - val_loss: 0.2340 - val_accuracy: 0.9196\n",
"Epoch 8/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2308 - accuracy: 0.9193 - val_loss: 0.2336 - val_accuracy: 0.9196\n",
"Epoch 9/20\n",
"6950/6950 [==============================] - 27s 4ms/step - loss: 0.2304 - accuracy: 0.9192 - val_loss: 0.2335 - val_accuracy: 0.9196\n",
"Epoch 10/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2300 - accuracy: 0.9192 - val_loss: 0.2333 - val_accuracy: 0.9196\n",
"Epoch 11/20\n",
"6950/6950 [==============================] - 31s 4ms/step - loss: 0.2297 - accuracy: 0.9193 - val_loss: 0.2331 - val_accuracy: 0.9196\n",
"Epoch 12/20\n",
"6950/6950 [==============================] - 26s 4ms/step - loss: 0.2294 - accuracy: 0.9193 - val_loss: 0.2330 - val_accuracy: 0.9196\n",
"Epoch 13/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2293 - accuracy: 0.9193 - val_loss: 0.2333 - val_accuracy: 0.9196\n",
"Epoch 14/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2290 - accuracy: 0.9193 - val_loss: 0.2337 - val_accuracy: 0.9196\n",
"Epoch 15/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2286 - accuracy: 0.9193 - val_loss: 0.2328 - val_accuracy: 0.9196\n",
"Epoch 16/20\n",
"6950/6950 [==============================] - 29s 4ms/step - loss: 0.2285 - accuracy: 0.9193 - val_loss: 0.2327 - val_accuracy: 0.9196\n",
"Epoch 17/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2284 - accuracy: 0.9194 - val_loss: 0.2328 - val_accuracy: 0.9196\n",
"Epoch 18/20\n",
"6950/6950 [==============================] - 27s 4ms/step - loss: 0.2281 - accuracy: 0.9193 - val_loss: 0.2332 - val_accuracy: 0.9196\n",
"Epoch 19/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2283 - accuracy: 0.9192 - val_loss: 0.2327 - val_accuracy: 0.9196\n",
"Epoch 20/20\n",
"6950/6950 [==============================] - 28s 4ms/step - loss: 0.2278 - accuracy: 0.9192 - val_loss: 0.2332 - val_accuracy: 0.9196\n",
"1931/1931 [==============================] - 3s 2ms/step\n",
"1931/1931 [==============================] - 4s 2ms/step\n",
"Adversarially Trained Model Accuracy on Original Data (C&W): 0.9195091547813699\n",
"Adversarially Trained Model Accuracy on Adversarial Data (C&W): 0.9191044341195707\n",
"1931/1931 [==============================] - 3s 2ms/step - loss: 0.2226 - accuracy: 0.9195\n",
"Comparing Accuracies (C&W)\n",
"Pre Attack: 0.8338378850917098\n",
"Post Attack: 0.08136504184811642\n",
"Post Defense - Adversarial Training (C&W): 0.9195091724395752\n"
]
}
]
}
]
}
\ No newline at end of file