Commit b4e479c8 authored by Vihangi Yasuththara

Carlini_attack

parent 48fe7b0a
# -*- coding: utf-8 -*-
"""Untitled15.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/18fYvJwWJU4mmcW_bHNk1O24c1q-q23Ny
"""
# Import necessary libraries
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, classification_report
import time
# Load and preprocess data
data = pd.read_csv('/content/drive/MyDrive/disease_preprocess4 (1).csv')
X = data.drop('HeartDisease', axis=1)
y = data['HeartDisease']
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Standardize the data
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Load the model
model = load_model('/content/drive/MyDrive/1D_CNN_model_Final_1.h5')
# Get predictions and print evaluation metrics
y_pred = (model.predict(X_test_scaled) > 0.5).astype(int).ravel()  # Threshold sigmoid outputs at 0.5
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Alias the scaled training data as X_train_combined (both names refer to the same array here)
X_train_combined = X_train_scaled
def carlini_attack_binary(model, X, y, batch_size=100, epsilon=0.1, max_iterations=50, learning_rate=0.01):
    """Iterative gradient-based attack on a binary classifier, processed in batches."""
    num_batches = int(np.ceil(len(X) / batch_size))
    perturbed_Xs = []
    y = np.asarray(y)  # Positional slicing regardless of whether y is a pandas Series or an array
    for i in range(num_batches):
        start = i * batch_size
        end = min((i + 1) * batch_size, len(X))
        X_batch = tf.identity(X[start:end])
        y_batch = tf.convert_to_tensor(y[start:end], dtype=tf.float32)
        y_batch = tf.reshape(y_batch, (-1, 1))  # Match the model's (batch, 1) output shape
        for _ in range(max_iterations):
            with tf.GradientTape() as tape:
                tape.watch(X_batch)
                prediction = model(X_batch)
                loss = tf.keras.losses.binary_crossentropy(y_batch, prediction)
            gradients = tape.gradient(loss, X_batch)
            # Gradient ascent on the true-label loss pushes predictions away from the correct class
            X_batch += learning_rate * gradients
            # Project back into the epsilon-ball around the original inputs, then clip to [0, 1]
            X_batch = tf.clip_by_value(X_batch, X[start:end] - epsilon, X[start:end] + epsilon)
            X_batch = tf.clip_by_value(X_batch, 0, 1)
        perturbed_Xs.append(X_batch)
    # Concatenate all batch results
    perturbed_X = tf.concat(perturbed_Xs, axis=0)
    return perturbed_X
def carlini_wagner_attack(model, x, y, c=1, lr=0.01, iterations=100):
    x_adv = tf.Variable(x, dtype=tf.float32)  # Adversarial example, initialised at the clean input
    # For a binary label y, the adversarial target is the opposite class, 1 - y
    target = tf.constant(1.0 - np.asarray(y, dtype=np.float32).reshape(-1, 1))
    binary_crossentropy = tf.keras.losses.BinaryCrossentropy()
    optimizer = tf.optimizers.Adam(learning_rate=lr)
    for i in range(iterations):
        with tf.GradientTape() as tape:
            prediction = model(x_adv)
            # Trade off classification loss towards the target (weighted by c) against L2 distance
            loss = c * binary_crossentropy(target, prediction) + tf.norm(x_adv - x)
        gradients = tape.gradient(loss, x_adv)
        optimizer.apply_gradients([(gradients, x_adv)])
        x_adv.assign(tf.clip_by_value(x_adv, 0, 1))
    return x_adv.numpy()
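# carlini_wagner_attack is defined but not invoked in the original script. A minimal,
# hypothetical usage sketch on the first 100 scaled test rows (the sample_* names below
# are assumptions, not part of the original experiment):
sample_x = tf.convert_to_tensor(X_test_scaled[:100], dtype=tf.float32)
sample_y = np.asarray(y_test)[:100]
sample_adv = carlini_wagner_attack(model, sample_x, sample_y, c=1, lr=0.01, iterations=100)
print("Mean L2 distance of CW examples:", np.mean(np.linalg.norm(sample_adv - X_test_scaled[:100], axis=1)))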
# Generate adversarial examples
X_test_tensor = tf.convert_to_tensor(X_test_scaled, dtype=tf.float32)
X_test_adv = carlini_attack_binary(model, X_test_tensor, y_test)
# Evaluate the model on adversarial examples
adv_accuracy = model.evaluate(X_test_adv, y_test)
print("Model accuracy on adversarial examples:", adv_accuracy[1])
# Evaluate the model on clean data
org_accuracy = model.evaluate(X_test_scaled, y_test)
print("Model accuracy on clean data:", org_accuracy[1])