Commit 155f6ac7 authored by Amani Kaleem

Update PGD_Attack.ipynb, PGD- Defense-feature_squeezing_model_hybrid.ipynb, PGD-Defense -Stochastic_distilation_model_hybrid.ipynb, CARLINI-Model.ipynb, CARLINI_Adversarial_training.ipynb, CARLINI_Defensive_distillation.ipynb, Feed-forward_model.ipynb files
Deleted Carlini_attack_on_Churn_model.ipynb, Carlini_attack_on_Churn_model_accuracy.ipynb, PGD-model_smote.ipynb, FInalpreproccessing_1_.ipynb files
parent f3136aaf
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "5V1K2W9WcD3G"
},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import LabelEncoder, StandardScaler\n"
]
},
{
"cell_type": "code",
"source": [
"# Load the churn dataset\n",
"churn_data = pd.read_csv('/content/Telco_Cust_Churn.csv')"
],
"metadata": {
"id": "2MNJZhI2cLrm"
},
"execution_count": 3,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Preprocess the churn dataset\n",
"churn_data = churn_data.drop('customerID', axis=1) # Remove the customerID column\n",
"churn_data['TotalCharges'] = pd.to_numeric(churn_data['TotalCharges'], errors='coerce') # Convert TotalCharges to numeric\n"
],
"metadata": {
"id": "9efJu0d0cY38"
},
"execution_count": 4,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Perform label encoding for categorical columns\n",
"categorical_cols = churn_data.select_dtypes(include=['object']).columns\n",
"label_encoder = LabelEncoder()\n",
"for col in categorical_cols:\n",
" churn_data[col] = label_encoder.fit_transform(churn_data[col])\n"
],
"metadata": {
"id": "FEbCwXMMcdMx"
},
"execution_count": 5,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Split the dataset into features and target variable\n",
"X = churn_data.drop('Churn', axis=1)\n",
"y = churn_data['Churn']"
],
"metadata": {
"id": "BEi1DyOAcg87"
},
"execution_count": 6,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Split the dataset into train and test sets\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n"
],
"metadata": {
"id": "WCBEzi2dciZD"
},
"execution_count": 7,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Normalize the input features\n",
"scaler = StandardScaler()\n",
"X_train = scaler.fit_transform(X_train)\n",
"X_test = scaler.transform(X_test)"
],
"metadata": {
"id": "vFvTLBZFcm7M"
},
"execution_count": 8,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Define a simple target model\n",
"def target_model(inputs):\n",
" model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(16, activation='relu', input_shape=(X_train.shape[1],)),\n",
" tf.keras.layers.Dense(1, activation='sigmoid')\n",
" ])\n",
" return model(inputs)"
],
"metadata": {
"id": "0plu-Gqlcp4E"
},
"execution_count": 9,
"outputs": []
},
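{
"cell_type": "code",
"source": [
"# Train the target model on the churn data so the attack runs against\n",
"# learned weights rather than a random initialisation. A minimal sketch:\n",
"# the epoch count and batch size here are illustrative choices, not taken\n",
"# from the original cells.\n",
"model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
"model.fit(X_train, y_train, epochs=10, batch_size=32, verbose=0)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},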
{
"cell_type": "code",
"source": [
"# Define the objective function for the attack\n",
"def carlini_objective_function(inputs, target_class):\n",
" # Compute the logits (output) of the target model\n",
" logits = target_model(inputs)\n",
"\n",
" # Calculate the binary cross-entropy loss between the target class and the model's predicted probabilities\n",
" loss = tf.keras.losses.binary_crossentropy(target_class, logits)\n",
"\n",
" return loss"
],
"metadata": {
"id": "7qeX15-7crF4"
},
"execution_count": 10,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Set up the optimization algorithm\n",
"optimizer = tf.keras.optimizers.Adam(lr=0.01)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ChS1B5K8cvkc",
"outputId": "2322eaa9-e2ab-4968-b919-0720b02a1295"
},
"execution_count": 11,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# Function to perform the Carlini attack\n",
"def carlini_attack(input_sample, target_class, epsilon=0.2, num_iterations=100):\n",
" input_sample = tf.convert_to_tensor(input_sample, dtype=tf.float32)\n",
" input_sample = tf.expand_dims(input_sample, axis=0) # Add batch dimension\n",
"\n",
" for i in range(num_iterations):\n",
" with tf.GradientTape() as tape:\n",
" # Record the perturbation for gradient computation\n",
" tape.watch(input_sample)\n",
"\n",
" # Compute the objective function\n",
" loss = carlini_objective_function(input_sample, target_class)\n",
"\n",
" # Compute the gradients of the objective function with respect to the input sample\n",
" gradients = tape.gradient(loss, input_sample)\n",
"\n",
" # Update the input sample using the gradients\n",
" input_sample = input_sample - epsilon * tf.sign(gradients)\n",
"\n",
" # Clip the input sample to maintain values within a valid range\n",
" input_sample = tf.clip_by_value(input_sample, -1, 1)\n",
"\n",
" return input_sample\n"
],
"metadata": {
"id": "9Lbcd_ObcyBK"
},
"execution_count": 13,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Select a sample from the test set for the attack\n",
"sample_index = 0\n",
"#input_sample = X_test.iloc[sample_index].values\n",
"input_sample = X_test[sample_index]"
],
"metadata": {
"id": "GdSLlEFbc6ci"
},
"execution_count": 15,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Specify the target class for the attack\n",
"#target_class = 1\n",
"target_class = tf.constant([[1.0]], dtype=tf.float32)"
],
"metadata": {
"id": "xD1IxjMYdCOx"
},
"execution_count": 18,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Generate the adversarial example using the Carlini attack\n",
"adversarial_example = carlini_attack(input_sample, target_class)\n"
],
"metadata": {
"id": "nwzcGIq4dDxA"
},
"execution_count": 19,
"outputs": []
},
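{
"cell_type": "code",
"source": [
"# Inspect how far the adversarial example moved from the original sample.\n",
"# A quick check on the standardised feature scale: epsilon bounds each\n",
"# per-step change, so the total L-infinity distance can be larger.\n",
"import numpy as np\n",
"perturbation = adversarial_example.numpy()[0] - input_sample\n",
"print(\"Max absolute perturbation:\", np.max(np.abs(perturbation)))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},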
{
"cell_type": "code",
"source": [
"# Perform prediction on the adversarial example\n",
"predictions = target_model(adversarial_example)\n",
"predicted_class = tf.round(predictions[0][0])"
],
"metadata": {
"id": "8-TrTfk_d_uZ"
},
"execution_count": 22,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Print the predicted class and confidence \n",
"print(\"Predicted Class:\", predicted_class) \n",
"print(\"Confidence:\", predictions[0][0])"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "f6zgP9kod_jL",
"outputId": "ea91e129-9dcd-4998-dce5-2a1b3c7497df"
},
"execution_count": 23,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Predicted Class: tf.Tensor(1.0, shape=(), dtype=float32)\n",
"Confidence: tf.Tensor(0.591373, shape=(), dtype=float32)\n"
]
}
]
}
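,
{
"cell_type": "code",
"source": [
"# Sanity check: compare the model's prediction on the clean sample with the\n",
"# adversarial one. If the attack succeeded, the prediction should have moved\n",
"# towards the target class.\n",
"clean_input = tf.expand_dims(tf.convert_to_tensor(input_sample, dtype=tf.float32), axis=0)\n",
"clean_pred = target_model(clean_input)\n",
"print(\"Clean prediction:\", float(clean_pred[0][0]))\n",
"print(\"Adversarial prediction:\", float(predictions[0][0]))"
],
"metadata": {},
"execution_count": null,
"outputs": []
}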
]
}
\ No newline at end of file