Commit 08c7f16f authored by Janith Gamage

fix: update

parent 2098c101
import tensorflow as tf
import cv2
import numpy as np
# Load a pre-trained deep learning model for feature extraction
model = tf.keras.applications.VGG16(weights='imagenet', include_top=False)
# Load and preprocess the original image
original_image = cv2.imread('original_image.jpeg')
original_image = cv2.resize(original_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
original_image = tf.keras.applications.vgg16.preprocess_input(original_image)
original_image = np.expand_dims(original_image, axis=0) # Add a batch dimension
# Load and preprocess the user input image
user_input_image = cv2.imread('user_input_image.jpeg')
user_input_image = cv2.resize(user_input_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
user_input_image = tf.keras.applications.vgg16.preprocess_input(user_input_image)
user_input_image = np.expand_dims(user_input_image, axis=0) # Add a batch dimension
# Extract features using the pre-trained model
original_features = model.predict(original_image)
user_input_features = model.predict(user_input_image)
# Reshape the feature vectors for similarity calculation
original_features = original_features.reshape((original_features.shape[0], -1))
user_input_features = user_input_features.reshape((user_input_features.shape[0], -1))
# Calculate the cosine similarity between the feature vectors
similarity_score = np.dot(original_features, user_input_features.T) / (np.linalg.norm(original_features) * np.linalg.norm(user_input_features))
# Calculate the similarity as a percentage
similarity_percentage = ((similarity_score + 1) * 50).item() # Map the cosine similarity from [-1, 1] to a 0-100 percentage and take it as a Python scalar
# Define a similarity threshold (you can adjust this threshold)
threshold = 70
# Compare the similarity percentage to the threshold
if similarity_percentage >= threshold:
    print("Images are similar. Similarity: {:.2f}%".format(similarity_percentage))
else:
    print("Images are dissimilar. Similarity: {:.2f}%".format(similarity_percentage))
\ No newline at end of file
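The script above repeats the same load / preprocess / extract steps for both images. The sketch below shows how that pipeline could be factored into reusable helpers; the names extract_features and cosine_similarity_percentage are illustrative and not part of this commit. It assumes the same VGG16 backbone, guards against cv2.imread returning None for a bad path, and converts BGR to RGB before preprocess_input, since OpenCV loads images in BGR order while the Keras VGG16 preprocessing expects RGB input.

import cv2
import numpy as np
import tensorflow as tf

vgg16 = tf.keras.applications.VGG16(weights='imagenet', include_top=False)

def extract_features(image_path, model, target_size=(224, 224)):
    """Load an image, preprocess it for VGG16, and return a flat feature vector."""
    image = cv2.imread(image_path)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # preprocess_input expects RGB input
    image = cv2.resize(image, target_size)
    image = tf.keras.applications.vgg16.preprocess_input(image.astype(np.float32))
    image = np.expand_dims(image, axis=0)  # add a batch dimension
    features = model.predict(image, verbose=0)
    return features.reshape(-1)  # flatten to a 1-D feature vector

def cosine_similarity_percentage(a, b):
    """Map the cosine similarity of two feature vectors from [-1, 1] to [0, 100]."""
    cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return float((cosine + 1) * 50)

# Example usage (file paths are placeholders):
# original = extract_features('original_image.jpeg', vgg16)
# candidate = extract_features('user_input_image.jpeg', vgg16)
# print("Similarity: {:.2f}%".format(cosine_similarity_percentage(original, candidate)))

Routing both images through one helper keeps the preprocessing identical, so the comparison stays consistent if the resize or colour handling is later changed in a single place.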
import tensorflow as tf
import cv2
import numpy as np
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, f1_score, auc
import matplotlib.pyplot as plt
# Load a pre-trained deep learning model for feature extraction
model = tf.keras.applications.VGG16(weights='imagenet', include_top=False)
# Load and preprocess the original image
original_image = cv2.imread('original_image.jpeg')
original_image = cv2.resize(original_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
original_image = tf.keras.applications.vgg16.preprocess_input(original_image)
original_image = np.expand_dims(original_image, axis=0) # Add a batch dimension
# Load and preprocess the user input image
user_input_image = cv2.imread('user_input_image.jpeg')
user_input_image = cv2.resize(user_input_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
user_input_image = tf.keras.applications.vgg16.preprocess_input(user_input_image)
user_input_image = np.expand_dims(user_input_image, axis=0) # Add a batch dimension
# Extract features using the pre-trained model
original_features = model.predict(original_image)
user_input_features = model.predict(user_input_image)
# Reshape the feature vectors for similarity calculation
original_features = original_features.reshape((original_features.shape[0], -1))
user_input_features = user_input_features.reshape((user_input_features.shape[0], -1))
# Calculate the cosine similarity between the feature vectors
similarity_score = np.dot(original_features, user_input_features.T) / (np.linalg.norm(original_features) * np.linalg.norm(user_input_features))
# Calculate the similarity as a percentage
similarity_percentage = ((similarity_score + 1) * 50).item() # Map the cosine similarity from [-1, 1] to a 0-100 percentage and take it as a Python scalar
# Define a similarity threshold (you can adjust this threshold)
threshold = 70
# ------------------------------------ MODEL | VALIDATION -----------------------------------
# # Create training history to plot graphs
# history = {'accuracy': [similarity_percentage], 'loss': [0]}
# # Plot model accuracy and loss
# plt.figure(figsize=(12, 5))
# plt.subplot(1, 2, 1)
# plt.plot(history['accuracy'])
# plt.title('Model Accuracy')
# plt.xlabel('Epoch')
# plt.ylabel('Accuracy (%)')
# plt.subplot(1, 2, 2)
# plt.plot(history['loss'])
# plt.title('Model Loss')
# plt.xlabel('Epoch')
# plt.ylabel('Loss')
# plt.show()
# # Print similarity percentage
# print("Similarity Percentage: {:.2f}%".format(similarity_percentage))
# # Create ground truth labels (e.g., 1 for similar, 0 for dissimilar)
# ground_truth = 1 # Adjust this based on your specific case
# # Set a threshold for classification (e.g., 0.5 for binary classification)
# classification_threshold = 0.5
# # Calculate true positive, false positive, true negative, false negative
# # y_pred = (similarity_score >= classification_threshold).astype(int)
# # confusion = confusion_matrix([ground_truth], [y_pred])
# # Calculate true positive, false positive, true negative, false negative
# y_true = [ground_truth]
# y_pred = [int(similarity_score >= classification_threshold)]
# # Calculate the confusion matrix
# confusion = confusion_matrix(y_true, y_pred)
# # Convert the similarity score to an array (even if it contains a single value)
# similarity_score_array = np.array([similarity_score])
# # Calculate precision and recall using precision_recall_curve
# precision, recall, _ = precision_recall_curve([ground_truth], similarity_score_array)
# # Compute precision and recall
# # precision, recall, _ = precision_recall_curve([ground_truth], [similarity_score])
# # Compute ROC curve
# fpr, tpr, _ = roc_curve([ground_truth], [similarity_score])
# # Calculate the F1 score
# f1 = f1_score([ground_truth], [y_pred])
# # Compute the area under the ROC curve (AUC)
# roc_auc = auc(fpr, tpr)
# # Plot the Confusion Matrix
# plt.figure()
# plt.imshow(confusion, interpolation='nearest', cmap=plt.cm.Blues)
# plt.title('Confusion Matrix')
# plt.colorbar()
# plt.xticks([0, 1], ['Predicted Negative', 'Predicted Positive'])
# plt.yticks([0, 1], ['Actual Negative', 'Actual Positive'])
# thresh = confusion.max() / 2.
# for i in range(2):
#     for j in range(2):
#         plt.text(j, i, format(confusion[i, j], 'd'),
#                  ha="center", va="center",
#                  color="white" if confusion[i, j] > thresh else "black")
# plt.show()
# # Plot Precision-Recall Curve
# plt.figure()
# plt.plot(recall, precision, marker='.')
# plt.title('Precision-Recall Curve')
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.show()
# # Plot ROC Curve
# plt.figure()
# plt.plot(fpr, tpr, marker='.')
# plt.title('ROC Curve')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.show()
# # Print F1 Score
# print(f'F1 Score: {f1:.2f}')
# # Print AUC for ROC Curve
# print(f'ROC AUC: {roc_auc:.2f}')
# Compare the similarity percentage to the threshold
if similarity_percentage >= threshold:
    print("Images are similar. Similarity: {:.2f}%".format(similarity_percentage))
else:
    print("Images are dissimilar. Similarity: {:.2f}%".format(similarity_percentage))
\ No newline at end of file
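The commented-out validation block above passes a single similarity score and a single ground-truth label into precision_recall_curve, roc_curve, and f1_score; those functions only give meaningful results over a set of labeled examples. Below is a hedged sketch of how the same metrics could be computed once similarity percentages have been collected for many labeled image pairs. The arrays pair_scores and pair_labels are hypothetical evaluation data, not part of this commit.

import numpy as np
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc

# Hypothetical evaluation data: one similarity percentage and one
# ground-truth label (1 = similar, 0 = dissimilar) per image pair.
pair_scores = np.array([92.0, 64.5, 40.2, 88.9, 75.0, 30.1])
pair_labels = np.array([1, 1, 0, 1, 0, 0])

threshold = 70  # same decision threshold as the script above
pair_preds = (pair_scores >= threshold).astype(int)

print("Confusion matrix:\n", confusion_matrix(pair_labels, pair_preds))
print("F1 score:", f1_score(pair_labels, pair_preds))

# ROC analysis uses the raw scores rather than the thresholded predictions.
fpr, tpr, _ = roc_curve(pair_labels, pair_scores)
print("ROC AUC:", auc(fpr, tpr))

The same two arrays could also drive precision_recall_curve and the matplotlib plots from the commented block once more than one labeled pair is available.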
@@ -4,12 +4,6 @@ import dotenv from "dotenv";
 import express from "express";
 import mongoose from "mongoose";
-import multer from "multer";
-// Set up storage for uploaded images
-const storage = multer.memoryStorage();
-const upload = multer({ storage: storage });
 //import routes
 import curriculumRoutes from "./routes/curriculum.routes.js";
 import feedbackRoutes from "./routes/feedback.routes.js";
@@ -23,14 +17,18 @@ import userProgressRoutes from "./routes/userProgress.routes.js";
 dotenv.config();
 const app = express();
-const corsOptions = {
-  origin: 'http://localhost:3000',
-  origin: 'http://localhost:3001',
-};
+// const corsOptions = {
+//   origin: 'http://localhost:3000',
+//   origin: 'http://localhost:3001',
+//   origin: 'http://localhost:3000',
+//   origin: 'http://172.28.144.1:3000'
+// };
 app.use(bodyParser.json({ limit: "30mb", extended: true }));
 app.use(bodyParser.urlencoded({ limit: "30mb", extended: true }));
-app.use(cors(corsOptions));
+// app.use(cors(corsOptions));
+app.use(cors());
 //end
 app.get("/", (req, res) => {
...