# SWIN_Angle_Detection_Car / New_file.txt
import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from torchvision.models import resnet50, ResNet50_Weights

# Load a pre-trained ResNet-50 model with ImageNet weights
model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
model.eval()
# Define a function to preprocess images
def preprocess_image(image_path):
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
image = Image.open(image_path)
image = transform(image).unsqueeze(0) # Add a batch dimension
return image
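# Illustrative usage of preprocess_image (not part of the original script; "example.jpg"
# is a placeholder path): the returned tensor has shape (1, 3, 224, 224), i.e. a batch
# of one normalized RGB image ready to be fed to the model.
# x = preprocess_image("example.jpg")
# print(x.shape)  # torch.Size([1, 3, 224, 224])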

# Load your ideal subset of images
ideal_image_paths = ["/content/trunck.jpg", "t4.jpg"]  # Replace with your ideal image file paths
ideal_embeddings = []
for image_path in ideal_image_paths:
    image = preprocess_image(image_path)
    with torch.no_grad():
        # The raw ResNet-50 output is a 1000-dim ImageNet logit vector, used here as a feature vector
        embedding = model(image).squeeze().numpy()
    ideal_embeddings.append(embedding)

# Load a set of candidate images
candidate_image_paths = ["/content/trunck2.jpg", "t3.jpg", "car.jpg"]  # Replace with your candidate image file paths
candidate_embeddings = []
for image_path in candidate_image_paths:
    image = preprocess_image(image_path)
    with torch.no_grad():
        embedding = model(image).squeeze().numpy()
    candidate_embeddings.append(embedding)

# Calculate similarities between ideal and candidate images using cosine similarity
similarities = cosine_similarity(ideal_embeddings, candidate_embeddings)

# Print the similarity matrix (rows = ideal images, columns = candidate images)
print(similarities)
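
# Illustrative follow-up (not part of the original script): pick, for each ideal image,
# the candidate with the highest cosine similarity. Assumes `similarities` has shape
# (len(ideal_image_paths), len(candidate_image_paths)), as returned above.
for row, ideal_path in enumerate(ideal_image_paths):
    best_idx = int(np.argmax(similarities[row]))
    print(f"{ideal_path} -> closest candidate: {candidate_image_paths[best_idx]} "
          f"(cosine similarity {similarities[row, best_idx]:.3f})")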


# Alternative: the same similarity check using a Swin Transformer from Hugging Face transformers
import torch
from transformers import SwinModel, AutoImageProcessor
from PIL import Image
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Load a pretrained Swin Transformer backbone and its image processor.
# transformers exposes Swin as SwinModel / AutoImageProcessor; the checkpoint below
# is the ImageNet-22k pretrained base model.
model_name = "microsoft/swin-base-patch4-window7-224-in22k"
model = SwinModel.from_pretrained(model_name)
model.eval()
processor = AutoImageProcessor.from_pretrained(model_name)

# Define a function to preprocess images (the processor handles resizing and normalization)
def preprocess_image(image_path):
    image = Image.open(image_path).convert("RGB")  # force 3 channels
    inputs = processor(images=image, return_tensors="pt")
    return inputs
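# Illustrative check (not part of the original script; "example.jpg" is a placeholder path):
# the processor returns a dict whose "pixel_values" entry is a float tensor of shape
# (1, 3, 224, 224) for this checkpoint.
# inputs = preprocess_image("example.jpg")
# print(inputs["pixel_values"].shape)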

# Load your ideal and candidate subsets of images
ideal_image_paths = ["ideal_image1.jpg", "ideal_image2.jpg", "ideal_image3.jpg"]  # Replace with your ideal image file paths
candidate_image_paths = ["candidate_image1.jpg", "candidate_image2.jpg", "candidate_image3.jpg"]  # Replace with your candidate image file paths

# Calculate cosine similarities between ideal and candidate images
# (candidate embeddings are recomputed for every ideal image; fine for a handful of files)
similarities = []
for ideal_path in ideal_image_paths:
    inputs_ideal = preprocess_image(ideal_path)
    with torch.no_grad():
        output_ideal = model(**inputs_ideal)
    # Use the pooled hidden state as the image embedding
    ideal_embedding = output_ideal.pooler_output[0].cpu().numpy()
    for candidate_path in candidate_image_paths:
        inputs_candidate = preprocess_image(candidate_path)
        with torch.no_grad():
            output_candidate = model(**inputs_candidate)
        candidate_embedding = output_candidate.pooler_output[0].cpu().numpy()
        # Calculate cosine similarity between ideal and candidate embeddings
        similarity = cosine_similarity([ideal_embedding], [candidate_embedding])[0][0]
        similarities.append((ideal_path, candidate_path, similarity))

# Set a similarity threshold (e.g., 0.7)
threshold = 0.7

# Find similar image pairs based on the threshold
similar_pairs = []
for ideal_path, candidate_path, similarity in similarities:
    if similarity > threshold:
        similar_pairs.append((ideal_path, candidate_path))

# Print similar image pairs
for pair in similar_pairs:
    print(f"Similar images: {pair[0]} and {pair[1]}")