"""Evaluate generated images with FID against three real-image reference sets.

Inputs (provided by the grading container):
  /tmp/data/real-images-single.zip   zip of real single-object images
  /tmp/data/real-images-multi.zip    zip of real multi-object images
  /tmp/data/weights-inception-2015-12-05-6726825d.pth
                                     pre-downloaded Inception weights for FID
  /tmp/model/generated/              generated (fake) images to score
  /tmp/model/params.json             run parameters (printed for debugging only)

Output:
  submission.csv — one row with columns FID_Score_Single / _Multiple / _Both.
"""

import os
import shutil
import zipfile

import pandas as pd
import torch
from torch.utils.data import DataLoader
from torchmetrics.image.fid import FrechetInceptionDistance
from torchvision import transforms
from torchvision.datasets import ImageFolder
from tqdm import tqdm

# Directory holding the generated (fake) images.
FDIR_FAKE = "/tmp/model/generated"

# Real reference sets; the "Both" tree is assembled in prepare_data() from
# hard links to the other two.
REAL_DIRS = {
    "Single": "/tmp/data/real-images-single",
    "Multiple": "/tmp/data/real-images-multi",
    "Both": "/tmp/data/real-images-both",
}


def _print_listing(path):
    """Print the contents of *path* — a debug aid for the grading container."""
    entries = os.listdir(path)
    print(f"{path}: ", ", ".join(entries) if entries else f"The directory {path} is empty.")


def _move_files_into(src_dir, dest_dir):
    """Move every regular file in *src_dir* into *dest_dir* (created if needed).

    Subdirectories (e.g. an already-created ``images/``) are left in place.
    """
    os.makedirs(dest_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        src = os.path.join(src_dir, name)
        if os.path.isfile(src):
            shutil.move(src, os.path.join(dest_dir, name))


def _hardlink_files_into(src_dir, dest_dir):
    """Hard-link every regular file in *src_dir* into *dest_dir*.

    Hard links (like the original ``ln``) avoid copying image data; both
    directories live on the same /tmp filesystem.
    """
    os.makedirs(dest_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        src = os.path.join(src_dir, name)
        if os.path.isfile(src):
            os.link(src, os.path.join(dest_dir, name))


def prepare_data():
    """Stage model weights, unzip the real image sets, and lay out the trees.

    ``ImageFolder`` requires at least one class subdirectory, so each image
    set (real and generated) is moved/linked into an ``images/`` subfolder.
    """
    # Debug listings of the container layout.
    for path in ("/tmp/data", "/tmp/", "/tmp/model"):
        _print_listing(path)
    with open("/tmp/model/params.json") as f:
        print("/tmp/model/params.json: ", f.read())

    # Point torch hub at /tmp/data and stage the pre-downloaded Inception
    # weights where torchmetrics' FID looks for them (no network access).
    os.makedirs("/tmp/data/hub/checkpoints/", exist_ok=True)
    os.environ["TORCH_HOME"] = "/tmp/data"
    shutil.move(
        "/tmp/data/weights-inception-2015-12-05-6726825d.pth",
        "/tmp/data/hub/checkpoints/",
    )

    print("Unzipping single")
    with zipfile.ZipFile("/tmp/data/real-images-single.zip", "r") as zip_ref:
        zip_ref.extractall("/tmp/data")
    print("Unzipping multi")
    with zipfile.ZipFile("/tmp/data/real-images-multi.zip", "r") as zip_ref:
        zip_ref.extractall("/tmp/data")

    # Build each real set's images/ subfolder and assemble the combined
    # "Both" set as hard links into its own images/ folder.
    print("Preparing test dataset. . .")
    both_images = os.path.join(REAL_DIRS["Both"], "images")
    os.makedirs(both_images, exist_ok=True)
    for directory in (REAL_DIRS["Single"], REAL_DIRS["Multiple"]):
        print("for :" + directory)
        images_dir = os.path.join(directory, "images")
        _move_files_into(directory, images_dir)
        _hardlink_files_into(images_dir, both_images)

    # Generated images also need an images/ class subfolder; loose files left
    # in the ImageFolder root are ignored by torchvision.
    print("link generated images")
    _hardlink_files_into(FDIR_FAKE, os.path.join(FDIR_FAKE, "images"))


def compute_fid_scores():
    """Compute one FID score per real reference set against the generated set.

    Returns:
        dict mapping ``"FID_Score_<set>"`` to a float FID value.
    """
    print("Init FID")
    fid = FrechetInceptionDistance(feature=2048)

    # Inception-v3 expects 299x299 inputs; FID's update() expects uint8 [0,255],
    # hence the *255 / uint8 cast after ToTensor()'s [0,1] float output.
    transform = transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
    ])

    generated_dataset = ImageFolder(root=FDIR_FAKE, transform=transform)
    generated_loader = DataLoader(generated_dataset, batch_size=50, shuffle=False)

    fid_scores = {}
    for key, real_root in REAL_DIRS.items():
        print("for set: ", real_root)
        real_dataset = ImageFolder(root=real_root, transform=transform)
        real_loader = DataLoader(real_dataset, batch_size=50, shuffle=False)

        # reset() wipes both real and fake statistics, so the fake pass is
        # (re-)run for each reference set, matching the original behavior.
        fid.reset()
        for images, _ in tqdm(real_loader, desc=f"Processing real images: {key}"):
            fid.update((images * 255).to(torch.uint8), real=True)
        for images, _ in tqdm(generated_loader, desc=f"Processing fake images: {key}"):
            fid.update((images * 255).to(torch.uint8), real=False)

        score = fid.compute().item()
        fid_scores[f"FID_Score_{key}"] = score
        print(f"FID Score ({key}):", score)
    return fid_scores


def main():
    """Run the full pipeline and write the single-row submission CSV."""
    prepare_data()
    scores = compute_fid_scores()
    pd.DataFrame([scores]).to_csv("submission.csv", index=False)


if __name__ == "__main__":
    main()