import os
import gc
import torch
from tqdm import tqdm
import numpy as np
from PIL import Image
from torchvision import models, transforms
import torch.nn as nn
import glob
import time
import argparse
import sys

"""
Feature Extraction for SAT Problem Images

This script provides two approaches for extracting 128-dimensional features:

1. ResNet18 + Projection Layer (current implementation):
   - Uses pre-trained ResNet18 backbone
   - Adds a linear projection layer to reduce 512 -> 128 dimensions
   - Maintains transfer learning benefits from ImageNet pre-training

2. Alternative: Custom CNN (commented out below):
   - Direct 128-dimensional output
   - Lighter architecture, faster inference
   - May require training from scratch

Usage:
    python feature_extraction.py scope --origin_image path/to/origin.jpg --sorted_image path/to/sorted.jpg --output_file features.npz --feature_dim 128
    python feature_extraction.py ascii --direct_image path/to/direct.jpg --min_image path/to/min.jpg --max_image path/to/max.jpg --mean_image path/to/mean.jpg --output_file features.npz --feature_dim 128
"""

class MultiScaleFeatureExtractor:
    """Extract fixed-dimensional feature vectors from PIL RGB images.

    Uses a pre-trained ResNet18 backbone (ImageNet weights) with the final
    classification layer removed.  When ``feature_dim`` differs from
    ResNet18's native 512, an extra Linear+ReLU projection maps
    512 -> ``feature_dim``.

    NOTE(review): the projection layer is randomly initialized and never
    trained here, so projected features are a fixed random linear map of
    the backbone output — confirm this is intended downstream.
    """

    def __init__(self, use_gpu=True, batch_size=32, feature_dim=512):
        """
        Args:
            use_gpu: use CUDA when available.
            batch_size: kept for interface compatibility; images are
                currently processed one at a time.
            feature_dim: output feature dimension (512 = raw backbone output).
        """
        self.device = torch.device("cuda" if torch.cuda.is_available() and use_gpu else "cpu")
        self.batch_size = batch_size
        self.feature_dim = feature_dim
        self.models = {}

        # Pre-trained backbone without its final fully-connected layer.
        resnet = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
        resnet_features = nn.Sequential(*list(resnet.children())[:-1])

        # Optionally project the 512-d backbone output down to feature_dim.
        # (The AdaptiveAvgPool2d is a no-op here — the backbone's own avgpool
        # already produces 1x1 maps — kept for robustness.)
        if feature_dim != 512:
            self.models['resnet'] = nn.Sequential(
                resnet_features,
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Flatten(),
                nn.Linear(512, feature_dim),
                nn.ReLU(inplace=True)
            )
        else:
            self.models['resnet'] = nn.Sequential(
                resnet_features,
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Flatten()
            )

        for name, model in self.models.items():
            model.to(self.device)
            model.eval()
            if self.device.type == 'cuda':
                # BUG FIX: the scripted module used to be assigned to the loop
                # variable and silently discarded; store it back so the
                # TorchScript version is actually used for inference.
                try:
                    self.models[name] = torch.jit.script(model)
                except Exception:
                    # Scripting is an optimization only — fall back to eager.
                    pass

        self.transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    def extract_features(self, image_dict):
        """Run each image through the backbone and collect feature vectors.

        Args:
            image_dict: mapping of image-type name -> PIL RGB image.

        Returns:
            dict mapping the same keys to 1-D numpy float feature vectors.
        """
        features = {}
        model = self.models['resnet']  # invariant: same model for every image
        # BUG FIX: autocast is now enabled only on CUDA.  The old code ran
        # autocast('cpu'), which produced bfloat16 outputs that .numpy()
        # cannot convert (NumPy has no bfloat16), crashing on CPU-only hosts.
        use_amp = self.device.type == 'cuda'
        for img_type, img in image_dict.items():
            img_tensor = self.transforms(img).unsqueeze(0).to(self.device)
            with torch.no_grad(), torch.amp.autocast(self.device.type, enabled=use_amp):
                feature = model(img_tensor).squeeze().cpu().numpy()
            features[img_type] = feature
            if self.device.type == 'cuda':
                torch.cuda.empty_cache()
        return features

# Alternative: Custom CNN for direct 128-dimensional features
"""
class Custom128FeatureExtractor:
    def __init__(self, use_gpu=True, batch_size=32):
        self.device = torch.device("cuda" if torch.cuda.is_available() and use_gpu else "cpu")
        self.batch_size = batch_size
        
        # Custom CNN architecture that outputs 128 features directly
        self.model = nn.Sequential(
            # Initial conv layers
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
            
            # Final projection to 128 dimensions
            nn.Flatten(),
            nn.Linear(256, 128),
            nn.ReLU(inplace=True)
        )
        
        self.model.to(self.device)
        self.model.eval()
        
        self.transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    
    def extract_features(self, image_dict):
        features = {}
        for img_type, img in image_dict.items():
            img_tensor = self.transforms(img).unsqueeze(0).to(self.device)
            with torch.no_grad():
                feature = self.model(img_tensor).squeeze().cpu().numpy()
            features[img_type] = feature
            if self.device.type == 'cuda':
                torch.cuda.empty_cache()
        return features
"""

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Extract features from SAT problem images')
    subparsers = parser.add_subparsers(dest='mode', required=True)

    # Scope mode (process one instance with origin and sorted images to generate scope features)
    scope_parser = subparsers.add_parser('scope', help='Process a single instance with origin and sorted images to generate scope features')
    scope_parser.add_argument('--origin_image', type=str, required=True, help='Path to the origin image file')
    scope_parser.add_argument('--sorted_image', type=str, required=True, help='Path to the sorted image file')
    scope_parser.add_argument('--output_file', type=str, required=True, help='Path to save the feature file (.npz)')
    scope_parser.add_argument('--uuid', type=str, help='UUID of the instance (optional, for logging)')
    scope_parser.add_argument('--feature_dim', type=int, default=128, help='Feature dimension (default: 128)')

    # Ascii mode (process one instance with ascii images to generate ascii features)
    ascii_parser = subparsers.add_parser('ascii', help='Process a single instance with ascii images to generate ascii features')
    ascii_parser.add_argument('--direct_image', type=str, required=True, help='Path to the direct ascii image file')
    ascii_parser.add_argument('--min_image', type=str, required=True, help='Path to the min ascii image file')
    ascii_parser.add_argument('--max_image', type=str, required=True, help='Path to the max ascii image file')
    ascii_parser.add_argument('--mean_image', type=str, required=True, help='Path to the mean ascii image file')
    ascii_parser.add_argument('--output_file', type=str, required=True, help='Path to save the feature file (.npz)')
    ascii_parser.add_argument('--uuid', type=str, help='UUID of the instance (optional, for logging)')
    ascii_parser.add_argument('--feature_dim', type=int, default=128, help='Feature dimension (default: 128)')

    args = parser.parse_args()

    def _check_exists(path, label):
        """Exit with a descriptive error if an input image file is missing."""
        if not os.path.exists(path):
            print(f"Error: {label} image file {path} does not exist")
            sys.exit(1)

    def _run_pipeline(image_specs):
        """Shared scope/ascii pipeline: validate, load, extract, save, log.

        Args:
            image_specs: list of (feature_key, path, label) tuples; feature_key
                becomes the array name in the output .npz, label is used in
                error messages.
        """
        # Validate all inputs before doing any work.
        for _, path, label in image_specs:
            _check_exists(path, label)

        # Create output directory if needed.
        output_dir = os.path.dirname(args.output_file)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        # Load all images as RGB; any failure aborts with a single message.
        try:
            images = {key: Image.open(path).convert('RGB') for key, path, _ in image_specs}
        except Exception as e:
            print(f"Error loading images: {e}")
            sys.exit(1)

        # Extract features and time the extraction.
        extractor = MultiScaleFeatureExtractor(use_gpu=True, batch_size=32, feature_dim=args.feature_dim)
        start_time = time.time()
        features = extractor.extract_features(images)
        elapsed = (time.time() - start_time) * 1000  # ms

        # Save features (one array per image type).
        np.savez_compressed(args.output_file, **features)

        # Log timing information.
        uuid = args.uuid if args.uuid else "unknown"
        print(f"🕒 feature extraction time: {elapsed:.2f}ms, uuid: {uuid}")

        # Clean up large objects eagerly to keep peak memory down.
        del features, images
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    if args.mode == 'scope':
        # Scope mode: origin + sorted images -> scope features.
        _run_pipeline([
            ('origin', args.origin_image, 'Origin'),
            ('sorted', args.sorted_image, 'Sorted'),
        ])
    elif args.mode == 'ascii':
        # Ascii mode: direct/min/max/mean images -> ascii features.
        _run_pipeline([
            ('direct', args.direct_image, 'Direct'),
            ('min', args.min_image, 'Min'),
            ('max', args.max_image, 'Max'),
            ('mean', args.mean_image, 'Mean'),
        ])
    else:
        # Unreachable while subparsers use required=True; kept as a safety net.
        print(f"Warning: Mode {args.mode} not recognized")
        sys.exit(1)