import xml.etree.ElementTree as ET
import os
import glob
import argparse
from pathlib import Path
import shutil
from tqdm import tqdm
import random


# The 20 standard Pascal VOC object-detection class names, in the canonical
# (alphabetical) order; a name's list position becomes its YOLO class index.
VOC_CLASSES = [
    "aeroplane", "bicycle", "bird", "boat", "bottle",
    "bus", "car", "cat", "chair", "cow",
    "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]


def get_classes_from_xml(xml_dir):
    """Scan every *.xml file in xml_dir and return the sorted list of
    unique <object>/<name> class labels found across all annotations."""
    found = set()
    for path in glob.glob(os.path.join(xml_dir, "*.xml")):
        annotation = ET.parse(path).getroot()
        found.update(obj.find("name").text for obj in annotation.findall("object"))
    return sorted(found)


def convert_box(size, box):
    """Convert a VOC pixel box to normalized YOLO coordinates.

    size: (image_width, image_height) in pixels.
    box:  (xmin, ymin, xmax, ymax) in pixels.
    Returns [x_center, y_center, width, height], each scaled to [0, 1].
    """
    img_w, img_h = size
    xmin, ymin, xmax, ymax = box
    # Multiply by the reciprocal (rather than dividing) to keep the exact
    # floating-point results this function has always produced.
    inv_w = 1.0 / img_w
    inv_h = 1.0 / img_h
    return [
        (xmin + xmax) / 2.0 * inv_w,
        (ymin + ymax) / 2.0 * inv_h,
        (xmax - xmin) * inv_w,
        (ymax - ymin) * inv_h,
    ]


def convert_voc_to_yolo(xml_path, class_names):
    """Convert one VOC XML annotation file to YOLO label lines.

    Args:
        xml_path: Path to a VOC-format XML annotation file.
        class_names: Ordered list of class names; a name's position is its
            YOLO class index.

    Returns:
        A list of strings "class_idx x_center y_center width height" with
        coordinates normalized to [0, 1]. Objects whose class name is not
        in class_names are skipped.
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()

    # Image dimensions, needed to normalize the box coordinates.
    size = root.find("size")
    w = int(size.find("width").text)
    h = int(size.find("height").text)

    # Hoist the name -> index mapping out of the loop: list.index() inside
    # the loop is O(len(class_names)) per object.
    class_index = {name: idx for idx, name in enumerate(class_names)}

    out_lines = []
    for obj in root.findall("object"):
        class_name = obj.find("name").text
        if class_name not in class_index:
            continue  # unknown class: drop this object silently

        # VOC boxes are absolute pixel corners (xmin, ymin, xmax, ymax).
        xmlbox = obj.find("bndbox")
        b = (
            float(xmlbox.find("xmin").text),
            float(xmlbox.find("ymin").text),
            float(xmlbox.find("xmax").text),
            float(xmlbox.find("ymax").text),
        )

        bb = convert_box((w, h), b)
        out_lines.append(f"{class_index[class_name]} {' '.join(map(str, bb))}")

    return out_lines


def create_dataset_structure(output_dir):
    """Create the YOLO dataset layout: <output_dir>/{train,val}/{images,labels}."""
    base = Path(output_dir)
    base.mkdir(parents=True, exist_ok=True)
    for split in ('train', 'val'):
        for kind in ('images', 'labels'):
            (base / split / kind).mkdir(parents=True, exist_ok=True)


def process_voc_dataset(args):
    """Convert a full VOC dataset into the YOLO directory layout.

    Builds <output_dir>/{train,val}/{images,labels}, writes classes.txt,
    converts every annotation found in args.xml_dir, and copies each
    matching image (.jpg/.jpeg/.png) alongside its label file.
    """
    create_dataset_structure(args.output_dir)

    # Class list: either the canonical VOC-20 names or whatever the XMLs contain.
    if args.use_voc_classes:
        class_names = VOC_CLASSES
    else:
        class_names = get_classes_from_xml(args.xml_dir)

    # Persist the index -> name mapping next to the dataset.
    with open(os.path.join(args.output_dir, 'classes.txt'), 'w') as f:
        f.write('\n'.join(class_names))

    print(f"Found {len(class_names)} classes: {', '.join(class_names)}")

    # One candidate ID per annotation file (basename without the .xml suffix).
    all_image_ids = [
        os.path.splitext(os.path.basename(p))[0]
        for p in glob.glob(os.path.join(args.xml_dir, "*.xml"))
    ]
    random.shuffle(all_image_ids)

    if args.image_sets_file:
        # Predefined split: read <dir>/train.txt and <dir>/val.txt.
        splits = {}
        for split_name in ('train', 'val'):
            with open(os.path.join(args.image_sets_file, f"{split_name}.txt")) as f:
                splits[split_name] = f.read().strip().split()
    else:
        # Random split controlled by --train_ratio.
        cutoff = int(len(all_image_ids) * args.train_ratio)
        splits = {
            'train': all_image_ids[:cutoff],
            'val': all_image_ids[cutoff:],
        }

    for split_name in ('train', 'val'):
        ids = splits[split_name]
        print(f"\nProcessing {split_name} split: {len(ids)} images")

        for image_id in tqdm(ids, desc=f"Converting {split_name} set"):
            xml_path = os.path.join(args.xml_dir, f"{image_id}.xml")
            if not os.path.exists(xml_path):
                print(f"Warning: {xml_path} does not exist")
                continue

            yolo_lines = convert_voc_to_yolo(xml_path, class_names)
            if not yolo_lines:
                # No convertible objects: emit neither label nor image.
                continue

            label_path = os.path.join(args.output_dir, split_name, 'labels', f"{image_id}.txt")
            with open(label_path, 'w') as f:
                f.write('\n'.join(yolo_lines))

            # Probe the supported image extensions in order of preference.
            img_src = None
            for ext in ('.jpg', '.jpeg', '.png'):
                candidate = os.path.join(args.image_dir, f"{image_id}{ext}")
                if os.path.exists(candidate):
                    img_src = candidate
                    break

            if img_src is None:
                print(f"Warning: Image not found for {image_id}")
            else:
                shutil.copy2(
                    img_src,
                    os.path.join(args.output_dir, split_name, 'images', os.path.basename(img_src)),
                )


def main():
    """CLI entry point: parse arguments, run the conversion, report the result."""
    cli = argparse.ArgumentParser(description='Convert VOC format dataset to YOLO format')
    cli.add_argument('--xml_dir', type=str, required=True,
                     help='Directory containing XML annotation files')
    cli.add_argument('--image_dir', type=str, required=True,
                     help='Directory containing image files')
    cli.add_argument('--image_sets_file', type=str, default=None,
                     help='Path to directory containing train.txt/val.txt splits')
    cli.add_argument('--output_dir', type=str, required=True,
                     help='Output directory for YOLO format dataset')
    cli.add_argument('--use_voc_classes', action='store_true',
                     help='Use standard VOC classes instead of extracting from XML files')
    cli.add_argument('--train_ratio', type=float, default=0.8,
                     help='Ratio of images to use for training (default: 0.8)')

    args = cli.parse_args()
    process_voc_dataset(args)

    print("\nConversion completed!")
    print(f"Dataset saved to: {args.output_dir}")
    print("\nDirectory structure:")
    # One joined print produces byte-identical stdout to line-by-line prints.
    print('\n'.join([
        f"{args.output_dir}/",
        "├── train/",
        "│   ├── images/",
        "│   └── labels/",
        "├── val/",
        "│   ├── images/",
        "│   └── labels/",
        "└── classes.txt",
    ]))


# Run the converter only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
