import os
import random
from functools import partial

import albumentations as A
import cv2
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
from torch.utils.data import Dataset


class YOLODataset(Dataset):
    """Dataset of images paired with YOLO-format ``.txt`` label files.

    Each label file shares the image's stem and contains one box per line:
    ``class_id cx cy w h`` with coordinates normalized to [0, 1].
    ``__getitem__`` returns ``(image, labels, width, height)`` where
    ``image`` is a CHW float tensor scaled to [0, 1] and ``labels`` is an
    ``(N, 5)`` float tensor of ``[cx, cy, w, h, class_id]`` rows.
    """

    def __init__(self, img_dir, label_dir, is_train=True):
        """
        Args:
            img_dir: directory containing the image files.
            label_dir: directory containing the matching ``.txt`` label files.
            is_train: if True, apply color-jitter / horizontal-flip augmentation.
        """
        self.img_dir = img_dir
        self.label_dir = label_dir
        self.is_train = is_train

        # Sort for a deterministic sample order across runs and platforms
        # (os.listdir order is arbitrary); lower() also admits '.PNG' etc.
        self.img_files = sorted(
            f for f in os.listdir(img_dir)
            if f.lower().endswith(('.png', '.jpg', '.jpeg'))
        )
        # Label file shares the image's stem with a .txt extension.
        self.label_files = [os.path.splitext(f)[0] + '.txt' for f in self.img_files]

        # Normalize is an identity transform apart from dividing by 255
        # (mean 0, std 1), so pixels end up in [0, 1]. Boxes are tracked
        # through the flips via BboxParams in YOLO format.
        self.train_transforms = A.Compose([
            A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, p=0.5),
            A.HorizontalFlip(p=0.5),
            A.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], max_pixel_value=255.0, p=1),
            ToTensorV2(),
        ], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))

        self.val_transforms = A.Compose([
            A.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], max_pixel_value=255.0, p=1),
            ToTensorV2(),
        ], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, idx):
        """Load, augment, and return one sample.

        Returns:
            tuple: ``(image, labels, width, height)`` — post-transform CHW
            tensor, ``(N, 5)`` label tensor, and the transformed image's
            width/height in pixels.

        Raises:
            FileNotFoundError: if the image cannot be read from disk.
        """
        img_path = os.path.join(self.img_dir, self.img_files[idx])
        label_path = os.path.join(self.label_dir, self.label_files[idx])

        image = cv2.imread(img_path)
        if image is None:
            raise FileNotFoundError(f"Image not found: {img_path}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR

        labels = []
        if os.path.exists(label_path):
            with open(label_path, 'r') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue  # tolerate blank/trailing lines in label files
                    class_id, x, y, w, h = map(float, line.split())
                    # Reorder to [x, y, w, h, class_id]: albumentations wants
                    # coordinates first, class labels in a separate field.
                    labels.append([x, y, w, h, class_id])

        labels = np.array(labels) if labels else np.zeros((0, 5))

        transform = self.train_transforms if self.is_train else self.val_transforms
        transformed = transform(
            image=image,
            bboxes=labels[:, :4].tolist(),
            class_labels=labels[:, 4].tolist(),
        )

        image = transformed['image']
        bboxes = transformed['bboxes']
        class_labels = transformed['class_labels']

        # Recombine surviving boxes with their class ids; albumentations may
        # drop boxes that become degenerate under augmentation.
        if len(bboxes) > 0:
            labels = np.concatenate([np.array(bboxes), np.array(class_labels)[:, None]], axis=1)
        else:
            labels = np.zeros((0, 5))

        _, height, width = image.shape  # ToTensorV2 yields CHW
        return image, torch.tensor(labels, dtype=torch.float32), width, height


def create_dataloader(img_dir, label_dir, batch_size, base_image_size=640, num_workers=4, shuffle=True, is_train=True):
    """Build a DataLoader over a YOLODataset.

    Args:
        img_dir: directory of images.
        label_dir: directory of YOLO .txt label files.
        batch_size: samples per batch.
        base_image_size: nominal square image size; training batches pick a
            multi-scale size around it, validation uses it fixed.
        num_workers: DataLoader worker processes.
        shuffle: shuffle request; honored only when ``is_train`` is True.
        is_train: selects augmentation and the multi-scale collate function.

    Returns:
        torch.utils.data.DataLoader yielding ``(images, labels)`` batches.
    """
    dataset = YOLODataset(
        img_dir=img_dir,
        label_dir=label_dir,
        is_train=is_train,
    )

    # Use functools.partial rather than a lambda: lambdas cannot be pickled,
    # which breaks num_workers > 0 under the 'spawn' start method (the
    # default on Windows and macOS).
    if is_train:
        collate_fn = partial(train_collate_fn, base_image_size=base_image_size)
    else:
        collate_fn = partial(val_collate_fn, base_image_size=base_image_size)

    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle and is_train,  # never shuffle validation data
        pin_memory=True,
        collate_fn=collate_fn,
    )

    return loader


def train_collate_fn(batch, base_image_size):
    """Collate a training batch with random multi-scale letterboxing.

    Picks one square size (a multiple of 32, within +/-2 grid steps of the
    base size) per batch, letterbox-resizes every image to it, remaps the
    labels, and pads them to a common length.

    Args:
        batch: list of ``(image, labels, width, height)`` samples.
        base_image_size: nominal square size the scales are centered on.

    Returns:
        tuple: ``(images, labels)`` — stacked image tensor and padded
        ``(B, max_n, 5)`` label tensor.
    """
    images, labels, widths, heights = zip(*batch)

    # Candidate sizes: 32 * (s-2 .. s+2), with s floored at 5 (min 96 px).
    base_steps = max(5, base_image_size // 32)
    candidate_sizes = [32 * s for s in range(base_steps - 2, base_steps + 3)]
    image_size = random.choice(candidate_sizes)

    resized, adjusted = resize_and_adjust_batch(images, labels, widths, heights, image_size)
    return torch.stack(resized), pad_labels(adjusted)


def val_collate_fn(batch, base_image_size):
    """Collate a validation batch at a single fixed image size.

    Unlike the training collate, no multi-scale jitter is applied: every
    image is letterbox-resized to the base size rounded to a multiple of 32.

    Args:
        batch: list of ``(image, labels, width, height)`` samples.
        base_image_size: nominal square size (floored to a multiple of 32).

    Returns:
        tuple: ``(images, labels)`` — stacked image tensor and padded
        ``(B, max_n, 5)`` label tensor.
    """
    images, labels, widths, heights = zip(*batch)

    # Fixed size: base floored to a grid multiple of 32, never below 160.
    image_size = 32 * max(5, base_image_size // 32)

    resized, adjusted = resize_and_adjust_batch(images, labels, widths, heights, image_size)
    return torch.stack(resized), pad_labels(adjusted)


def resize_and_adjust_batch(images, labels, widths, heights, image_size):
    """Letterbox-resize each image to ``(image_size, image_size)`` and remap labels.

    Each image is scaled uniformly so its longer side fits ``image_size``,
    then center-padded with gray (114/255) to a square. Label boxes
    (``[cx, cy, w, h, class_id]`` rows, normalized to the original image)
    are rescaled and shifted to stay aligned, then re-normalized to the
    padded square.

    Args:
        images: iterable of CHW tensors or HWC arrays.
        labels: iterable of ``(N, 5)`` tensors matching ``images``.
        widths, heights: original per-image pixel dimensions.
        image_size: target square side length.

    Returns:
        tuple: ``(resized_images, adjusted_labels)`` — lists of CHW tensors
        and remapped ``(N, 5)`` label tensors.
    """
    # The pad transform is invariant across the batch, so build it once.
    pad_transform = A.PadIfNeeded(
        min_height=image_size,
        min_width=image_size,
        border_mode=cv2.BORDER_CONSTANT,
        value=[114.0 / 255.0, 114.0 / 255.0, 114.0 / 255.0],  # gray letterbox fill
    )

    resized_images = []
    adjusted_labels = []
    for img, label, width, height in zip(images, labels, widths, heights):
        if isinstance(img, torch.Tensor):
            img = img.permute(1, 2, 0).numpy()  # CHW -> HWC for albumentations

        # Uniform scale that fits the whole image inside the square.
        scale = min(image_size / width, image_size / height)
        new_w = int(width * scale)
        new_h = int(height * scale)

        resized_img = A.Resize(height=new_h, width=new_w, interpolation=cv2.INTER_LINEAR)(image=img)['image']

        # Centered padding offsets; PadIfNeeded pads centered by default,
        # matching these floor-divided offsets.
        pad_left = (image_size - new_w) // 2
        pad_top = (image_size - new_h) // 2

        padded_img = pad_transform(image=resized_img)['image']
        resized_images.append(torch.tensor(padded_img).permute(2, 0, 1))

        if label.shape[0] > 0:
            label = label.clone()  # do not mutate the caller's tensor

            # 1. De-normalize to original-image pixel coordinates.
            label[:, 0] *= width   # center_x
            label[:, 1] *= height  # center_y
            label[:, 2] *= width   # box width
            label[:, 3] *= height  # box height

            # 2. Apply the resize scale to the four box columns only.
            #    (Bug fix: previously `label[:, :5] *= scale` also scaled
            #    the class-id column, corrupting the class labels.)
            label[:, :4] *= scale

            # 3. Shift the centers by the letterbox padding.
            label[:, 0] += pad_left
            label[:, 1] += pad_top

            # 4. Re-normalize everything to the padded square size.
            label[:, :4] /= image_size

        adjusted_labels.append(label)

    return resized_images, adjusted_labels


def pad_labels(labels):
    """Stack variable-length label tensors into one ``(B, max_n, 5)`` tensor.

    Shorter label sets are zero-padded on the right; an all-empty (or empty)
    batch yields a ``(B, 0, 5)`` tensor instead of raising.

    Args:
        labels: list of ``(n_i, 5)`` float tensors.

    Returns:
        torch.Tensor of shape ``(len(labels), max_n, 5)``.
    """
    # default=0 keeps an empty batch from raising ValueError in max().
    max_num_labels = max((label.shape[0] for label in labels), default=0)
    batch_labels = torch.zeros(len(labels), max_num_labels, 5)
    for i, label in enumerate(labels):
        batch_labels[i, :label.shape[0]] = label
    return batch_labels