import torch
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from PIL import Image
import os
import json
from torchvision import transforms
from randaugment import RandomAugment
from torchvision.transforms.functional import InterpolationMode
from collections import Counter
import math


class MultiModalTaggingDataset(Dataset):
    """Dataset of (image, prompt, tag) samples with rare-tag sample weighting.

    Each record in the JSON annotation file is a dict with keys ``'image'``
    (filename relative to ``image_dir``), ``'prompt'``, and ``'tag'`` (a
    comma-separated tag string). Samples containing at least one rare tag —
    a tag seen at most ``rare_threshold`` times across the whole dataset —
    get sampling weight ``rare_weight``; all others get 1.0. The weights are
    exposed as ``self.sample_weights`` for use with ``WeightedRandomSampler``.
    """

    def __init__(self, data_path, image_dir, processor, tokenizer, transform=None,
                 rare_weight=3, rare_threshold=5):
        """
        Args:
            data_path: Path to the JSON annotation file (a list of dicts).
            image_dir: Directory against which image filenames are resolved.
            processor: Stored on the instance; not used inside the dataset.
            tokenizer: Stored on the instance; not used inside the dataset.
            transform: Optional callable applied to each PIL image.
            rare_weight: Sampling weight for samples containing a rare tag.
            rare_threshold: A tag counts as rare if it occurs at most this
                many times (was previously hard-coded to 5).
        """
        with open(data_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)

        self.image_dir = image_dir
        self.processor = processor
        self.tokenizer = tokenizer
        self.transform = transform
        print(f"\n[INFO] rare: {rare_weight}")

        # Pre-compute per-sample weights so low-frequency-tag samples can be
        # over-sampled by a WeightedRandomSampler at loader-creation time.
        self._compute_sample_weights(rare_weight, rare_threshold)

    def _compute_sample_weights(self, rare_weight, rare_threshold=5):
        """Compute one sampling weight per sample; rare-tag samples get more.

        Populates:
            self.rare_tags: set of tags occurring <= rare_threshold times.
            self.sample_weights: list of weights parallel to self.data.
        """
        # Count how often every tag appears across the whole dataset.
        # NOTE(review): tags are split on ',' without stripping whitespace,
        # so "a, b" yields " b" — confirm the annotation files contain no
        # spaces around commas before changing this.
        tag_counter = Counter()
        for item in self.data:
            tag_counter.update(item['tag'].split(','))

        self.rare_tags = {tag for tag, cnt in tag_counter.items() if cnt <= rare_threshold}

        # A sample containing any rare tag gets rare_weight, otherwise 1.0.
        self.sample_weights = [
            rare_weight
            if any(tag in self.rare_tags for tag in item['tag'].split(','))
            else 1.0
            for item in self.data
        ]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]

        # Build the full image path from the configured image directory.
        image_path = os.path.join(self.image_dir, item['image'])

        # Open inside a context manager so the underlying file handle is
        # released even if convert()/transform raises; convert() forces a
        # full pixel load, so the returned image outlives the closed file.
        with Image.open(image_path) as img:
            image = img.convert("RGB")

        prompt = item['prompt']
        tag = item['tag']  # ground-truth label: still a comma-separated string

        # Apply the (optional) image transform here, not in the collate_fn.
        if self.transform:
            image = self.transform(image)

        return {
            "image": image,
            "prompt": prompt,
            "tag": tag,
            "sample_weight": self.sample_weights[idx]  # optional: per-sample weight
        }


def collate_fn(batch):
    """Collate a list of per-sample dicts into one dict of parallel lists.

    Args:
        batch: list of dicts, each with "image", "prompt" and "tag" keys.

    Returns:
        Dict with "images", "prompts" and "tags" lists, index-aligned.
    """
    collated = {"images": [], "prompts": [], "tags": []}
    for sample in batch:
        collated["images"].append(sample["image"])
        collated["prompts"].append(sample["prompt"])
        collated["tags"].append(sample["tag"])
    return collated


def get_dataloaders(config, processor, tokenizer, min_scale=0.5):
    """Build train/val/test DataLoaders from paths and sizes in ``config``.

    The training loader uses a WeightedRandomSampler driven by the dataset's
    rare-tag sample weights; val/test loaders iterate in order.

    Args:
        config: Object providing image_size, ann_root, *_data_file,
            image_train/val/test, batch_size_*, num_workers, and optionally
            rare_weight.
        processor: Passed through to the datasets.
        tokenizer: Passed through to the datasets.
        min_scale: Lower bound of the RandomResizedCrop area scale.

    Returns:
        (train_loader, val_loader, test_loader) tuple.
    """
    # Image transforms.
    # NOTE(review): `normalize` is defined but never added to either pipeline,
    # and there is no ToTensor — presumably the downstream processor handles
    # tensor conversion/normalization. Confirm before wiring it in.
    normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                                     (0.26862954, 0.26130258, 0.27577711))
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(config.image_size, scale=(min_scale, 1.0),
                                     interpolation=InterpolationMode.BICUBIC),
        transforms.RandomHorizontalFlip(),
        RandomAugment(2, 5, isPIL=True,
                      augs=['Identity', 'AutoContrast', 'Brightness', 'Sharpness', 'Equalize',
                            'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
    ])
    transform_test = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size),
                          interpolation=InterpolationMode.BICUBIC),
    ])

    # All annotation paths are read from config.
    train_data_path = os.path.join(config.ann_root, config.train_data_file)
    val_data_path = os.path.join(config.ann_root, config.val_data_file)
    test_data_path = os.path.join(config.ann_root, config.test_data_file)

    # Training set with rare-tag weighting enabled.
    train_dataset = MultiModalTaggingDataset(
        train_data_path,
        config.image_train,
        processor,
        tokenizer,
        transform=transform_train,
        rare_weight=getattr(config, 'rare_weight', 3.0)  # weight for rare-tag samples
    )

    val_dataset = MultiModalTaggingDataset(val_data_path, config.image_val, processor,
                                           tokenizer, transform=transform_test)
    test_dataset = MultiModalTaggingDataset(test_data_path, config.image_test, processor,
                                            tokenizer, transform=transform_test)

    # Sampler that over-draws rare-tag samples; replacement=True is required
    # for weighted sampling to honor the weights across an epoch.
    train_sampler = WeightedRandomSampler(
        weights=train_dataset.sample_weights,
        num_samples=len(train_dataset),  # one epoch == dataset-size draws
        replacement=True
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size_train,
        sampler=train_sampler,  # mutually exclusive with shuffle=True
        collate_fn=collate_fn,
        num_workers=config.num_workers
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=config.batch_size_val,
        shuffle=False,
        collate_fn=collate_fn,
        num_workers=config.num_workers
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=config.batch_size_test,
        shuffle=False,
        collate_fn=collate_fn,
        num_workers=config.num_workers
    )

    return train_loader, val_loader, test_loader