import torch
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from PIL import Image
import os
import json
from torchvision import transforms
from randaugment import RandomAugment
from torchvision.transforms.functional import InterpolationMode
from collections import Counter
import math


class MultiModalTaggingDataset(Dataset):
    """Multi-modal (image + prompt) tagging dataset with rarity-aware sample weighting.

    Each record in the JSON annotation file is expected to provide:
    ``image`` (file name, resolved against ``image_dir``), ``prompt`` (text),
    and ``tag`` (a comma-separated tag string).

    The dataset normalizes tag strings, builds a tag vocabulary, computes
    per-sample weights (for a WeightedRandomSampler) under one of three
    strategies, and precomputes per-tag inverse-frequency loss weights.
    """

    def __init__(self, data_path, image_dir, processor, tokenizer, transform=None,
                 rare_weight=3.0, weighting_strategy='hybrid'):
        """
        Args:
            data_path: Path to a JSON file containing a list of annotation dicts.
            image_dir: Directory that image file names are resolved against.
            processor: Stored for the caller's pipeline; not used internally.
            tokenizer: Stored for the caller's pipeline; not used internally.
            transform: Optional image transform applied to each PIL image.
            rare_weight: Multiplier applied to samples containing rare tags
                (used by the 'original' and 'hybrid' strategies).
            weighting_strategy: 'original', 'tag_based', or 'hybrid'; any other
                value falls back to 'original'.
        """
        with open(data_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)

        self.image_dir = image_dir
        self.processor = processor
        self.tokenizer = tokenizer
        self.transform = transform
        self.weighting_strategy = weighting_strategy

        # Normalize tag strings in place.
        self._clean_tags()

        # Build the tag vocabulary (tag_to_idx / idx_to_tag).
        self._build_tag_vocab()

        # Per-sample weights for the sampler.
        self._compute_sample_weights(rare_weight)

        # Per-tag weights for the loss function.
        self.loss_weights = self._prepare_loss_weights()

    @staticmethod
    def _split_tags(tag_string):
        """Split a comma-separated tag string into stripped, non-empty tags."""
        return [tag.strip() for tag in tag_string.split(',') if tag.strip()]

    def _clean_tags(self):
        """Rewrite each item's tag string with whitespace stripped and empty tags dropped."""
        for item in self.data:
            item['tag'] = ','.join(self._split_tags(item['tag']))

    def _build_tag_vocab(self):
        """Build tag <-> index mappings over every tag seen in the data (sorted order)."""
        all_tags = set()
        for item in self.data:
            all_tags.update(self._split_tags(item['tag']))
        self.tag_to_idx = {tag: idx for idx, tag in enumerate(sorted(all_tags))}
        self.idx_to_tag = {idx: tag for tag, idx in self.tag_to_idx.items()}

    def _tag_counts(self):
        """Return a Counter of tag frequencies across the whole dataset."""
        counter = Counter()
        for item in self.data:
            counter.update(self._split_tags(item['tag']))
        return counter

    def _compute_sample_weights_original(self, rare_weight):
        """Binary weighting: samples with any rare tag get `rare_weight`, others 1.0.

        A tag is 'rare' here if it occurs at most 3 times in the dataset.
        Returns the number of samples containing at least one rare tag.
        """
        tag_counter = self._tag_counts()
        # Low-frequency tags: appearing at most 3 times.
        self.rare_tags = set(tag for tag, cnt in tag_counter.items() if cnt <= 3)
        if len(self.rare_tags) > 0:
            print(f"[DEBUG] Sample rare tags: {list(self.rare_tags)[:10]}")

        self.sample_weights = []
        rare_sample_count = 0
        for item in self.data:
            tags = self._split_tags(item['tag'])
            has_rare = any(tag in self.rare_tags for tag in tags)
            if has_rare:
                rare_sample_count += 1
            self.sample_weights.append(rare_weight if has_rare else 1.0)
        return rare_sample_count

    def _compute_sample_weights_by_tags(self):
        """Inverse-frequency weighting: sample weight = mean of 1/sqrt(tag count)."""
        tag_counter = self._tag_counts()
        self.tag_weights = {tag: 1.0 / math.sqrt(count)
                            for tag, count in tag_counter.items()}

        self.sample_weights = []
        for item in self.data:
            tags = self._split_tags(item['tag'])
            if len(tags) > 0:
                sample_weight = sum(self.tag_weights.get(tag, 1.0) for tag in tags) / len(tags)
            else:
                # No tags: neutral weight.
                sample_weight = 1.0
            self.sample_weights.append(sample_weight)

    def _compute_sample_weights_hybrid(self, rare_weight):
        """Hybrid weighting: inverse-frequency base weight, multiplied by
        `rare_weight` when the sample contains any rare tag (count <= 5)."""
        tag_counter = self._tag_counts()
        # Low-frequency tags: appearing at most 5 times.
        self.rare_tags = set(tag for tag, cnt in tag_counter.items() if cnt <= 5)
        self.tag_weights = {tag: 1.0 / math.sqrt(count)
                            for tag, count in tag_counter.items()}

        self.sample_weights = []
        for item in self.data:
            tags = self._split_tags(item['tag'])
            if len(tags) > 0:
                # Base weight: mean of the per-tag inverse-frequency weights.
                base_weight = sum(self.tag_weights.get(tag, 1.0) for tag in tags) / len(tags)
                has_rare = any(tag in self.rare_tags for tag in tags)
                final_weight = base_weight * rare_weight if has_rare else base_weight
            else:
                # No tags: neutral weight.
                final_weight = 1.0
            self.sample_weights.append(final_weight)

    def _compute_sample_weights(self, rare_weight):
        """Dispatch to the configured weighting strategy ('original' is the fallback)."""
        if self.weighting_strategy == 'tag_based':
            self._compute_sample_weights_by_tags()
        elif self.weighting_strategy == 'hybrid':
            self._compute_sample_weights_hybrid(rare_weight)
        else:
            self._compute_sample_weights_original(rare_weight)

    def _prepare_loss_weights(self):
        """Return a tensor of per-tag loss weights (1/sqrt(frequency)),
        indexed consistently with ``tag_to_idx``."""
        tag_counter = self._tag_counts()
        weights = torch.ones(len(self.tag_to_idx))
        for tag, count in tag_counter.items():
            if tag in self.tag_to_idx:
                weights[self.tag_to_idx[tag]] = 1.0 / math.sqrt(count)
        return weights

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return one sample dict: image (transformed if a transform was given),
        prompt, raw comma-separated tag string, sampler weight, and index."""
        item = self.data[idx]

        # Resolve the full image path and load as RGB.
        image_path = os.path.join(self.image_dir, item['image'])
        image = Image.open(image_path).convert("RGB")

        if self.transform:
            image = self.transform(image)

        return {
            "image": image,
            "prompt": item['prompt'],
            "tag": item['tag'],  # ground-truth tags, still a comma-separated string
            "sample_weight": self.sample_weights[idx],
            "idx": idx,  # dataset index, useful for debugging
        }

    def get_tag_to_idx(self):
        """Return the tag -> index mapping."""
        return self.tag_to_idx

    def get_loss_weights(self):
        """Return the per-tag loss weight tensor."""
        return self.loss_weights


def collate_fn(batch):
    """Collate a list of dataset sample dicts into one batch dict.

    Images, prompts, and tags are kept as plain Python lists (no stacking
    here); sample weights are packed into a float32 tensor for use in
    weighted-loss computation, and the original dataset indices ride along.
    """
    keys = ("image", "prompt", "tag", "sample_weight", "idx")
    images, prompts, tags, weights, indices = (
        [sample[key] for sample in batch] for key in keys
    )
    return {
        "images": images,
        "prompts": prompts,
        "tags": tags,
        "sample_weights": torch.tensor(weights, dtype=torch.float32),
        "indices": indices,
    }


def get_dataloaders(config, processor, tokenizer, min_scale=0.5):
    """Build the train/val/test DataLoaders.

    Training uses random-crop/flip/RandAugment transforms and a
    WeightedRandomSampler driven by the dataset's per-sample weights;
    val/test use a deterministic resize and sequential order.

    Returns:
        (train_loader, val_loader, test_loader, tag_to_idx, loss_weights)
        where the last two come from the training dataset.
    """
    # NOTE(review): `normalize` is defined but not applied in either transform
    # pipeline — presumably normalization happens downstream (e.g. in the
    # processor); confirm before removing or wiring it in.
    normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(config.image_size, scale=(min_scale, 1.0), interpolation=InterpolationMode.BICUBIC),
        transforms.RandomHorizontalFlip(),
        RandomAugment(2, 5, isPIL=True, augs=['Identity','AutoContrast','Brightness','Sharpness','Equalize',
                                          'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
    ])
    eval_transform = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size), interpolation=InterpolationMode.BICUBIC),
    ])

    # Sampling-bias knobs; fall back to the original binary strategy.
    rare_weight = getattr(config, 'rare_weight', 3.0)
    weighting_strategy = getattr(config, 'weighting_strategy', 'original')

    def build_dataset(data_file, image_dir, transform, **extra):
        # All annotation paths are resolved relative to config.ann_root.
        return MultiModalTaggingDataset(
            os.path.join(config.ann_root, data_file),
            image_dir,
            processor,
            tokenizer,
            transform=transform,
            weighting_strategy=weighting_strategy,
            **extra,
        )

    train_dataset = build_dataset(config.train_data_file, config.image_train,
                                  train_transform, rare_weight=rare_weight)
    val_dataset = build_dataset(config.val_data_file, config.image_val, eval_transform)
    test_dataset = build_dataset(config.test_data_file, config.image_test, eval_transform)

    # Oversample rare-tag examples; replacement=True lets high-weight samples
    # appear multiple times per epoch. num_samples keeps the epoch length
    # equal to the dataset size.
    rare_sampler = WeightedRandomSampler(
        weights=train_dataset.sample_weights,
        num_samples=len(train_dataset),
        replacement=True,
    )

    # shuffle must stay unset when a sampler is supplied.
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size_train,
                              sampler=rare_sampler, collate_fn=collate_fn,
                              num_workers=config.num_workers)
    val_loader = DataLoader(val_dataset, batch_size=config.batch_size_val,
                            shuffle=False, collate_fn=collate_fn,
                            num_workers=config.num_workers)
    test_loader = DataLoader(test_dataset, batch_size=config.batch_size_test,
                             shuffle=False, collate_fn=collate_fn,
                             num_workers=config.num_workers)

    return (train_loader, val_loader, test_loader,
            train_dataset.get_tag_to_idx(), train_dataset.get_loss_weights())