import os
import random
import tqdm
import torch
from PIL import Image,ImageEnhance
from collections import Counter
from transformers import CLIPModel, CLIPProcessor
import json

class DataAugmentor:
    def __init__(self, 
                 input_json="processed_gongzhuangdata_tag/train/cleaned_train.json",
                 output_json="augmented_train.json",
                 image_root="dataset_images_gongzhuang",
                 min_occurrences=5,
                 max_aug_per_sample=1,
                 clip_model_name="pth/clip-vit-large-patch14",
                 augment_ratio=1.5,
                 max_augment_ratio=2.0,
                 min_low_freq_aug=2,
                 max_low_freq_aug=5,
                 max_high_freq_aug=1,
                 memory_efficient=False  # 新增：内存优化标志（修复遗漏的初始化）
                ):
        # 原有初始化参数...
        self.input_json = input_json
        self.output_json = output_json
        self.image_root = image_root
        self.min_occurrences = min_occurrences
        self.max_aug_per_sample = max_aug_per_sample
        self.clip_model_name = clip_model_name
        self.augment_ratio = augment_ratio
        self.max_augment_ratio = max_augment_ratio
        self.min_low_freq_aug = min_low_freq_aug
        self.max_low_freq_aug = max_low_freq_aug
        self.max_high_freq_aug = max_high_freq_aug
        
        # 新增：初始化memory_efficient属性（修复错误）
        self.memory_efficient = memory_efficient
        
        # 其他属性...
        self.clip_model = None
        self.clip_processor = None
        self.data = []
        self.augmented_data = []
        self.tag_counter = Counter()
        self.rare_tags = set()

    def load_data(self):
        """加载输入JSON数据并统计标签"""
        with open(self.input_json, 'r', encoding='utf-8') as f:
            self.data = json.load(f)
        
        missing_images = []
        for item in self.data:
            img_path = os.path.join(self.image_root, item['image'])
            if not os.path.exists(img_path):
                missing_images.append(item['image'])
            else:
                tags = item['tag'].split(',')
                self.tag_counter.update(tags)
        
        print(f"[INFO] 加载原始数据：共{len(self.data)}条，缺失图像{len(missing_images)}条")
        if missing_images:
            print(f"[WARNING] 部分图像不存在：{missing_images[:3]}...")

    def identify_rare_tags(self):
        """识别低频标签(出现次数≤min_occurrences)"""
        self.rare_tags = {tag for tag, cnt in self.tag_counter.items() if cnt <= self.min_occurrences}
        print(f"[INFO] 识别低频标签：共{len(self.rare_tags)}个（出现次数≤{self.min_occurrences})")

    def _generate_single_augment(self, item):
        """Create one augmented copy of a sample's image and return its metadata.

        Samples carrying a rare tag get conservative transforms (horizontal
        flip, slight brightness jitter, safe crop); other samples additionally
        allow small rotation and contrast jitter. Exactly one transform is
        applied. Images smaller than 200px on either side — or any failure —
        fall back to ``_fake_augment`` (the untouched original).

        Returns:
            dict with keys ``image``, ``tag``, ``prompt``, ``original_path``.
        """
        src_rel_path = item['image']
        src_abs_path = os.path.join(self.image_root, src_rel_path)

        try:
            image = Image.open(src_abs_path).convert("RGB")
            width, height = image.size
            # Too small to crop/rotate safely — pseudo-augment instead.
            if width < 200 or height < 200:
                return self._fake_augment(item)

            sample_tags = item['tag'].split(',')
            has_rare_tag = any(t in self.rare_tags for t in sample_tags)

            # Candidate transforms differ by tag frequency.
            if has_rare_tag:
                chosen = random.sample([
                    lambda x: x.transpose(Image.FLIP_LEFT_RIGHT),
                    lambda x: ImageEnhance.Brightness(x).enhance(random.uniform(0.9, 1.1)),
                    self._safe_crop
                ], k=1)
            else:
                chosen = random.sample([
                    lambda x: x.transpose(Image.FLIP_LEFT_RIGHT),
                    lambda x: x.rotate(random.randint(-3, 3), expand=False),
                    lambda x: ImageEnhance.Contrast(x).enhance(random.uniform(0.85, 1.15)),
                    self._safe_crop
                ], k=1)

            augmented = image.copy()
            for transform in chosen:
                augmented = transform(augmented)

            # Persist next to the source image under a randomized "_augNNNN" suffix.
            out_dir = os.path.dirname(src_abs_path)
            stem, ext = os.path.splitext(os.path.basename(src_abs_path))
            out_abs_path = os.path.join(out_dir, f"{stem}_aug{random.randint(1000, 9999)}{ext}")
            augmented.save(out_abs_path)

            out_rel_path = os.path.join(os.path.dirname(src_rel_path), os.path.basename(out_abs_path))
            return {
                "image": out_rel_path,
                "tag": item['tag'],
                "prompt": item['prompt'],
                "original_path": src_rel_path
            }

        except Exception as e:
            print(f"[WARNING] 增强失败 {src_rel_path}:{e}，使用原始图像替代")
            return self._fake_augment(item)

    def _fake_augment(self, item):
        """Pseudo-augmentation fallback: echo the original sample unchanged."""
        original = item['image']
        return {
            "image": original,
            "tag": item['tag'],
            "prompt": item['prompt'],
            "original_path": original,
        }

    def _safe_crop(self, img):
        """安全裁剪：仅裁剪边缘，保留中心区域"""
        w, h = img.size
        left = random.randint(0, int(w * 0.05))
        right = w - random.randint(0, int(w * 0.05))
        top = random.randint(0, int(h * 0.05))
        bottom = h - random.randint(0, int(h * 0.05))
        # 确保裁剪区域有效
        left = min(left, right - 10)
        top = min(top, bottom - 10)
        return img.crop((left, top, right, bottom))

    def filter_low_similarity(self, augmented_samples, original_samples):
        """Drop augmented samples whose CLIP similarity to their source is too low.

        Works in batches of 16 to bound memory; the CLIP model is loaded lazily
        on first use. Fault-tolerant policy: samples whose original/augmented
        image cannot be opened, or whose similarity cannot be computed, are
        KEPT rather than discarded.

        Args:
            augmented_samples: list of augmented sample dicts (``image``,
                ``tag``, ``original_path`` keys are read).
            original_samples: unused; retained for interface compatibility.

        Returns:
            The surviving augmented samples (similarity >= 0.65 for rare-tag
            samples, >= 0.7 otherwise).
        """
        if not augmented_samples:
            return []

        # Lazy initialisation — skip loading CLIP entirely when nothing to filter.
        if self.clip_model is None:
            self.clip_model = CLIPModel.from_pretrained(self.clip_model_name).eval()
            self.clip_processor = CLIPProcessor.from_pretrained(
                self.clip_model_name, 
                use_fast=True
            )
            if torch.cuda.is_available():
                self.clip_model = self.clip_model.cuda()

        batch_size = 16
        filtered = []

        for i in tqdm.tqdm(range(0, len(augmented_samples), batch_size), desc="过滤低相似度样本"):
            batch = augmented_samples[i:i+batch_size]
            # Load each distinct original image only once per batch.
            batch_original_images = {}
            for aug in batch:
                path = aug.get('original_path')
                if path and path not in batch_original_images:
                    try:
                        batch_original_images[path] = Image.open(
                            os.path.join(self.image_root, path)
                        ).convert("RGB")
                    except Exception:  # FIX: was a bare except (caught SystemExit etc.)
                        pass  # missing/corrupt original: sample is kept below

            for aug_item in batch:
                original_img = batch_original_images.get(aug_item.get('original_path'))
                if original_img is None:
                    # No reference image available -> keep (fault tolerance).
                    filtered.append(aug_item)
                    continue

                try:
                    aug_img_path = os.path.join(self.image_root, aug_item['image'])
                    aug_img = Image.open(aug_img_path).convert("RGB")
                except Exception:  # FIX: was a bare except
                    filtered.append(aug_item)
                    continue

                # FIX: bind before the try so the cleanup below can never hit an
                # unbound name. The original did `del aug_img, inputs, features`
                # after the except-branch, which raised NameError (and aborted
                # the whole filtering pass) whenever the computation failed
                # before `inputs`/`features` were assigned.
                inputs = features = None
                try:
                    inputs = self.clip_processor(images=[original_img, aug_img], return_tensors="pt")
                    if torch.cuda.is_available():
                        inputs = {k: v.cuda() for k, v in inputs.items()}

                    with torch.no_grad():
                        features = self.clip_model.get_image_features(**inputs)

                    if features.shape[0] == 2:
                        # Both images encoded in one batch: compare row 0 vs row 1.
                        similarity = torch.nn.functional.cosine_similarity(features[0], features[1], dim=0).item()
                    else:
                        # Fallback: encode the two images independently.
                        original_inputs = self.clip_processor(images=[original_img], return_tensors="pt")
                        aug_inputs = self.clip_processor(images=[aug_img], return_tensors="pt")

                        if torch.cuda.is_available():
                            original_inputs = {k: v.cuda() for k, v in original_inputs.items()}
                            aug_inputs = {k: v.cuda() for k, v in aug_inputs.items()}

                        with torch.no_grad():
                            original_features = self.clip_model.get_image_features(**original_inputs)[0]
                            aug_features = self.clip_model.get_image_features(**aug_inputs)[0]

                        similarity = torch.nn.functional.cosine_similarity(original_features, aug_features, dim=0).item()
                except Exception as e:
                    print(f"[WARNING] 相似度计算失败: {aug_item['image']} -> {e}")
                    similarity = 1.0  # scoring failed -> keep the sample

                # Rare-tag samples get a more lenient threshold.
                is_low_freq = any(tag in self.rare_tags for tag in aug_item['tag'].split(','))
                threshold = 0.65 if is_low_freq else 0.7

                if similarity >= threshold:
                    filtered.append(aug_item)

                # Release per-sample tensors promptly to cap memory usage.
                del aug_img
                inputs = features = None
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

            del batch_original_images
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        
        return filtered


    def process_data(self):
        """Generate augmented samples in batches and merge them with the originals.

        Pipeline:
          1. Split samples with existing images into low-frequency (carry a
             rare tag) and high-frequency groups.
          2. Derive the augmentation quota from ``augment_ratio`` (clamped to
             [0.5, ``max_augment_ratio``]).
          3. Allot per-sample augmentation passes to each group.
          4. Generate augmentations in batches of 500 to bound memory.
          5. Top up any shortfall (falling back to the low-frequency pool when
             there are no high-frequency samples).
          6. Filter out augmentations with low CLIP similarity to their source.
          7. Shuffle originals + survivors into ``self.augmented_data``.
        """
        # 1. Split samples by tag frequency; skip entries whose image is missing.
        original_samples = [item for item in self.data if os.path.exists(os.path.join(self.image_root, item['image']))]
        low_freq_samples = []
        high_freq_samples = []
        
        for item in original_samples:
            tags = item['tag'].split(',')
            if any(tag in self.rare_tags for tag in tags):
                low_freq_samples.append(item)
            else:
                high_freq_samples.append(item)
        
        print(f"低频样本数：{len(low_freq_samples)}，高频样本数：{len(high_freq_samples)}")
        
        # 2. Compute the augmentation quota.
        original_count = len(original_samples)
        augment_ratio = min(max(self.augment_ratio, 0.5), self.max_augment_ratio)
        target_augment_count = int(original_count * augment_ratio)
        print(f"原始样本总数：{original_count}，目标增强样本数：{target_augment_count}（比例：{augment_ratio}）")
        print(f"目标总样本数：{original_count + target_augment_count}")

        # 3. Distribute augmentation passes between the two groups.
        low_freq_count = len(low_freq_samples)
        high_freq_count = len(high_freq_samples)
        
        if low_freq_count > 0:
            # Base passes per rare sample, then spread any leftover quota up to the cap.
            base_low_freq_aug = min(self.min_low_freq_aug, target_augment_count // low_freq_count)
            remaining_quota = target_augment_count - low_freq_count * base_low_freq_aug
            extra_low_freq_aug = max(0, min(remaining_quota // low_freq_count, self.max_low_freq_aug - base_low_freq_aug))
            low_freq_aug_count = base_low_freq_aug + extra_low_freq_aug
        else:
            low_freq_aug_count = 0
        
        if high_freq_count > 0 and target_augment_count > low_freq_count * low_freq_aug_count:
            remaining_for_high_freq = target_augment_count - low_freq_count * low_freq_aug_count
            high_freq_aug_count = min(self.max_high_freq_aug, remaining_for_high_freq // high_freq_count)
        else:
            high_freq_aug_count = 0
        
        print(f"低频样本增强次数：{low_freq_aug_count}，高频样本增强次数：{high_freq_aug_count}")

        # 4. Generate augmentations in fixed-size batches to avoid memory spikes.
        batch_size = 500  # samples per batch
        all_augmented_samples = []
        
        low_freq_batches = [low_freq_samples[i:i+batch_size] for i in range(0, len(low_freq_samples), batch_size)]
        for batch_idx, batch in enumerate(low_freq_batches):
            print(f"[INFO] 处理低频样本批次 {batch_idx+1}/{len(low_freq_batches)}")
            batch_augmented = []
            
            for item in tqdm.tqdm(batch, desc=f"批次 {batch_idx+1} 增强中"):
                for _ in range(low_freq_aug_count):
                    batch_augmented.append(self._generate_single_augment(item))
            
            all_augmented_samples.extend(batch_augmented)
            
            # Release per-batch buffers when memory-efficient mode is on.
            if self.memory_efficient:
                del batch_augmented
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
        
        # High-frequency samples, capped by the remaining quota.
        if high_freq_aug_count > 0:
            high_freq_batches = [high_freq_samples[i:i+batch_size] for i in range(0, len(high_freq_samples), batch_size)]
            remaining = target_augment_count - len(all_augmented_samples)
            
            for batch in high_freq_batches:
                if remaining <= 0:
                    break
                    
                batch_augmented = []
                for item in tqdm.tqdm(batch, desc="生成高频样本增强"):
                    for _ in range(high_freq_aug_count):
                        batch_augmented.append(self._generate_single_augment(item))
                        remaining -= 1
                        if remaining <= 0:
                            break
                    if remaining <= 0:
                        break
                
                all_augmented_samples.extend(batch_augmented)
                
                if self.memory_efficient:
                    del batch_augmented
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
        
        # 5. Top up if we are still short of the quota.
        if len(all_augmented_samples) < target_augment_count:
            supplement_count = target_augment_count - len(all_augmented_samples)
            print(f"[INFO] 补充{supplement_count}个增强样本")
            
            # FIX: the original indexed high_freq_samples unconditionally, which
            # raised ZeroDivisionError when every sample was low-frequency. Fall
            # back to the low-frequency pool, and skip if there is no source at all.
            supplement_pool = high_freq_samples if high_freq_samples else low_freq_samples
            if supplement_pool:
                for i in range(supplement_count):
                    item = supplement_pool[i % len(supplement_pool)]
                    all_augmented_samples.append(self._generate_single_augment(item))
        
        # Never exceed the quota.
        all_augmented_samples = all_augmented_samples[:target_augment_count]
        
        # 6. Drop augmentations that drifted too far from their source image.
        filtered_augmented = self.filter_low_similarity(all_augmented_samples, original_samples)
        
        # 7. Merge and shuffle.
        self.augmented_data = original_samples + filtered_augmented
        random.shuffle(self.augmented_data)
        print(f"最终训练集规模：{len(self.augmented_data)}（原始{len(original_samples)} + 增强{len(filtered_augmented)}）")

    def save_results(self):
        """保存增强后的JSON数据"""
        with open(self.output_json, 'w', encoding='utf-8') as f:
            json.dump(self.augmented_data, f, ensure_ascii=False, indent=2)
        print(f"增强数据已保存至：{self.output_json}")

    def run(self):
        """Execute the full augmentation pipeline: load -> rare-tag scan -> augment -> save."""
        for stage in (self.load_data, self.identify_rare_tags, self.process_data, self.save_results):
            stage()
if __name__ == "__main__":
    # Configure the augmentor for this project's dataset layout, then run the
    # full pipeline: load -> rare-tag scan -> batched augmentation -> save.
    DataAugmentor(
        input_json="processed_gongzhuangdata_tag/train/cleaned_train.json",
        output_json="augmented_train_balanced.json",
        image_root="dataset_images_gongzhuang",
        min_occurrences=4,
        max_aug_per_sample=1,   # one augmentation pass per source sample
        augment_ratio=1.5,      # augmented count = original count * this ratio
        max_augment_ratio=2.0,  # hard cap on the ratio
        memory_efficient=True
    ).run()