import torch
from transformers import CLIPModel, CLIPProcessor, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig,AutoConfig,PreTrainedTokenizer
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training,TaskType
import torch.nn.functional as F
import random
import re
from model_old.model_ChineseCLIPSimilarityCalculator import *

# 1. Load the CLIP model
def build_clip_model(clip_model_path):
    """Load a CLIP model together with its matching processor.

    Args:
        clip_model_path: local directory or hub id accepted by `from_pretrained`.

    Returns:
        (model, processor): the `CLIPModel` and its `CLIPProcessor`.
    """
    # Force the slow (python) tokenizer inside the processor for compatibility.
    clip_processor = CLIPProcessor.from_pretrained(
        clip_model_path, from_slow=True, use_fast=False
    )
    clip_model = CLIPModel.from_pretrained(clip_model_path)
    return clip_model, clip_processor


# 2. Load the LLM (e.g. Baichuan/Qwen) with 4-bit quantization enabled
def build_llm_model(llm_path):
    """Load a causal LLM in 4-bit NF4 quantization, ready for k-bit (LoRA) training.

    Args:
        llm_path: local directory or hub id of the causal language model.

    Returns:
        (model, tokenizer): quantized model with gradient checkpointing enabled,
        plus its tokenizer (chat template neutralised to an identity function).
    """
    tok = AutoTokenizer.from_pretrained(llm_path, use_fast=False, trust_remote_code=True)

    # Neutralise any chat template: downstream code feeds raw prompt strings,
    # so the template is replaced with an identity pass-through.
    if hasattr(tok, "apply_chat_template"):
        def identity_apply_chat_template(*args, **kwargs):
            first = args[0]
            # A list argument is treated as a batch; keep only its first element.
            return first[0] if isinstance(first, list) else first
        tok.apply_chat_template = identity_apply_chat_template

    cfg = AutoConfig.from_pretrained(llm_path, trust_remote_code=True)
    # Cap the context window so visual prefix + prompt + tags always fit.
    cfg.max_position_embeddings = 4096
    print(f"DEBUG: Setting LLM max_position_embeddings to {cfg.max_position_embeddings}")

    quant_cfg = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    model = AutoModelForCausalLM.from_pretrained(
        llm_path,
        config=cfg,
        quantization_config=quant_cfg,
        device_map="auto",
        trust_remote_code=True,
    )

    # Some PEFT versions handle these automatically, but explicit calls are safer.
    if hasattr(model, 'enable_input_require_grads'):
        model.enable_input_require_grads()
    if hasattr(model, 'gradient_checkpointing_enable'):
        model.gradient_checkpointing_enable()
    else:
        print("WARNING: Model does not have gradient_checkpointing_enable method.")

    return prepare_model_for_kbit_training(model), tok

def add_lora_to_models(clip_model, llm_model):
    """Freeze both models, then attach LoRA adapters.

    CLIP: the last three vision-encoder layers are unfrozen and their attention
    projections are wrapped with LoRA. LLM: all attention + MLP projections are
    wrapped with LoRA.

    Args:
        clip_model: a CLIP model exposing `vision_model.encoder.layers`.
        llm_model: a causal LLM (e.g. Qwen1.5) with standard projection names.

    Returns:
        (clip_model, llm_model): possibly PEFT-wrapped versions of the inputs.
    """
    # --- Freeze everything; LoRA / explicitly unfrozen layers re-enable grads ---
    for param in clip_model.parameters():
        param.requires_grad = False
    for param in llm_model.parameters():
        param.requires_grad = False

    clip_model = _apply_clip_lora(clip_model)
    llm_model = _apply_llm_lora(llm_model)
    return clip_model, llm_model


def _find_clip_vision_lora_targets(clip_model, layer_indices):
    """Names of q/k/v/out projection Linear modules in the given vision layers."""
    proj_names = ("q_proj", "k_proj", "v_proj", "out_proj")
    targets = []
    for name, module in clip_model.named_modules():
        if not isinstance(module, torch.nn.Linear):
            continue
        if "vision_model.encoder.layers." not in name:
            continue
        layer_idx = int(name.split("vision_model.encoder.layers.")[1].split(".")[0])
        if layer_idx in layer_indices and any(p in name for p in proj_names):
            targets.append(name)
    return targets


def _apply_clip_lora(clip_model):
    """Unfreeze the last three CLIP vision layers and apply LoRA to their attention."""
    if not (hasattr(clip_model, 'vision_model')
            and hasattr(clip_model.vision_model, 'encoder')
            and hasattr(clip_model.vision_model.encoder, 'layers')):
        print("[WARNING] CLIP model structure does not match expected format.")
        return clip_model

    vision_layers = clip_model.vision_model.encoder.layers
    num_clip_layers = len(vision_layers)
    if num_clip_layers < 3:
        print(f"[WARNING] CLIP vision model has {num_clip_layers} layers (need at least 3).")
        return clip_model

    # Unfreeze the last three layers to strengthen visual feature capture.
    last_three_layers = [num_clip_layers - 3, num_clip_layers - 2, num_clip_layers - 1]
    for layer_idx in last_three_layers:
        for param in vision_layers[layer_idx].parameters():
            param.requires_grad = True
    print(f"[INFO] Unfroze last three layers of CLIP vision encoder (layers {last_three_layers})")

    clip_global_target_modules = _find_clip_vision_lora_targets(clip_model, last_three_layers)
    if not clip_global_target_modules:
        print("[WARNING] No suitable target modules found for CLIP LoRA.")
        return clip_model

    print(f"[INFO] CLIP target modules for LoRA: {clip_global_target_modules}")
    clip_lora_config = LoraConfig(
        r=8,                   # rank: keeps enough capacity for visual detail
        lora_alpha=16,         # scaling factor (alpha / r = 2)
        lora_dropout=0.2,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=clip_global_target_modules,
        # BUGFIX: fan_in_fan_out must be False for torch.nn.Linear targets.
        # True is only correct for transposed (GPT-2 Conv1D-style) weights;
        # PEFT warns and force-overrides True here anyway.
        fan_in_fan_out=False,
    )
    clip_model = get_peft_model(clip_model, clip_lora_config)
    print("--- CLIP Trainable Parameters ---")
    clip_model.print_trainable_parameters()
    return clip_model


def _apply_llm_lora(llm_model):
    """Apply LoRA to the LLM's attention and MLP projections (if present)."""
    qwen_lora_config = LoraConfig(
        r=8,
        lora_alpha=16,
        # Attention + MLP projections to capture fine-grained semantics.
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
        lora_dropout=0.3,
        bias="none",
        task_type="CAUSAL_LM"
    )

    # Suffix match so e.g. "model.layers.0.self_attn.q_proj" hits "q_proj".
    found_modules = [
        name for name, _ in llm_model.named_modules()
        if any(name.endswith(target) for target in qwen_lora_config.target_modules)
    ]

    # Report the trainable-parameter ratio before wrapping.
    print("--- LLM Trainable Parameters (before LoRA) ---")
    total_params = sum(p.numel() for p in llm_model.parameters())
    trainable_params = sum(p.numel() for p in llm_model.parameters() if p.requires_grad)
    print(f"Trainable: {trainable_params} ({trainable_params/total_params*100:.4f}%) | Total: {total_params}")

    if found_modules:
        llm_model = get_peft_model(llm_model, qwen_lora_config)
        print("--- LLM Trainable Parameters (after LoRA) ---")
        llm_model.print_trainable_parameters()
    else:
        print("[WARNING] No suitable target modules found in Qwen1.5.")
    return llm_model



class MultiModalContrastiveLoss(torch.nn.Module):
    """Symmetric image-text contrastive (InfoNCE) loss for CLIP/LLM features."""

    def __init__(self, temperature):
        super().__init__()
        # Softmax temperature: smaller values sharpen the similarity distribution.
        self.temperature = temperature
        self.cross_entropy = torch.nn.CrossEntropyLoss()

    def forward(self, image_features, text_features):
        """Compute the bidirectional contrastive loss.

        Args:
            image_features: [B, D] image embeddings (CLIP side).
            text_features:  [B, D] text embeddings (label side).

        Returns:
            (loss, metrics): scalar loss plus i2t / t2i retrieval accuracies.
        """
        # Project both sides onto the unit sphere so dot products are cosines.
        img = F.normalize(image_features, dim=-1)
        txt = F.normalize(text_features, dim=-1)

        # [B, B] similarity matrix; the diagonal holds the positive pairs.
        sim = torch.matmul(img, txt.t()) / self.temperature
        targets = torch.arange(sim.shape[0], device=sim.device)

        # Average the image->text and text->image directions.
        loss_i2t = self.cross_entropy(sim, targets)
        loss_t2i = self.cross_entropy(sim.t(), targets)
        loss = (loss_i2t + loss_t2i) / 2

        # Retrieval accuracies, for monitoring only (no gradient).
        with torch.no_grad():
            acc_i2t = (sim.argmax(dim=1) == targets).float().mean()
            acc_t2i = (sim.t().argmax(dim=1) == targets).float().mean()

        return loss, {"acc_i2t": acc_i2t, "acc_t2i": acc_t2i}


class MultiModalTaggingModel(torch.nn.Module):
    """CLIP-vision + LLM multimodal tagging model.

    The CLIP vision tower encodes the image; its hidden states are pooled to
    ``target_visual_tokens`` pseudo-tokens, projected into the LLM embedding
    space, and prepended to a fixed Chinese tagging prompt. With ground-truth
    ``tags`` the model returns a weighted sum of the LLM generation loss and an
    image-text contrastive loss; without them it generates comma-separated tags
    and optionally filters them with an external similarity calculator.
    """
    def __init__(self, clip_model, llm_model, processor, tokenizer,similarity_calculator=None, similarity_threshold=0.33,temperature=0.5,contrastive_loss_weight=0.5,
                 contrastive_genloss_weight=0.4):
        """
        Args:
            clip_model: CLIP model supplying ``vision_model``.
            llm_model: causal LLM used for tag generation.
            processor: CLIP processor for raw-image preprocessing.
            tokenizer: tokenizer paired with ``llm_model``.
            similarity_calculator: optional Chinese-CLIP wrapper used to filter
                generated tags at inference time (None disables filtering).
            similarity_threshold: minimum image-tag similarity kept by the filter.
            temperature: softmax temperature of the contrastive loss.
            contrastive_loss_weight: weight of the contrastive term in the total loss.
            contrastive_genloss_weight: within the contrastive term, weight of the
                generated-tag loss versus the ground-truth-tag loss.
        """
        super().__init__()
        self.clip = clip_model
        self.llm = llm_model
        self.processor = processor  # CLIP's processor
        self.tokenizer = tokenizer  # LLM's tokenizer
        self.similarity_threshold = similarity_threshold
        
        # Independent similarity calculator, used only for inference-time tag filtering
        self.similarity_calculator = similarity_calculator
        self.is_preprocessed = False  # NOTE(review): never read inside this class — confirm external use
        
        # Projection of CLIP visual features into the LLM embedding space
        vision_hidden_size = self.clip.vision_model.config.hidden_size
        d_llm = llm_model.config.hidden_size
        self.clip2llm_proj = torch.nn.Sequential(
            torch.nn.Linear(vision_hidden_size, d_llm),
            torch.nn.Dropout(0.2)
        )
        # Xavier-init the projection weight; zero its bias if present
        if isinstance(self.clip2llm_proj[0], torch.nn.Linear):
            torch.nn.init.xavier_normal_(self.clip2llm_proj[0].weight)
            if self.clip2llm_proj[0].bias is not None:
                torch.nn.init.zeros_(self.clip2llm_proj[0].bias)
        for param in self.clip2llm_proj.parameters():
            param.requires_grad = True
        
        # ---------------- Contrastive-learning components ----------------
        # 1. Image projection: CLIP visual feature -> shared 512-d contrastive space
        self.image_contrast_proj = torch.nn.Linear(vision_hidden_size, 512)
        # 2. Text projection: LLM text feature -> shared 512-d contrastive space
        self.text_contrast_proj = torch.nn.Linear(d_llm, 512)
        self.contrastive_loss_fn = MultiModalContrastiveLoss(temperature)  # key change: temperature raised from 0.07 to 0.5
        # 4. Loss-mixing weights
        self.contrastive_loss_weight = contrastive_loss_weight
        self.contrastive_genloss_weight = contrastive_genloss_weight

    def forward(self, images, prompts=None, tags=None, is_training=False, target_visual_tokens=128, tag_weights=None, top_k=None, use_generated_tags=False):
        """Run training (``tags`` given) or inference (``tags is None``).

        Args:
            images: list of pixel tensors, a batched pixel tensor, or raw images
                accepted by the CLIP processor.
            prompts: accepted but currently unused — the prompt below is hard-coded
                (NOTE(review): confirm whether callers expect it to be honored).
            tags: ground-truth tag strings; presence selects the training path.
            is_training: gates generated-tag contrastive loss and tag filtering.
            target_visual_tokens: number of pooled visual tokens fed to the LLM.
            tag_weights: accepted but currently unused (NOTE(review)).
            top_k: at inference, keep at most this many filtered tags per image.
            use_generated_tags: during training, also contrast the image against
                self-generated tags.

        Returns:
            Training: dict with "loss", "gen_loss", "contrastive_loss" plus the
            contrastive accuracy metrics.
            Inference: (tag_strings, filtered_similarities, all_similarities).
        """
        device = next(self.parameters()).device
        total_loss = None
        contrastive_metrics = {}

        # ====================== 1. Image feature extraction ======================
        # Normalize the image input into a pixel_values tensor
        if isinstance(images, list) and all(isinstance(img, torch.Tensor) for img in images):
            pixel_values = torch.cat(images, dim=0).to(device)
        elif isinstance(images, torch.Tensor):
            pixel_values = images.to(device)
        else:
            clip_inputs = self.processor(images=images, return_tensors="pt").to(device)
            pixel_values = clip_inputs["pixel_values"]

        # CLIP vision encoding
        vision_outputs = self.clip.vision_model(pixel_values=pixel_values)
        vision_hidden_states = vision_outputs.last_hidden_state  # [B, N, D_v]

        # Global image feature (mean over patch tokens), used for contrastive learning
        image_global_feat = vision_hidden_states.mean(dim=1)  # [B, D_v]
        # Project then L2-normalize the image contrastive feature onto the unit sphere
        image_contrast_feat = self.image_contrast_proj(image_global_feat)  # [B, 512]
        image_contrast_feat = F.normalize(image_contrast_feat, p=2, dim=1)

        # ====================== 2. LLM input construction ======================
        # Downsample the visual tokens and project them to the LLM embedding size
        if target_visual_tokens == 1:
            visual_features_for_llm = vision_hidden_states[:, 0:1, :]
        else:
            visual_features_for_llm = F.adaptive_avg_pool1d(
                vision_hidden_states.permute(0, 2, 1), target_visual_tokens
            ).permute(0, 2, 1)
        multimodal_embed = self.clip2llm_proj(visual_features_for_llm)  # [B, N, D_llm]
        mm_len = multimodal_embed.size(1)

        # Fixed Chinese tagging prompt, replicated per image (`prompts` arg is ignored)
        prompt_text = [
            "严格根据图像内容提取工装设计相关标签(含办公空间、商业空间、酒店空间、商业餐饮等),"
            "需满足:1.优先按照风格空间场景细化,如('中式卧室双人床','北欧风婚纱摄影店','中式特色风格餐馆'),禁止泛化标签(如'客厅','房间');"
            "2. 其次包含'设计类型和功能定位'(如'科技感展厅,互动体验','轻食餐饮,社区社交空间');"
            "3. 接下来是能直接被搜索的工装元素,如('弧形玻璃隔断','透光软膜天花')"
            "4. 最后按'材质,氛围,场景适配'类别顺序排列,同一类别最多生成一个标签(如材质类别可包含'水泥漆,木饰面'),不适用的类别直接省略;"
            "5. 若图片不是场景提取相关物品标签。"
            "6. 仅用英文逗号分隔,不添加任何额外内容（如解释、符号）。仅生成图像中明确存在的标签（如图像有'圆桌包厢'才生成）,不猜测未出现的内容。"
        ] * len(images)
        text_inputs = self.tokenizer(prompt_text, return_tensors="pt", padding=True, truncation=True).to(device)

        # Concatenate visual embeddings with prompt embeddings; build the attention mask
        llm_embed_layer = self.llm.get_input_embeddings()
        text_embeds = llm_embed_layer(text_inputs.input_ids)  # [B, T, D_llm]
        llm_input_embeds = torch.cat([multimodal_embed, text_embeds], dim=1)  # [B, N+T, D_llm]
        mm_attention_mask = torch.ones(multimodal_embed.size(0), mm_len, dtype=torch.long, device=device)
        final_attention_mask = torch.cat([mm_attention_mask, text_inputs.attention_mask], dim=1)  # [B, N+T]

        # ====================== 3. Loss computation ======================
        if tags is not None:
            # ---------------- 3.1 Generation loss (cross-entropy) ----------------
            prefix_len = multimodal_embed.size(1) + text_embeds.size(1)
            # NOTE(review): max_length below can be <= 0 if prefix_len exceeds the
            # context window — confirm upstream limits on prompt/visual-token size
            tag_tokenized = self.tokenizer(
                [t + self.tokenizer.eos_token for t in tags],
                return_tensors="pt",
                padding="longest",
                truncation=True,
                max_length=self.llm.config.max_position_embeddings - prefix_len
            ).input_ids.to(device)

            # Clip tags that would overflow the LLM context window
            total_len = prefix_len + tag_tokenized.size(1)
            if total_len > self.llm.config.max_position_embeddings:
                tag_tokenized = tag_tokenized[:, :self.llm.config.max_position_embeddings - prefix_len]

            # Build labels: -100 (ignored) over the prefix, real token ids over the tags
            labels = torch.full((len(images), prefix_len + tag_tokenized.size(1)), -100, dtype=torch.long, device=device)
            for i in range(len(images)):
                # Count non-pad tokens; assumes right-padding so tags sit at the front
                tag_len = (tag_tokenized[i] != self.tokenizer.pad_token_id).sum().item()
                labels[i, prefix_len : prefix_len + tag_len] = tag_tokenized[i, :tag_len]
            tag_embeds = llm_embed_layer(tag_tokenized)
            llm_input_embeds = torch.cat([llm_input_embeds, tag_embeds], dim=1)
            tag_attention_mask = (tag_tokenized != self.tokenizer.pad_token_id).long().to(device)
            final_attention_mask = torch.cat([final_attention_mask, tag_attention_mask], dim=1)

            # Language-modeling (generation) loss
            llm_outputs = self.llm(
                inputs_embeds=llm_input_embeds,
                attention_mask=final_attention_mask,
                labels=labels,
                return_dict=True
            )
            gen_loss = llm_outputs.loss.mean()
            total_loss = gen_loss  # initialize the total loss


            # ---------------- 3.2 Contrastive loss (image vs. text features) ----------------
            # Global text feature of the ground-truth tags: masked mean of embeddings
            with torch.no_grad():
                tag_embeds = llm_embed_layer(tag_tokenized)  # [B, L, D_llm]; NOTE(review): duplicates the call above
                tag_mask = (tag_tokenized != self.tokenizer.pad_token_id).unsqueeze(-1).float()  # [B, L, 1]
                # Guard against division by zero (ground-truth tags)
                tag_mask_sum = tag_mask.sum(dim=1)  # [B, 1]
                tag_mask_sum = torch.clamp(tag_mask_sum, min=1.0)  # denominator is at least 1.0
                text_global_feat = (tag_embeds * tag_mask).sum(dim=1) / tag_mask_sum  # [B, D_llm]
            
            # Project then L2-normalize the ground-truth text contrastive feature
            text_contrast_feat = self.text_contrast_proj(text_global_feat)  # [B, 512]
            text_contrast_feat = F.normalize(text_contrast_feat, p=2, dim=1)

            # Contrastive loss against the ground-truth tags
            contrastive_loss_real, contrastive_metrics = self.contrastive_loss_fn(
                image_contrast_feat, text_contrast_feat
            )

            # Optional contrastive loss against self-generated tags
            contrastive_loss_gen = 0.0
            if use_generated_tags and is_training:
                # Generate tags (recursive inference pass through forward())
                generated_tags = self.generate_tags(images, prompts, top_k=5)

    
                # Replace empty generations so no batch row is all-padding
                for i in range(len(generated_tags)):
                    if not generated_tags[i].strip():  # empty generation
                        generated_tags[i] = "默认标签"  # fallback placeholder tag
                # Tokenize the generated tags
                gen_tag_tokenized = self.tokenizer(
                    [t + self.tokenizer.eos_token for t in generated_tags],
                    return_tensors="pt", padding="longest", truncation=True
                ).input_ids.to(device)

                # Masked-mean feature of the generated tags (no gradient)
                with torch.no_grad():
                    gen_tag_embeds = llm_embed_layer(gen_tag_tokenized)
                    gen_tag_mask = (gen_tag_tokenized != self.tokenizer.pad_token_id).unsqueeze(-1).float()
                    # Guard against division by zero (generated tags)
                    gen_tag_mask_sum = gen_tag_mask.sum(dim=1)  # [B, 1]
                    gen_tag_mask_sum = torch.clamp(gen_tag_mask_sum, min=1.0)  # denominator is at least 1.0
                    gen_text_global_feat = (gen_tag_embeds * gen_tag_mask).sum(dim=1) / gen_tag_mask_sum  # [B, D_llm]
                
                # Project then L2-normalize the generated-text contrastive feature
                gen_text_contrast_feat = self.text_contrast_proj(gen_text_global_feat)  # [B, 512]
                gen_text_contrast_feat = F.normalize(gen_text_contrast_feat, p=2, dim=1)
                # Contrastive loss for the generated tags
                contrastive_loss_gen, _ = self.contrastive_loss_fn(
                    image_contrast_feat, gen_text_contrast_feat
                )

            # Blend the real-tag and generated-tag contrastive losses
            if use_generated_tags and is_training:
                contrastive_loss =  (1 - self.contrastive_genloss_weight) * contrastive_loss_real + self.contrastive_genloss_weight * contrastive_loss_gen
            else:
                contrastive_loss = contrastive_loss_real

            # Total loss: weighted sum of generation and contrastive terms
            total_loss = (1 - self.contrastive_loss_weight) * gen_loss + self.contrastive_loss_weight * contrastive_loss

        # ====================== 4. Inference (tag generation) ======================
        else:
            generated_ids = self.llm.generate(
                inputs_embeds=llm_input_embeds,
                attention_mask=final_attention_mask,
                max_new_tokens=30,
                num_beams=4,
                early_stopping=True,
                pad_token_id=self.tokenizer.pad_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.5,
                no_repeat_ngram_size=2
            )

            # Decode the generated ids into per-image tag lists
            generated_tags = []
            for ids in generated_ids:
                # Convert to a list and truncate at the first EOS token, if any
                ids_list = ids.tolist()
                if self.tokenizer.eos_token_id in ids_list:
                    eos_idx = ids_list.index(self.tokenizer.eos_token_id)
                    ids_truncated = ids_list[:eos_idx]
                else:
                    ids_truncated = ids_list
                
                # Decode and split on commas into individual non-empty tags
                tags_text = self.tokenizer.decode(ids_truncated, skip_special_tokens=True)
                tags_list = [tag.strip() for tag in tags_text.split(',') if tag.strip()]
                generated_tags.append(tags_list)

            # Optionally filter tags by image-text similarity
            if self.similarity_calculator is not None and not is_training:
                filtered_tags_all = []  # tags that survived filtering
                filtered_sims_all = []  # similarities of the surviving tags
                all_sims_all = []       # similarities of all raw tags (incl. filtered-out ones)

                for i, tags_list in enumerate(generated_tags):
                    img = images[i] if isinstance(images, list) else images
                    # External filter returns the kept tags, their sims, and all sims
                    filtered_texts, filtered_sims, all_sims = self.similarity_calculator.filter_texts_by_similarity(
                        img, tags_list, threshold=self.similarity_threshold, verbose=False
                    )
                    # Apply the top_k cap
                    if top_k is not None and top_k > 0 and len(filtered_texts) > top_k:
                        filtered_texts = filtered_texts[:top_k]
                        filtered_sims = filtered_sims[:top_k]
                    # Collect per-image results
                    filtered_tags_all.append(",".join(filtered_texts))
                    filtered_sims_all.append(filtered_sims)
                    all_sims_all.append(all_sims)

                # Return: filtered tag strings, their sims, and all raw-tag sims
                return filtered_tags_all, filtered_sims_all, all_sims_all
            else:
                # No filtering: return the raw comma-joined tags
                return [",".join(tags) for tags in generated_tags], [], []

        # Training-path return: losses plus spread-in contrastive accuracy metrics
        return {
            "loss": total_loss,
            "gen_loss": gen_loss if tags is not None else None,
            "contrastive_loss": contrastive_loss if tags is not None else None,** contrastive_metrics
        }


    def generate_tags(self, images, prompts, top_k=5):
        """Generate filtered tag strings for ``images`` via the inference path.

        Temporarily disables the LLM's KV cache, delegates to ``forward`` with
        ``tags=None``, then restores the original ``use_cache`` setting.

        Returns:
            List of comma-joined tag strings, one per image.
        """
        original_use_cache = self.llm.config.use_cache
        self.llm.config.use_cache = False  # temporarily disable the KV cache
        with torch.no_grad():
            # forward() in inference mode returns a (tags, sims, all_sims) triple
            generated_tags, _, _ = self.forward(images, prompts, tags=None, is_training=False, top_k=top_k)
        self.llm.config.use_cache = original_use_cache
        return generated_tags  # only the tag strings



# 5. Build the overall model
def build_multimodal_model(config):
    """Assemble the full multimodal tagging model from a config object.

    Expects `config` to carry: clip_path, baichuan_path, use_lora,
    similarity_threshold, temperature, contrastive_loss_weight,
    contrastive_genloss_weight, and optionally independent_clip_path.
    """
    print("[INFO] Loading CLIP + LLM...")
    clip_model, processor = build_clip_model(config.clip_path)
    llm_model, tokenizer = build_llm_model(config.baichuan_path)

    if config.use_lora:
        clip_model, llm_model = add_lora_to_models(clip_model, llm_model)

    # Stand-alone Chinese-CLIP used only for post-hoc tag filtering (optional).
    similarity_calculator = None
    if getattr(config, 'independent_clip_path', None):
        print("[INFO] Loading independent Chinese-CLIP for similarity filtering...")
        similarity_calculator = ChineseCLIPSimilarityCalculator(
            clip_model_path=config.independent_clip_path,
            device="cuda" if torch.cuda.is_available() else "cpu",
        )
    else:
        print("[WARNING] No independent CLIP path provided. Similarity filtering will be disabled.")

    return MultiModalTaggingModel(
        clip_model, llm_model, processor, tokenizer,
        similarity_calculator=similarity_calculator,
        similarity_threshold=config.similarity_threshold,
        temperature=config.temperature,
        contrastive_loss_weight=config.contrastive_loss_weight,
        contrastive_genloss_weight=config.contrastive_genloss_weight,
    )
