import torch
import torch.nn as nn
import deepspeed
from transformers import Trainer
from transformers.trainer_pt_utils import nested_detach
from transformers.utils import is_sagemaker_mp_enabled
from transformers.trainer import *
from transformers.integrations import is_deepspeed_zero3_enabled
import time
import re


class Trainer(Trainer):
    def __init__(self, *args, **kwargs):
        """Trainer subclass with a generation-based evaluation path.

        Generalized: accepts an optional ``eval_data_collator`` keyword
        argument so an evaluation-specific data collator can be injected at
        construction time. Backward compatible — it defaults to ``None``,
        matching the previous behavior of always initializing the attribute
        to ``None`` after ``super().__init__``.
        """
        # Pop before delegating: the base transformers Trainer rejects
        # unknown keyword arguments.
        self.eval_data_collator = kwargs.pop("eval_data_collator", None)
        super().__init__(*args, **kwargs)
    
    def compute_loss(self, model, inputs, return_outputs=False):
        """Compute the training loss for one batch.

        Pops ``labels`` out of ``inputs`` (mutating the batch dict), runs the
        forward pass with the whole remaining batch passed as ``data=``, and:
        - with labels: token-level cross-entropy over flattened logits;
        - without labels: falls back to the loss returned by the model.

        Args:
            model: The (possibly wrapped) model handed in by the Trainer loop.
                NOTE(review): the forward passes below use ``self.model``
                instead of this argument — confirm this is intentional under
                DDP/DeepSpeed wrapping.
            inputs: Batch dict; consumed as a whole via the ``data=`` keyword
                (model-specific calling convention).
            return_outputs: When True, also return the raw model outputs.

        Returns:
            The scalar loss, or ``(loss, outputs)`` if ``return_outputs``.

        Raises:
            ValueError: If no labels were given and the model's dict output
                contains no ``"loss"`` key.
        """
        if "labels" in inputs:
            labels = inputs.pop("labels")
        else:
            labels = None
        
        if not self.args.use_lora:
            outputs = self.model(data = inputs, use_cache=False)
        else:
            # LoRA path: route through the PEFT base model with the PEFT
            # forward hooks enabled so adapter-related kwargs are honored.
            with self.model._enable_peft_forward_hooks(**inputs):
                outputs = self.model.base_model(data = inputs, use_cache=False)
                
        if labels is not None:
            # Flatten the tokens
            # NOTE(review): no label shifting is done here — presumably the
            # model/collator already aligns logits and labels; confirm.
            loss_fct = nn.CrossEntropyLoss()
            logits = outputs.logits.view(-1,
                                         self.model.config.vocab_size).contiguous()
            labels = labels.view(-1).long().contiguous()
            # Enable model parallelism
            labels = labels.to(logits.device)
            loss = loss_fct(logits, labels)
        else:
            if isinstance(outputs, dict) and "loss" not in outputs:
                raise ValueError(
                    "The model did not return a loss from the inputs, only the following keys: "
                    f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
                )
            # We don't use .loss here since the model may return tuples instead of ModelOutput.
            loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]

        return (loss, outputs) if return_outputs else loss

    # 🔧 New: simplified, generation-based evaluation prediction step
    def _simple_prediction_step(self, model, inputs):
        """Run generation-based evaluation for one batch via ``model.chat()``.

        For each sample in ``inputs['msgs']`` the model generates a response,
        which is compared against ``inputs['ground_truths']`` with
        ``_is_correct_prediction``.

        Bug fixes vs. the previous version:
        - Correctness is accumulated once in the main loop. The old
          batch-statistics pass re-derived the sample index via
          ``len([p for p in predictions if p == pred])``, which is not an
          index at all and could raise ``IndexError`` or score the wrong
          choice list.
        - ``torch.tensor`` cannot hold Python strings, so the old
          ``torch.tensor([predictions])`` return crashed. We now return
          numeric per-sample correctness flags instead.

        Returns:
            Tuple ``(loss, logits, labels)`` where loss is ``1 - accuracy``,
            logits is a ``(1, batch)`` int tensor of correctness flags, and
            labels is a matching tensor of ones. On total failure, zero
            placeholders are returned.
        """
        model.eval()

        predictions = []
        labels = []
        correctness = []  # per-sample bool, filled in the main loop

        batch_size = len(inputs['msgs']) if 'msgs' in inputs else 0
        print(f"\n🔍 开始评估批次: {batch_size} 个样本")

        try:
            with torch.no_grad():
                for i, msgs in enumerate(inputs['msgs']):
                    try:
                        # Per-sample metadata (best-effort: fields may be
                        # shorter than the msgs list).
                        image_id = inputs['image_ids'][i] if i < len(inputs['image_ids']) else f"sample_{i}"
                        question = inputs['questions'][i] if i < len(inputs['questions']) else ""
                        task_type = inputs['task_types'][i] if i < len(inputs['task_types']) else "unknown"

                        print(f"📝 样本 {i+1}/{batch_size}:")
                        print(f"   图片: {image_id}")
                        print(f"   任务: {task_type}")
                        print(f"   问题: {question[:100]}{'...' if len(question) > 100 else ''}")

                        # Time the inference call.
                        start_time = time.time()

                        # Inference mirrors the standalone test script:
                        # greedy decoding, capped generation length.
                        response = model.chat(
                            image=None,
                            msgs=msgs,
                            tokenizer=self.tokenizer,
                            sampling=False,
                            max_new_tokens=256
                        )

                        inference_time = time.time() - start_time

                        pred_text = response.strip()
                        predictions.append(pred_text)
                        labels.append(inputs['ground_truths'][i])

                        # Show the prediction vs. the reference answer.
                        print(f"   预测: {pred_text[:100]}{'...' if len(pred_text) > 100 else ''}")
                        print(f"   标答: {inputs['ground_truths'][i][:100]}{'...' if len(inputs['ground_truths'][i]) > 100 else ''}")
                        print(f"   耗时: {inference_time:.2f}s")

                        # Score the sample once, here — this is the single
                        # source of truth for the batch statistics below.
                        is_correct = self._is_correct_prediction(
                            pred_text,
                            inputs['ground_truths'][i],
                            task_type,
                            inputs['answer_choices'][i] if i < len(inputs['answer_choices']) else []
                        )
                        correctness.append(is_correct)
                        print(f"   ✅ 正确" if is_correct else f"   ❌ 错误")
                        print()

                    except Exception as e:
                        # Keep the lists aligned: a failed sample counts as
                        # an empty (incorrect) prediction.
                        print(f"❌ 推理失败 样本 {i+1}: {e}")
                        predictions.append("")
                        labels.append(inputs['ground_truths'][i] if i < len(inputs['ground_truths']) else "")
                        correctness.append(False)

        except Exception as e:
            print(f"❌ 评估批次失败: {e}")
            # Return placeholder data so the evaluation loop keeps running.
            return (
                torch.tensor(0.0),  # loss
                torch.tensor([[0]]),  # logits
                torch.tensor([[0]])   # labels
            )

        # Batch statistics from the per-sample results above.
        correct = sum(correctness)
        total = len(predictions)
        accuracy = correct / total if total > 0 else 0.0
        print(f"📊 批次评估完成:")
        print(f"   正确: {correct}/{total}")
        print(f"   准确率: {accuracy:.3f} ({accuracy*100:.1f}%)")
        print(f"   任务类型: {inputs.get('task_types', ['unknown'])[0]}")
        print("-" * 50)

        # (loss, logits, labels): strings cannot go into tensors, so encode
        # per-sample correctness as ints instead.
        flags = [int(flag) for flag in correctness]
        return (
            torch.tensor(1.0 - accuracy),   # 1 - accuracy as the "loss"
            torch.tensor([flags]),          # per-sample correctness flags
            torch.tensor([[1] * len(flags)])  # all-correct reference
        )

    # 🔧 New: correctness check for a single prediction
    def _is_correct_prediction(self, prediction, ground_truth, task_type, choices=None):
        """Decide whether *prediction* matches *ground_truth*.

        VQA samples that come with answer choices are scored as
        multiple-choice questions; every other task (e.g. captioning) uses a
        loose mutual-containment check on the lowercased strings.
        """
        if not prediction or not ground_truth:
            return False

        if task_type == "vqa" and choices:
            return self._match_mcq_answer(ground_truth, prediction, choices)

        # Caption-style task: either string containing the other counts.
        pred = prediction.lower().strip()
        truth = ground_truth.lower().strip()
        return truth in pred or pred in truth

    # 🔧 New: multiple-choice answer matching
    def _match_mcq_answer(self, ground_truth, prediction, choices):
        """Match a multiple-choice answer (mirrors the valid_evaluator logic).

        Multi-letter ground truths (e.g. ``"AB"``) are accepted when the
        predicted letter set covers all ground-truth letters. Single answers
        match by substring, by standalone A-D letter, or by the text content
        of the matching choice.
        """
        if not ground_truth or not prediction:
            return False

        gt = ground_truth.strip().upper()
        pred = prediction.strip().upper()

        # Multi-select question: gt is something like "AC".
        if len(gt) > 1 and set(gt) <= set('ABCD'):
            predicted_letters = set(re.findall(r'\b[A-D]\b', pred))
            # Exact match and superset both count as correct.
            return set(gt) <= predicted_letters

        # Single-choice question: direct substring hit.
        if gt in pred:
            return True
        # Standalone option letter appearing in the prediction.
        if gt in re.findall(r'\b[A-D]\b', pred):
            return True

        # Fall back to matching the text of the correct choice.
        for idx, option in enumerate(choices or []):
            letter = chr(65 + idx)  # A, B, C, D
            if letter != gt:
                continue
            text = option.strip()
            if text.startswith(f"({letter})"):
                text = text[3:].strip()
            if text.lower() in pred.lower():
                return True
        return False

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        🔧 Modified: prediction step with support for the simplified
        (generation-based) evaluation path.

        Batches carrying a ``msgs`` field are routed to
        ``_simple_prediction_step`` (which drives ``model.chat``); all other
        batches go through the standard logic below, which mirrors the
        upstream HF Trainer implementation (SageMaker MP branch included).

        Returns a ``(loss, logits, labels)`` tuple, any element of which may
        be ``None``.
        """
        
        # 🔧 Simplified-evaluation path: triggered by a 'msgs' field in the batch.
        # NOTE(review): __init__ always sets self.eval_data_collator, so this
        # hasattr() check is effectively always True — confirm it is meant as
        # a feature toggle.
        if 'msgs' in inputs and hasattr(self, 'eval_data_collator'):
            print(f"🎯 使用简化评估模式")
            return self._simple_prediction_step(model, inputs)
        
        # 🔧 Otherwise use the original prediction step
        # (training-format data or other batch layouts).
        print(f"📚 使用标准预测模式")
        
        has_labels = (
            False
            if len(self.label_names) == 0
            else all(inputs.get(k) is not None for k in self.label_names)
        )
        # For CLIP-like models capable of returning loss values.
        # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`
        # is `True` in `model.forward`.
        return_loss = inputs.get("return_loss", None)
        if return_loss is None:
            return_loss = self.can_return_loss
        loss_without_labels = (
            True if len(self.label_names) == 0 and return_loss else False
        )

        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            # Default to the output keys the model config marks as ignorable.
            if hasattr(self.model, "config"):
                ignore_keys = getattr(
                    self.model.config, "keys_to_ignore_at_inference", []
                )
            else:
                ignore_keys = []

        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels or loss_without_labels:
            labels = nested_detach(tuple(inputs.get(name)
                                   for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None

        with torch.no_grad():
            if is_sagemaker_mp_enabled():
                # SageMaker model-parallel path: forward-only, then reduce
                # the micro-batch loss and concatenate logits.
                raw_outputs = smp_forward_only(model, inputs)
                if has_labels or loss_without_labels:
                    if isinstance(raw_outputs, dict):
                        loss_mb = raw_outputs["loss"]
                        logits_mb = tuple(
                            v
                            for k, v in raw_outputs.items()
                            if k not in ignore_keys + ["loss"]
                        )
                    else:
                        loss_mb = raw_outputs[0]
                        logits_mb = raw_outputs[1:]

                    loss = loss_mb.reduce_mean().detach().cpu()
                    logits = smp_nested_concat(logits_mb)
                else:
                    loss = None
                    if isinstance(raw_outputs, dict):
                        logits_mb = tuple(
                            v for k, v in raw_outputs.items() if k not in ignore_keys
                        )
                    else:
                        logits_mb = raw_outputs
                    logits = smp_nested_concat(logits_mb)
            else:
                if has_labels or loss_without_labels:
                    # Standard path with labels: reuse compute_loss so the
                    # custom loss logic above applies during evaluation too.
                    with self.compute_loss_context_manager():
                        loss, outputs = self.compute_loss(
                            model, inputs, return_outputs=True
                        )
                    loss = loss.mean().detach()

                    if isinstance(outputs, dict):
                        logits = tuple(
                            v
                            for k, v in outputs.items()
                            if k not in ignore_keys + ["loss"]
                        )
                    else:
                        logits = outputs[1:]
                else:
                    loss = None
                    with self.compute_loss_context_manager():
                        outputs = model(**inputs)
                    if isinstance(outputs, dict):
                        logits = tuple(
                            v for k, v in outputs.items() if k not in ignore_keys
                        )
                    else:
                        logits = outputs
                    # TODO: this needs to be fixed and made cleaner later.
                    if self.args.past_index >= 0:
                        self._past = outputs[self.args.past_index - 1]

        if prediction_loss_only:
            return (loss, None, None)

        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]

        return (loss, logits, labels)

    # 🔧 New: override the evaluation loop to show overall progress
    def evaluation_loop(
        self,
        dataloader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ):
        """Print a progress banner, then delegate to the stock loop."""
        banner = (
            f"\n🚀 开始 {description} 评估",
            f"📊 数据集大小: {len(dataloader.dataset)} 样本",
            f"📦 批次数量: {len(dataloader)} 批次",
            f"🎯 批次大小: {dataloader.batch_size}",
            "=" * 60,
        )
        for line in banner:
            print(line)

        # Hand off to the unmodified base-class evaluation loop.
        return super().evaluation_loop(
            dataloader=dataloader,
            description=description,
            prediction_loss_only=prediction_loss_only,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
        
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to train.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.

        Return:
            `torch.Tensor`: The tensor with training loss on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if is_sagemaker_mp_enabled():
            # SageMaker model parallelism handles forward+backward itself.
            loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
            return loss_mb.reduce_mean().detach().to(self.args.device)

        with self.compute_loss_context_manager():
            loss = self.compute_loss(model, inputs)

        # Drop the batch and flush the CUDA caching allocator every step —
        # trades allocator churn for lower peak memory (presumably needed for
        # large multimodal batches; confirm the throughput cost is acceptable).
        del inputs
        torch.cuda.empty_cache()

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        if self.use_apex:
            # Legacy apex AMP path: scale the loss before backward.
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            self.accelerator.backward(loss)

        # Normalize so accumulated steps average to the true batch loss.
        return loss.detach() / self.args.gradient_accumulation_steps
    
    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        """Save the model, tokenizer, and training args to *output_dir*.

        Mirrors the upstream HF Trainer save logic: `save_pretrained()` when
        the (unwrapped) model supports it, otherwise a raw state-dict dump in
        safetensors or torch format depending on `args.save_safetensors`.
        """
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")

        # PeftModel counts as saveable only when peft is installed.
        supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, supported_classes):
            if state_dict is None:
                state_dict = self.model.state_dict()

            # The wrapper may hide a supported model (e.g. DDP) — unwrap and
            # try `save_pretrained` on the inner model first.
            if isinstance(unwrap_model(self.model), supported_classes):
                unwrap_model(self.model).save_pretrained(
                    output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if self.args.save_safetensors:
                    safetensors.torch.save_file(
                        state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}
                    )
                else:
                    torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            
            self.model.save_pretrained(
                output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
            )

        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))