#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import random
import torch

from .base_checkpointer import BaseCheckpointerPlugin


class EarlyStoppingReason:
    """Integer codes describing why early stopping was (or was not) triggered."""

    # NOT_STOPPED is the initial/default state; the rest name the trigger.
    (
        NOT_STOPPED,
        STOPPING_THRESHOLD,
        DIVERGENCE_THRESHOLD,
        PATIENCE_EXHAUSTED,
        NON_FINITE_METRIC,
    ) = range(5)


class EarlyStoppingCheckpointerPlugin(BaseCheckpointerPlugin):
    """Checkpointer plugin implementing early stopping on a monitored loss metric.

    Tracks the best value of the monitored metric and decides both when a
    checkpoint should be saved (on improvement, or just before stopping) and
    when training should stop entirely (patience exhausted, stopping/divergence
    threshold crossed, or the metric became non-finite). Optionally prints a
    generated text sample each time a checkpoint is saved.
    """

    # Comparison op per mode: "min" -> lower is better, "max" -> higher is better.
    mode_dict = {"min": torch.lt, "max": torch.gt}
    # Comparison symbol used when formatting stop-reason messages.
    order_dict = {"min": "<", "max": ">"}

    def __init__(self, config):
        """Initialize the plugin from a flat configuration dictionary.

        Args:
            config (dict): Configuration dictionary. Recognized keys are all
                prefixed with ``early_stopping_``: monitor, min_delta,
                patience, verbose, mode, strict, check_finite,
                stopping_threshold, divergence_threshold,
                sample_printing_enabled, sample_type, sample_text,
                print_frequency.

        Raises:
            ValueError: If ``early_stopping_mode`` is neither "min" nor "max".
        """
        super().__init__(config)
        self.monitor = config.get('early_stopping_monitor', 'loss')  # metric to watch ('loss' or 'avg_loss')
        self.min_delta = config.get('early_stopping_min_delta', 1e-4)  # minimum change counting as improvement
        self.patience = config.get('early_stopping_patience', 10)  # checks without improvement before stopping
        self.verbose = config.get('early_stopping_verbose', False)  # print improvement / stop messages
        self.mode = config.get('early_stopping_mode', 'min')  # 'min' or 'max'
        self.strict = config.get('early_stopping_strict', True)  # NOTE(review): read but never enforced here
        self.check_finite = config.get('early_stopping_check_finite', True)  # stop on NaN/inf metric
        self.stopping_threshold = config.get('early_stopping_stopping_threshold', None)  # stop once metric is this good
        self.divergence_threshold = config.get('early_stopping_divergence_threshold', None)  # stop once metric is this bad

        # Sample-printing configuration.
        self.sample_printing_enabled = config.get('early_stopping_sample_printing_enabled', False)
        self.sample_type = config.get('early_stopping_sample_type', 'fixed')  # 'fixed' or 'random'
        self.sample_text = config.get('early_stopping_sample_text', "今天天气怎么样？")  # fixed sample prompt
        self.print_frequency = config.get('early_stopping_print_frequency', 1)  # print a sample every N saves
        self.save_count = 0  # number of checkpoints saved so far

        # Internal early-stopping state.
        self.wait_count = 0  # consecutive checks without improvement
        self.stopped_epoch = 0  # epoch at which stopping was triggered
        self.stopping_reason = EarlyStoppingReason.NOT_STOPPED  # why we stopped (if we did)
        self.stopping_reason_message = None  # human-readable stop message

        # Validate the mode before it is used below.
        if self.mode not in self.mode_dict:
            raise ValueError(f"`mode` can be {', '.join(self.mode_dict.keys())}, got {self.mode}")

        # Fold the mode into min_delta's sign so the improvement test is a
        # single comparison in both modes.
        self.min_delta *= 1 if self.monitor_op == torch.gt else -1
        torch_inf = torch.tensor(torch.inf)
        self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf

    @property
    def monitor_op(self):
        """Return the torch comparison op (`torch.lt` or `torch.gt`) for the mode."""
        return self.mode_dict[self.mode]

    def should_save_checkpoint(self, current_loss, avg_loss, epoch, step):
        """Decide whether a checkpoint should be saved at this step.

        A checkpoint is saved when the monitored metric improves by at least
        ``min_delta``, or when patience has just been exhausted (so the last
        state before stopping is preserved).

        Args:
            current_loss (float): Loss of the current step.
            avg_loss (float): Running average loss.
            epoch (int): Current epoch index.
            step (int): Current step index (unused; kept for interface parity).

        Returns:
            bool: True if a checkpoint should be saved.
        """
        # Pick the value to monitor according to configuration.
        if self.monitor == 'avg_loss':
            current_metric = avg_loss
        else:
            current_metric = current_loss

        # Threshold / finiteness based stop conditions.
        should_stop, reason = self._evaluate_stopping_criteria(current_metric)
        if should_stop:
            self.stopped_epoch = epoch
            self.stopping_reason_message = reason
            if self.verbose and reason:
                print(reason)

        # Improvement check against the best score seen so far.
        current_tensor = torch.tensor(current_metric)
        if self.monitor_op(current_tensor - self.min_delta, self.best_score.to(current_tensor.device)):
            # Improved: report (fix: `verbose`/`_improvement_message` were
            # previously dead code), record the new best, reset patience.
            if self.verbose:
                print(self._improvement_message(current_tensor))
            self.best_score = current_tensor
            self.wait_count = 0
            return True

        # No improvement: count towards patience.
        self.wait_count += 1
        if self.wait_count >= self.patience:
            # Fix: record patience exhaustion as the stopping reason (the
            # original never assigned EarlyStoppingReason.PATIENCE_EXHAUSTED,
            # leaving stopping_reason/stopped_epoch/message at defaults).
            # A threshold/non-finite reason recorded above takes precedence.
            if self.stopping_reason == EarlyStoppingReason.NOT_STOPPED:
                self.stopping_reason = EarlyStoppingReason.PATIENCE_EXHAUSTED
                self.stopped_epoch = epoch
                self.stopping_reason_message = (
                    f"Monitored metric {self.monitor} did not improve in the last"
                    f" {self.wait_count} checks. Best score: {self.best_score:.3f}."
                    " Signaling Trainer to stop."
                )
                if self.verbose:
                    print(self.stopping_reason_message)
            # Save one last checkpoint before training stops.
            return True
        return False

    def should_early_stop(self):
        """Return True when training should stop.

        Fix: the original only checked patience, so stops signalled by
        `_evaluate_stopping_criteria` (thresholds, non-finite metric) were
        recorded but never acted upon.

        Returns:
            bool: True if early stopping should be triggered.
        """
        return (
            self.wait_count >= self.patience
            or self.stopping_reason != EarlyStoppingReason.NOT_STOPPED
        )

    def _evaluate_stopping_criteria(self, current):
        """Evaluate the non-patience stopping conditions for the current value.

        Checks, in order: non-finite metric, stopping threshold (metric is
        good enough to stop), divergence threshold (metric got so bad training
        should be aborted). Sets ``self.stopping_reason`` as a side effect.

        Args:
            current (float): Current value of the monitored metric.

        Returns:
            tuple: (should_stop (bool), reason message (str or None))
        """
        should_stop = False
        reason = None
        current_tensor = torch.tensor(current)

        # Non-finite metric (NaN/inf) — training has gone off the rails.
        if self.check_finite and not torch.isfinite(current_tensor):
            should_stop = True
            self.stopping_reason = EarlyStoppingReason.NON_FINITE_METRIC
            reason = (
                f"Monitored metric {self.monitor} = {current} is not finite."
                f" Previous best value was {self.best_score:.3f}. Signaling Trainer to stop."
            )
        # Metric reached the "good enough" stopping threshold.
        elif self.stopping_threshold is not None and self.monitor_op(current_tensor, self.stopping_threshold):
            should_stop = True
            self.stopping_reason = EarlyStoppingReason.STOPPING_THRESHOLD
            reason = (
                "Stopping threshold reached:"
                f" {self.monitor} = {current} {self.order_dict[self.mode]} {self.stopping_threshold}."
                " Signaling Trainer to stop."
            )
        # Metric diverged past the abort threshold (note the negated operands
        # flip the comparison direction).
        elif self.divergence_threshold is not None and self.monitor_op(-current_tensor, -self.divergence_threshold):
            should_stop = True
            self.stopping_reason = EarlyStoppingReason.DIVERGENCE_THRESHOLD
            reason = (
                "Divergence threshold reached:"
                f" {self.monitor} = {current} {self.order_dict[self.mode]} {self.divergence_threshold}."
                " Signaling Trainer to stop."
            )

        return should_stop, reason

    def _improvement_message(self, current):
        """Format a message describing an improvement of the monitored metric.

        Args:
            current (torch.Tensor): Current (improved) metric value.

        Returns:
            str: Human-readable improvement message.
        """
        if torch.isfinite(self.best_score):
            msg = (
                f"Metric {self.monitor} improved by {abs(self.best_score - current):.3f} >="
                f" min_delta = {abs(self.min_delta)}. New best score: {current:.3f}"
            )
        else:
            # First finite observation: best_score is still +/-inf.
            msg = f"Metric {self.monitor} improved. New best score: {current:.3f}"
        return msg

    def reset(self):
        """Reset all early-stopping state back to its initial values."""
        super().reset()
        self.wait_count = 0
        torch_inf = torch.tensor(torch.inf)
        self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
        self.stopped_epoch = 0
        self.stopping_reason = EarlyStoppingReason.NOT_STOPPED
        self.stopping_reason_message = None

    def print_sample(self, model, tokenizer, device):
        """Generate and print a sample from the model (best effort).

        Runs the model in eval mode on either a fixed or randomly chosen
        prompt, prints the generated text, and always restores train mode.
        Generation errors are logged, never raised.

        Args:
            model: Model instance exposing a HuggingFace-style ``generate``.
            tokenizer: Tokenizer with ``encode``/``decode`` and token-id attrs.
            device: Device to place the input tensor on.
        """
        if not self.sample_printing_enabled:
            return

        self.save_count += 1
        # Fix: a configured print_frequency of 0 previously raised
        # ZeroDivisionError; treat non-positive frequency as "never print".
        if self.print_frequency <= 0 or self.save_count % self.print_frequency != 0:
            return

        # Switch to eval mode for deterministic-ish generation behavior.
        model.eval()

        try:
            # Choose the prompt text.
            if self.sample_type == 'random':
                # Random prompt from a small fixed pool (illustrative only).
                sample_texts = [
                    "人工智能的未来发展如何？",
                    "今天天气怎么样？",
                    "如何学习深度学习？",
                    "自然语言处理的应用有哪些？",
                    "机器学习和深度学习有什么区别？"
                ]
                input_text = random.choice(sample_texts)
            else:
                # Fixed prompt from configuration.
                input_text = self.sample_text

            # Encode the prompt and move it to the target device.
            input_ids = tokenizer.encode(input_text, return_tensors='pt').to(device)

            # Sample a continuation; cap total length at 128 tokens.
            with torch.no_grad():
                output = model.generate(
                    input_ids,
                    max_length=min(len(input_ids[0]) + 50, 128),
                    num_beams=1,
                    do_sample=True,
                    temperature=0.7,
                    pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
                    eos_token_id=tokenizer.eos_token_id
                )

            # Decode the full sequence (prompt + continuation).
            generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

            # Print the result.
            print("=" * 50)
            print(f"早停检查点 #{self.save_count} - 样本生成结果")
            print(f"输入: {input_text}")
            print(f"输出: {generated_text}")
            print("=" * 50)

        except Exception as e:
            # Best-effort feature: never let sample printing kill training.
            print(f"[WARNING] 打印样本时出错: {e}")

        finally:
            # Always restore training mode.
            model.train()