import os
import time
from datetime import timedelta
from modelscope.trainers import build_trainer
from modelscope.msdatasets import MsDataset
from modelscope.utils.constant import DownloadMode
from modelscope.trainers.hooks import Hook, Priority
from modelscope.utils.logger import get_logger

logger = get_logger()

import os
# os.environ['PYTORCH_CUDA_ALLOC_CONF'] = "expandable_segments:True,max_split_size_mb:2048"


class EnhancedTrainingHook(Hook):
    """Training-progress monitoring hook for ModelScope trainers.

    Logs loss / learning rate / timing every ``interval`` training
    iterations and prints per-epoch and end-of-run summaries.
    Registered with VERY_HIGH priority so it executes before the
    trainer's built-in hooks.
    """
    priority = Priority.VERY_HIGH  # ensure this hook runs before default hooks

    # Sliding-window size (iterations) for the moving-average timing/ETA.
    _WINDOW = 10

    def __init__(self, interval=10):
        """
        Args:
            interval (int): emit a progress log line every ``interval``
                training iterations (and on the last iteration of an epoch).
        """
        super().__init__()
        self.interval = interval
        self.start_time = None        # wall-clock start of the whole run
        self.epoch_start_time = None  # wall-clock start of the current epoch
        self.last_iter_time = None    # timestamp of the previous iteration end
        self.iter_times = []          # recent per-iteration durations (bounded)

    def before_run(self, trainer):
        """Called once before training starts; record the run start time."""
        self.start_time = time.time()
        logger.info("\n🚀 训练开始!")
        logger.info(f"📊 总Epochs: {trainer._max_epochs}")
        logger.info("=" * 60)

    def before_train_epoch(self, trainer):
        """Called at the start of each epoch; reset the epoch timers."""
        self.epoch_start_time = time.time()
        self.last_iter_time = time.time()
        logger.info(f"\n🏁 Epoch {trainer._epoch + 1}/{trainer._max_epochs} 开始")

    def after_train_iter(self, trainer):
        """Called after each training iteration; log progress at the interval."""
        try:
            # 1. Basic progress counters (trainer attributes are 0-based).
            current_epoch = getattr(trainer, 'epoch', 0) + 1
            current_iter = getattr(trainer, '_iter', 0) + 1
            total_iters = len(trainer.data_loader)

            # 2. Timing.  BUG FIX: advance the reference timestamp on EVERY
            # iteration — previously it was only updated on logged
            # iterations, inflating iter_time/ETA by up to `interval`
            # iterations' worth of time.
            now = time.time()
            iter_time = (now - self.last_iter_time) if self.last_iter_time else 0.0
            self.last_iter_time = now

            # Keep only a bounded window of recent durations so the list
            # does not grow without limit over a long run.
            self.iter_times.append(iter_time)
            if len(self.iter_times) > self._WINDOW:
                del self.iter_times[:-self._WINDOW]
            avg_iter_time = sum(self.iter_times) / len(self.iter_times)
            # ETA covers the remainder of the CURRENT epoch only.
            eta_seconds = avg_iter_time * (total_iters - current_iter)

            # 3. Learning rate (best-effort; NaN when unavailable).
            lr = self._get_learning_rate(trainer)

            # 4. Loss from the trainer's last forward pass.
            loss = trainer.train_outputs['loss'].item()

            # 5. Log at the configured interval, and on the epoch's last step.
            if current_iter % self.interval == 0 or current_iter == total_iters:
                logger.info(
                    f"epoch [{current_epoch}][{current_iter}/{total_iters}]\t"
                    f"lr: {lr:.3e}, "
                    f"loss: {loss:.4f}, "
                    f"iter_time: {iter_time:.3f}s, "
                    f"eta: {str(timedelta(seconds=int(eta_seconds)))}, "
                    f"avg_iter_time: {avg_iter_time:.3f}s"
                )
        except Exception as e:
            # Best-effort monitoring: never let logging break training.
            logger.warning(f"记录训练信息时出错: {str(e)}")

    def _get_learning_rate(self, trainer):
        """Return the current learning rate, or float('nan') if unknown.

        BUG FIX: the original implicitly returned None when none of the
        lookup branches matched (the bare ``except`` only covered raising
        paths), which crashed the ``{lr:.3e}`` format upstream.  Now all
        fall-through paths return NaN, and the except clause is narrowed.
        """
        try:
            if hasattr(trainer, 'optimizer') and trainer.optimizer is not None:
                for param_group in trainer.optimizer.param_groups:
                    return param_group['lr']
            if hasattr(trainer, 'current_lr'):
                return trainer.current_lr()
            if hasattr(trainer, 'get_lr'):
                return trainer.get_lr()[0]
        except Exception:
            pass
        return float('nan')

    def after_train_epoch(self, trainer):
        """Called at the end of each epoch; log duration and eval metrics."""
        if not hasattr(trainer, 'metric_values'):
            return

        # Guard against being registered mid-epoch (no start timestamp yet).
        epoch_start = self.epoch_start_time if self.epoch_start_time else time.time()
        epoch_time = time.time() - epoch_start

        logger.info("\n" + "=" * 60)
        logger.info(f"✅ Epoch {trainer._epoch + 1} 完成!")
        logger.info(f"⏱️ 耗时: {str(timedelta(seconds=int(epoch_time)))}")

        # Print evaluation metrics, if the trainer exposes them.
        if hasattr(trainer, 'evaluate_results'):
            logger.info("📊 评估指标:")
            for metric, value in trainer.evaluate_results.items():
                logger.info(f"  - {metric}: {value:.4f}")
        logger.info("=" * 60)

    def after_run(self, trainer):
        """Called once after training ends; log the total wall-clock time."""
        total_time = time.time() - self.start_time
        logger.info("\n🎉 训练完成!")
        logger.info(f"总训练时间: {str(timedelta(seconds=int(total_time)))}")
        logger.info("=" * 60)


def train_model():
    """Fine-tune the siamese-uie model on local CSV datasets.

    Loads train/dev splits from local CSV files, builds the ModelScope
    trainer with explicit dataloader settings, registers the monitoring
    hook, trains, then evaluates every saved epoch checkpoint.
    """
    model_id = 'damo/nlp_structbert_siamese-uninlu_chinese-base'
    WORK_DIR = 'models'
    # Single source of truth for the epoch count: used both for training
    # and for the post-training checkpoint-evaluation loop below.
    max_epochs = 3

    # Load train/validation sets from local CSV files via the 'csv' builder
    # (instead of downloading a hub dataset).
    train_dataset = MsDataset.load(
        'csv',
        data_files={'train': 'datasets/train.csv'},
        split='train',
    )
    eval_dataset = MsDataset.load(
        'csv',
        data_files={'validation': 'datasets/dev.csv'},
        split='validation',
    )

    # Trainer configuration.
    kwargs = {
        'model': model_id,
        'model_revision': 'master',
        'train_dataset': train_dataset,
        'eval_dataset': eval_dataset,
        'max_epochs': max_epochs,
        'work_dir': WORK_DIR,
        'train_dataloader_cfg': {
            'batch_size': 16,
            'shuffle': True,     # shuffle training samples each epoch
            'num_workers': 4,    # parallel data-loading workers
            'pin_memory': True,  # faster host-to-GPU transfer
        },
        'val_dataloader_cfg': {
            'batch_size': 32,    # eval has no backward pass, larger batch OK
            'shuffle': False,    # keep validation order deterministic
            'num_workers': 4,
            'pin_memory': True,
        },
    }

    trainer = build_trainer('siamese-uie-trainer', default_args=kwargs)

    # Register the monitoring hook manually in case the config-driven hook
    # registration did not pick it up.
    if not any(isinstance(hook, EnhancedTrainingHook) for hook in trainer.hooks):
        trainer.register_hook(EnhancedTrainingHook(interval=10))
        logger.warning("通过register_hook方法手动注册Hook")

    logger.info(f"已注册的Hooks: {[type(h).__name__ for h in trainer.hooks]}")

    trainer.train()

    # Evaluate one checkpoint per trained epoch.  BUG FIX: the loop is now
    # driven by `max_epochs` instead of a duplicated hard-coded 3, and the
    # printed label matches the 1-based checkpoint number (previously it
    # printed the 0-based index while evaluating `epoch_{i + 1}.pth`).
    for epoch in range(1, max_epochs + 1):
        eval_results = trainer.evaluate(f'{WORK_DIR}/epoch_{epoch}.pth')
        print(f'epoch {epoch} evaluation result:')
        print(eval_results)


# Script entry point: run the full training + evaluation pipeline when
# this file is executed directly (not when imported as a module).
if __name__ == '__main__':
    train_model()