import json
import os
from typing import Optional, Callable

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.trainers import build_trainer
from modelscope.msdatasets import MsDataset
from modelscope.utils.constant import DownloadMode
from transformers import Trainer

from src.app.services.enhance_hook import EnhancedTrainingHook

# Resolve paths relative to this file so the module works regardless of CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Local dataset directory, two levels above this file.
local_data_dir = os.path.join(current_dir, '../../datasets')
# model_dir = os.path.join(current_dir, '../../models/nlp_structbert_siamese-uninlu_chinese-base')
# NOTE(review): unlike the paths above, model_dir is relative to the process
# working directory, not to this file — confirm this is intentional.
model_dir = 'models/nlp_structbert_siamese-uninlu_chinese-base'
# Directory where training checkpoints (epoch_N.pth) are written.
TRAIN_OUTPUT_DIR = os.path.join(current_dir, '../../train_models_cache')
# model_dir = os.path.join(current_dir, '../../models/nlp_structbert_v2')


class NerFeatureIdentify:
    """Named-entity recognition built on a ModelScope siamese-UIE pipeline.

    Provides inference (`identify`), fine-tuning (`train_ner_model`) and
    evaluation (`eval_model`) around the same underlying model family.
    """

    def __init__(self, model_path):
        """Load the siamese-UIE inference pipeline from *model_path*.

        Args:
            model_path: Local directory or ModelScope model id passed
                straight to ``modelscope.pipelines.pipeline``.
        """
        self.ner_pipeline = pipeline(Tasks.siamese_uie, model_path)
        # Backward-compatible alias: earlier code exposed the attribute
        # under this misspelled name; keep it so external callers still work.
        self.ner_pipline = self.ner_pipeline
        # Default extraction schema: person / location / organization.
        self.schema = {
            '人物': None,
            '地理位置': None,
            '组织机构': None
        }

    def identify(self, data: str, schema: Optional[dict] = None):
        """Run NER over *data* and return a flat list of entity dicts.

        Args:
            data: Input text to analyze.
            schema: Optional extraction schema; falls back to the default
                person/location/organization schema when omitted.

        Returns:
            A list of ``{'from', 'to', 'label', 'span'}`` dicts, one per
            entity found in the pipeline's ``output`` field.
        """
        if schema is None:
            schema = self.schema

        ner_result = self.ner_pipeline(data, schema=schema)
        feature_info = []
        for ner_label in ner_result['output']:
            # Each output item is a list; the first element carries the
            # entity's offset, type and surface span.
            entity = ner_label[0]
            feature_info.append({
                'from': entity['offset'][0],
                'to': entity['offset'][1],
                'label': entity['type'],
                'span': entity['span'],
            })
        return feature_info

    def train_ner_model(self, progress_callback: Optional[Callable] = None):
        """Fine-tune the model on local CSV datasets and evaluate each epoch.

        Args:
            progress_callback: Optional callable forwarded to
                ``EnhancedTrainingHook`` so callers can track progress.

        Side effects:
            Writes checkpoints ``epoch_N.pth`` under ``TRAIN_OUTPUT_DIR``
            and prints training/evaluation status to stdout.
        """
        # Load train/validation splits from local CSV files.
        # NOTE(review): these paths are relative to the process CWD, not to
        # this file — confirm against how the service is launched.
        train_dataset = MsDataset.load(
            'csv',
            data_files={'train': 'datasets/train.csv'},
            split='train'
        )

        eval_dataset = MsDataset.load(
            'csv',
            data_files={'validation': 'datasets/dev.csv'},
            split='validation'
        )

        max_epochs = 3
        kwargs = {
            "model": model_dir,
            "model_revision": 'master',
            "train_dataset": train_dataset,
            "eval_dataset": eval_dataset,
            "max_epochs": max_epochs,
            "work_dir": TRAIN_OUTPUT_DIR,
        }

        trainer = build_trainer('siamese-uie-trainer', default_args=kwargs)
        # Override the configured batch size.
        trainer.cfg.train.batch_size_per_gpu = 16
        # Manually register the progress hook if the trainer did not pick it
        # up automatically (fallback path).
        if not any(isinstance(hook, EnhancedTrainingHook) for hook in trainer.hooks):
            trainer.register_hook(EnhancedTrainingHook(interval=1, callback=progress_callback))
            print("通过register_hook方法手动注册Hook")

        print(f"已注册的Hooks: {[type(h).__name__ for h in trainer.hooks]}")

        print('===============================================================')
        print('pre-trained model loaded, training started:')
        print('===============================================================')

        trainer.train()

        print('===============================================================')
        print('train success.')
        print('===============================================================')

        # Evaluate every saved checkpoint; checkpoints are 1-based
        # (epoch_1.pth ... epoch_{max_epochs}.pth), so log the same number.
        for i in range(max_epochs):
            eval_results = trainer.evaluate(f'{TRAIN_OUTPUT_DIR}/epoch_{i + 1}.pth')
            print(f'epoch {i + 1} evaluation result:')
            print(eval_results)

        print('===============================================================')
        print('evaluate success')
        print('===============================================================')

    def eval_model(self, model_name: str):
        """Evaluate *model_name* on the tiny People's Daily 1998 NER set.

        Args:
            model_name: Local path or ModelScope model id to evaluate.

        Side effects:
            Downloads the evaluation dataset (forced re-download) and prints
            the metric results to stdout.
        """
        # 1. Prepare the evaluation dataset.
        eval_dataset = MsDataset.load('people_daily_ner_1998_tiny', namespace='damo', split='validation',
                                      download_mode=DownloadMode.FORCE_REDOWNLOAD)

        # 2. Build a trainer used only for evaluation.
        kwargs = {
            'model': model_name,
            'eval_dataset': eval_dataset,
            'model_revision': 'master',
            # Evaluation-only: a train batch size of 0 means no training.
            'train.dataloader.batch_size_per_gpu': 0,
            'evaluation.dataloader.batch_size_per_gpu': 16,
            'evaluation.metrics': ['accuracy', 'f1'],  # metrics to report
            'max_epochs': 1,
            # Dataset type selection.
            'dataset.type': 'bert',
            # Other required configuration.
            'framework': 'pytorch',
            'task': 'siamese-uie'
        }

        # NOTE(review): no trainer name is passed here, unlike
        # train_ner_model which uses 'siamese-uie-trainer' — confirm the
        # default trainer is the intended one.
        trainer = build_trainer(default_args=kwargs)

        # 3. Run evaluation only.
        eval_results = trainer.evaluate()
        print("评估结果:", eval_results)

ner_identify = NerFeatureIdentify(model_dir)


if __name__ == '__main__':

    # Ad-hoc manual checks; uncomment to run inference or training instead.
    # result = ner_identify.identify('1944年毕业于北大的名古屋铁道会长谷口清太郎等人在日本积极筹资，共筹款2.7亿日元，参加捐款的日本企业有69家。')

    # print(result)
    # ner_identify.train()
    # Evaluate the published base model on the tiny People's Daily NER set.
    ner_identify.eval_model('damo/nlp_structbert_siamese-uninlu_chinese-base')