import os
import shutil
import time
from collections import deque, defaultdict
from datetime import timedelta
from pathlib import Path
from threading import Thread
from typing import Dict, List, Set, Tuple, Union

import pandas as pd
from tqdm import tqdm
import torch
from modelscope.trainers.hooks import Hook, Priority
from modelscope.utils.logger import get_logger
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import json
from modelscope.trainers import build_trainer
from modelscope.msdatasets import MsDataset

if os.name != 'nt':
    import multiprocessing
    # Force the 'fork' start method on POSIX so DataLoader worker processes
    # inherit module state already loaded in the parent.
    # NOTE(review): 'fork' is known to be fragile on macOS — confirm target OS.
    # BUGFIX: set_start_method raises RuntimeError when the context is already
    # fixed (e.g. this module is imported after another library set it, or it
    # is imported twice); guard it instead of crashing at import time.
    try:
        multiprocessing.set_start_method('fork')
    except RuntimeError:
        pass  # start method already set elsewhere — keep whatever is active

CUR_FILE_PATH = os.path.realpath(__file__)   # absolute path of this source file
CUR_FILE_DIR = os.path.dirname(CUR_FILE_PATH)  # directory containing this file

logger = get_logger()
# Strip any handlers modelscope pre-installed so the hosting application fully
# controls log output; iterate over a copy since removeHandler mutates the list.
for handler in logger.handlers[:]:
    logger.removeHandler(handler)

# --------------------------------------------------------------------------- #
# Globally tunable parameters
# --------------------------------------------------------------------------- #
MAX_EPOCHS = 10             # maximum number of training epochs
LOG_INTERVAL = 1000         # emit a log line every N iterations
BATCH_SIZE_TRAIN = 24       # training batch size
BATCH_SIZE_VAL = 32         # validation batch size
NUM_WORKERS = 4             # DataLoader num_workers

# Module-level snapshot of training progress. The hook callback replaces the
# whole dict (rather than mutating it); readers poll it via get_train_info().
training_status = {
    'progress': 0,
    'status': 'idle',
    'epoch': '0/0',
    'iter': '0/0'
}


# --------------------------------------------------------------------------- #
# Hook: records speed / ETA / loss
# --------------------------------------------------------------------------- #
class EnhancedTrainingHook(Hook):
    """Hook recording iteration speed / ETA / loss and pushing progress
    snapshots ({progress, status, epoch, iter}) to an optional callback.
    """
    priority = Priority.VERY_HIGH

    def __init__(self, interval: int = LOG_INTERVAL, callback=None, logger=logger):
        """
        Args:
            interval: emit a debug log line every ``interval`` iterations.
            callback: optional callable receiving a progress dict.
            logger: logger instance to write to.
        """
        super().__init__()
        self.epoch_start_time = None            # wall-clock start of the current epoch
        self.last_iter = None                   # timestamp of the previous iteration
        self.start_time = None                  # wall-clock start of the whole run
        self.interval = interval
        self.iter_times = deque(maxlen=100)     # sliding window for ETA estimation
        self.callback = callback
        self.logger = logger

    def before_run(self, trainer):
        self.start_time = time.time()
        self.last_iter = self.start_time
        self.logger.info(f'训练开始，最大 Epoch: {trainer.max_epochs}')
        if self.callback:
            self.callback({
                'progress': 0,
                'status': 'training',
                'epoch': f'0/{trainer.max_epochs}',
                'iter': f'0/{len(trainer.data_loader)}'
            })
        self.logger.info(training_status)

    def before_train_epoch(self, trainer):
        self.epoch_start_time = time.time()
        # BUGFIX: this used to reset to self.start_time (start of the whole
        # run), which hugely inflated the first recorded iteration time of
        # every epoch after the first.
        self.last_iter = self.epoch_start_time
        self.logger.debug(f'Epoch {trainer.epoch + 1}/{trainer.max_epochs} 开始')
        if self.callback:
            self.callback({
                'progress': int((trainer.epoch + 1) / trainer.max_epochs * 100),
                'status': 'training',
                'epoch': f'{trainer._epoch + 1} / {trainer._max_epochs}',
                'iter': f'0/{len(trainer.data_loader)}'
            })
        self.logger.debug(training_status)

    def after_train_iter(self, trainer):
        self.logger.debug("after train iter...")
        now = time.time()
        self.iter_times.append(now - self.last_iter)
        self.last_iter = now
        current_epoch = getattr(trainer, 'epoch', 0) + 1
        # trainer._iter appears to count iterations cumulatively across all
        # epochs (the progress maths below divides by _max_epochs) —
        # TODO confirm against the modelscope trainer in use.
        global_iter = getattr(trainer, '_iter', 0) + 1
        total_iter = len(trainer.data_loader)
        process = int(global_iter / total_iter / trainer._max_epochs * 100)
        # BUGFIX: a plain `global_iter % total_iter` displayed 0 on the last
        # iteration of every epoch; map onto 1..total_iter instead.
        current_iter = (global_iter - 1) % total_iter + 1
        if (trainer.iter + 1) % self.interval == 0:
            loss = self._get_loss(trainer)
            lr = trainer.optimizer.param_groups[0]['lr']
            # BUGFIX: remaining work must be counted within the current epoch;
            # the original subtracted the cumulative iteration counter and went
            # negative after the first epoch.
            remaining = total_iter - (trainer.iter % total_iter) - 1
            eta = (sum(self.iter_times) / len(self.iter_times)) * remaining
            self.logger.debug(
                f'epoch[{trainer.epoch + 1:02d}] iter[{trainer.iter + 1:05d}/{total_iter}] '
                f'lr:{lr:.3e} loss:{loss:.4f} ETA:{timedelta(seconds=int(eta))}')
        if self.callback:
            self.callback({
                'progress': process,
                'status': 'training',
                'epoch': f'{current_epoch} / {trainer._max_epochs}',
                'iter': f'{current_iter} / {total_iter}'
            })
        self.logger.debug(training_status)

    def after_run(self, trainer):
        # BUGFIX: start_time is taken from time.time(); the original mixed it
        # with time.perf_counter(), producing a meaningless elapsed duration.
        self.logger.debug(f'训练结束，总耗时: {timedelta(seconds=int(time.time() - self.start_time))}')
        if self.callback:
            total_iter = len(trainer.data_loader)
            self.callback({
                'progress': 100,
                'status': 'completed',
                'epoch': f'{trainer._max_epochs} / {trainer._max_epochs}',
                # BUGFIX: the original evaluated len/len (always "1.0") instead
                # of formatting a "done / total" string like the other events.
                'iter': f'{total_iter} / {total_iter}'
            })
        self.logger.debug(training_status)

    @staticmethod
    def _get_loss(trainer):
        """Best-effort loss extraction, compatible with several trainer output layouts."""
        if hasattr(trainer, 'batch_outputs') and isinstance(trainer.batch_outputs, dict):
            return float(trainer.batch_outputs.get('loss', float('nan')))
        if hasattr(trainer, 'train_outputs') and isinstance(trainer.train_outputs, dict):
            return float(trainer.train_outputs.get('loss', float('nan')))
        if hasattr(trainer, 'log_buffer') and 'loss' in trainer.log_buffer.output:
            return float(trainer.log_buffer.output['loss'])
        return float('nan')


# --------------------------------------------------------------------------- #
# Custom evaluation implementation (faithfully follows the logic of the user's evaluate_ner.py)
# --------------------------------------------------------------------------- #
def flatten_info_list(info: Union[list, dict]) -> List[Dict]:
    """Recursively flatten a nested info_list into a flat list of dicts.

    Lists are descended into, dicts are treated as leaves, and any other
    value contributes nothing to the result.
    """
    if isinstance(info, dict):
        return [info]
    if isinstance(info, list):
        flat: List[Dict] = []
        for element in info:
            flat += flatten_info_list(element)
        return flat
    return []


class NerFeatureIdentify:
    """Wrap Siamese-UIE pipeline inference; reuse one instance so the model
    is not loaded onto the device more than once.
    """

    def __init__(self, model_path: str):
        logger.info(f"正在从 '{model_path}' 加载 Siamese‑UIE 模型 …")
        self.ner_pipeline = pipeline(Tasks.siamese_uie, model_path, device='cpu')
        logger.info("模型加载完成。")

    def identify(self, text: str, schema: Dict) -> List[Dict]:
        """Run NER on *text* and normalise each hit into
        {'from', 'to', 'label', 'span'}; returns [] on inference failure.
        """
        hits: List[Dict] = []
        try:
            raw = self.ner_pipeline(text, schema=schema)
            for group in raw.get('output', []):
                for entity in group:
                    offset = entity.get('offset', [None, None])
                    hits.append({
                        'from': offset[0],
                        'to': offset[1],
                        'label': entity.get('type'),
                        'span': entity.get('span'),
                    })
        except Exception as e:
            logger.warning(f"推理失败（文本截断）: '{text[:50]}…'  错误: {e}")
        return hits


class NerTrain:
    """Drives NER model training (in a background thread) and evaluation."""

    def __init__(self):
        self.base_model_name = 'ner_struct_bert_md'  # base model directory name
        self.work_dir = 'train_dir'             # trainer working directory
        self.train_csv = 'train.csv'                     # training dataset
        self.test_csv = 'train.csv'                      # evaluation dataset
        self.resume_from = ''                   # checkpoint path to resume from ('' = fresh run)
        self.max_epochs = MAX_EPOCHS
        self.train_batch_size = BATCH_SIZE_TRAIN
        self.num_workers = NUM_WORKERS
        self.test_batch_size = BATCH_SIZE_VAL
        self.logger = logger

    @staticmethod
    def get_train_info():
        """Return the latest module-level training progress snapshot."""
        return training_status

    def train_model(self, train_logger=None, call_back=None):
        """Start a training run in a background thread and return immediately.

        Args:
            train_logger: optional logger that overrides the default one.
            call_back: optional no-arg callable invoked after training finishes.

        Returns:
            The started ``threading.Thread``. (Backward-compatible improvement:
            the original returned ``None``, so callers could never ``join()``.)
        """
        if train_logger:
            self.logger = train_logger

        def training_thread():
            self.logger.debug(f"base model name: {self.base_model_name}")

            def progress_call_back(data):
                # Replace (not mutate) the module-level snapshot so readers
                # always see a consistent dict.
                global training_status
                training_status = {
                    'progress': data['progress'],
                    'status': data['status'],
                    'epoch': data['epoch'],
                    'iter': data['iter']
                }

            model_dir = os.path.join(CUR_FILE_DIR, f"../../models/{self.base_model_name}")
            work_dir = os.path.join(CUR_FILE_DIR, f"../../{self.work_dir}")

            train_ds = MsDataset.load('csv', data_files={'train': self.train_csv}, split='train')
            eval_ds = MsDataset.load('csv', data_files={'validation': self.test_csv}, split='validation')

            default_args = dict(
                model=str(model_dir),
                train_dataset=train_ds,
                eval_dataset=eval_ds,
                max_epochs=self.max_epochs,
                work_dir=str(work_dir),
                device='gpu' if torch.cuda.is_available() else 'cpu',
                train_dataloader_cfg={'batch_size': self.train_batch_size, 'num_workers': self.num_workers},
                val_dataloader_cfg={'batch_size': self.test_batch_size, 'num_workers': self.num_workers},
            )

            if self.resume_from:
                default_args['checkpoint'] = str(Path(self.resume_from).resolve())

            trainer = build_trainer('siamese-uie-trainer', default_args=default_args)
            trainer.register_hook(EnhancedTrainingHook(callback=progress_call_back, logger=self.logger))

            if self.resume_from:
                self.logger.info(f"从 {self.resume_from} 恢复训练…")

            trainer.train()

            self.logger.debug(f"train finished.")
            if call_back:
                call_back()

        thread = Thread(target=training_thread)
        thread.start()
        return thread

    @staticmethod
    def _export_ckpt_as_model(base_model_dir: Path, ckpt_path: Path, dest_dir: Path):
        """Convert a checkpoint into a pipeline-loadable model directory.

        Copies the base model directory (config, vocab, …) to *dest_dir*, then
        overwrites its weights file with the checkpoint's state dict.
        """
        if dest_dir.exists():
            shutil.rmtree(dest_dir)
        shutil.copytree(base_model_dir, dest_dir, dirs_exist_ok=True)
        # NOTE(security): torch.load unpickles arbitrary objects — only feed it
        # trusted checkpoints. weights_only=False is made explicit because the
        # torch >= 2.6 default (True) would reject full checkpoint objects.
        ckpt_obj = torch.load(ckpt_path, map_location='cpu', weights_only=False)
        # Accept {'state_dict': ...}, {'model': ...}, or a bare state dict.
        state_dict = ckpt_obj.get('state_dict') or ckpt_obj.get('model') or ckpt_obj
        torch.save(state_dict, dest_dir / 'pytorch_model.bin')

    @staticmethod
    def evaluate_ner(csv_path: str, output_csv_path: str, model_dir: str) -> float:
        """Evaluate a NER model on a CSV test set.

        The CSV must contain columns ``text``, ``info_list`` (gold entities as
        JSON) and ``schema`` (entity schema as JSON; taken from the first row).

        Returns:
            The micro-F1 score; a per-label report CSV is written to
            *output_csv_path*.
        Raises:
            ValueError: if a required column is missing.
        """
        identifier = NerFeatureIdentify(model_dir)

        logger.info(f"读取测试集 '{csv_path}' …")
        df = pd.read_csv(csv_path)
        required_cols = {'text', 'info_list', 'schema'}
        if not required_cols.issubset(df.columns):
            raise ValueError(f"CSV 必须包含列: {', '.join(required_cols)}")

        schema: Dict = json.loads(df['schema'].iloc[0])
        logger.info(f"已载入 {len(schema)} 种实体类型。")

        stats = defaultdict(lambda: {'TP': 0, 'FP': 0, 'FN': 0})
        total_processed = 0
        start_ts = time.time()

        for _, row in tqdm(df.iterrows(), total=len(df), desc='⏳  评估进度', ncols=80):
            text = row['text'] if isinstance(row['text'], str) else ''
            if not text.strip():
                continue

            # Gold entities as (span, type) pairs.
            try:
                true_info = flatten_info_list(json.loads(row['info_list']))
            except Exception:
                true_info = []
            true_entities: Set[Tuple[str, str]] = {(ent['span'], ent['type']) for ent in true_info}

            # Predicted entities as (span, label) pairs.
            pred_info = identifier.identify(text, schema)
            pred_entities: Set[Tuple[str, str]] = {(ent['span'], ent['label']) for ent in pred_info}

            tp = true_entities & pred_entities
            fp = pred_entities - true_entities
            fn = true_entities - pred_entities

            for _, label in tp:
                stats[label]['TP'] += 1
            for _, label in fp:
                stats[label]['FP'] += 1
            for _, label in fn:
                stats[label]['FN'] += 1

            total_processed += 1

        end_ts = time.time()
        total_tp = total_fp = total_fn = 0
        results: List[Dict] = []
        # Include schema labels with zero hits so the report covers all types.
        all_labels = set(schema.keys()) | set(stats.keys())

        for label in sorted(all_labels):
            tp = stats[label]['TP']
            fp = stats[label]['FP']
            fn = stats[label]['FN']
            total_tp += tp
            total_fp += fp
            total_fn += fn
            precision = tp / (tp + fp) if (tp + fp) else 0.0
            recall = tp / (tp + fn) if (tp + fn) else 0.0
            f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
            results.append({'标签名': label, '查准率': f"{precision:.4f}", '查全率': f"{recall:.4f}", '综合得分': f"{f1:.4f}",
                            '实际样本数': tp + fn, '模型正确识别的次数': tp, '模型错误识别的次数': fp, '模型漏掉的次数': fn})

        # Persist the per-label report (utf-8-sig so spreadsheet apps open it correctly).
        pd.DataFrame(results).sort_values(by='综合得分', ascending=False).to_csv(output_csv_path, index=False, encoding='utf-8-sig')
        micro_p = total_tp / (total_tp + total_fp) if (total_tp + total_fp) else 0.0
        micro_r = total_tp / (total_tp + total_fn) if (total_tp + total_fn) else 0.0
        micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r) if (micro_p + micro_r) else 0.0
        logger.info(f"评估完成 | Micro‑F1={micro_f1:.4f} | 用时 {(end_ts - start_ts):.1f}s | 详见 {output_csv_path}")
        return micro_f1


ner_trainer = NerTrain()


if __name__ == '__main__':
    # Manual smoke test: kick off a training run with the default settings.
    # (Previous commented-out invocation variants removed as dead code.)
    ner_train = NerTrain()
    ner_train.train_model()
