# -*- coding: utf-8 -*-
import time
import pandas as pd
import os
import json
import argparse
from datetime import datetime

import config
from src.training import TrainingPipeline


class HyperparameterTuner:
    """Grid-search driver: launches one TrainingPipeline run per hyperparameter
    combination and collects a timing/metric summary for each run.
    """

    def __init__(self, model_name, train_dataset_path, eval_dataset_path, output_dir_base, hyperparameter_grid):
        """
        Args:
            model_name: Name or path of the base model to fine-tune.
            train_dataset_path: Path to the training dataset file.
            eval_dataset_path: Path to the evaluation dataset file.
            output_dir_base: Base directory for per-run training outputs.
            hyperparameter_grid: List of dicts, one per combination to try
                (expects keys: learning_rate, batch_size, num_samples, epochs).
        """
        self.model_name = model_name
        self.train_dataset_path = train_dataset_path
        self.eval_dataset_path = eval_dataset_path
        self.output_dir_base = output_dir_base
        self.hyperparameter_grid = hyperparameter_grid
        # One summary dict appended per completed run.
        self.results = []

    def run_tuning(self):
        """Run one training pipeline per grid entry, saving and printing results."""
        for i, params in enumerate(self.hyperparameter_grid):
            print("-" * 80)
            print(f"超参数搜索: 第 {i + 1}/{len(self.hyperparameter_grid)} 轮")
            print(f"当前参数: {params}")
            print("-" * 80)

            # Copy so the caller's grid entry is not mutated. Named run_config
            # (not `config`) to avoid shadowing the module-level `config` import.
            run_config = params.copy()
            run_config['output_dir_base'] = self.output_dir_base

            pipeline = TrainingPipeline(
                model_name=self.model_name,
                train_dataset_path=self.train_dataset_path,
                eval_dataset_path=self.eval_dataset_path,
                config=run_config
            )

            start_time = time.time()
            train_result = pipeline.run()
            end_time = time.time()

            # Record this run. The eval_* fields are placeholders: train_result
            # is currently unused because the pipeline's return schema is not
            # visible here — TODO: populate precision/recall/f1 from
            # train_result once its structure is confirmed.
            result_summary = {
                "learning_rate": run_config['learning_rate'],
                "batch_size": run_config['batch_size'],
                "num_samples": run_config['num_samples'],
                "epochs": run_config['epochs'],
                "training_time_seconds": round(end_time - start_time, 2),
                "eval_precision": None,
                "eval_recall": None,
                "eval_f1": None,
            }
            self.results.append(result_summary)

            # Persist this run's summary under data/datamodel/.
            self._save_single_result(result_summary, i)

            # Print the cumulative summary after every run as progress feedback.
            self._print_summary()

    def _print_summary(self):
        """Print all collected results as a table; fall back to raw dicts."""
        print("\n" + "=" * 80)
        print("超参数搜索结果总结")
        print("=" * 80)
        if not self.results:
            print("没有可用的结果。")
            return

        try:
            df = pd.DataFrame(self.results)
            print(df.to_string())
        except Exception:
            # pandas is imported unconditionally at module level, so the
            # original `except ImportError` here was unreachable dead code.
            # Catch rendering failures instead and degrade to raw dicts.
            for result in self.results:
                print(result)
        print("\n")

    def _save_single_result(self, result_summary, index):
        """Save one run's summary as JSON under a timestamped subfolder of
        data/datamodel/.

        Args:
            result_summary: Dict of hyperparameters and metrics for one run.
            index: Zero-based index of the run within the grid.
        """
        # Create the datamodel directory if it does not exist.
        datamodel_dir = os.path.join("data", "datamodel")
        os.makedirs(datamodel_dir, exist_ok=True)

        # Timestamped subdirectory keeps repeated runs from overwriting each other.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        run_dir = os.path.join(datamodel_dir, f"run_{index + 1}_{timestamp}")
        os.makedirs(run_dir, exist_ok=True)

        # Write the summary as UTF-8 JSON (keep non-ASCII characters readable).
        filepath = os.path.join(run_dir, "result.json")
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(result_summary, f, ensure_ascii=False, indent=4)

        print(f"单次运行结果已保存到: {filepath}")


def main():
    """Default entry point: run the built-in hyperparameter grid search.

    Uses the module-level `config` import (the original re-imported `config`
    locally, which was redundant) and a hard-coded single-entry grid.
    """
    # --- Configuration ---
    MODEL_NAME = config.model_config.model_path
    # NOTE(review): "trian_data" looks like a typo for "train_data" — kept
    # as-is because the on-disk directory may actually use this spelling;
    # confirm before renaming.
    TRAIN_DATASET_PATH = "/data/train_class/data/trian_data/training_data.jsonl"
    TEST_DATASET_PATH = "/data/train_class/data/test_data/test_data.jsonl"
    OUTPUT_DIR_BASE = "./ernie_sft_lora_tuning_results"

    # --- Hyperparameter grid to search ---
    hyperparameter_grid = [
        {'learning_rate': 5e-5, 'batch_size': 4, 'num_samples': 3000, 'epochs': 3}
    ]

    # --- Launch the search ---
    tuner = HyperparameterTuner(
        model_name=MODEL_NAME,
        train_dataset_path=TRAIN_DATASET_PATH,
        eval_dataset_path=TEST_DATASET_PATH,
        output_dir_base=OUTPUT_DIR_BASE,
        hyperparameter_grid=hyperparameter_grid
    )
    tuner.run_tuning()


def cli_main():
    """CLI entry point: parse a single hyperparameter combination from the
    command line and launch one tuning run with it.

    Defaults for paths and model name come from the project `config` module;
    training hyperparameters default to the same values as the built-in grid.
    """
    parser = argparse.ArgumentParser(description='医疗命名实体识别模型训练')
    parser.add_argument('--model_name', type=str, default=config.model_config.model_path, help='模型名称')
    parser.add_argument('--train_data', type=str, default=config.model_config.train_data_path, help='训练数据路径')
    parser.add_argument('--test_data', type=str, default=config.model_config.test_data_path, help='测试数据路径')
    parser.add_argument('--output_dir', type=str, default=config.model_config.output_dir, help='输出目录')
    parser.add_argument('--learning_rate', type=float, default=5e-5, help='学习率')
    parser.add_argument('--batch_size', type=int, default=4, help='批次大小')
    parser.add_argument('--num_samples', type=int, default=3000, help='训练样本数')
    parser.add_argument('--epochs', type=int, default=3, help='训练轮数')

    args = parser.parse_args()

    # A single-entry "grid": exactly one combination, taken from the CLI.
    single_run_grid = [{
        'learning_rate': args.learning_rate,
        'batch_size': args.batch_size,
        'num_samples': args.num_samples,
        'epochs': args.epochs,
    }]

    # Build the tuner and start training immediately.
    HyperparameterTuner(
        model_name=args.model_name,
        train_dataset_path=args.train_data,
        eval_dataset_path=args.test_data,
        output_dir_base=args.output_dir,
        hyperparameter_grid=single_run_grid,
    ).run_tuning()


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        # Any command-line argument selects CLI mode. This includes --help:
        # argparse prints usage and exits by itself. The original condition
        # excluded --help, so `script.py --help` fell through to main() and
        # launched a full training run instead of printing help.
        cli_main()
    else:
        # No arguments: run the default grid-search mode.
        main()