import os
import torch
import pandas as pd
import numpy as np
import json
import pickle
from tqdm import tqdm
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.data import DataLoader
from transformers import BertTokenizer

# Import the optimized training functions
from model_training import train_model, evaluate_model, plot_training_history
from model_design import OptimizedTCMBert
from data_loading import load_fixed_datasets
from text_preprocessing import preprocess_text, combine_text_features, extract_tcm_features
from dataset_class import TCMBertDataset

def save_preprocessed_data(data, file_path):
    """Serialize *data* to *file_path* as a binary pickle file."""
    with open(file_path, 'wb') as handle:
        pickle.dump(data, handle)

def load_preprocessed_data(file_path):
    """Deserialize and return the pickled object stored at *file_path*.

    NOTE(review): pickle.load executes arbitrary code from the file; this is
    only safe because the cache file is produced locally by this script —
    never point it at untrusted data.
    """
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)

def main():
    """End-to-end training pipeline for the TCM syndrome-classification BERT model.

    Steps:
      1. Load (or reuse a pickled cache of) the preprocessed train/val data.
      2. Filter out samples with unknown syndrome labels.
      3. Build TCMBertDataset/DataLoader pairs with a local BERT tokenizer.
      4. Train OptimizedTCMBert, then reload the best checkpoint, evaluate it,
         and persist the classification report and training-history plots.
    """
    # Fix RNG seeds for reproducibility (CUDA kernels may still be nondeterministic).
    torch.manual_seed(42)
    np.random.seed(42)

    preprocessed_data_path = 'preprocessed_data.pkl'
    if os.path.exists(preprocessed_data_path):
        # Reuse the cached preprocessing output to skip the expensive
        # text-combination / feature-extraction pass below.
        print("检测到已保存的预处理数据，正在加载...")
        data = load_preprocessed_data(preprocessed_data_path)
        train_texts, train_struct_features, train_labels, val_texts, val_struct_features, val_labels, syndrome_df, syndrome_to_id, id_to_syndrome = data
    else:
        print("加载训练集和验证集...")
        train_df, val_df, syndrome_df, syndrome_to_id, id_to_syndrome = load_fixed_datasets(
            'train.json', 'dev.json', 'syndrome.json'
        )
        syndrome_list = list(syndrome_to_id.keys())
        print(f"训练集大小: {len(train_df)}, 验证集大小: {len(val_df)}")
        print(f"证型数量: {len(syndrome_list)}")

        print("进行文本预处理和特征工程...")
        train_texts = []
        train_struct_features = []
        train_labels = []

        for _, row in tqdm(train_df.iterrows(), total=len(train_df)):
            combined_text = combine_text_features(row)
            train_texts.append(combined_text)
            # NOTE(review): `row` here is a copy yielded by iterrows(), so this
            # mutation does not modify train_df — it only makes the combined
            # text visible to extract_tcm_features below; verify that helper
            # actually reads 'combined_text'.
            row['combined_text'] = combined_text
            struct_features = extract_tcm_features(row)
            train_struct_features.append(struct_features)
            # Unknown syndromes are tagged -1 and filtered out after loading.
            label = syndrome_to_id.get(row['syndrome'], -1)
            if label == -1:
                print(f"警告: 训练集中发现未知证型 '{row['syndrome']}'")
            train_labels.append(label)

        # Same preprocessing pass for the validation split.
        val_texts = []
        val_struct_features = []
        val_labels = []
        for _, row in tqdm(val_df.iterrows(), total=len(val_df)):
            combined_text = combine_text_features(row)
            val_texts.append(combined_text)
            row['combined_text'] = combined_text
            struct_features = extract_tcm_features(row)
            val_struct_features.append(struct_features)
            label = syndrome_to_id.get(row['syndrome'], -1)
            if label == -1:
                print(f"警告: 验证集中发现未知证型 '{row['syndrome']}'")
            val_labels.append(label)

        # Cache everything so subsequent runs hit the fast path above.
        # The list order here is load-bearing: indices 6-8 are read back below.
        data = [train_texts, train_struct_features, train_labels, val_texts, val_struct_features, val_labels, syndrome_df, syndrome_to_id, id_to_syndrome]
        save_preprocessed_data(data, preprocessed_data_path)
        print("预处理数据已保存，下次可直接加载使用")

    print("初始化tokenizer和数据集...")
    model_type = "base"  # currently unused; presumably reserved for model-variant selection
    model_dir = "bert-base-chinese"

    # Verify the local BERT checkpoint is complete before attempting to load it.
    required_files = ["config.json", "pytorch_model.bin", "tokenizer_config.json", "vocab.txt"]
    if not all(os.path.exists(os.path.join(model_dir, f)) for f in required_files):
        print(f"错误: 模型文件不完整，请检查 {model_dir} 目录")
        return

    print(f"使用模型路径: {model_dir}")
    tokenizer = BertTokenizer.from_pretrained(model_dir, local_files_only=True)

    # Both branches above leave `data` populated, so indices 6-8 are valid
    # regardless of whether the cache was hit.
    syndrome_df = data[6]
    syndrome_to_id = data[7]
    id_to_syndrome = data[8]
    # NOTE(review): assumes syndrome_df's symptom_/tongue_/pulse_ columns match,
    # in both count and order, the feature vectors produced by
    # extract_tcm_features — TODO confirm; a mismatch would raise IndexError below.
    struct_feature_cols = [
        col for col in syndrome_df.columns
        if col.startswith('symptom_') or col.startswith('tongue_') or col.startswith('pulse_')
    ]

    # Drop samples whose syndrome was not in the label map (-1 sentinel).
    valid_train_indices = [i for i, label in enumerate(train_labels) if label != -1]
    valid_val_indices = [i for i, label in enumerate(val_labels) if label != -1]

    if not valid_train_indices:
        print("错误: 训练集中所有样本都是未知证型，无法继续")
        return

    train_texts_filtered = [train_texts[i] for i in valid_train_indices]
    train_struct_features_filtered = [train_struct_features[i] for i in valid_train_indices]
    train_labels_filtered = [train_labels[i] for i in valid_train_indices]

    # Rebuild a flat DataFrame (text + label + one column per structured feature)
    # in the layout TCMBertDataset expects.
    train_dict = {
        'combined_text': train_texts_filtered,
        'syndrome': [id_to_syndrome[label] for label in train_labels_filtered]
    }
    for i, col in enumerate(struct_feature_cols):
        train_dict[col] = [train_struct_features_filtered[j][i] for j in range(len(train_struct_features_filtered))]
    train_df = pd.DataFrame(train_dict)

    val_texts_filtered = [val_texts[i] for i in valid_val_indices]
    val_struct_features_filtered = [val_struct_features[i] for i in valid_val_indices]
    val_labels_filtered = [val_labels[i] for i in valid_val_indices]

    val_dict = {
        'combined_text': val_texts_filtered,
        'syndrome': [id_to_syndrome[label] for label in val_labels_filtered]
    }
    for i, col in enumerate(struct_feature_cols):
        val_dict[col] = [val_struct_features_filtered[j][i] for j in range(len(val_struct_features_filtered))]
    val_df = pd.DataFrame(val_dict)

    train_dataset = TCMBertDataset(
        df=train_df,
        tokenizer=tokenizer,
        max_length=128,
        use_structured_features=True
    )

    val_dataset = TCMBertDataset(
        df=val_df,
        tokenizer=tokenizer,
        max_length=128,
        use_structured_features=True
    )

    # Larger batches on GPU for throughput; smaller on CPU to limit memory.
    batch_size = 16 if torch.cuda.is_available() else 8
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=4)

    print("初始化模型...")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    model = OptimizedTCMBert(
        num_classes=len(syndrome_to_id),
        model_path=model_dir,
        use_structured=True
    ).to(device)

    print("开始训练模型...")
    # Train the model (the helper supports checkpoint resume and mixed precision).
    model, history, val_preds, val_true = train_model(
        model, train_loader, val_loader,
        num_epochs=10,                # number of training epochs
        learning_rate=5e-5,           # standard BERT fine-tuning LR
        weight_decay=0.01,
        warmup_steps=500,             # LR warmup steps
        device=device,
        use_amp=True                  # enable automatic mixed precision
    )

    print("\n加载最佳模型进行最终评估...")
    # NOTE(review): no map_location — loading a GPU-saved checkpoint on a
    # CPU-only machine would fail; consider torch.load(..., map_location=device).
    # Presumably train_model writes 'best_tcm_model.pth'; verify against that module.
    model.load_state_dict(torch.load('best_tcm_model.pth'))
    accuracy, report, cm = evaluate_model(model, val_loader, list(syndrome_to_id.keys()), device)

    # Persist the per-class classification report.
    with open('classification_report.txt', 'w', encoding='utf-8') as f:
        f.write(report)

    # Plot loss/accuracy curves collected during training.
    plot_training_history(history)

    print("\n训练完成!")
    print("最佳模型已保存为 'best_tcm_model.pth'")
    print("分类报告已保存为 'classification_report.txt'")
    print("混淆矩阵和训练历史已保存为图片文件")

# Standard script entry point: run the training pipeline only when executed
# directly (not when this module is imported).
if __name__ == "__main__":
    main()