import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.ensemble import RandomForestClassifier
import json
import argparse
import sys
import os
import joblib
import time
import difflib  # fuzzy string matching for symptoms unseen during training

# Directory containing this script; artifact paths are anchored here so the
# saved model is found regardless of the current working directory.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Paths where the trained model and its preprocessing artifacts are persisted.
MODEL_PATH = os.path.join(SCRIPT_DIR, "drug_recommendation_model.pkl")
ENCODER_PATH = os.path.join(SCRIPT_DIR, "drug_recommendation_encoders.pkl")
SCALER_PATH = os.path.join(SCRIPT_DIR, "drug_recommendation_scaler.pkl")

def train_model():
    """Train the drug recommendation model and persist it to disk.

    Loads training data from a fixed Excel file, label-encodes the
    categorical columns, standardizes the features, tunes a random forest
    via a small grid search, and saves the model plus preprocessing
    artifacts next to this script.

    Returns:
        tuple: (best_model, label_encoders, scaler) where label_encoders is
        a dict mapping column name -> fitted LabelEncoder.

    Raises:
        Exception: if data loading, model training, or model saving fails
        (the original exception is chained as the cause).
    """
    print("开始训练药品推荐模型...", flush=True)

    # Load the training data.
    print("正在加载数据...", flush=True)
    file_path = r"D:\data.xlsx"
    try:
        df = pd.read_excel(file_path)
        print(f"成功加载数据，共 {len(df)} 条记录", flush=True)
    except Exception as e:
        print(f"数据加载失败: {str(e)}", flush=True)
        # Chain the original exception so the root cause is not lost.
        raise Exception("数据加载错误") from e

    # Label-encode every categorical column, keeping each fitted encoder so
    # inference can apply the identical mapping (and decode predictions).
    print("正在进行数据预处理...", flush=True)
    label_encoders = {}
    categorical_columns = ["性别", "病症", "过敏历史", "药品"]
    for col in categorical_columns:
        le = LabelEncoder()
        df[col] = le.fit_transform(df[col])
        label_encoders[col] = le
    print("数据预处理完成", flush=True)

    # Feature matrix and target label.
    X = df[["性别", "年龄", "病症", "过敏历史", "气温"]]
    y = df["药品"]

    # Standardize features to zero mean / unit variance.
    print("正在进行特征标准化...", flush=True)
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    print("特征标准化完成", flush=True)

    # Hold out 20% of the data for evaluation.
    print("正在划分数据集...", flush=True)
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
    print(f"数据集划分完成，训练集: {len(X_train)} 条，测试集: {len(X_test)} 条", flush=True)

    # Hyperparameter tuning over a deliberately small grid to keep the
    # training time manageable.
    print("开始模型训练与参数调优...", flush=True)
    model = RandomForestClassifier(random_state=42)
    param_grid = {
        'n_estimators': [50, 100],        # fewer trees to limit cost
        'max_depth': [5, 10],             # cap tree depth
        'min_samples_split': [5, 10],     # require more samples to split
        'min_samples_leaf': [2, 4]        # require more samples per leaf
    }
    skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=42)
    # n_jobs=-1 evaluates grid candidates in parallel; results are unchanged.
    grid_search = GridSearchCV(model, param_grid, cv=skf, scoring='accuracy', n_jobs=-1)

    try:
        start_time = time.time()
        grid_search.fit(X_train, y_train)
        best_model = grid_search.best_estimator_
        training_time = time.time() - start_time
        print(f"模型训练完成，耗时: {training_time:.2f}秒", flush=True)
        print(f"最佳参数组合: {grid_search.best_params_}", flush=True)
        print(f"最佳交叉验证准确率: {grid_search.best_score_:.4f}", flush=True)
    except Exception as e:
        print(f"模型训练失败: {str(e)}", flush=True)
        raise Exception("模型训练错误") from e

    # Persist the model and preprocessing artifacts for later inference.
    print("正在保存模型...", flush=True)
    try:
        joblib.dump(best_model, MODEL_PATH, compress=3)
        joblib.dump(label_encoders, ENCODER_PATH, compress=3)
        joblib.dump(scaler, SCALER_PATH, compress=3)
        print(f"模型已保存到 {MODEL_PATH}", flush=True)
    except Exception as e:
        print(f"模型保存失败: {str(e)}", flush=True)
        raise Exception("模型保存错误") from e

    return best_model, label_encoders, scaler

def load_model():
    """Load the persisted model and preprocessing artifacts, if present.

    Returns:
        tuple: (model, label_encoders, scaler), or (None, None, None) when
        any artifact file is missing or fails to deserialize.
    """
    print("尝试加载已有模型...", flush=True)

    artifact_paths = (MODEL_PATH, ENCODER_PATH, SCALER_PATH)

    # All three artifacts must exist; otherwise signal the caller to retrain.
    if not all(os.path.exists(path) for path in artifact_paths):
        print("未找到已保存的模型，将训练新模型", flush=True)
        return None, None, None

    try:
        model, label_encoders, scaler = (joblib.load(path) for path in artifact_paths)
    except Exception as e:
        # Corrupt or incompatible artifacts: fall back to retraining.
        print(f"模型加载失败: {str(e)}，将训练新模型", flush=True)
        return None, None, None

    print("模型加载成功", flush=True)
    return model, label_encoders, scaler

def predict_medicine(gender, age, symptom, allergy, temperature):
    """Recommend a drug for a patient using the trained model.

    Args:
        gender: Patient gender; must be a category seen during training.
        age: Patient age.
        symptom: Patient symptom; values unseen during training are
            fuzzy-matched against the training vocabulary.
        allergy: Patient allergy history; must be a training-set category.
        temperature: Patient temperature reading.

    Returns:
        dict: {"status": "success", "medicine": <name>} on success, or
        {"status": "error", "message": <reason>} when input encoding fails.
    """
    # Load the persisted artifacts, training a fresh model if none exist.
    model, label_encoders, scaler = load_model()
    if model is None or label_encoders is None or scaler is None:
        model, label_encoders, scaler = train_model()

    # Encode the raw inputs with the encoders fitted at training time.
    print("正在处理输入数据...", flush=True)
    try:
        gender_enc = label_encoders["性别"].transform([gender])[0]

        # Encode the symptom, falling back to fuzzy matching for values
        # never seen during training.
        known_symptoms = label_encoders["病症"].classes_
        if symptom in known_symptoms:
            symptom_enc = label_encoders["病症"].transform([symptom])[0]
            print(f"症状 '{symptom}' 在训练集中找到，直接编码", flush=True)
        else:
            # difflib picks the closest known symptom by string similarity.
            closest_symptom = difflib.get_close_matches(symptom, known_symptoms, n=1, cutoff=0.3)
            if closest_symptom:
                closest_symptom = closest_symptom[0]
                symptom_enc = label_encoders["病症"].transform([closest_symptom])[0]
                print(f"警告: 症状 '{symptom}' 未在训练集中出现，使用近似匹配 '{closest_symptom}'", flush=True)
            else:
                # No plausible match: fall back to the first known symptom.
                symptom_enc = label_encoders["病症"].transform([known_symptoms[0]])[0]
                print(f"警告: 症状 '{symptom}' 未找到近似匹配，使用默认值 '{known_symptoms[0]}'", flush=True)

        allergy_enc = label_encoders["过敏历史"].transform([allergy])[0]
    except Exception as e:
        print(f"输入数据编码失败: {str(e)}", flush=True)
        return {"status": "error", "message": f"输入数据错误: {str(e)}"}

    # Build a single-row DataFrame with the same column names the scaler was
    # fitted on, so scikit-learn aligns features by name instead of warning
    # about missing feature names on a bare nested list.
    input_df = pd.DataFrame(
        [[gender_enc, age, symptom_enc, allergy_enc, temperature]],
        columns=["性别", "年龄", "病症", "过敏历史", "气温"],
    )
    input_scaled = scaler.transform(input_df)
    print("输入数据处理完成", flush=True)

    # Predict the encoded drug label and decode it back to the drug name.
    print("正在进行药品推荐...", flush=True)
    medicine_enc = model.predict(input_scaled)
    medicine = label_encoders["药品"].inverse_transform(medicine_enc)[0]
    print("药品推荐完成", flush=True)

    return {"status": "success", "medicine": medicine}

def force_retrain():
    """Delete any saved model artifacts and retrain the model from scratch.

    Returns:
        dict: a success payload; training errors propagate to the caller.
    """
    print("开始强制重新训练模型...", flush=True)

    # Remove every persisted artifact so training starts from a clean slate.
    for path in (MODEL_PATH, ENCODER_PATH, SCALER_PATH):
        if os.path.exists(path):
            os.remove(path)

    # Retrain and persist a fresh model.
    train_model()
    return {"status": "success", "message": "模型重新训练成功"}

if __name__ == "__main__":
    # Command-line entry point: either retrain the model or run a prediction.
    parser = argparse.ArgumentParser(description='药品推荐系统')
    for flag, options in [
        ('--gender', {'help': '患者性别'}),
        ('--age', {'type': int, 'help': '患者年龄'}),
        ('--symptom', {'help': '患者症状'}),
        ('--allergy', {'help': '患者过敏史'}),
        ('--temperature', {'type': float, 'help': '患者体温'}),
        ('--retrain', {'action': 'store_true', 'help': '强制重新训练模型'}),
    ]:
        parser.add_argument(flag, **options)
    args = parser.parse_args()

    def _emit(payload):
        # Emit a JSON payload without escaping non-ASCII characters.
        print(json.dumps(payload, ensure_ascii=False), flush=True)

    if args.retrain:
        # Retrain request takes precedence over prediction.
        try:
            _emit(force_retrain())
        except Exception as e:
            _emit({"status": "error", "message": str(e)})
            sys.exit(1)
    elif all([args.gender, args.age is not None, args.symptom, args.allergy, args.temperature is not None]):
        # All prediction parameters are present: run the recommendation.
        print(f"收到请求参数: 性别={args.gender}, 年龄={args.age}, 症状={args.symptom}, 过敏史={args.allergy}, 体温={args.temperature}", flush=True)
        try:
            _emit(predict_medicine(args.gender, args.age, args.symptom, args.allergy, args.temperature))
        except Exception as e:
            _emit({"status": "error", "message": str(e)})
            sys.exit(1)
    else:
        # Neither a retrain request nor a complete set of prediction inputs.
        _emit({"status": "error", "message": "缺少必要的参数"})
        sys.exit(1)
