import numpy as np
import torch
import json
from tqdm import tqdm
from model_design import OptimizedTCMBert
from transformers import BertTokenizer
import os


def _fill_default_syndrome_mappings(num_classes, syndrome_to_id, id_to_syndrome, syndrome_details):
    """Add placeholder entries (证型{i}) for any class id missing from the mappings, in place."""
    for i in range(num_classes):
        if i not in id_to_syndrome:
            id_to_syndrome[i] = f"证型{i}"
            syndrome_to_id[f"证型{i}"] = i
            syndrome_details[i] = {"Name": f"证型{i}", "id": i}


def load_model_and_tokenizer(model_path='best_tcm_model.pth', bert_model_path='bert-base-chinese'):
    """Load the trained model and tokenizer; supports a non-standard JSON format.

    The syndrome mapping file ``syndrome.json`` is expected to contain one JSON
    object per line (JSON-lines), each with at least ``id`` and ``Name`` fields.

    Args:
        model_path: Path to the saved state dict of the trained model.
        bert_model_path: HuggingFace model name or local path for the BERT base.

    Returns:
        Tuple of (model, tokenizer, id_to_syndrome, syndrome_details), where
        id_to_syndrome maps class id -> syndrome name and syndrome_details maps
        class id -> the full JSON object for that syndrome.
    """
    # Load the checkpoint to infer the number of classes from the classifier
    # head's weight shape. Keep the checkpoint around for load_state_dict below;
    # None signals that loading failed (avoids a NameError later).
    checkpoint = None
    try:
        checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
        num_classes = checkpoint['classifier.1.weight'].shape[0]
        print(f"从模型权重中检测到类别数量: {num_classes}")
    except Exception as e:
        print(f"无法从模型权重中获取类别数量: {e}")
        num_classes = 10  # fallback default

    # Syndrome mappings, possibly filled from syndrome.json below.
    syndrome_to_id = {}
    id_to_syndrome = {}
    syndrome_details = {}

    # Try to load the syndrome mapping (one JSON object per line).
    try:
        if os.path.exists('syndrome.json'):
            with open('syndrome.json', 'r', encoding='utf-8') as f:
                for line_num, line in enumerate(f):
                    line = line.strip()
                    if not line:
                        continue

                    try:
                        obj = json.loads(line)
                        syndrome_id = obj.get("id")
                        syndrome_name = obj.get("Name")

                        if syndrome_id is not None and syndrome_name is not None:
                            syndrome_to_id[syndrome_name] = syndrome_id
                            id_to_syndrome[syndrome_id] = syndrome_name
                            syndrome_details[syndrome_id] = obj
                        else:
                            print(f"警告: 行 {line_num + 1} 缺少Name或id字段，跳过")

                    except json.JSONDecodeError as e:
                        print(f"解析行 {line_num + 1} 时出错: {e}")
                        print(f"行内容: {line[:100]}...")

            print(f"从syndrome.json加载了 {len(syndrome_to_id)} 个证型")

            # Verify the mapping covers all model classes; pad with placeholders.
            if len(syndrome_to_id) != num_classes:
                print(f"警告: 证型映射中的类别数量({len(syndrome_to_id)})与模型权重中的类别数量({num_classes})不匹配")
                _fill_default_syndrome_mappings(num_classes, syndrome_to_id, id_to_syndrome, syndrome_details)
        else:
            print("警告: syndrome.json文件不存在，使用默认类别")
            _fill_default_syndrome_mappings(num_classes, syndrome_to_id, id_to_syndrome, syndrome_details)

    except Exception as e:
        print(f"加载证型映射失败，使用默认类别: {e}")
        _fill_default_syndrome_mappings(num_classes, syndrome_to_id, id_to_syndrome, syndrome_details)

    # Initialize the model architecture.
    model = OptimizedTCMBert(
        num_classes=num_classes,
        model_path=bert_model_path,
        use_structured=True
    )

    # Load the trained weights; fall back to random initialization on failure.
    if checkpoint is not None:
        try:
            model.load_state_dict(checkpoint)
            print(f"成功加载模型权重: {model_path}")
        except Exception as e:
            print(f"模型权重加载失败: {e}")
            print("使用随机初始化模型")
    else:
        print("模型权重加载失败: 检查点不可用")
        print("使用随机初始化模型")

    model.eval()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # Load the tokenizer matching the BERT base model.
    tokenizer = BertTokenizer.from_pretrained(bert_model_path)

    return model, tokenizer, id_to_syndrome, syndrome_details


def preprocess_input_text(text, chief_complaint="", description="", detection=""):
    """Normalize a patient description into one combined text string.

    When none of the structured fields are supplied, tries to split *text* on
    the section markers 主诉：(chief complaint), 现病史：(history of present
    illness) and 四诊信息：(four-diagnostics information).

    Args:
        text: Raw input text, possibly containing the section markers.
        chief_complaint: Pre-split chief-complaint text, if available.
        description: Pre-split present-illness text, if available.
        detection: Pre-split four-diagnostics text, if available.

    Returns:
        The three parts joined with single spaces, stripped at both ends.
    """
    if not chief_complaint and not description and not detection:
        if "主诉：" in text:
            remainder = text.split("主诉：", 1)[1]
            chief_complaint, sep, remainder = remainder.partition("现病史：")
            if sep:
                description, _, detection = remainder.partition("四诊信息：")
            else:
                # No 现病史 section: pull a trailing 四诊信息 block out of the
                # chief complaint so it is not duplicated in the combined text
                # (the original code left it in both fields).
                chief_complaint, _, detection = chief_complaint.partition("四诊信息：")
            chief_complaint = chief_complaint.strip()
            description = description.strip()
            detection = detection.strip()
        else:
            # No markers at all: treat the whole text as the description.
            description = text

    combined_text = f"{chief_complaint} {description} {detection}".strip()
    return combined_text


def extract_structured_features(text, feature_names=None):
    """Extract binary structured TCM features from text (padded to 30 dims).

    Each feature name has the form ``<category>_<keyword>``; the feature is 1
    when the keyword appears anywhere in *text*, otherwise 0.

    Args:
        text: Patient description text to scan for keywords.
        feature_names: Optional custom feature list; defaults to the built-in
            30-dimensional TCM feature set.

    Returns:
        List of 0/1 ints, one per (unique) feature name, in first-seen order.
    """
    if feature_names is None:
        feature_names = [
            'symptom_头晕', 'symptom_乏力', 'symptom_咳嗽', 'symptom_发热', 'symptom_呕吐',
            'tongue_舌红', 'tongue_舌淡', 'pulse_脉细', 'pulse_脉弦',
            'symptom_口干', 'symptom_口苦', 'symptom_失眠', 'symptom_盗汗',
            'tongue_苔黄', 'tongue_苔白', 'pulse_脉数', 'pulse_脉缓',
            'symptom_胸痛', 'symptom_腹痛', 'symptom_腹泻', 'symptom_便秘',
            'symptom_头痛', 'symptom_腰痛', 'symptom_胸闷', 'symptom_气短',
            'tongue_舌紫', 'tongue_舌暗', 'pulse_脉涩', 'pulse_脉滑',
            'symptom_纳呆'  # 30th feature, keeps the dimension aligned with the model
        ]

    # Dict comprehension keeps the original dedup-by-name semantics.
    flags = {name: int(name.split('_')[1] in text) for name in feature_names}
    return list(flags.values())


def predict_syndrome(text, model, tokenizer, id_to_syndrome, syndrome_details, max_length=128, return_prob=False):
    """Run TCM syndrome prediction on a single input text.

    Args:
        text: Raw patient description (may contain 主诉/现病史/四诊信息 markers).
        model: Trained classifier; called with input_ids / attention_mask /
            struct_features and returning per-class logits.
        tokenizer: HuggingFace tokenizer providing ``encode_plus``.
        id_to_syndrome: Mapping of class id -> syndrome name.
        syndrome_details: Mapping of class id -> syndrome detail dict.
        max_length: Maximum token length for the encoder.
        return_prob: When True, include all per-syndrome probabilities.

    Returns:
        Result dict with predicted syndrome, id, confidence, and optionally
        detail fields and the full probability distribution. All values are
        plain Python types (JSON-serializable).
    """
    device = next(model.parameters()).device  # run on the model's device

    # Preprocess the raw text into a single combined string.
    processed_text = preprocess_input_text(text)

    # Extract the 30-dim binary structured-feature vector.
    structured_features = extract_structured_features(processed_text)
    structured_tensor = torch.tensor(structured_features, dtype=torch.float32).unsqueeze(0).to(device)

    # Encode the text for BERT.
    encoding = tokenizer.encode_plus(
        processed_text,
        add_special_tokens=True,
        max_length=max_length,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        return_tensors='pt'
    )

    input_ids = encoding['input_ids'].to(device)
    attention_mask = encoding['attention_mask'].to(device)

    # Inference.
    with torch.no_grad():
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            struct_features=structured_tensor
        )
        probs = torch.softmax(outputs, dim=1).cpu().numpy()[0]

    # Cast np.int64 -> int so the result is JSON-serializable and matches the
    # int keys used by the syndrome mappings (str(pred_id) never matched them).
    pred_id = int(np.argmax(probs))
    confidence = probs[pred_id]
    pred_syndrome = id_to_syndrome.get(pred_id, f"未知证型_{pred_id}")

    result = {
        "输入文本": text,
        "预测证型": pred_syndrome,
        "证型ID": pred_id,
        "置信度": float(confidence),
    }

    # Attach detail info when available; fall back to a string key in case the
    # mapping was loaded with string ids.
    details = syndrome_details.get(pred_id, syndrome_details.get(str(pred_id)))
    if details is not None:
        result["证型详情"] = {
            "定义": details.get("Definition", "暂无定义"),
            "典型表现": details.get("Typical_performance", "暂无典型表现"),
            "常见疾病": details.get("Common_disease", "暂无常见疾病"),
        }

    if return_prob:
        result["各证型概率"] = {
            id_to_syndrome.get(i, f"未知证型_{i}"): float(probs[i]) for i in range(len(probs))
        }

    return result


def batch_predict(json_file, model, tokenizer, id_to_syndrome, syndrome_details, output_file="predictions.json"):
    """Batch-predict syndromes for every patient record in a JSON file.

    Args:
        json_file: Path to a JSON file containing a list of record dicts with
            optional "description", "detection", "chief_complaint", "user_id".
        model, tokenizer, id_to_syndrome, syndrome_details: As returned by
            ``load_model_and_tokenizer``.
        output_file: Where the prediction list is written (UTF-8 JSON).

    Returns:
        List of {"user_id", "预测结果"} dicts; empty list on load/parse error.
    """
    if not os.path.exists(json_file):
        print(f"错误：JSON文件不存在: {json_file}")
        return []

    # Load the input records.
    with open(json_file, 'r', encoding='utf-8') as f:
        try:
            data = json.load(f)
        except json.JSONDecodeError as e:
            print(f"JSON解析失败: {e}")
            return []

    # Predict record by record.
    predictions = []
    for item in tqdm(data, desc="预测中"):
        # Strip so the chief-complaint fallback fires when both fields are
        # empty (the bare " " join was always truthy before).
        text = (item.get("description", "") + " " + item.get("detection", "")).strip()
        if not text:
            text = item.get("chief_complaint", "")
        result = predict_syndrome(text, model, tokenizer, id_to_syndrome, syndrome_details)
        predictions.append({
            "user_id": item.get("user_id", f"user_{len(predictions)}"),
            "预测结果": result
        })

    # Persist the results.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(predictions, f, ensure_ascii=False, indent=2)

    print(f"批量预测完成，结果已保存至{output_file}")
    return predictions


# 示例使用
def _print_syndrome_details(result):
    """Print the optional 证型详情 section of a prediction result, if present."""
    if "证型详情" in result:
        print("\n证型详情:")
        for key, value in result["证型详情"].items():
            print(f"  {key}: {value}")


# Example usage
if __name__ == "__main__":
    try:
        # Load model and tokenizer (note the extra syndrome_details return value).
        model, tokenizer, id_to_syndrome, syndrome_details = load_model_and_tokenizer(
            model_path='best_tcm_model.pth',
            bert_model_path='bert-base-chinese'
        )
        print("模型和分词器加载成功")

        # Example 1: plain free-text prediction with per-class probabilities.
        sample_text = "患者头晕乏力，舌淡苔白，脉细无力，时有出血症状"
        result = predict_syndrome(sample_text, model, tokenizer, id_to_syndrome, syndrome_details, return_prob=True)
        print("示例1预测结果:")
        print(f"预测证型: {result['预测证型']}, 置信度: {result['置信度']:.4f}")

        _print_syndrome_details(result)

        # Show the highest-probability syndromes.
        if "各证型概率" in result:
            print("\n高概率证型:")
            ranked = sorted(result["各证型概率"].items(), key=lambda kv: kv[1], reverse=True)
            for syndrome, prob in ranked[:5]:
                if prob > 0.01:  # only show syndromes above 1% probability
                    print(f"  {syndrome}: {prob:.4f}")

        # Example 2: structured text with 主诉/现病史/四诊信息 sections.
        complex_text = "主诉：反复出血1月余。现病史：患者1月前无明显诱因出现牙龈出血，伴神疲乏力，食欲不振，大便溏薄，舌淡苔白，脉细弱。四诊信息：神志清楚，精神萎靡，面色萎黄，爪甲淡白，舌淡胖有齿痕，苔白腻，脉细弱。"
        result = predict_syndrome(complex_text, model, tokenizer, id_to_syndrome, syndrome_details)
        print("\n示例2预测结果:")
        print(f"预测证型: {result['预测证型']}, 置信度: {result['置信度']:.4f}")

        _print_syndrome_details(result)

    except Exception as e:
        print(f"推理过程中发生错误: {e}")