import torch
import torch.nn as nn
import os
import sys
import time
from pathlib import Path
from transformers import BertModel, BertTokenizer
import requests
import hashlib


class OptimizedTCMBert(nn.Module):
    """Lightweight BERT classifier for TCM syndrome classification.

    Encodes clinical text with BERT, projects the pooled output down to 128
    dims, optionally fuses a 30-dim structured-feature vector (projected to
    32 dims), and classifies with a single linear head.

    Args:
        num_classes: Number of output classes (syndrome types).
        model_path: Local path (or hub name) passed to ``BertModel.from_pretrained``.
        dropout_rate: Dropout applied before the final linear layer.
        use_structured: Whether the model fuses structured features at all.
    """

    def __init__(self, num_classes, model_path, dropout_rate=0.1, use_structured=True):
        super().__init__()

        # Load the pretrained encoder from a local/absolute path.
        self.bert = BertModel.from_pretrained(model_path)

        # Project 768-dim pooled BERT output to a smaller feature space.
        self.text_proj = nn.Sequential(
            nn.Linear(768, 128),
            nn.ReLU()
        )

        self.use_structured = use_structured
        if use_structured:
            # 30 structured inputs -> 32-dim feature vector.
            self.struct_proj = nn.Sequential(
                nn.Linear(30, 32),
                nn.ReLU()
            )
            fusion_dim = 128 + 32
        else:
            fusion_dim = 128

        # Simple classification head: dropout + one linear layer.
        self.classifier = nn.Sequential(
            nn.Dropout(dropout_rate),
            nn.Linear(fusion_dim, num_classes)
        )

    def forward(self, input_ids, attention_mask, struct_features=None):
        """Return classification logits of shape [batch_size, num_classes].

        ``struct_features`` (shape [batch_size, 30]) is only used when the
        model was built with ``use_structured=True``; if it is None in that
        case, a zero vector stands in for the projected structured features.
        """
        # BERT encoding; pooler_output is [batch_size, 768].
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        text_feat = self.text_proj(outputs.pooler_output)  # [batch_size, 128]

        if self.use_structured:
            if struct_features is not None:
                struct_feat = self.struct_proj(struct_features)  # [batch_size, 32]
            else:
                # No structured input supplied: substitute zeros so the
                # classifier still sees its expected 160-dim input.
                struct_feat = torch.zeros(
                    text_feat.size(0), 32,
                    device=text_feat.device, dtype=text_feat.dtype
                )
            combined = torch.cat([text_feat, struct_feat], dim=1)  # [batch_size, 160]
        else:
            # BUG FIX: the original code zero-padded to 160 dims here even
            # though the classifier was built with fusion_dim=128, causing a
            # shape mismatch at runtime. With use_structured=False the text
            # features alone match the classifier's input size.
            combined = text_feat  # [batch_size, 128]

        return self.classifier(combined)


def download_with_retry(url, save_path, max_retries=5, timeout=30):
    """Download ``url`` to ``save_path`` with retries and exponential backoff.

    Args:
        url: Remote file URL.
        save_path: Local destination path; parent directories are created.
        max_retries: Total number of attempts before giving up.
        timeout: Per-request timeout in seconds.

    Returns:
        True on success, False if all attempts failed.
    """
    # Browser-like UA: some mirrors reject default library user agents.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }

    for attempt in range(max_retries):
        try:
            print(f"尝试下载 {url} (尝试 {attempt + 1}/{max_retries})")
            # Context manager ensures the streamed connection is closed even
            # when raise_for_status() or a mid-stream read fails (the
            # original leaked the response on error paths).
            with requests.get(url, stream=True, headers=headers, timeout=timeout) as response:
                response.raise_for_status()

                # BUG FIX: os.path.dirname() is "" for a bare filename, and
                # os.makedirs("") raises — only create a directory when the
                # path actually has a parent component.
                parent_dir = os.path.dirname(save_path)
                if parent_dir:
                    os.makedirs(parent_dir, exist_ok=True)

                # Stream to disk in 8 KiB chunks to bound memory use.
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)

            print(f"文件已保存到: {save_path}")
            return True
        except (requests.exceptions.RequestException, IOError) as e:
            print(f"下载失败: {e}")
            # BUG FIX: don't sleep after the final attempt — the original
            # waited up to 2**(max_retries-1) seconds before reporting failure.
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # exponential backoff

    print(f"所有 {max_retries} 次尝试均失败")
    return False


def download_bert_model(model_name="bert-base-chinese"):
    """Download the BERT model files into ``./<model_name>`` and return that dir.

    Files already present (and non-empty) are skipped. A failed download of
    any single file is reported but does not abort the remaining downloads;
    use ``verify_model_files`` afterwards to confirm completeness.
    """
    # Create the local model directory next to the working directory.
    save_dir = os.path.join(os.getcwd(), model_name)
    os.makedirs(save_dir, exist_ok=True)
    print(f"模型将保存到: {save_dir}")

    # Files required by BertModel / BertTokenizer.
    files_to_download = [
        "config.json",
        "pytorch_model.bin",
        "tokenizer_config.json",
        "vocab.txt"
    ]

    # Hugging Face mirror base URL.
    # NOTE(review): this URL is hard-coded to bert-base-chinese, so passing a
    # different model_name would save the wrong files under that directory
    # name — confirm intended scope before generalizing.
    base_url = "https://hf-mirror.com/google-bert/bert-base-chinese/resolve/main/"

    for file_name in files_to_download:
        file_url = base_url + file_name
        save_path = os.path.join(save_dir, file_name)

        # Skip files that already exist and are non-empty.
        if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
            print(f"文件已存在: {save_path}")
            continue

        if not download_with_retry(file_url, save_path):
            print(f"关键文件 {file_name} 下载失败，模型可能无法使用")

    return save_dir


def verify_model_files(model_dir):
    """Return True when every required model file in ``model_dir`` exists
    and is non-empty; otherwise print a warning listing the missing files
    and return False.
    """
    required = (
        "config.json",
        "pytorch_model.bin",
        "tokenizer_config.json",
        "vocab.txt",
    )

    def _is_missing(name):
        # A file counts as missing if it is absent or zero bytes.
        path = os.path.join(model_dir, name)
        return not os.path.exists(path) or os.path.getsize(path) == 0

    missing_files = [name for name in required if _is_missing(name)]

    if not missing_files:
        return True

    print(f"警告: 模型文件不完整，缺失文件: {', '.join(missing_files)}")
    return False


if __name__ == "__main__":
    # 1. Download the model files (skipping any already present) and verify
    #    the directory is complete before attempting to load.
    model_name = "bert-base-chinese"
    model_dir = download_bert_model(model_name)

    if not verify_model_files(model_dir):
        print("模型文件不完整，请检查网络连接后重试")
        sys.exit(1)

    print(f"使用模型路径: {model_dir}")

    # 2. Prefer GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # 3. Build the model in eval mode (inference-only smoke test).
    try:
        model = OptimizedTCMBert(
            num_classes=10,
            model_path=model_dir,
            use_structured=True
        ).to(device)
        model.eval()
        print("模型加载成功!")
    except Exception as e:
        print(f"模型加载失败: {e}")
        sys.exit(1)

    # 4. Load the tokenizer from the same local directory.
    try:
        tokenizer = BertTokenizer.from_pretrained(model_dir)
        print("Tokenizer 加载成功!")
    except Exception as e:
        print(f"Tokenizer 加载失败: {e}")
        sys.exit(1)

    # 5. Inference smoke test: one clinical-text sample duplicated into a
    #    batch of 2, plus random placeholders for the 30 structured features.
    text = ["患者主诉：头晕目眩，心悸失眠，面色苍白，舌质淡，脉细弱。辨证为气血两虚证。"] * 2
    inputs = tokenizer(
        text,
        return_tensors='pt',
        padding=True,
        truncation=True,
        max_length=128
    )
    struct_feats = torch.randn(2, 30)

    # Move inputs to the selected device.
    inputs = {k: v.to(device) for k, v in inputs.items()}
    struct_feats = struct_feats.to(device)

    # 6. Time a single forward pass under no_grad.
    start_time = time.time()
    with torch.no_grad():
        outputs = model(
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            struct_features=struct_feats
        )
    elapsed = time.time() - start_time
    print(f"\n推理完成! 耗时: {elapsed:.4f}s | 批量大小: {len(text)} | 平均每样本: {elapsed / len(text):.4f}s")

    # 7. Parameter counts.
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"总参数: {total / 1e6:.1f}M | 可训练参数: {trainable / 1e6:.1f}M")

    # 8. Display per-class probabilities.
    # BUG FIX: the model outputs raw logits; the original printed them
    # directly while labeling them as prediction probabilities. Apply
    # softmax so the displayed values are actual class probabilities.
    probs = torch.softmax(outputs, dim=1)
    print("\n预测结果示例:")
    for i, prob in enumerate(probs.cpu().numpy()):
        print(f"样本 {i + 1}:")
        for j, p in enumerate(prob):
            print(f"  证型 {j}: {p:.4f}")