import jieba
import torch
import re
import os
import numpy as np
from tqdm import tqdm
import pandas as pd
from paddlenlp import Taskflow
from transformers import XLNetTokenizer, XLNetModel, BertTokenizer, BertModel
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


# BiLSTM network for sequence classification
class BiLSTMModel(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Encodes a (batch, seq_len, input_dim) tensor with a multi-layer
    BiLSTM, keeps only the final time step's features, and maps them
    through a single linear layer plus softmax to class probabilities.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(BiLSTMModel, self).__init__()
        self.hidden_dim = hidden_dim

        # batch_first=True -> inputs/outputs are (batch, seq_len, features).
        self.lstm = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )

        # Forward and backward hidden states are concatenated, hence * 2.
        self.fc1 = nn.Linear(hidden_dim * 2, output_dim)

    def forward(self, x):
        """Return class probabilities of shape (batch, output_dim)."""
        features, _ = self.lstm(x)       # (batch, seq_len, hidden_dim * 2)
        final_step = features[:, -1, :]  # last time step only

        # NOTE(review): softmax is applied here, so the output is already a
        # probability distribution; if training uses nn.CrossEntropyLoss this
        # double-applies softmax — confirm against the training code.
        return F.softmax(self.fc1(final_step), dim=-1)


# Text preprocessing and word segmentation
def pre_text(text):
    """Remove common Chinese punctuation from *text* and tokenize it with jieba.

    Returns a list of word tokens (``jieba.lcut`` output).
    """
    # str.translate removes all listed characters in a single C-level pass,
    # replacing the previous chain of eight str.replace() calls.
    text = text.translate(str.maketrans('', '', '！，。”“-？：'))
    return jieba.lcut(text)

def pre_text_2(text):
    """Remove common Chinese punctuation and normalize whitespace.

    Runs of whitespace are collapsed into single spaces and the result is
    stripped at both ends. Returns the cleaned string.
    """
    # One-pass removal of the punctuation set (was a chain of .replace calls).
    text = text.translate(str.maketrans('', '', '！，。”“-？：'))
    return re.sub(r'\s+', ' ', text).strip()

# Clean leading index numbers and extra whitespace out of a "condition description" field
def clean_description(description):
    """Drop a leading run of digits (plus trailing spaces) and trim the ends.

    Descriptions sometimes arrive prefixed with a row index such as
    ``"12 发热三天"``; this strips that prefix and surrounding whitespace.
    """
    # \A anchors at the start of the whole string (no MULTILINE in effect).
    no_leading_digits = re.sub(r'\A\d+\s*', '', description)
    return no_leading_digits.strip()

def get_medical_result(text):
    """Run PaddleNLP's medical NER pipeline on *text* and return the entity
    words whose label belongs to the categories this project cares about.

    NOTE(review): a Taskflow pipeline is constructed on every call, which is
    expensive; consider caching it at module level if this is called in a loop.
    """
    ner = Taskflow("ner", model="medical")
    results = ner(text)

    # Keep only entities from the relevant categories; a set gives O(1)
    # membership tests (was a list scanned per entity inside an append loop).
    keep_labels = {'疾病损伤类', '术语类_生物体', '个性特征', '物体类_概念', '场景事件'}
    return [item[0] for item in results if item[1] in keep_labels]

def get_hosipital_gpt(model_path='weights/bilstm_model_xlnet_new.pth'):
    """Load the word-embedding backbone and the trained BiLSTM classifier.

    The embedding model is picked by substring match on *model_path*
    ('xlnet' / 'RoBerta' / 'bert'); the BiLSTM classification head is then
    restored from the checkpoint at *model_path*.

    Returns:
        (tokenizer, model_word, model) — the embedding tokenizer, the
        embedding model, and the BiLSTM classifier moved to the best device.

    Raises:
        ValueError: if *model_path* matches none of the known backbones.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if 'xlnet' in model_path:
        # Chinese XLNet pretrained model and tokenizer.
        print('加载XLNet词嵌入。')
        tokenizer = XLNetTokenizer.from_pretrained('hfl/chinese-xlnet-base')
        model_word = XLNetModel.from_pretrained('hfl/chinese-xlnet-base')
        print('加载成功。')
    elif 'RoBerta' in model_path:
        # Chinese RoBERTa (whole-word-masking) pretrained model.
        print('加载RoBerta词嵌入。')
        tokenizer = BertTokenizer.from_pretrained('hfl/chinese-roberta-wwm-ext')
        model_word = BertModel.from_pretrained('hfl/chinese-roberta-wwm-ext')
        print('加载成功。')
    elif 'bert' in model_path:
        # Vanilla Chinese BERT pretrained model.
        print('加载bert词嵌入。')
        tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        model_word = BertModel.from_pretrained('bert-base-chinese')
        print('加载成功。')
    else:
        # BUG FIX: previously this branch only printed a warning, leaving
        # tokenizer/model_word unbound and crashing with NameError at the
        # return statement. Fail fast with a clear exception instead.
        raise ValueError('请选择正确的权重。')

    # Build the classification head; dimensions must match the checkpoint.
    input_dim = 768     # embedding vector size (unused seq_length removed)
    num_classes = 25    # number of target classes
    hidden_dim = 128
    num_layers = 2
    model = BiLSTMModel(input_dim, hidden_dim, num_classes, num_layers)
    # device is already a torch.device; no need to wrap it again.
    # NOTE(review): torch.load unpickles arbitrary data — only load trusted
    # checkpoints (or pass weights_only=True on torch >= 1.13).
    state_dict = torch.load(model_path, map_location=device)
    model.load_state_dict(state_dict)
    model = model.to(device)
    # Inference-only path: switch off training mode (no-op for this
    # architecture, which has neither dropout nor batch norm).
    model.eval()

    return tokenizer, model_word, model