import jieba
import torch
import numpy as np
import re
from tqdm import tqdm
import pandas as pd
from transformers import XLNetTokenizer, XLNetModel
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os

#os.environ['CUDA_LAUNCH_BLOCKING'] = "1"

# BiLSTM classification head applied on top of pre-computed token embeddings.
class BiLSTMModel(nn.Module):
    """Bidirectional LSTM sequence classifier.

    Args:
        input_dim: dimensionality of each input token vector.
        hidden_dim: LSTM hidden state size per direction.
        output_dim: number of target classes.
        num_layers: number of stacked LSTM layers.

    forward() takes a (batch, seq_length, input_dim) tensor and returns
    raw class logits of shape (batch, output_dim).
    """
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(BiLSTMModel, self).__init__()
        self.hidden_dim = hidden_dim

        # Bidirectional LSTM over the input sequence.
        self.lstm = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True
        )

        # Classification head: bidirectional output concatenates both
        # directions, hence hidden_dim * 2 input features.
        self.fc1 = nn.Linear(hidden_dim * 2, output_dim)

    def forward(self, x):
        # lstm_out: (batch, seq_length, hidden_dim * 2)
        lstm_out, _ = self.lstm(x)

        # Keep only the last time step's representation: (batch, hidden_dim * 2)
        last_out = lstm_out[:, -1, :]

        # BUGFIX: return raw logits instead of softmax probabilities.
        # nn.CrossEntropyLoss (used by the training loop) applies
        # log-softmax internally, so applying softmax here double-normalized
        # the output and flattened the gradients. argmax-based accuracy is
        # unaffected by this change.
        return self.fc1(last_out)
    


# Preprocess text and tokenize with jieba.
# Translation table that deletes Chinese punctuation in one pass.
_PUNCT_TABLE = str.maketrans('', '', '！，。”“-？：')

def pre_text(text):
    """Strip punctuation from *text* and segment it with jieba.

    Returns a list of word tokens.
    """
    # str.translate removes every listed punctuation character in a single
    # C-level pass instead of eight chained .replace() calls.
    return jieba.lcut(text.translate(_PUNCT_TABLE))

# Translation table deleting Chinese punctuation, hoisted so the (dataset-wide)
# per-text calls do a single C-level pass instead of chained .replace() calls.
_CLEAN_TABLE = str.maketrans('', '', '！，。”“-？：')
# Pre-compiled whitespace-run pattern, hoisted out of the hot path.
_WS_RE = re.compile(r'\s+')

def pre_text_2(text):
    """Strip punctuation and collapse whitespace runs to single spaces.

    Returns the cleaned text with leading/trailing whitespace removed.
    """
    text = text.translate(_CLEAN_TABLE)
    return _WS_RE.sub(' ', text).strip()

# Remove the irrelevant leading number and extra spaces from a "病情描述" field.
def clean_description(description):
    """Return *description* without its leading digit run and outer whitespace."""
    # Match an optional run of digits (plus any spaces right after it) at the
    # very start; slice it off when present, then trim both ends.
    match = re.match(r'\d+\s*', description)
    remainder = description[match.end():] if match else description
    return remainder.strip()

# Read the CSV file and build parallel description/label lists.
def process_csv(input_csv, max_samples=None):
    """Read *input_csv* and return (descriptions, department_labels).

    Args:
        input_csv: path to a UTF-8 CSV containing at least the columns
            '科室' (department) and '病情描述' (case description).
        max_samples: per-department cap on rows kept; departments with more
            rows are down-sampled with a fixed seed for reproducibility.
            Defaults to the module-level ``max_samples_per_department``
            (backward compatible with the original global lookup).

    Returns:
        A (data, labels) tuple of equal-length lists, or None when the
        required columns are missing.
    """
    df = pd.read_csv(input_csv, encoding='utf-8')

    # Keep the original best-effort behavior: report and return None
    # when the required columns are absent.
    if '科室' not in df.columns or '病情描述' not in df.columns:
        print("CSV文件中没有'科室'或'病情描述'列，无法处理！")
        return None

    if max_samples is None:
        # Fall back to the script-level setting so existing calls behave
        # exactly as before.
        max_samples = max_samples_per_department

    data = []
    labels = []

    # sort=False keeps departments in first-occurrence order, matching the
    # original unique()-based iteration; within each group the original row
    # order is preserved, so the seed-42 positional sampling below selects
    # the same rows as the original index-list implementation.
    for department, group in df.groupby('科室', sort=False):
        if len(group) > max_samples:
            # Deterministic down-sampling so repeated runs are reproducible.
            group = group.sample(n=max_samples, random_state=42)
        descriptions = group['病情描述'].tolist()
        data.extend(descriptions)
        labels.extend([department] * len(descriptions))

    return data, labels

# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device:',device)

# The 25 standard hospital departments used as classification targets.
standard_departments = [
    "呼吸内科", "心血管内科", "消化内科", "内分泌科", "神经内科", 
    "普通外科", "心胸外科", "神经外科", "骨科", "泌尿外科", 
    "烧伤整形科", "妇科", "产科", "普通儿科", "新生儿科", 
    "眼科", "耳鼻喉科", "皮肤科", "精神心理科", "肿瘤科", 
    "感染科", "康复科", "中医科", "放射科", "急诊"
]

print(len(standard_departments))
# Map each department name to its one-hot numpy vector
# (the i-th row of a 25x25 identity matrix).
department_to_onehot = {dept: np.eye(len(standard_departments))[i] for i, dept in enumerate(standard_departments)}
print(department_to_onehot)

# Per-department cap on the number of training samples.
max_samples_per_department = 450  # at most 450 samples per department

# Load the raw (description, department) pairs from the CSV.
input_csv = '/home/tom/fsas/data/medical_cases_new.csv'  # input CSV file path
X, y = process_csv(input_csv)
x_data = [pre_text_2(x) for x in X]  # strip punctuation, collapse whitespace
y_data = [department_to_onehot[dep] for dep in y]  # one-hot label vectors

# Load the Chinese XLNet pre-trained model and its tokenizer.
tokenizer = XLNetTokenizer.from_pretrained('hfl/chinese-xlnet-base')
model_xlnet = XLNetModel.from_pretrained('hfl/chinese-xlnet-base')

model_xlnet = model_xlnet.to(device)

text_vectors = []
# The tokenizer converts each text into XLNet input tensors,
# padded/truncated to a fixed length of 512 tokens.

for text_x in tqdm(x_data[:]):
    inputs = tokenizer(
        text_x, padding='max_length', truncation=True, return_tensors='pt', max_length=512
    ).to(device)
    with torch.no_grad():
        # Embed with XLNet; inference only, so no gradients are tracked.
        outputs = model_xlnet(**inputs)
        # Last hidden layer, shape [batch_size, sequence_length, hidden_size]
        last_hidden_state = outputs.last_hidden_state
        #print(last_hidden_state.shape)
        # Drop the batch dimension -> [sequence_length, hidden_size]
        token_embeddings = torch.squeeze(last_hidden_state, dim=0)
        #token_embeddings = last_hidden_state

        # Move to CPU and store as numpy so GPU memory is freed per text.
        text_vectors.append(token_embeddings.cpu().detach().numpy())

# Stack into arrays: labels (N, 25) and embeddings (N, 512, 768).
y_data_new = np.array(y_data)
text_vectors = np.array(text_vectors)

#torch.save(y_data_new, '/home/tom/fsas/results/bert/y_data_new.npy')
#torch.save(text_vectors, '/home/tom/fsas/results/bert/text_vectors.npy')
#print('Save embedding successfully.')

print(text_vectors.shape)
print(y_data_new.shape)

# Wrap the numpy arrays as float32 tensors for training.
text_vectors = torch.tensor(text_vectors, dtype=torch.float32)
y_data_new = torch.tensor(y_data_new, dtype=torch.float32)

dataset = TensorDataset(text_vectors, y_data_new)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)

# Model/data dimensions.
input_dim = 768    # per-token vector dimension (XLNet hidden size)
seq_length = 512     # sequence length (tokens per sample)
num_classes = 25   # number of department classes
hidden_dim = 128
num_layers = 2
model = BiLSTMModel(input_dim, hidden_dim, num_classes, num_layers).to(device)

# Loss function and optimizer.
# NOTE(review): the loop below passes one-hot float targets to
# CrossEntropyLoss, which requires PyTorch >= 1.10 — confirm the runtime
# version supports probability targets.
criterion = nn.CrossEntropyLoss()  # multi-class classification objective
optimizer = optim.Adam(model.parameters(), lr=0.005)

# Training loop with checkpointing on best mean epoch loss and
# loss-based early stopping.
num_epochs = 800
best_loss = 1000.0  # best (lowest) mean epoch loss seen so far
step = 0            # epochs since the last improvement
patience = 50       # early-stopping patience, in epochs
loss_list = []
acc_list = []
for epoch in tqdm(range(num_epochs)):
    model.train()
    epoch_loss = 0.0
    epoch_acc = 0.0
    for batch_data, batch_labels in dataloader:
        
        batch_data = batch_data.to(device)
        batch_labels = batch_labels.to(device)
        
        # Forward pass
        outputs = model(batch_data)
        
        # Compute the loss
        loss = criterion(outputs, batch_labels)

        # 1. Predicted class per sample = index of the max model output.
        _, predicted = torch.max(outputs, dim=1)

        # 2. Batch accuracy
        correct = (predicted == torch.argmax(batch_labels, dim=1)).float()  # 1 where prediction matches the one-hot label, else 0
        accuracy = correct.sum() / len(correct)  # accuracy = correct count / batch size
        
        
        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        epoch_loss += loss.item()
        epoch_acc += accuracy.item()
    
    # Mean loss/accuracy over all batches of this epoch.
    final_loss = epoch_loss/len(dataloader)
    final_acc = epoch_acc/len(dataloader)
    loss_list.append(final_loss)
    acc_list.append(final_acc)
    print('正确率：', epoch_acc/len(dataloader))

    # Checkpoint whenever the mean epoch loss improves; otherwise count
    # toward the early-stopping patience.
    if best_loss > epoch_loss/len(dataloader):
        best_loss = epoch_loss/len(dataloader)
        step = 0
        print('best loss to save:'+str(epoch_loss/len(dataloader)))
        # Save model weights plus the loss/accuracy curves so far.
        torch.save(model.state_dict(), "/home/tom/fsas/results/XLNET/bilstm_model_xlnet.pth")
        np.save('/home/tom/fsas/results/XLNET/loss_XLNet.npy', loss_list)
        np.save('/home/tom/fsas/results/XLNET/acc_XLNet.npy', acc_list)
    else:
        step += 1
        if step > patience:
            print('模型没有变得更好，暂停。')
            break
    print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss/len(dataloader):.4f}")
