import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report

# Configure matplotlib so Chinese text renders correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei supplies the CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering with a CJK font

# 1. Load the query/relevance dataset from CSV.
data = pd.read_csv('data/queries.csv')

# 2. Encode relevance labels as consecutive integers 0..n_classes-1.
label_encoder = LabelEncoder()
data['relevance'] = label_encoder.fit_transform(data['relevance'])

# Human-readable class names, in encoded (sorted) order.
# Stringify explicitly: the original `classes_.tolist()` kept the source
# dtype (e.g. ints), but downstream consumers such as
# classification_report(target_names=...) require a list of strings.
labels = [str(c) for c in label_encoder.classes_]

# 3. Train/test split: hold out 20% of the queries for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    data['query'], data['relevance'],
    test_size=0.2, random_state=42,
)

# 4. Text vectorization. The 5000-word vocabulary is fitted on the training
# split only, so no test-set tokens leak into the vocabulary.
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(X_train)
X_train_seq = tokenizer.texts_to_sequences(X_train)
X_test_seq = tokenizer.texts_to_sequences(X_test)

# Pad all sequences to the longest training sequence so the model sees
# fixed-size input; zeros are appended at the end ('post').
max_length = max(map(len, X_train_seq))
X_train_pad = pad_sequences(X_train_seq, maxlen=max_length, padding='post')
X_test_pad = pad_sequences(X_test_seq, maxlen=max_length, padding='post')

# 5. Network definition: embedding -> two stacked LSTMs with dropout -> softmax.
model = Sequential([
    # input_dim matches the Tokenizer's num_words=5000 vocabulary cap.
    Embedding(input_dim=5000, output_dim=128, input_length=max_length),
    LSTM(128, return_sequences=True),  # first recurrent layer keeps the full sequence
    Dropout(0.5),                      # regularization against overfitting
    LSTM(64),                          # second layer collapses the sequence to a vector
    Dropout(0.5),
    Dense(len(labels), activation='softmax'),  # one probability per encoded class
])

# 6. Compile. Sparse loss because targets are integer-encoded, not one-hot.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# 7. Train the model, timing the whole run.
start_time = time.time()
history = model.fit(X_train_pad, y_train, epochs=10, batch_size=32,
                    validation_split=0.2)

# 8. Evaluate on the held-out test split.
loss, accuracy = model.evaluate(X_test_pad, y_test)

# 9. Print a per-epoch summary followed by the final test metrics.
print('训练完成！')
hist = history.history
epoch_metrics = zip(hist['accuracy'], hist['loss'],
                    hist['val_accuracy'], hist['val_loss'])
for epoch, (train_acc, train_loss, val_acc, val_loss) in enumerate(epoch_metrics, start=1):
    print(f"第 {epoch} 轮训练:")
    print(f"  训练准确率: {train_acc:.4f}")
    print(f"  训练损失: {train_loss:.4f}")
    print(f"  验证准确率: {val_acc:.4f}")
    print(f"  验证损失: {val_loss:.4f}")
    print("")

print(f'测试集准确率: {accuracy:.2f}')
print(f'测试集损失: {loss:.4f}')
print(f'总训练时间: {time.time() - start_time:.2f} 秒')

# 10. Persist the trained model in the native Keras format.
model.save('medical_query_model.keras')
print("模型已保存。")

# 11. Confusion matrix and per-class report on the test split.
y_pred = np.argmax(model.predict(X_test_pad), axis=-1)  # class index with highest softmax probability
# Pass the full encoded class range so rows/columns always line up with
# `labels`, even when some class never appears in y_test or y_pred.
cm = confusion_matrix(y_test, y_pred, labels=np.arange(len(labels)))

print("混淆矩阵：")
print(cm)

# classification_report requires target_names to match the label set it
# reports on; without an explicit `labels=` it infers the set from the data
# and raises ValueError when a class is missing from y_test/y_pred. Pin the
# label set explicitly and stringify the names (LabelEncoder classes may be
# non-string, e.g. ints).
if len(np.unique(y_test)) > 1 and len(np.unique(y_pred)) > 1:
    print("\n分类报告：")
    print(classification_report(
        y_test, y_pred,
        labels=np.arange(len(labels)),
        target_names=[str(c) for c in labels],
        zero_division=0,
    ))
else:
    print("只有一个类别，无法生成分类报告。")

# 12. Learning curves: accuracy and loss per epoch, training vs. validation.
hist = history.history
curve_specs = [
    # (subplot position, train key, val key, train label, val label, title, y label)
    (1, 'accuracy', 'val_accuracy', '训练准确率', '验证准确率', '准确率随时间变化', '准确率'),
    (2, 'loss', 'val_loss', '训练损失', '验证损失', '损失随时间变化', '损失'),
]

plt.figure(figsize=(12, 4))
for pos, train_key, val_key, train_label, val_label, title, y_label in curve_specs:
    plt.subplot(1, 2, pos)
    plt.plot(hist[train_key], label=train_label)
    plt.plot(hist[val_key], label=val_label)
    plt.title(title)
    plt.xlabel('轮次')
    plt.ylabel(y_label)
    plt.xticks(range(len(hist[train_key])))  # one integer tick per epoch
    plt.legend()

plt.tight_layout()
plt.show()

# 13. Predicting new data.
def predict_new_queries(queries):
    """Predict and print a relevance label for each query string in *queries*.

    Uses the module-level ``tokenizer``, ``model``, ``max_length`` and
    ``labels`` fitted/trained above.
    """
    padded = pad_sequences(
        tokenizer.texts_to_sequences(queries),  # text -> integer sequences
        maxlen=max_length,
        padding='post',
    )
    for query, probs in zip(queries, model.predict(padded)):
        print(f'查询: {query}, 预测相关性: {labels[np.argmax(probs)]}')

# Example usage: interactively collect user queries and print predictions.
# NOTE(review): the original file defined a second function also named
# `predict_new_queries` here, which silently shadowed the batch helper
# above; renamed so both remain callable.
def predict_new_queries_interactive():
    """Read queries from stdin until the user types 'exit', then predict
    and print a relevance label for each collected query.

    Uses the module-level ``tokenizer``, ``model``, ``max_length`` and
    ``labels`` fitted/trained above.
    """
    queries = []
    while True:
        query = input("请输入您的查询（输入 'exit' 结束）：")
        if query.lower() == 'exit':
            break
        queries.append(query)

    # Nothing to predict if the user exited immediately; model.predict on an
    # empty batch would otherwise fail.
    if not queries:
        return

    new_sequences = tokenizer.texts_to_sequences(queries)  # text -> integer sequences
    new_padded = pad_sequences(new_sequences, maxlen=max_length, padding='post')
    predictions = model.predict(new_padded)

    for query, prediction in zip(queries, predictions):
        predicted_label = labels[np.argmax(prediction)]  # class name with highest probability
        print(f'查询: {query}, 预测相关性: {predicted_label}')


# Run the interactive prediction loop.
predict_new_queries_interactive()