import os
from collections import Counter
from multiprocessing import Pool, cpu_count

import jieba
import joblib  # used to persist the TF-IDF vectorizer and LabelEncoder
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Embedding, LSTM, Dense, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# 2. 中文分词：使用多进程进行分词
def chinese_tokenizer(text):
    """Segment a Chinese string into a list of tokens using jieba."""
    return [token for token in jieba.cut(text)]

def tokenize_texts(text_list):
    """Tokenize every text in *text_list* in parallel.

    Fans the work out across one worker process per CPU core and
    returns the token lists in the same order as the input.
    """
    with Pool(cpu_count()) as workers:
        return workers.map(chinese_tokenizer, text_list)

if __name__ == '__main__':
    # 1. Data preparation - load texts and labels from the Excel file.
    df = pd.read_excel('TEXT-B5601_NS3294_ZBSJ-ZSHC__20240815.xlsx')

    texts = df['text'].values.tolist()
    labels = df['label'].values.tolist()

    # 2. Tokenize once with the multiprocessing pool. The token lists are
    # reused below to build the integer sequences the LSTM consumes
    # (previously this result was computed but never used).
    x_data = tokenize_texts(texts)

    # Encode string labels to integer class ids.
    label_encoder = LabelEncoder()
    y_data = label_encoder.fit_transform(labels)

    # Persist the label encoder for later inference.
    joblib.dump(label_encoder, 'label_encoder.pkl')

    # 3. TF-IDF vectorization (input for the Logistic Regression model).
    vectorizer = TfidfVectorizer(tokenizer=chinese_tokenizer, max_features=5000)
    x_data_vectorized = vectorizer.fit_transform(texts).toarray()

    # Persist the fitted vectorizer for later inference.
    joblib.dump(vectorizer, 'tfidf_vectorizer.pkl')

    # 4. Train/validation split.
    x_train, x_val, y_train, y_val = train_test_split(
        x_data_vectorized, y_data, test_size=0.2, random_state=42)

    # 5. Logistic Regression model.
    # max_iter raised from the default 100: with 5000 TF-IDF features the
    # default lbfgs solver frequently fails to converge in 100 iterations.
    lr_model = LogisticRegression(max_iter=1000)
    lr_model.fit(x_train, y_train)

    # Persist the trained Logistic Regression model.
    joblib.dump(lr_model, 'lr_model.pkl')

    # Evaluate Logistic Regression on the validation split.
    lr_y_pred = lr_model.predict(x_val)
    print("Logistic Regression - Accuracy:", accuracy_score(y_val, lr_y_pred))
    print(classification_report(y_val, lr_y_pred))

    # 6. LSTM model.
    # BUG FIX: the original code ran pad_sequences over the dense TF-IDF
    # float matrix and fed that into an Embedding layer. Embedding expects
    # integer token ids, so the floats were cast to (mostly zero) ints and
    # the LSTM learned nothing meaningful. Build proper integer sequences
    # from the jieba tokens instead.
    max_length = 100   # maximum tokens per text (longer texts are truncated)
    vocab_size = 5000  # must match Embedding(input_dim=...)

    # Frequency-ranked vocabulary: id 0 is reserved for padding, ids
    # 1..vocab_size-1 cover the most frequent tokens. Rarer tokens are
    # dropped, mirroring TF-IDF's max_features cut-off.
    token_counts = Counter(tok for tokens in x_data for tok in tokens)
    word_index = {
        word: idx + 1
        for idx, (word, _) in enumerate(token_counts.most_common(vocab_size - 1))
    }
    sequences = [
        [word_index[tok] for tok in tokens if tok in word_index]
        for tokens in x_data
    ]
    x_data_padded = pad_sequences(sequences, padding='post', maxlen=max_length)

    # Split sequences together with the labels so rows stay aligned
    # (previously the split relied on a repeated random_state for alignment).
    x_train_padded, x_val_padded, y_train_seq, y_val_seq = train_test_split(
        x_data_padded, y_data, test_size=0.2, random_state=42)

    # One-hot encode labels for the softmax / categorical-crossentropy head.
    num_classes = len(set(y_data))
    y_train_onehot = to_categorical(y_train_seq, num_classes=num_classes)
    y_val_onehot = to_categorical(y_val_seq, num_classes=num_classes)

    # Build the LSTM classifier.
    lstm_model = Sequential()
    lstm_model.add(Embedding(input_dim=vocab_size, output_dim=128, input_length=max_length))
    lstm_model.add(LSTM(64, dropout=0.1, recurrent_dropout=0.1))
    lstm_model.add(Dense(num_classes, activation='softmax'))  # multi-class output head

    lstm_model.compile(loss='categorical_crossentropy',
                       optimizer=Adam(learning_rate=0.0005),
                       metrics=['accuracy'])

    # Callbacks: keep the best checkpoint by val accuracy, stop early when
    # validation loss stalls for 3 epochs.
    checkpoint = ModelCheckpoint('lstm_model_best.keras', monitor='val_accuracy',
                                 save_best_only=True, mode='max')
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, mode='min')

    # Train the LSTM model.
    lstm_model.fit(
        x_train_padded, y_train_onehot,
        validation_data=(x_val_padded, y_val_onehot),
        epochs=20,
        batch_size=64,
        callbacks=[checkpoint, early_stopping]
    )

    # Final evaluation on the validation split.
    score, accuracy = lstm_model.evaluate(x_val_padded, y_val_onehot, batch_size=64)
    print("LSTM Model - Test Accuracy: %.2f%%" % (accuracy * 100))
