import os
import jieba
#import pkuseg
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional, GRU
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, EarlyStopping
from multiprocessing import Pool, cpu_count
import joblib  # used to persist the TF-IDF vectorizer and the LabelEncoder
from attention import Attention  # project-local custom Attention layer


# Directory containing the cleaned per-source Excel files.
DATA_FOLDER = 'clean_data/'

# Load and merge every .xlsx file found in the data directory.
def load_all_excel_data(folder_path):
    """Read all .xlsx files in *folder_path* and concatenate them.

    Files are read in sorted name order so the resulting row order is
    deterministic across runs (``os.listdir`` order is unspecified).

    Args:
        folder_path: directory to scan for ``.xlsx`` files.

    Returns:
        A single ``pd.DataFrame`` with a fresh integer index.

    Raises:
        ValueError: if the directory contains no ``.xlsx`` files
            (clearer than the bare ``pd.concat([])`` failure).
    """
    frames = [
        pd.read_excel(os.path.join(folder_path, name))
        for name in sorted(os.listdir(folder_path))
        if name.endswith('.xlsx')
    ]
    if not frames:
        raise ValueError(f"No .xlsx files found in {folder_path!r}")
    return pd.concat(frames, ignore_index=True)

# Chinese word segmentation (run across multiple processes below).
# def chinese_tokenizer(text):
#     return list(jieba.cut(text))

def chinese_tokenizer(text):
    """Segment a Chinese string into a list of tokens with jieba.

    Non-string inputs (e.g. NaN cells coming out of pandas) produce an
    empty token list, as does any segmentation failure — callers never
    see an exception from this function.
    """
    if not isinstance(text, str):
        return []  # NaN / numeric cells carry no tokens
    try:
        return [token for token in jieba.cut(text)]
    except Exception as e:
        print(f"Error in tokenizing: {text} - {e}")
        return []


#seg = pkuseg.pkuseg()
#def chinese_tokenizer(text):
#    return seg.cut(text)


def tokenize_texts(text_list):
    """Tokenize every text in *text_list* in parallel.

    Spawns one worker per CPU core and maps ``chinese_tokenizer`` over
    the inputs, returning one token list per input text (order preserved).
    """
    with Pool(cpu_count()) as workers:
        return workers.map(chinese_tokenizer, text_list)

if __name__ == '__main__':
    # Load and merge every Excel file under DATA_FOLDER.
    df = load_all_excel_data(DATA_FOLDER)

    # Drop rows whose text is missing or blank.
    df = df[df['text'].notnull()]  # remove rows where the text column is NaN
    df = df[df['text'].str.strip() != ""]  # remove rows whose text is empty/whitespace

    # Extract the raw texts and their labels as plain Python lists.
    texts = df['text'].values.tolist()
    labels = df['label'].values.tolist()

    # Tokenize all texts with a multiprocessing pool.
    # NOTE(review): x_data is never used below — TfidfVectorizer re-tokenizes
    # `texts` itself via its `tokenizer=` argument, so this pass looks redundant;
    # confirm before removing.
    x_data = tokenize_texts(texts)

    # Encode string labels as integer class ids.
    label_encoder = LabelEncoder()
    y_data = label_encoder.fit_transform(labels)

    # Persist the label encoder for later inference.
    joblib.dump(label_encoder, 'label_encoder.pkl')

    # Vectorize the texts with TF-IDF (unigrams + bigrams, top 1000 features).
    #vectorizer = TfidfVectorizer(tokenizer=chinese_tokenizer, max_features=5000)
    vectorizer = TfidfVectorizer(tokenizer=chinese_tokenizer, max_features=1000, ngram_range=(1, 2))
    x_data_vectorized = vectorizer.fit_transform(texts).toarray()

    # Persist the fitted vectorizer for later inference.
    joblib.dump(vectorizer, 'tfidf_vectorizer.pkl')

    # Split into training and validation sets.
    x_train, x_val, y_train, y_val = train_test_split(x_data_vectorized, y_data, test_size=0.2, random_state=42)

    # Baseline model: Logistic Regression on the TF-IDF features.
    lr_model = LogisticRegression()
    lr_model.fit(x_train, y_train)

    # Persist the trained baseline model.
    joblib.dump(lr_model, 'lr_model.pkl')

    # Evaluate the Logistic Regression baseline on the validation split.
    lr_y_pred = lr_model.predict(x_val)
    print("Logistic Regression - Accuracy:", accuracy_score(y_val, lr_y_pred))
    print(classification_report(y_val, lr_y_pred))

    # --- LSTM + Attention model ---
    max_length = 100  # maximum sequence length fed to the network
    vocab_size = 5000  # embedding vocabulary size
    embedding_dim = 128  # embedding dimensionality
    num_classes = len(set(y_data))  # number of target classes


    # NOTE(review): pad_sequences is applied to the dense float TF-IDF matrix,
    # not to integer token-id sequences. pad_sequences casts to int32 by default,
    # so the TF-IDF weights are truncated (mostly to 0) and then fed to an
    # Embedding layer that expects token indices — the network likely sees
    # near-constant input. The conventional pipeline is keras Tokenizer +
    # texts_to_sequences; confirm intent before relying on this model.
    x_data_padded = pad_sequences(x_data_vectorized, padding='post', maxlen=max_length)
    # Re-split with the same test_size/random_state as above so these rows stay
    # aligned with y_train/y_val produced by the earlier split.
    x_train_padded, x_val_padded = train_test_split(x_data_padded, test_size=0.2, random_state=42)

    # One-hot encode the integer labels for categorical_crossentropy.
    y_train_onehot = to_categorical(y_train, num_classes=num_classes)
    y_val_onehot = to_categorical(y_val, num_classes=num_classes)

    lstm_model = Sequential()
    lstm_model.add(Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=max_length))

    # Bidirectional LSTM with return_sequences=True so the attention layer
    # receives the full per-timestep output (GRU variant kept for experiments).
    #lstm_model.add(LSTM(100, dropout=0.1, recurrent_dropout=0.1))
    lstm_model.add(Bidirectional(LSTM(128, dropout=0.2, recurrent_dropout=0.2, return_sequences=True)))
    #lstm_model.add(Bidirectional(GRU(128, dropout=0.3, recurrent_dropout=0.3, return_sequences=False)))
    lstm_model.add(Attention())  # project-local attention layer over LSTM outputs
    lstm_model.add(Dense(num_classes, activation='softmax'))
    # Adaptive-learning-rate optimizer.
    lstm_model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.002), metrics=['accuracy'])

    # Keep the checkpoint with the best validation accuracy; stop early once
    # validation loss has not improved for 3 epochs.
    checkpoint = ModelCheckpoint('lstm_attention_best.keras', monitor='val_accuracy', save_best_only=True, mode='max')
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, mode='min')

    lstm_model.fit(
        x_train_padded, y_train_onehot,
        validation_data=(x_val_padded, y_val_onehot),
        epochs=10,
        batch_size=64,
        callbacks=[checkpoint, early_stopping]
    )

    # Final evaluation on the validation split.
    score, accuracy = lstm_model.evaluate(x_val_padded, y_val_onehot, batch_size=64)
    print("LSTM Model - Test Accuracy: %.2f%%" % (accuracy * 100))
    # Character-length statistics of the raw texts (sanity check for max_length).
    text_lengths = [len(text) for text in texts]
    print(pd.Series(text_lengths).describe())
