import json
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Concatenate, Input
from tensorflow.keras.metrics import BinaryAccuracy
from tensorflow.keras.models import Model
from tqdm.keras import TqdmCallback  # 实时显示训练进度
from gensim.models import Word2Vec
from nltk.corpus import stopwords
import nltk

# Ensure the NLTK stopword corpus is present before any text cleaning below.
nltk.download('stopwords')

# Load the label vocabulary: one label name per line, order preserved.
label_file_path = r'D:\研一\课程\自然语言处理\data\data\label_list.txt'
with open(label_file_path, 'r', encoding='utf-8') as f:
    label_list = [raw_line.strip() for raw_line in f]

# 加载数据集
# Load a JSON-lines dataset split.
def load_dataset(file_path):
    """Read a JSON-lines file into parallel lists of texts and labels.

    Each line must be a JSON object with 'text' and 'label' keys.

    :param file_path: path to the .json (JSON-lines) file
    :return: (texts, labels) as two lists of equal length
    """
    texts, labels = [], []
    with open(file_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            record = json.loads(raw_line.strip())
            texts.append(record['text'])
            labels.append(record['label'])
    return texts, labels

# Load the labelled train/validation splits and the raw (unlabelled) test texts.
train_texts, train_labels = load_dataset(r'data/train.json')
valid_texts, valid_labels = load_dataset(r'data/valid.json')

test_file_path = r'data/test.txt'
with open(test_file_path, 'r', encoding='utf-8') as f:
    test_texts = [row.strip() for row in f]

# English stopwords extended with a few corpus-specific high-frequency words.
stop_words = set(stopwords.words('english'))
additional_stopwords = {'would', 'could', 'also', 'said'}
stop_words |= additional_stopwords

def remove_stopwords(text, stop_set=None):
    """Drop stopwords (case-insensitive) from a whitespace-tokenised string.

    :param text: input document; tokens are obtained with str.split(),
        so punctuation attached to words is NOT stripped.
    :param stop_set: optional set of lower-case words to remove; when None
        (the default, preserving the original call sites) the module-level
        ``stop_words`` set is used.
    :return: the surviving tokens re-joined with single spaces.
    """
    if stop_set is None:
        stop_set = stop_words  # module-level NLTK-derived set
    return ' '.join(tok for tok in text.split() if tok.lower() not in stop_set)

# Apply stopword filtering to every split.
train_texts = list(map(remove_stopwords, train_texts))
valid_texts = list(map(remove_stopwords, valid_texts))
test_texts = list(map(remove_stopwords, test_texts))

# ======= 1. TF-IDF features =======
# Vocabulary is fitted on the training split only; capped at 10000 dims.
tfidf_vectorizer = TfidfVectorizer(max_features=10000)
train_tfidf = tfidf_vectorizer.fit_transform(train_texts).toarray()
valid_tfidf = tfidf_vectorizer.transform(valid_texts).toarray()
test_tfidf = tfidf_vectorizer.transform(test_texts).toarray()

# ======= 2. Word2Vec features =======
# NOTE(review): embeddings are trained on train+valid+test tokens — a
# transductive use of the unlabelled test text, kept as-is deliberately.
all_texts_tokenized = [doc.split() for doc in train_texts + valid_texts + test_texts]
w2v_model = Word2Vec(sentences=all_texts_tokenized, vector_size=100, window=5, min_count=2, workers=4)

def generate_word2vec_features(texts, w2v_model):
    """Build one document vector per text by mean-pooling token embeddings.

    :param texts: list of whitespace-tokenisable documents
    :param w2v_model: trained gensim Word2Vec model; only ``.wv`` membership /
        lookup and ``.vector_size`` are used
    :return: np.ndarray of shape (len(texts), vector_size); a document whose
        tokens are all out-of-vocabulary gets an all-zero vector
    """
    # Fix: the fallback zero vector previously hard-coded dimension 100,
    # which silently breaks if the model is trained with a different
    # vector_size. Read the dimension from the model instead.
    dim = getattr(w2v_model, 'vector_size', 100)
    features = []
    for text in texts:
        vectors = [w2v_model.wv[w] for w in text.split() if w in w2v_model.wv]
        if vectors:
            features.append(np.mean(vectors, axis=0))
        else:
            features.append(np.zeros(dim))
    return np.array(features)

# Mean-pooled embedding features for every split.
train_w2v = generate_word2vec_features(train_texts, w2v_model)
valid_w2v = generate_word2vec_features(valid_texts, w2v_model)
test_w2v = generate_word2vec_features(test_texts, w2v_model)

# ======= 3. Feature fusion =======
# Standardise the TF-IDF block (scaler fitted on train only), then
# concatenate it column-wise with the Word2Vec block.
scaler = StandardScaler()
train_tfidf = scaler.fit_transform(train_tfidf)
valid_tfidf = scaler.transform(valid_tfidf)
test_tfidf = scaler.transform(test_tfidf)

train_features = np.concatenate([train_tfidf, train_w2v], axis=1)
valid_features = np.concatenate([valid_tfidf, valid_w2v], axis=1)
test_features = np.concatenate([test_tfidf, test_w2v], axis=1)

# Binarise the multi-label targets; column order follows label_list.
mlb = MultiLabelBinarizer(classes=label_list)
train_Y = mlb.fit_transform(train_labels)
valid_Y = mlb.transform(valid_labels)

# ======= 4. Model definition =======
input_dim = train_features.shape[1]

model = Sequential()
model.add(Dense(512, input_dim=input_dim, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
# Sigmoid output: each label is an independent binary decision (multi-label).
model.add(Dense(len(label_list), activation='sigmoid'))

# Binary cross-entropy pairs with the per-label sigmoid outputs.
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=[BinaryAccuracy(threshold=0.5)],
)

# ======= 5. Training =======
model.fit(
    train_features,
    train_Y,
    epochs=15,
    batch_size=64,
    validation_data=(valid_features, valid_Y),
    # Live progress bar during training.
    callbacks=[TqdmCallback(verbose=1)],
)

# Report fit quality on the training split (binary accuracy at 0.5).
train_loss, train_accuracy = model.evaluate(train_features, train_Y)
print("Train Accuracy:", train_accuracy)

# ======= 6. Test-set prediction =======
test_predictions = model.predict(test_features)
# Threshold the sigmoid scores, then map binary rows back to label tuples.
binary_preds = (test_predictions > 0.5).astype(int)
test_pred_labels = mlb.inverse_transform(binary_preds)

# Persist one comma-separated label line per test document; documents with
# no label above threshold are marked explicitly.
output_file_path = r'data/mlp.txt'
with open(output_file_path, 'w', encoding='utf-8') as f:
    for labels in test_pred_labels:
        line = ', '.join(labels) if labels else 'No_Mentioned'
        f.write(line + '\n')

print(f"Predicted labels have been saved to {output_file_path}")
