import json
import numpy as np
from gensim.models import Word2Vec
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Embedding, Dropout
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.metrics import BinaryAccuracy

# Load the label vocabulary: one label name per line.
label_file_path = r'D:\研一\课程\自然语言处理\data\data\label_list.txt'
with open(label_file_path, 'r', encoding='utf-8') as f:
    label_list = [line.strip() for line in f]

# 加载数据集的通用函数
# Generic loader for the JSON-lines splits.
def load_dataset(file_path):
    """Load a JSON-lines dataset file.

    Each non-blank line must be a JSON object with 'text' and 'label'
    keys. Returns two parallel lists ``(texts, labels)``.
    """
    texts, labels = [], []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Skip blank/trailing lines — json.loads('') would raise
            # JSONDecodeError and abort the whole load.
            if not line:
                continue
            record = json.loads(line)
            texts.append(record['text'])
            labels.append(record['label'])
    return texts, labels

# Training / validation splits are JSON-lines files with text + labels.
train_path = r'D:\研一\课程\自然语言处理\data\data\train.json'
valid_path = r'D:\研一\课程\自然语言处理\data\data\valid.json'
train_texts, train_labels = load_dataset(train_path)
valid_texts, valid_labels = load_dataset(valid_path)

# The test split is plain text: one unlabeled document per line.
test_file_path = r'D:\研一\课程\自然语言处理\data\data\test.txt'
with open(test_file_path, 'r', encoding='utf-8') as f:
    test_texts = [line.strip() for line in f]

# Whitespace tokenization — this is exactly how Word2Vec sees the corpus,
# so the Keras Tokenizer below must use the same token definition.
all_texts = train_texts + valid_texts + test_texts
all_texts_tokenized = [text.split() for text in all_texts]

# Train Word2Vec embeddings on the combined corpus (100-d vectors;
# words appearing fewer than 2 times are dropped from the w2v vocab).
w2v_model = Word2Vec(sentences=all_texts_tokenized, vector_size=100,
                     window=5, min_count=2, workers=4)

# Convert texts to integer sequences. filters='' and lower=False keep
# the tokenizer's tokens identical to the whitespace tokens Word2Vec was
# trained on; with the defaults (punctuation stripping + lowercasing)
# the two vocabularies would diverge.
tokenizer = Tokenizer(filters='', lower=False, split=' ')
tokenizer.fit_on_texts(all_texts)
train_X = pad_sequences(tokenizer.texts_to_sequences(train_texts), maxlen=100, padding='post')
valid_X = pad_sequences(tokenizer.texts_to_sequences(valid_texts), maxlen=100, padding='post')
test_X = pad_sequences(tokenizer.texts_to_sequences(test_texts), maxlen=100, padding='post')

# Build the embedding matrix keyed by the *tokenizer's* word indices, so
# that row i holds the vector of the word the tokenizer maps to i.
# (The previous version keyed rows by Word2Vec's internal index order,
# which does not match the tokenizer's indices — every Embedding lookup
# fetched the wrong word's vector, and indices beyond len(w2v.wv) were
# out of range.) Row 0 is reserved for padding; words dropped by
# min_count stay as zero vectors.
vocab_size = len(tokenizer.word_index) + 1
embedding_matrix = np.zeros((vocab_size, 100))
for word, idx in tokenizer.word_index.items():
    if word in w2v_model.wv:
        embedding_matrix[idx] = w2v_model.wv[word]

# Binarize the multi-label targets against the fixed label list so the
# output column order matches label_list.
mlb = MultiLabelBinarizer(classes=label_list)
train_Y = mlb.fit_transform(train_labels)
valid_Y = mlb.transform(valid_labels)

# TextCNN: frozen pretrained embeddings -> 1-D conv -> global max pool.
model = Sequential([
    Embedding(input_dim=vocab_size, output_dim=100,
              weights=[embedding_matrix], input_length=100, trainable=False),
    Conv1D(128, kernel_size=3, activation='relu'),
    GlobalMaxPooling1D(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    Dropout(0.5),
    # Sigmoid (not softmax): each label is an independent binary decision.
    Dense(len(label_list), activation='sigmoid')
])

# Binary cross-entropy pairs with the sigmoid output head: each label is
# scored as an independent binary classification.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[BinaryAccuracy()])

# Train with held-out validation each epoch.
model.fit(train_X, train_Y, validation_data=(valid_X, valid_Y),
          epochs=10, batch_size=32)

# Report accuracy on the training split.
train_loss, train_accuracy = model.evaluate(train_X, train_Y)
print("Train Accuracy:", train_accuracy)

# Threshold the per-label probabilities at 0.5 and map the resulting
# binary indicator rows back to label-name tuples.
probabilities = model.predict(test_X)
test_pred_labels = mlb.inverse_transform((probabilities > 0.5).astype(int))

# Write the predictions: one line per test document, labels comma-joined,
# with the sentinel 'No_Mentioned' when no label crossed the threshold.
output_file_path = r'D:\桌面文件\2.txt'
with open(output_file_path, 'w', encoding='utf-8') as f:
    for labels in test_pred_labels:
        line = ', '.join(labels) if labels else 'No_Mentioned'
        f.write(line + '\n')

print(f"Predicted labels have been saved to {output_file_path}")