import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm  # 导入 tqdm 库
import numpy as np  # 导入 NumPy
import torch.nn.functional as F
from gensim.models import Word2Vec  # 导入 gensim 库
import os

# File locations: label vocabulary, train/valid/test data, and prediction output.
label_path = os.path.join("data", "label_list.txt")
train_path = os.path.join("data", "train.json")
valid_path = os.path.join("data", "valid.json")
test_path = os.path.join("data", "test.txt")
output_path = os.path.join("output", "rnn.txt")

# Read the label vocabulary (one label per line) and build both directions
# of the label <-> index mapping.
with open(label_path, "r", encoding="utf-8") as f:
    labels = [line.strip() for line in f]
label_to_idx = {label: idx for idx, label in enumerate(labels)}
idx_to_label = dict(enumerate(labels))

# Data-loading helper.
def load_json_data(file_path):
    """Parse a JSON-lines file: one JSON object per line, returned as a list."""
    records = []
    with open(file_path, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            records.append(json.loads(raw_line.strip()))
    return records

# Materialize the train/validation splits (JSON lines) and the raw test texts.
train_data = load_json_data(train_path)
valid_data = load_json_data(valid_path)

# Test file is plain text: one document per line, no labels.
with open(test_path, "r", encoding="utf-8") as f:
    test_data = [line.strip() for line in f]

# Train a Word2Vec model on whitespace-tokenized texts.
def build_word2vec_model(data, embedding_dim=300):
    """Fit Word2Vec on the 'text' field of every sample and return the model."""
    corpus = [sample['text'].split() for sample in data]
    return Word2Vec(
        corpus,
        vector_size=embedding_dim,
        window=5,
        min_count=1,
        workers=4,
    )

# Train Word2Vec on the combined train+validation corpus to get 300-dim vectors.
embedding_dim = 300
word2vec_model = build_word2vec_model(train_data + valid_data, embedding_dim)

# Build the embedding-lookup matrix that seeds the model's nn.Embedding layer.
def create_embedding_matrix(tokenizer, word2vec_model, embedding_dim=300):
    """Map every tokenizer index to its Word2Vec vector.

    Args:
        tokenizer: fitted Keras-style tokenizer exposing ``word_index``
            (word -> 1-based integer index).
        word2vec_model: trained model exposing ``wv`` (a word -> vector lookup
            supporting ``in`` and ``[]``).
        embedding_dim: dimensionality of the Word2Vec vectors.

    Returns:
        numpy array of shape ``(len(word_index) + 1, embedding_dim)``; row 0
        (the padding index) and rows for out-of-vocabulary words stay zero.
    """
    word_index = tokenizer.word_index
    # +1 because Keras reserves index 0 for padding; indices run 1..len(word_index),
    # so every index fits (the previous `i < num_words` guard was always true).
    embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
    for word, i in word_index.items():
        # Words absent from the Word2Vec vocabulary keep the zero vector.
        if word in word2vec_model.wv:
            embedding_matrix[i] = word2vec_model.wv[word]
    return embedding_matrix

# Fit the tokenizer on every train/validation text, then derive the
# pretrained embedding matrix aligned with its word indices.
tokenizer = Tokenizer()
all_texts = [sample['text'] for sample in train_data + valid_data]
tokenizer.fit_on_texts(all_texts)

embedding_matrix = create_embedding_matrix(tokenizer, word2vec_model, embedding_dim)

# Dataset yielding (padded token-id sequence, multi-hot label vector) pairs.
class MultiLabelDataset(Dataset):
    """Wraps JSON records for multi-label classification.

    Each record must carry a 'text' field; 'label' (a list of label strings)
    is optional and defaults to no labels, so unlabeled test data also works.
    """

    def __init__(self, data, tokenizer, label_to_idx, max_length=512):
        self.data = data
        self.tokenizer = tokenizer
        self.label_to_idx = label_to_idx
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        record = self.data[idx]
        # Token ids, padded/truncated on the right to a fixed length.
        ids = self.tokenizer.texts_to_sequences([record["text"]])[0]
        padded = pad_sequences(
            [ids], maxlen=self.max_length, padding='post', truncating='post'
        )[0]
        # Multi-hot target vector over the full label set.
        target = torch.zeros(len(self.label_to_idx))
        for name in record.get("label", []):
            target[self.label_to_idx[name]] = 1
        return torch.tensor(padded, dtype=torch.long), target

# Wrap both splits in Datasets and batch them for training/validation.
train_dataset = MultiLabelDataset(train_data, tokenizer, label_to_idx)
valid_dataset = MultiLabelDataset(valid_data, tokenizer, label_to_idx)

train_loader = DataLoader(dataset=train_dataset, batch_size=4, shuffle=True)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=4)

# Bidirectional LSTM text classifier seeded with a pretrained embedding table.
class TextRNN(nn.Module):
    """Mean-pooled (Bi)LSTM producing per-label sigmoid probabilities."""

    def __init__(self, embedding_matrix, num_labels, hidden_dim=128, num_layers=2, bidirectional=True):
        super(TextRNN, self).__init__()
        # from_pretrained defaults to freeze=True, so the embeddings stay fixed.
        self.embedding = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.lstm = nn.LSTM(
            input_size=embedding_matrix.shape[1],
            hidden_size=hidden_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,  # inputs arrive as (batch, seq, feature)
        )
        self.dropout = nn.Dropout(0.5)
        # Bidirectional LSTMs concatenate forward and backward hidden states.
        out_dim = hidden_dim * 2 if bidirectional else hidden_dim
        self.fc = nn.Linear(out_dim, num_labels)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_ids):
        # (batch, seq) -> (batch, seq, emb_dim)
        emb = self.embedding(input_ids)
        # (batch, seq, hidden * num_directions)
        states, _ = self.lstm(emb)
        # Mean-pool over the time dimension, regularize, then classify.
        pooled = self.dropout(states.mean(dim=1))
        # Independent per-label probabilities for the multi-label task.
        return self.sigmoid(self.fc(pooled))

# Instantiate the model, optimizer, and loss function.
device = "cpu"  # switch to "cuda" if a GPU is available
model = TextRNN(embedding_matrix, num_labels=len(labels)).to(device)
optimizer = optim.AdamW(model.parameters(), lr=2e-5)  # NOTE(review): 2e-5 is a transformer-scale LR; an LSTM trained from scratch usually wants ~1e-3 — confirm this is intentional
criterion = nn.BCELoss()  # element-wise binary cross-entropy; pairs with the model's sigmoid output for multi-label targets

# One optimization epoch over the training loader.
def train_model(model, train_loader, optimizer, criterion, device):
    """Run a full training pass and return the mean batch loss."""
    model.train()
    running = 0.0

    bar = tqdm(train_loader, desc="Training", leave=False)
    for batch_ids, batch_labels in bar:
        batch_ids = batch_ids.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        loss = criterion(model(batch_ids), batch_labels)
        loss.backward()
        optimizer.step()

        running += loss.item()
        bar.set_postfix(loss=loss.item())

    return running / len(train_loader)

def validate_model(model, valid_loader, criterion, device):
    """Evaluate on the validation loader; return (mean loss, subset accuracy)."""
    model.eval()
    running = 0.0
    pred_rows, true_rows = [], []

    bar = tqdm(valid_loader, desc="Validation", leave=False)
    with torch.no_grad():
        for batch_ids, batch_labels in bar:
            batch_ids = batch_ids.to(device)
            batch_labels = batch_labels.to(device)

            probs = model(batch_ids)
            loss = criterion(probs, batch_labels)
            running += loss.item()

            # Threshold at 0.5 to binarize the per-label probabilities.
            pred_rows.extend((probs > 0.5).cpu().numpy())
            true_rows.extend(batch_labels.cpu().numpy())
            bar.set_postfix(loss=loss.item())

    # accuracy_score on multi-hot rows computes exact-match (subset) accuracy.
    accuracy = accuracy_score(true_rows, pred_rows)
    return running / len(valid_loader), accuracy

# Run inference over raw test texts and write one comma-joined label line each.
def predict_and_save_results(model, test_data, tokenizer, output_path):
    """Predict labels for every test text and save them to output_path.

    Relies on the module-level `device` and `idx_to_label` globals.
    """
    model.eval()
    lines = []
    with torch.no_grad():
        for text in test_data:
            token_ids = tokenizer.texts_to_sequences([text])[0]
            padded = pad_sequences([token_ids], maxlen=512, padding='post', truncating='post')[0]
            batch = torch.tensor(padded, dtype=torch.long).unsqueeze(0).to(device)
            probs = model(batch)[0]
            # Keep every label whose probability clears the 0.5 threshold.
            chosen = [idx_to_label[i] for i, p in enumerate(probs) if p > 0.5]
            lines.append(",".join(chosen))
    with open(output_path, "w", encoding="utf-8") as f:
        f.writelines(line + "\n" for line in lines)

# Fit the model, reporting metrics after every epoch, then predict on the test set.
num_epochs = 2  # tune as needed
for epoch in range(num_epochs):
    print(f"Epoch {epoch + 1}/{num_epochs}")
    train_loss = train_model(model, train_loader, optimizer, criterion, device)
    valid_loss, valid_accuracy = validate_model(model, valid_loader, criterion, device)
    print(f"Train Loss: {train_loss:.4f}, Valid Loss: {valid_loss:.4f}, Accuracy: {valid_accuracy:.4f}")

# Run inference on the test split and persist the predictions.
predict_and_save_results(model, test_data, tokenizer, output_path)
print(f"预测结果已保存至 {output_path}")