import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score, hamming_loss
import pandas as pd
import networkx as nx
import torch.nn.functional as F
'''
Code overview

    Dataset definition: the MultiLabelDataset class loads and preprocesses text data and labels.
    Label semantic embeddings: LabelEmbeddingGenerator uses a BERT model to generate semantic embeddings for labels.
    Graph neural network: the GraphNeuralNetwork module models relationships between labels.
    Multi-label prediction model: MultiLabelPredictor combines BERT, a graph neural network, and an attention mechanism for multi-label classification.
    Training and evaluation: the train_model and evaluate_model functions handle model training and performance evaluation, respectively.


    Replace texts and labels with real data.
    Adjust model parameters (e.g. learning rate, batch size) according to actual needs.
'''

# Dataset definition
class MultiLabelDataset(Dataset):
    """Pairs raw texts with multi-hot label vectors for multi-label training.

    Tokenization happens lazily in ``__getitem__``: each text is padded and
    truncated to ``max_length`` with the supplied tokenizer.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts          # sequence of raw text strings
        self.labels = labels        # parallel sequence of multi-hot label lists
        self.tokenizer = tokenizer  # HuggingFace-style tokenizer exposing encode_plus
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        encoded = self.tokenizer.encode_plus(
            self.texts[idx],
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        # encode_plus returns (1, max_length) tensors; flatten to (max_length,)
        # so DataLoader batching stacks them into (batch, max_length).
        return {
            "input_ids": encoded["input_ids"].flatten(),
            "attention_mask": encoded["attention_mask"].flatten(),
            "labels": torch.FloatTensor(self.labels[idx]),
        }
    
# Label semantic embedding generation
class LabelEmbeddingGenerator(nn.Module):
    """Encodes label name strings into BERT [CLS] embeddings."""

    def __init__(self, model_name="bert-base-uncased"):
        super(LabelEmbeddingGenerator, self).__init__()
        # Encoder and tokenizer must come from the same checkpoint.
        self.bert = BertModel.from_pretrained(model_name)
        self.tokenizer = BertTokenizer.from_pretrained(model_name)

    def forward(self, labels):
        # NOTE(review): the tokenizer expects a list of strings here — it cannot
        # consume tensors; confirm callers pass label *names*, not label vectors.
        batch = self.tokenizer(labels, return_tensors="pt", padding=True, truncation=True)
        encoded = self.bert(**batch)
        # The [CLS] (first) token embedding is used as the label representation.
        return encoded.last_hidden_state[:, 0, :]
        
# Graph neural network module
class GraphNeuralNetwork(nn.Module):
    """Two-layer feature transform followed by adjacency aggregation.

    Each node's output is the adjacency-weighted sum of the transformed
    features of its neighbors (aggregation happens *after* the MLP).
    """

    def __init__(self, input_dim, hidden_dim):
        super(GraphNeuralNetwork, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, input_dim)  # project back to input_dim

    def forward(self, node_features, adjacency_matrix):
        transformed = self.fc2(torch.relu(self.fc1(node_features)))
        # Neighborhood aggregation via matrix product with the adjacency matrix.
        return adjacency_matrix @ transformed
 
 #多标签预测模型
 class MultiLabelPredictor(nn.Module):
    def __init__(self, bert_model_name="bert-base-uncased", label_dim=768, hidden_dim=256):
        super(MultiLabelPredictor, self).__init__()
        self.bert = BertModel.from_pretrained(bert_model_name)
        self.label_embedding = LabelEmbeddingGenerator(bert_model_name)
        self.gnn = GraphNeuralNetwork(label_dim, hidden_dim)
        self.attention = nn.MultiheadAttention(embed_dim=768, num_heads=8)
        self.classifier = nn.Sequential(
            nn.Linear(768, 256),
            nn.ReLU(),
            nn.Linear(256, label_dim)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_ids, attention_mask, labels):
        # 特征提取
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        text_features = outputs.last_hidden_state[:, 0, :]  # 使用 [CLS] token 的嵌入

        # 标签嵌入
        label_embeddings = self.label_embedding(labels)

        # 图神经网络
        adjacency_matrix = self.compute_adjacency_matrix(label_embeddings)
        updated_label_embeddings = self.gnn(label_embeddings, adjacency_matrix)

        # 注意力机制
        text_features = text_features.unsqueeze(0)
        updated_label_embeddings = updated_label_embeddings.unsqueeze(0)
        attention_output, _ = self.attention(text_features, updated_label_embeddings, updated_label_embeddings)
        attention_output = attention_output.squeeze(0)

        # 分类
        logits = self.classifier(attention_output)
        probabilities = self.sigmoid(logits)

        return probabilities

    def compute_adjacency_matrix(self, label_embeddings):
        similarity = torch.cosine_similarity(label_embeddings.unsqueeze(1), label_embeddings.unsqueeze(0), dim=-1)
        adjacency_matrix = (similarity > 0.5).float()  # 阈值可以根据需要调整
        return adjacency_matrix

# Model training
def train_model(model, dataloader, optimizer, criterion, device):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0
    for batch in dataloader:
        ids = batch["input_ids"].to(device)
        mask = batch["attention_mask"].to(device)
        targets = batch["labels"].to(device)

        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        predictions = model(ids, mask, targets)
        batch_loss = criterion(predictions, targets)
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()
    return running_loss / len(dataloader)

def evaluate_model(model, dataloader, device):
    """Evaluate with a 0.5 decision threshold.

    Returns (accuracy, macro recall, macro F1, hamming loss) over the whole
    dataloader.
    """
    model.eval()
    predictions, targets = [], []
    with torch.no_grad():
        for batch in dataloader:
            ids = batch["input_ids"].to(device)
            mask = batch["attention_mask"].to(device)
            gold = batch["labels"].to(device)

            probs = model(ids, mask, gold)
            # Binarize probabilities at 0.5 for multi-label prediction.
            predictions.extend((probs > 0.5).float().cpu().numpy())
            targets.extend(gold.cpu().numpy())

    return (
        accuracy_score(targets, predictions),
        recall_score(targets, predictions, average="macro"),
        f1_score(targets, predictions, average="macro"),
        hamming_loss(targets, predictions),
    )

# Entry point
def main():
    """Demo pipeline: build a toy dataset, train briefly, then report metrics."""
    # --- data preparation (placeholders; replace with real data/labels/names) ---
    texts = ["新闻文章1", "新闻文章2", "新闻文章3"]
    labels = [[1, 0, 1], [0, 1, 0], [1, 1, 0]]
    # NOTE(review): label_names is defined but never passed to the model.
    label_names = ["政治", "经济", "体育"]

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    dataloader = DataLoader(MultiLabelDataset(texts, labels, tokenizer), batch_size=4, shuffle=True)

    # --- model setup ---
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MultiLabelPredictor().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    criterion = nn.BCELoss()

    # --- training ---
    for epoch in range(5):
        train_loss = train_model(model, dataloader, optimizer, criterion, device)
        print(f"Epoch {epoch+1}, Loss: {train_loss:.4f}")

    # --- evaluation (on the training loader; no held-out split in this demo) ---
    accuracy, recall, f1, hamming = evaluate_model(model, dataloader, device)
    print(f"Accuracy: {accuracy:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Hamming Loss: {hamming:.4f}")

if __name__ == "__main__":
    main()