import json
import torch
from transformers import BertTokenizer, BertForSequenceClassification
from sklearn.metrics import accuracy_score
from torch.utils.data import Dataset, DataLoader
from torch.nn.functional import one_hot

# Data file locations (Windows paths).
label_path = r"D:\研一\课程\自然语言处理\data\data\label_list.txt"
train_path = r"D:\研一\课程\自然语言处理\data\data\train.json"
valid_path = r"D:\研一\课程\自然语言处理\data\data\valid.json"
test_path = r"D:\研一\课程\自然语言处理\data\data\test.txt"
output_path = r"D:/桌面文件/3.txt"

# Load the label vocabulary: one label name per line.
with open(label_path, "r", encoding="utf-8") as f:
    labels = [line.strip() for line in f.readlines()]
# Bidirectional mappings between label names and their integer indices.
label_to_idx = {label: idx for idx, label in enumerate(labels)}
idx_to_label = {idx: label for label, idx in label_to_idx.items()}

# Load the JSON-lines data splits (one JSON object per line).
def load_json_data(file_path):
    """Parse *file_path* as JSON Lines and return the records as a list."""
    with open(file_path, "r", encoding="utf-8") as fh:
        records = [json.loads(raw.strip()) for raw in fh]
    return records

train_data = load_json_data(train_path)
valid_data = load_json_data(valid_path)

# Test split: plain text, one document per line (no gold labels).
with open(test_path, "r", encoding="utf-8") as f:
    test_data = [line.strip() for line in f.readlines()]
class MultiLabelDataset(Dataset):
    """Torch dataset that tokenizes raw text and emits multi-hot label vectors.

    Each record is a dict with a "text" field and an optional "label" list;
    records without labels (e.g. test data) yield an all-zero target.
    """

    def __init__(self, data, tokenizer, label_to_idx, max_length=512):
        self.data = data
        self.tokenizer = tokenizer
        self.label_to_idx = label_to_idx
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        record = self.data[idx]
        encoding = self.tokenizer(
            record["text"],
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        # Multi-hot target over the full label vocabulary.
        target = torch.zeros(len(self.label_to_idx))
        for name in record.get("label", []):
            target[self.label_to_idx[name]] = 1
        input_ids = encoding["input_ids"].squeeze(0)
        attention_mask = encoding["attention_mask"].squeeze(0)
        return input_ids, attention_mask, target

# Initialize the BERT tokenizer.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

# Wrap the train/validation splits in tokenizing datasets.
train_dataset = MultiLabelDataset(train_data, tokenizer, label_to_idx)
valid_dataset = MultiLabelDataset(valid_data, tokenizer, label_to_idx)

train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=4)
class BertForMultiLabelClassification(torch.nn.Module):
    """BERT classifier adapted for multi-label prediction.

    Wraps a pretrained BertForSequenceClassification and maps its logits
    through a sigmoid so every label gets an independent probability.
    """

    def __init__(self, num_labels):
        super().__init__()
        self.bert = BertForSequenceClassification.from_pretrained(
            "bert-base-uncased", num_labels=num_labels
        )
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, input_ids, attention_mask):
        bert_out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # Per-label probabilities (independent sigmoid, not softmax).
        return self.sigmoid(bert_out.logits)
# device = "cuda" if torch.cuda.is_available() else "cpu"
device = "cpu"  # forced to CPU; restore the line above to allow CUDA
model = BertForMultiLabelClassification(num_labels=len(labels)).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
# BCELoss expects probabilities; the model applies sigmoid in forward().
criterion = torch.nn.BCELoss()

# Training function: one pass over the training loader.
def train_model(model, train_loader, optimizer, criterion, device):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0
    for input_ids, attention_mask, labels in train_loader:
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        probs = model(input_ids, attention_mask)
        batch_loss = criterion(probs, labels)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
    return running_loss / len(train_loader)
def validate_model(model, valid_loader, criterion, device):
    """Evaluate on the validation loader.

    Returns a (mean loss, accuracy) pair where accuracy is sklearn's
    exact-match score over the 0.5-thresholded multi-hot predictions.
    """
    model.eval()
    loss_sum = 0.0
    pred_rows, gold_rows = [], []
    with torch.no_grad():
        for input_ids, attention_mask, labels in valid_loader:
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)

            probs = model(input_ids, attention_mask)
            loss_sum += criterion(probs, labels).item()
            pred_rows.extend((probs > 0.5).cpu().numpy())
            gold_rows.extend(labels.cpu().numpy())
    accuracy = accuracy_score(gold_rows, pred_rows)
    return loss_sum / len(valid_loader), accuracy
def predict_and_save_results(model, test_data, tokenizer, output_path, max_length=512, threshold=0.5):
    """Predict labels for raw test texts and write them to *output_path*.

    Each output line is the comma-joined set of label names whose predicted
    probability exceeds *threshold* (a line may be empty if nothing fires).

    Args:
        model: trained classifier returning per-label probabilities.
        test_data: iterable of raw text strings.
        tokenizer: HuggingFace-style tokenizer callable.
        output_path: destination text file, one prediction line per input.
        max_length: tokenizer padding/truncation length (default 512,
            matching the previous hard-coded value).
        threshold: decision threshold on each label probability.

    Note: relies on the module-level `device` and `idx_to_label` globals.
    """
    model.eval()
    predictions = []
    with torch.no_grad():
        for text in test_data:
            encoded = tokenizer(
                text,
                max_length=max_length,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )
            input_ids = encoded["input_ids"].to(device)
            attention_mask = encoded["attention_mask"].to(device)
            outputs = model(input_ids, attention_mask)
            # outputs[0]: probability vector for the single input text.
            predicted_labels = [
                idx_to_label[idx] for idx, prob in enumerate(outputs[0]) if prob > threshold
            ]
            predictions.append(",".join(predicted_labels))
    # Persist one comma-joined label line per test document.
    with open(output_path, "w", encoding="utf-8") as f:
        for line in predictions:
            f.write(line + "\n")
# Train and validate for a fixed number of epochs.
for epoch in range(3):  # adjust the epoch count as needed
    train_loss = train_model(model, train_loader, optimizer, criterion, device)
    valid_loss, valid_accuracy = validate_model(model, valid_loader, criterion, device)
    print(f"Epoch {epoch+1}: Train Loss={train_loss}, Valid Loss={valid_loss}, Accuracy={valid_accuracy}")

# Predict on the test set and save the results.
predict_and_save_results(model, test_data, tokenizer, output_path)
print(f"预测结果已保存至 {output_path}")
