import torch
import torch.nn as nn
from transformers import BartForConditionalGeneration
from config import *

class SumModel(nn.Module):
    """Abstractive summarization model: a thin wrapper around pretrained BART."""

    def __init__(self):
        super().__init__()
        # Load the pretrained seq2seq checkpoint named in config (BART_MODEL).
        self.bart = BartForConditionalGeneration.from_pretrained(BART_MODEL)

        # # To freeze only the encoder and keep the decoder trainable:
        # for name, param in self.bart.model.encoder.named_parameters():
        #     param.requires_grad = False

    def forward(self, input_ids, attention_mask, labels=None):
        """Teacher-forced forward pass; the output carries a loss when labels are given."""
        return self.bart(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
        )

    def generate(self, input_ids, attention_mask, max_length=MAX_SUMMARY_LEN):
        """Autoregressively decode summary token ids for the given batch."""
        return self.bart.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=max_length,
        )

# Training smoke test: confirm the model's parameters are trainable
# (forward -> loss -> backward -> optimizer step runs without error).
if __name__ == '__main__':
    model = SumModel().to(DEVICE)
    print(model)
    # Optimize only parameters left trainable, so the optional encoder
    # freezing in SumModel.__init__ is respected.
    optimizer = torch.optim.AdamW(
        (p for p in model.parameters() if p.requires_grad), lr=LR
    )

    # Build a tiny random batch. Draw token ids from the loaded model's own
    # vocabulary instead of hard-coding 50265 (which is only correct for the
    # standard facebook/bart-* checkpoints), so any BART_MODEL works.
    vocab_size = model.bart.config.vocab_size
    input_ids = torch.randint(0, vocab_size, (2, 10)).to(DEVICE)
    attention_mask = torch.ones(2, 10).to(DEVICE)
    labels = torch.randint(0, vocab_size, (2, 10)).to(DEVICE)

    model.train()
    outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
    loss = outputs.loss
    print(loss.item())

    optimizer.zero_grad()
    loss.backward()  # should raise no error if gradients flow
    optimizer.step()