# PCJD/code/utils/trainer.py
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, LogitsProcessor
from configs.hyperparametric import Reward_config, Tree_config
from utils.warp import Warp
from utils.model_generate import extra_span_from_tokens
# from model.logitsprocessor import RewardControlLogitsProcessor

config = Reward_config().to_dict()
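# Note: the dict produced by Reward_config must provide at least the
# 'return_dict_in_generate' key, which compute_loss below reads when
# calling the model.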

def reinforced_loss(W, V, I_a, I_s, p, lower=1e-3, upper=1 - 1e-3):
    """Importance-weighted reward loss.

    W is clamped to [lower, upper] so the log stays numerically stable.
    Note: V is currently unused (see the commented-out residual below).
    """
    prob = I_a / I_s * p  # importance-sampling correction of the sample probability
    # c = W - V
    W = torch.clamp(W, min=lower, max=upper)
    return -torch.log(torch.mean(W * prob))
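
# Example (a hedged sketch, not part of the original pipeline): with a batch
# of 4 sequences, per-sequence confidence W and sample probability p,
#
#   reinforced_loss(W=torch.tensor([0.2, 0.9, 0.5, 0.7]), V=None,
#                   I_a=2.0, I_s=1.0, p=torch.tensor([0.1, 0.3, 0.2, 0.4]))
#
# evaluates -log(mean(W * (I_a / I_s) * p)) and returns a scalar tensor that
# shrinks as confident sequences become more likely under the sampler.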

class PRGTrainer(Trainer):
    def __init__(self, tokenizer, logits_processor=None, **kwargs):
        super().__init__(**kwargs)
        self.loss_tokenizer = tokenizer
        # Fall back to the LogitsProcessor base class when no processor is
        # given; callers should pass a concrete processor such as the
        # RewardControlLogitsProcessor referenced in the commented import above.
        self.logits_processor = logits_processor if logits_processor else LogitsProcessor
        self.ce = nn.CrossEntropyLoss(ignore_index=-100)
        # Token ids of the tags that mark rewarded spans in the training text.
        self.open_tag = tokenizer.encode('<v>', add_special_tokens=False)
        self.close_tag = tokenizer.encode('</v>', add_special_tokens=False)
        self.state = 'ref'  # 'ref' or 'sft'
    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        # Reward-related fields are packed into the batch by the data collator.
        V, I_a, I_s, prob = inputs.pop('reward'), inputs.pop('I-all'), inputs.pop('I-sample'), inputs.pop('prob')
        input_ids, attention_mask = inputs.pop('input_ids'), inputs.pop('attention_mask')
        labels, label_mask = inputs.pop('labels'), inputs.pop('label_mask')
        outputs = model(input_ids=input_ids,
                        attention_mask=attention_mask,
                        return_dict=config['return_dict_in_generate'],
                        )
        logits = outputs.logits
        probs = torch.softmax(logits, dim=-1)
        # Shift so that the token at position t is scored by the prediction at t-1.
        shift_logits = logits[..., :-1, :].contiguous()
        shift_probs = probs[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        shift_label_mask = label_mask[..., 1:].contiguous()
        # Keep only positions that carry a label in at least one batch element.
        keep = shift_label_mask.bool().any(dim=0)
        shift_logits = shift_logits[:, keep, :]
        shift_probs = shift_probs[:, keep, :]
        shift_labels = shift_labels[:, keep]
        # Per-sequence confidence W: mean of the top token probability per step.
        W = torch.max(shift_probs, dim=-1)[0]
        W = torch.mean(W, dim=-1)
        # Cross-entropy expects raw logits (it applies log-softmax internally),
        # so the softmaxed probabilities are used only for W above.
        loss_ce = self.ce(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        loss_fb = reinforced_loss(W=W, V=V, I_a=I_a, I_s=I_s, p=prob)
        loss = loss_ce + loss_fb
        return (loss, outputs) if return_outputs else loss
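
# A minimal usage sketch (the model name, dataset, and collator below are
# assumptions for illustration; the collator must emit the 'reward', 'I-all',
# 'I-sample', 'prob', 'labels', and 'label_mask' fields consumed above):
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   args = TrainingArguments(output_dir="out", per_device_train_batch_size=4)
#   trainer = PRGTrainer(tokenizer=tokenizer, model=model, args=args,
#                        train_dataset=train_dataset, data_collator=collator)
#   trainer.train()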