import re

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments

from configs.hyperparametric import Reward_config
from model.logitsprocessor import OutputControlLogitsProcessor, RewardControlLogitsProcessor
from tree.asts import AST


def find_subarray_positions(large_array, small_array):
    """Return the start index of every occurrence of small_array inside large_array."""
    n = len(small_array)
    result = []
    # Slide a window of length n over large_array and record each position where the slice matches.
    for i in range(len(large_array) - n + 1):
        if large_array[i:i + n] == small_array:
            result.append(i)
    return result


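# Illustrative example (not from the original source): with plain Python lists of token ids,
# find_subarray_positions([1, 2, 3, 2, 3], [2, 3]) returns [1, 3].

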
class Warp:
    """Wraps a generation model and a reward model together with their tokenizers and logits processors."""

    def __init__(self, args):
        self.args = args
        self.generate_tokenizer = None
        self.generate_model = None
        self.logits_processor = None
        self.reward_tokenizer = None
        self.reward_model = None
        self.reward_processor = None

    def load_generate_model(self):
        args = self.args
        self.generate_tokenizer = AutoTokenizer.from_pretrained(args.generate_model_path, trust_remote_code=True)
        self.generate_model = AutoModelForCausalLM.from_pretrained(args.generate_model_path, trust_remote_code=True).half().cuda()

        # Optionally constrain decoding with a syntax-tree-based logits processor.
        if args.logits_control:
            syntax_tree = AST(path=args.control_file, tokenizer=self.generate_tokenizer)
            self.logits_processor = OutputControlLogitsProcessor(ast=syntax_tree, tokenizer=self.generate_tokenizer)

        # Remove any hard-coded '<think>' tag from the chat template.
        if re.search('<think>', self.generate_tokenizer.chat_template):
            self.generate_tokenizer.chat_template = re.sub('<think>', '', self.generate_tokenizer.chat_template)

        self.none_token = '<p>none</p>'
        self.generate_bos = self.generate_tokenizer.bos_token
        self.generate_eos = self.generate_tokenizer.eos_token

    def load_reward_model(self, **kwargs):
        args = self.args
        # Load the reward model quantized (device_map='auto') when a 'bnb_config' is supplied, otherwise in fp16 on a single GPU.
        if 'bnb_config' in kwargs:
            self.reward_model = AutoModelForCausalLM.from_pretrained(args.reward_model_path,
                                                                     trust_remote_code=True,
                                                                     device_map='auto',
                                                                     quantization_config=kwargs['bnb_config'],
                                                                     )
        else:
            self.reward_model = AutoModelForCausalLM.from_pretrained(args.reward_model_path,
                                                                     trust_remote_code=True,
                                                                     ).half().cuda()
        self.reward_tokenizer = AutoTokenizer.from_pretrained(args.reward_model_path, trust_remote_code=True)

        if args.logits_control:
            self.reward_processor = RewardControlLogitsProcessor(tokenizer=self.generate_tokenizer)

        # Remove any hard-coded '<think>' tag from the reward tokenizer's chat template as well.
        if re.search('<think>', self.reward_tokenizer.chat_template):
            self.reward_tokenizer.chat_template = re.sub('<think>', '', self.reward_tokenizer.chat_template)

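    # Illustrative only (not from the original source): a 'bnb_config' for load_reward_model can be built
    # with the standard transformers BitsAndBytesConfig, e.g.
    #   from transformers import BitsAndBytesConfig
    #   bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    #   warp.load_reward_model(bnb_config=bnb)
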
    @staticmethod
    def template_to_qwen(x, bos='<|im_start|>', eos='<|im_end|>'):
        # Wrap a user query in the Qwen chat markup:
        # <|im_start|>system ... <|im_end|>\n<|im_start|>user ... <|im_end|>\n<|im_start|>assistant\n
        # The leading {bos} keeps assistant_from_template_in_response (which takes split(bos)[3]) aligned with the assistant turn.
        return """{bos}system\n{system}{eos}\n{bos}user\n{query}{eos}\n{bos}assistant\n""".format(
            bos=bos,
            eos=eos,
            system="You are a helpful assistant.",
            query=x,
        )

    @staticmethod
    def assistant_from_template_in_response(x, bos='<|im_start|>', eos='<|im_end|>'):
        # The assistant turn is the fourth bos-delimited segment; strip its eos and the leading 'assistant' role tag.
        processed_string = x.split(bos)[3].strip()
        processed_string = processed_string.split(eos)[0].strip()
        processed_string = re.sub('^assistant', '', processed_string).strip()
        return processed_string

    @staticmethod
    def step_from_response(x):
        # Sentinel returned when neither a step nor a conclusion tag can be extracted.
        step = '</none_response>'
        if '</think>' in x:
            # Keep only the text after the model's reasoning block.
            x = x.split('</think>')[-1].strip()
        if '<p>' in x:
            # A reasoning step is wrapped in <p></p>.
            x_ = re.search('<p>.+</p>', x)
            step = x_.group() if x_ else x
        if '<e>' in x:
            # A conclusion is wrapped in <e></e> and, being checked last, takes precedence over a step.
            x_ = re.search('<e>.+</e>', x)
            step = x_.group() if x_ else x
        return step

    @staticmethod
    def value_from_response(x):
        # Extract the verdict wrapped in <v></v>; [3:-4] strips the '<v>' prefix and '</v>' suffix.
        if '<v>' in x:
            x_ = re.search(r'<v>\w+</v>', x)
            return x_.group()[3:-4] if x_ else ''
        return ''


class WarpLJP(Warp):
    """Warp specialized for legal judgment prediction (LJP) samples."""

    def __init__(self, args, mode='p'):
        super().__init__(args)
        self.mode = mode

    def processing_single(self, x, mode=''):
        # '<p>无</p>' is the Chinese counterpart of the base class's '<p>none</p>' token.
        self.none_token = '<p>无</p>'

        p = x['Procuratorate']
        a = []
        # Split the defence ('d' in mode) and/or fact ('f' in mode) sections into sentences on the Chinese full stop '。'.
        if 'd' in mode:
            for d in x['Defence'].split('。'):
                if len(d.strip()) > 0:
                    a.append(d)
        if 'f' in mode:
            for f in x['Fact'].split('。'):
                if len(f.strip()) > 0:
                    a.append(f)

        # Gold labels from the first annotation: charge, penalty, and imprisonment.
        crime = [c['charge'] for c in x['Annotations'][0]['annotation']]
        penalty = [c['penalty'] for c in x['Annotations'][0]['annotation']]
        imprisonment = [c['imprisonment'] for c in x['Annotations'][0]['annotation']]

        # Wrap each label in <e></e>; '罪' appends the Chinese suffix for 'crime' to every charge name.
        label = {'crime': ';'.join(['<e>{c}罪</e>'.format(c=c) for c in crime]),
                 'penalty': ';'.join(['<e>{c}</e>'.format(c=c) for c in penalty]),
                 'imprisonment': ';'.join(['<e>{c}</e>'.format(c=c) for c in imprisonment])}

        # Wrap every extracted sentence in <p></p>; fall back to the none token when nothing was extracted.
        if a:
            a = ['<p>{i}</p>'.format(i=x_) if not x_.startswith('<p>') else x_ for x_ in a]
            return p, a, label
        else:
            return p, ['<p>无</p>'], label

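    # Illustrative input shape for processing_single (hypothetical values; only the keys accessed above are real):
    #   x = {'Procuratorate': '...', 'Defence': '...', 'Fact': '...',
    #        'Annotations': [{'annotation': [{'charge': '盗窃', 'penalty': '...', 'imprisonment': '...'}]}]}
    # processing_single(x, mode='f') then returns (x['Procuratorate'], ['<p>...</p>', ...],
    # {'crime': '<e>盗窃罪</e>', 'penalty': '<e>...</e>', 'imprisonment': '<e>...</e>'}).
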
    @staticmethod
    def prompt_to_value(x, a, bos='<|im_start|>', eos='<|im_end|>'):
        # Reward prompt (Chinese): given the case description, accept or reject the existing reasoning steps
        # and answer inside <v></v>, e.g. <v>接受</v> ('accept').
        pmt = '根据案情描述对给出的已有推理步骤选择接受或拒绝,并在<v></v>中给出选择,例如<v>接受</v>\n案情描述:{x}\n已有推理步骤:\n{a}\n:'.format(x=x, a=a)
        return Warp.template_to_qwen(pmt, bos=bos, eos=eos)

    @staticmethod
    def prompt_to_crime(x, a, bos='<|im_start|>', eos='<|im_end|>'):
        # Generation prompt (Chinese): given the case description and the existing steps, produce exactly one
        # new piece of reasoning; a conclusion goes in <e></e> (e.g. <e>盗窃罪</e>, 'theft'), a step goes in <p></p>.
        pmt = '根据案情描述和已有步骤仅给出一个推理。如果是结论则直接输出<e></e>,例如<e>盗窃罪</e>。如果是步骤则直接输出<p></p>,例如<p>步骤1:…</p>\n案情描述:{x}\n已有推理步骤:\n{a}\n:'.format(x=x, a=a)
        return Warp.template_to_qwen(pmt, bos=bos, eos=eos)
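

# Minimal usage sketch (illustrative, not part of the original pipeline). The attribute names on `args`
# are exactly the ones read by Warp/WarpLJP above; the concrete paths are placeholder assumptions.
if __name__ == '__main__':
    from types import SimpleNamespace

    args = SimpleNamespace(
        generate_model_path='path/to/generate_model',  # assumed placeholder path
        reward_model_path='path/to/reward_model',      # assumed placeholder path
        logits_control=False,                          # set True (with control_file) to enable constrained decoding
        control_file=None,
    )

    warp = WarpLJP(args, mode='p')
    warp.load_generate_model()
    warp.load_reward_model()

    # Build a generation prompt for one case description with no prior reasoning steps.
    prompt = WarpLJP.prompt_to_crime('(case description goes here)', '<p>无</p>')
    print(prompt)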