Tonic euclaise committed on
Commit
09edce0
β€’
1 Parent(s): 28147d3

Use chat template (#1)

Browse files

- Use chat template (2530c083203c9428de90dcd889365c43f396f06e)


Co-authored-by: Jade <euclaise@users.noreply.huggingface.co>

Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -26,10 +26,9 @@ def generate_text(usertitle, content, temperature, max_length, N=3):
26
  # 'title': usertitle,
27
  # 'content': content
28
  # }
29
- input_text = f"[[[title:]]] {usertitle}\n[[[content:]]]{content}\n\n"
30
- inputs = tokenizer(input_text, return_tensors='pt').to('cuda')
31
- attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
32
- generated_sequences = model.generate(inputs['input_ids'], attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
33
  decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]
34
  def score(sequence):
35
  inputs = rm_tokenizer(sequence, return_tensors='pt', padding=True, truncation=True, max_length=512).to('cuda')
 
26
  # 'title': usertitle,
27
  # 'content': content
28
  # }
29
+ input_ids = tokenizer.apply_chat_template(input_text, return_tensors='pt').to('cuda')
30
+ attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device='cuda')
31
+ generated_sequences = model.generate(input_ids, attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
 
32
  decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]
33
  def score(sequence):
34
  inputs = rm_tokenizer(sequence, return_tensors='pt', padding=True, truncation=True, max_length=512).to('cuda')