euclaise committed on
Commit
f2d04dd
β€’
1 Parent(s): 09edce0

Fix attempt 2

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -22,11 +22,11 @@ rm_model = AutoModelForSequenceClassification.from_pretrained('OpenAssistant/rew
22
 
23
  @spaces.GPU
24
  def generate_text(usertitle, content, temperature, max_length, N=3):
25
- # msg ={
26
- # 'title': usertitle,
27
- # 'content': content
28
- # }
29
- input_ids = tokenizer.apply_chat_template(input_text, return_tensors='pt').to('cuda')
30
  attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device='cuda')
31
  generated_sequences = model.generate(input_ids, attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
32
  decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]
 
22
 
23
  @spaces.GPU
24
  def generate_text(usertitle, content, temperature, max_length, N=3):
25
+ msg = [{
26
+ 'title': usertitle,
27
+ 'content': content
28
+ }]
29
+ input_ids = tokenizer.apply_chat_template(msg, return_tensors='pt').to('cuda')
30
  attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device='cuda')
31
  generated_sequences = model.generate(input_ids, attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
32
  decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]