telmo000 committed on
Commit
7902a6f
·
1 Parent(s): 05d1e4e

update sentence parsing, use model twice

Browse files
Files changed (1) hide show
  1. app.py +20 -4
app.py CHANGED
@@ -17,15 +17,31 @@ model = PeftModel.from_pretrained(model, peft_model_id)
17
 
18
 
19
  def make_inference(original_text):
20
- batch = tokenizer(
21
- f"### Negative sentence:\n{original_text}\n\n### Reframing strategy: ['optimism']\n\n### Reframing sentence:\n",
 
22
  return_tensors="pt",
23
  )
24
 
25
  with torch.cuda.amp.autocast():
26
- output_tokens = model.generate(**batch, max_new_tokens=50)
27
 
28
- return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
 
31
  if __name__ == "__main__":
 
17
 
18
 
19
def make_inference(original_text):
    """Positively reframe a negative sentence via two model passes.

    Pass 1 prompts the model to produce a reframing strategy for
    ``original_text``; pass 2 feeds that strategy back and prompts for the
    reframed sentence, which is returned as a plain string.

    NOTE(review): relies on module-level ``tokenizer`` and ``model`` (a
    PEFT-wrapped causal LM, loaded earlier in app.py) — confirm both are on
    the same device as the generated batches.

    Parameters
    ----------
    original_text : str
        The negative sentence to reframe.

    Returns
    -------
    str
        The model-generated reframed sentence.
    """
    str_strategy_prompt = f"### Negative sentence:\n{original_text}\n\n### Reframing strategy:\n"
    batch_1 = tokenizer(str_strategy_prompt, return_tensors="pt")

    with torch.cuda.amp.autocast():
        output_tokens_1 = model.generate(**batch_1, max_new_tokens=50)

    # FIX: decode only the generated continuation instead of slicing the fully
    # decoded string by len(prompt). Detokenization is not guaranteed to
    # reproduce the prompt byte-for-byte (special tokens are skipped and
    # whitespace may be normalized), so character slicing can cut mid-word or
    # leak prompt fragments into the result. Token-index slicing is exact.
    prompt_len_1 = batch_1["input_ids"].shape[1]
    continuation_1 = tokenizer.decode(
        output_tokens_1[0][prompt_len_1:], skip_special_tokens=True
    )
    # Keep only the first non-empty line as the strategy label; strip guards
    # against the model emitting a leading newline before the strategy.
    reframing_strategy = continuation_1.strip().partition("\n")[0]

    # Prompt text reproduced byte-for-byte from the original implementation.
    str_reframing_prompt = (
        f"### Negative sentence:\n{original_text}\n\n"
        f"### Reframing strategy:\n{reframing_strategy}\n\n"
        f"### Reframing sentence:\n"
    )
    batch_2 = tokenizer(str_reframing_prompt, return_tensors="pt")

    with torch.cuda.amp.autocast():
        output_tokens_2 = model.generate(**batch_2, max_new_tokens=100)

    # Same token-index slicing for the second pass.
    prompt_len_2 = batch_2["input_ids"].shape[1]
    reframing_sentence = tokenizer.decode(
        output_tokens_2[0][prompt_len_2:], skip_special_tokens=True
    )

    return reframing_sentence
45
 
46
 
47
  if __name__ == "__main__":