arieridwans committed on
Commit 9649ddd
1 Parent(s): 710d323

Update app.py

Files changed (1)
  1. app.py +6 -4
app.py CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
 import subprocess
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+import re
 
 st.title('Eleanor Rigby')
 
@@ -22,9 +23,10 @@ def run_inference():
                           truncation=True)
     result = inference_model.generate(**input, repetition_penalty=1.2, max_length=1024)
     output = inference_tokenizer.batch_decode(result, skip_special_tokens=True)[0]
-    formatted_output = output.split("Output:")
-    trimmed_output = formatted_output[1].strip()
-    st.text("Generated Result:")
-    st.write(trimmed_output)
+    match = re.search(r'Output:(.*)', output)
+    if match:
+        output_content = match.group(1).strip()
+        st.text("Generated Result:")
+        st.write(output_content)
 
 st.button('Generate Result', on_click=run_inference)
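
For context, a minimal standalone sketch of why the regex guard is safer than the old split-based extraction (the decoded strings below are hypothetical, not the app's real model output): output.split("Output:")[1] raises an IndexError when the marker is missing, while re.search returns None and the new if-match check simply skips the display step. One behavioral difference to keep in mind: without re.DOTALL, (.*) captures only up to the first newline after the marker, whereas split kept everything that followed it.

import re

# Hypothetical decoded outputs for illustration only.
with_marker = "Instruction: summarise\nOutput: Ah, look at all the lonely people"
without_marker = "The model produced no labelled section."

# Old approach: indexing the split result assumes the marker is present,
# so a missing "Output:" raises an IndexError.
try:
    print(without_marker.split("Output:")[1].strip())
except IndexError:
    print("split-based extraction failed: 'Output:' not found")

# New approach: re.search returns None when the marker is absent,
# so the caller can guard with a plain `if match:` check.
for text in (with_marker, without_marker):
    match = re.search(r"Output:(.*)", text)
    if match:
        print("Generated Result:", match.group(1).strip())
    else:
        print("no 'Output:' marker in this text")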