Tonic committed on
Commit
0292591
β€’
1 Parent(s): 8af3d49

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -21,10 +21,10 @@ model = AutoModelForCausalLM.from_pretrained(model_path, device_map='cuda', quan
21
  def generate_text(usertitle, content, max_length, temperature):
22
  input_text = {'title': usertitle, 'content': content}
23
  inputs = tokenizer.apply_chat_template(input_text, return_tensors='pt').cuda()
24
- generated_text = tokenizer.decode(model.generate(inputs, max_new_tokens=max_length, temperature=temperature, do_sample=True)[0])#.strip().split(tokenizer.eos_token)[0]
25
- split_text = generated_text.strip().split(tokenizer.eos_token)[0]
26
 
27
- return split_text
28
 
29
  def gradio_app():
30
  with gr.Blocks() as demo:
 
21
  def generate_text(usertitle, content, max_length, temperature):
22
  input_text = {'title': usertitle, 'content': content}
23
  inputs = tokenizer.apply_chat_template(input_text, return_tensors='pt').cuda()
24
+ generated_text = tokenizer.decode(model.generate(inputs, max_new_tokens=max_length, temperature=temperature, do_sample=True)[0]).strip().split(tokenizer.eos_token)[0]
25
+ # split_text = generated_text.split(tokenizer.eos_token)[0]
26
 
27
+ return generated_text
28
 
29
  def gradio_app():
30
  with gr.Blocks() as demo: