niclasfw commited on
Commit
0b07e1e
1 Parent(s): b37988d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -15
app.py CHANGED
@@ -4,7 +4,7 @@ from transformers import pipeline
4
  import torch
5
 
6
 
7
- @st.cache(allow_output_mutation=True)
8
  def get_model():
9
  # load base LLM model and tokenizer
10
 
@@ -21,15 +21,8 @@ def get_model():
21
 
22
  tokenizer, model = get_model()
23
 
24
- # model_id = "niclasfw/schlager-bot-004"
25
-
26
- # model = AutoModelForCausalLM.from_pretrained(model_id)
27
- # tokenizer = AutoTokenizer.from_pretrained(model_id)
28
-
29
- # generator = pipeline(task="text-generation", model=model_id, tokenizer=model_id)
30
-
31
  st.title('Schlager Bot')
32
- user_input = st.text_area('Enter verse (minimum of 15 words): ')
33
  button = st.button('Generate Lyrics')
34
 
35
 
@@ -42,14 +35,9 @@ if user_input and button:
42
 
43
  ### Response:
44
  """
45
- # output = generator(prompt, do_sample=True, max_new_tokens=500, top_p=0.75, temperature=0.95, top_k=15)
46
- # st.write("Prompt: ", user_input)
47
- # input = tokenizer(prompt, padding=True, return_tensors="pt")
48
- # generate_ids = model.generate(input.input_ids, max_length=500, top_p=0.75, temperature=0.95, top_k=15)
49
- # output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
50
  input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.cuda()
51
  outputs = model.generate(input_ids=input_ids, pad_token_id=tokenizer.eos_token_id, max_new_tokens=500, do_sample=True, top_p=0.75, temperature=0.95, top_k=15)
52
 
53
- st.write(output)
54
 
55
 
 
4
  import torch
5
 
6
 
7
+ @st.cache_resource
8
  def get_model():
9
  # load base LLM model and tokenizer
10
 
 
21
 
22
  tokenizer, model = get_model()
23
 
 
 
 
 
 
 
 
24
  st.title('Schlager Bot')
25
+ user_input = st.text_area('Enter verse: ')
26
  button = st.button('Generate Lyrics')
27
 
28
 
 
35
 
36
  ### Response:
37
  """
 
 
 
 
 
38
  input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.cuda()
39
  outputs = model.generate(input_ids=input_ids, pad_token_id=tokenizer.eos_token_id, max_new_tokens=500, do_sample=True, top_p=0.75, temperature=0.95, top_k=15)
40
 
41
+ st.write(tokenizer.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
42
 
43