arieridwans committed on
Commit
b168e68
1 Parent(s): ad3979e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -19
app.py CHANGED
@@ -1,28 +1,39 @@
1
  import streamlit as st
2
- import subprocess
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
5
- import re
6
 
7
- st.title('Eleanor Rigby')
 
 
 
 
8
 
9
- hf_token = st.secrets["hf_token"]
10
- inference_model = AutoModelForCausalLM.from_pretrained("arieridwans/phi_2-finetuned-lyrics", trust_remote_code=True, torch_dtype=torch.float32, token=hf_token)
11
- inference_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", use_fast=True)
12
- inference_tokenizer.pad_token=inference_tokenizer.eos_token
 
13
 
14
- user_prompt = st.text_area("Enter your prompt that can be song lyrics e.g. 'Yesterday, I saw you in my dream'")
 
15
 
16
- def run_inference():
 
 
 
 
17
  instruct_prompt = "Instruct:You are a song writer and your main reference is The Beatles. Write a song lyrics by completing these words:"
18
  output_prompt = "Output:"
19
- input = inference_tokenizer(""" {0}{1}\n{2} """.format(instruct_prompt, user_prompt, output_prompt),
20
- return_tensors="pt",
21
- return_attention_mask=False,
22
- padding=True,
23
- truncation=True)
24
- result = inference_model.generate(**input, repetition_penalty=1.2, max_length=1024)
25
- output = inference_tokenizer.batch_decode(result, skip_special_tokens=True)[0]
26
- st.text(output)
27
-
28
- st.button('Generate Result', on_click=run_inference)
 
 
 
 
1
  import streamlit as st
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
 
4
 
5
+ # Load the Phi 2 model and tokenizer
6
+ tokenizer = AutoTokenizer.from_pretrained(
7
+ "microsoft/phi-2",
8
+ trust_remote_code=True
9
+ )
10
 
11
+ model = AutoModelForCausalLM.from_pretrained(
12
+ "microsoft/phi-2",
13
+ device_map="auto",
14
+ trust_remote_code=True
15
+ )
16
 
17
+ # Streamlit UI
18
+ st.title("Eleanor Rigby")
19
 
20
+ # User input prompt
21
+ prompt = st.text_area("Enter your prompt:", """Write a story about Nasa""")
22
+
23
+ # Generate output based on user input
24
+ if st.button("Generate Output"):
25
  instruct_prompt = "Instruct:You are a song writer and your main reference is The Beatles. Write a song lyrics by completing these words:"
26
  output_prompt = "Output:"
27
+ prompt = """ {0}{1}\n{2} """.format(instruct_prompt, user_prompt, output_prompt)
28
+ with torch.no_grad():
29
+ token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
30
+ output_ids = model.generate(
31
+ token_ids.to(model.device),
32
+ max_new_tokens=512,
33
+ do_sample=True,
34
+ temperature=0.3
35
+ )
36
+
37
+ output = tokenizer.decode(output_ids[0][token_ids.size(1):])
38
+ st.text("Generated Output:")
39
+ st.write(output)