shionhonda committed on
Commit
94e7eaf
1 Parent(s): f6c3b9b

Create app.py

Files changed (1)
  1. app.py +24 -0
app.py ADDED
@@ -0,0 +1,24 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from peft import PeftModel
+import accelerate
+import streamlit as st
+
+st.set_page_config(page_title="Q&A Demo")
+st.header("Langchain Application")
+
+# Load the TinyLlama chat model and its tokenizer onto the GPU once at startup.
+model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0").cuda()
+tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+
+title = st.text_input("Title: ")
+submit = st.button("Submit")
+if submit:
+    # Build the chat prompt from the paper title and sample a one-sentence review.
+    prompt = tokenizer.apply_chat_template([
+        {"role": "system", "content": "You are an experienced researcher and a reviewer of scientific papers. Given a title of the paper, write a review about it in one sentence."},
+        {"role": "user", "content": title},
+    ], tokenize=False, add_generation_prompt=True)
+    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+    generate_ids = model.generate(inputs.input_ids, max_new_tokens=50, do_sample=True, temperature=0.5, top_k=50, top_p=0.95)
+    st.subheader("Reviewer #2:")
+    st.write(tokenizer.batch_decode(generate_ids[:, inputs.input_ids.shape[1]:], skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
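One possible follow-up, not part of this commit: Streamlit reruns app.py on every interaction, so the 1.1B checkpoint above is reloaded on each rerun. A minimal sketch of caching the load with Streamlit's st.cache_resource is below; the helper name load_model is hypothetical.

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource
def load_model(name: str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"):
    # Runs once per process; Streamlit reuses the returned objects across reruns.
    model = AutoModelForCausalLM.from_pretrained(name).cuda()
    tokenizer = AutoTokenizer.from_pretrained(name)
    return model, tokenizer

model, tokenizer = load_model()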