Spaces · Runtime error

mouryachinta committed · 7d2aaad
1 Parent(s): a608aa8
Update app.py

app.py CHANGED
@@ -1,3 +1,58 @@
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned Llama-2 model and its tokenizer from the Hugging Face Hub.
def initialize_model_and_tokenizer(model_name="mouryachinta/llama-2-7b-mourya"):
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = initialize_model_and_tokenizer()

from langchain.llms.base import LLM

# Wrap the local transformers model so LangChain can drive it like any other LLM.
class CustomLLM(LLM):
    def _call(self, prompt, stop=None, run_manager=None) -> str:
        inputs = tokenizer(prompt, return_tensors="pt")
        # max_new_tokens=20 keeps replies very short; decoding result[0] also
        # echoes the prompt, since the whole generated sequence is decoded.
        result = model.generate(input_ids=inputs.input_ids, max_new_tokens=20)
        result = tokenizer.decode(result[0])
        return result

    @property
    def _llm_type(self) -> str:
        return "custom"

llm = CustomLLM()

from langchain import PromptTemplate

template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

from langchain import LLMChain

llm_chain = LLMChain(prompt=prompt, llm=llm)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    # The original code called init_chain(model, tokenizer) here, but no such
    # function exists in app.py; the llm_chain built above is used directly.

    def user(user_message, history):
        # Append the new user turn to the history and clear the textbox.
        return "", history + [[user_message, None]]

    def bot(history):
        print("Question: ", history[-1][0])
        bot_message = llm_chain.run(question=history[-1][0])
        print("Response: ", bot_message)
        # Attach the model's reply to the latest turn.
        history[-1][1] = bot_message
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch()
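The Blocks body in the original commit calls init_chain(model, tokenizer), which app.py never defines and which may well be the source of the Space's "Runtime error" status. A minimal sketch of what such a helper might look like, assuming it only bundles the prompt/chain construction already shown above (the name and signature are taken from the call site, not from the author's actual implementation):

def init_chain(model, tokenizer):
    # Hypothetical helper; CustomLLM reads the module-level model/tokenizer,
    # so the arguments are accepted only to match the original call site.
    llm = CustomLLM()
    template = """Question: {question}
Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    return llm_chain, llm

With a helper like this in place, the original line llm_chain, llm = init_chain(model, tokenizer) inside gr.Blocks would run as written.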