Daniton committed on
Commit
6f7c81e
β€’
1 Parent(s): cd2f393

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -23
app.py CHANGED
@@ -1,27 +1,20 @@
1
  import gradio as gr
2
- from typing import Optional, Tuple
3
- from langchain.llm import LLMChain
4
- from langchain.memory import ConversationBufferWindowMemory
5
- from langchain.huggingface_hub import HuggingFaceHub
6
 
7
- # Set up the language model chain
8
- prompt = "Instructions: You are SplitticAI. You answer questions exactly like people ask them. You were made by SplitticHost. You impersonate yourself as an AI chatbot.\n\n"
9
- llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1e-10})
10
- llm_chain = LLMChain(
11
- llm=llm,
12
- prompt=prompt,
13
- verbose=True,
14
- memory=ConversationBufferWindowMemory(k=2)
15
- )
16
 
17
  # Define the chat function
18
- def chat(
19
- inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
20
- ):
21
- history = history or []
22
- output = llm_chain.predict(human_input=inp)
23
- history.append((inp, output))
24
- return history, history
 
25
 
26
  # Set up the Gradio interface
27
  block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
@@ -52,13 +45,12 @@ with block:
52
  gr.HTML("Ask SplitticAI anything and get an answer!")
53
 
54
  gr.HTML(
55
- "<center>Powered by SplitticHost</center>"
56
  )
57
 
58
  state = gr.State()
59
  agent_state = gr.State()
60
 
61
- submit.click(chat, inputs=[message, state, agent_state], outputs=[chatbot, state])
62
- message.submit(chat, inputs=[message, state, agent_state], outputs=[chatbot, state])
63
 
64
  block.launch(debug=True)
 
1
  import gradio as gr
2
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
 
 
 
3
 
4
# Load the model and tokenizer.
# NOTE(review): the original referenced "google/flax-t5-xxl-qa-121k", which does
# not exist on the Hugging Face Hub; the previous revision of this app used
# "google/flan-t5-xxl", so that id is restored here — confirm against the Hub.
model_name = "google/flan-t5-xxl"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# T5 checkpoints are encoder-decoder models: loading them with
# AutoModelForCausalLM raises a ValueError for a `t5` config, so the
# seq2seq auto class must be used instead.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
 
 
 
 
8
 
9
  # Define the chat function
10
# Define the chat function
def chat(message):
    """Generate a model reply for a single user message.

    Tokenizes *message*, runs generation on the module-level ``model``,
    and decodes the resulting token ids back into plain text.
    """
    # Turn the user's text into model-ready input ids.
    input_ids = tokenizer.encode(message, return_tensors="pt")
    # Generate a response (default sampling settings, capped at 1024 tokens),
    # padding with the tokenizer's EOS id exactly as the original did.
    generated = model.generate(
        input_ids, max_length=1024, pad_token_id=tokenizer.eos_token_id
    )
    # Decode the first (and only) generated sequence, dropping special tokens.
    return tokenizer.decode(generated[0], skip_special_tokens=True)
18
 
19
  # Set up the Gradio interface
20
  block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
 
45
  gr.HTML("Ask SplitticAI anything and get an answer!")
46
 
47
  gr.HTML(
48
+ "<center>Powered by <a href='https://huggingface.co/google/flax-t5-xxl-qa-121k'>google/flax-t5-xxl-qa-121k</a></center>"
49
  )
50
 
51
  state = gr.State()
52
  agent_state = gr.State()
53
 
54
+ submit.click(chat, inputs=[message], outputs=[chatbot])
 
55
 
56
  block.launch(debug=True)