# Gradio Space entry point for the kronos25/Temporal_Chatbot model.
import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Hub Inference API client for the fine-tuned model.
# NOTE(review): currently unused — `predict` below loads the model locally
# instead; this client is only referenced by the commented-out
# chat_completion variant. Confirm whether it can be removed.
client = InferenceClient("kronos25/Temporal_Chatbot")

# def predict(message, history):
#   for message in client.chat_completion(history,max_tokens=512):
#     token = message.choices[0].delta.content
#     response += token
#     yield response

def predict(message, history):
  """Generate a reply to *message* with the fine-tuned T5 model.

  Parameters
  ----------
  message : str
      The user's latest chat message.
  history : list
      Prior turns supplied by gr.ChatInterface. Currently unused — each
      reply is generated from the latest message alone.

  Returns
  -------
  str
      The decoded model reply, newline-terminated (matching the original
      contract of returning ``reply + '\\n'``).
  """
  # Imported lazily: the original referenced these names with no import
  # anywhere in the file, so the first chat turn raised NameError.
  from transformers import T5ForConditionalGeneration, T5Tokenizer

  # Load once and cache on the function object — the original re-loaded
  # the model and tokenizer from the Hub on every single chat turn.
  if not hasattr(predict, "_model"):
    predict._model = T5ForConditionalGeneration.from_pretrained('kronos25/Temporal_Chatbot')
    predict._tokenizer = T5Tokenizer.from_pretrained('kronos25/Temporal_Chatbot')
  model = predict._model
  tokenizer = predict._tokenizer

  # `prompt` instead of the original `input`, which shadowed the builtin.
  prompt = message + '\n'
  inputs = tokenizer(prompt, return_tensors="pt")
  outputs = model.generate(**inputs, max_length=100)
  model_result = tokenizer.decode(outputs[0], skip_special_tokens=True)
  return model_result + '\n'

# Build the chat UI once at import time, but launch it only when the file
# is executed as a script. The original launched unconditionally at import
# and never bound the ChatInterface, so `demo.launch()` under the
# `__main__` guard raised NameError.
demo = gr.ChatInterface(
    predict,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask me anything.", container=False, scale=7),
    title="Temporal Chatbot",
    description="Ask Temporal Chatbot any question",
    theme="soft",
    examples=["Is the doctor available tomorrow?"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear"
)

if __name__ == "__main__":
    # share=True preserved from the original unconditional launch.
    demo.launch(share=True)