daniloedu committed on
Commit
52214c5
1 parent: c36c5f2

chatgpt version

Files changed (1)
  1. app.py +13 -20
app.py CHANGED
@@ -1,26 +1,19 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# Load the pre-trained LLM model and tokenizer
-model_name = "microsoft/DialoGPT-medium"
-model = AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+# Mock LLM function for demonstration. Replace this with your actual LLM call.
+def ask_llm(question):
+    # Here you would normally interact with an LLM. For demonstration, we'll just echo the question.
+    return f"LLM Response: {question}"
 
-# Define the function to generate the chatbot response
-def chatbot(input_text):
-    input_ids = tokenizer.encode(input_text, return_tensors="pt")
-    output = model.generate(input_ids, max_length=1000, do_sample=True, top_p=0.92, top_k=0, num_return_sequences=1)
-    response = tokenizer.decode(output[0], skip_special_tokens=True)
-    return response
+def chat_with_llm(user_input):
+    return ask_llm(user_input)
 
 # Create the Gradio interface
-chat_interface = gr.Blocks()
+iface = gr.Interface(fn=chat_with_llm,
+                     inputs=gr.inputs.Textbox(lines=2, placeholder="Ask me anything!"),
+                     outputs="text",
+                     title="Chat with LLM",
+                     description="Type your question below and get responses from an LLM.")
 
-with chat_interface:
-    gr.Markdown("# Chatbot")
-    chatbot_input = gr.Textbox(placeholder="Type your message here...")
-    chatbot_output = gr.Textbox(label="Chatbot Response")
-    chat_btn = gr.Button("Send")
-    chat_btn.click(chatbot, inputs=chatbot_input, outputs=chatbot_output)
-
-chat_interface.launch()
+# Launch the app
+iface.launch()
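
The new ask_llm is an echo stub; its comment says to replace it with a real LLM call. A minimal sketch of doing so, reusing the microsoft/DialoGPT-medium checkpoint that the previous revision loaded (appending tokenizer.eos_token and slicing off the prompt follow the DialoGPT model card's usage; the sampling parameters are carried over from the old chatbot function and are only illustrative):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "microsoft/DialoGPT-medium"  # checkpoint used by the previous revision
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def ask_llm(question):
    # Mark the end of the user's turn so the model generates a reply rather than a continuation.
    input_ids = tokenizer.encode(question + tokenizer.eos_token, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=1000,
        do_sample=True,
        top_p=0.92,
        pad_token_id=tokenizer.eos_token_id,  # avoids the missing-pad-token warning
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)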
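
Note that gr.inputs.Textbox is the legacy Gradio 2.x namespace: it was deprecated in Gradio 3 and removed in Gradio 4, where the component is passed directly, e.g. inputs=gr.Textbox(lines=2, placeholder="Ask me anything!").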