Oreo99 committed on
Commit
b24b7bc
1 Parent(s): 08f487c
Files changed (1) hide show
  1. app.py +31 -0
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import time
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+
5
# Pretrained chatbot checkpoint this app serves; a single constant keeps
# tokenizer and model guaranteed to come from the same repo.
_MODEL_ID = "IEEEVITPune-AI-Team/ChatbotAlpha0.7"

tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)
9
def generate_response(message, history, system_prompt, tokens):
    """Generate one chatbot reply for the Gradio ChatInterface.

    Args:
        message: The user's latest chat message (str).
        history: Prior chat turns supplied by gr.ChatInterface; currently
            unused — each reply is conditioned only on the system prompt
            and the latest message.
        system_prompt: Instruction text prepended to the user message.
        tokens: Maximum number of new tokens to generate (value of the
            "Number of Tokens" slider).

    Returns:
        The decoded model output as a plain string.
    """
    # Condition the model on the system prompt followed by the user message.
    input_text = f"{system_prompt} {message}"
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # BUG FIX: the slider value was previously ignored (max_length was
    # hard-coded to 100). Use it as the new-token budget; fall back to 100
    # if the value is missing/invalid so behavior degrades gracefully.
    try:
        max_new = int(tokens)
    except (TypeError, ValueError):
        max_new = 100
    output = model.generate(
        input_ids,
        max_new_tokens=max_new,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
19
+
20
# Define the Gradio UI.
with gr.Blocks() as demo:
    # render=False defers rendering to ChatInterface's additional-inputs
    # area; without it the system-prompt box would appear twice.
    system_prompt = gr.Textbox(
        "You are helpful AI.", label="System Prompt", render=False
    )
    slider = gr.Slider(10, 100, render=False, label="Number of Tokens")
    # BUG FIX: gr.ChatInterface takes no `inputs`/`outputs` keywords (those
    # belong to gr.Interface) — the original call raised TypeError. Extra
    # per-message inputs are wired up via `additional_inputs`, in the same
    # order generate_response expects them after (message, history).
    gr.ChatInterface(
        generate_response,
        additional_inputs=[system_prompt, slider],
    )

# Launch the Gradio app.
demo.launch()