Nidhish714 committed on
Commit
06ea540
1 Parent(s): 6f95325
Files changed (1) hide show
  1. app.py +25 -8
app.py CHANGED
@@ -1,14 +1,31 @@
1
  import gradio as gr
2
  import time
3
def echo(message, history, system_prompt, tokens):
    """Stream back a canned reply one character at a time.

    Yields successively longer prefixes of the reply, capped at
    ``tokens`` characters, sleeping 50 ms between chunks to mimic
    token-by-token generation in the chat UI.
    """
    response = f"System prompt: {system_prompt}\n Message: {message}."
    limit = min(len(response), int(tokens))
    for end in range(1, limit + 1):
        time.sleep(0.05)
        yield response[:end]
 
 
 
 
 
 
 
 
 
 
 
 
 
8
# Build the demo UI: a system-prompt textbox plus a (non-rendered) token
# slider; gr.ChatInterface shows both inside its additional-inputs area
# and passes their values to `echo` after (message, history).
with gr.Blocks() as demo:
    system_prompt = gr.Textbox("You are helpful AI.", label="System Prompt")
    slider = gr.Slider(10, 100, render=False)
    extra_inputs = [system_prompt, slider]
    gr.ChatInterface(echo, additional_inputs=extra_inputs)
demo.launch()
 
 
 
1
import gradio as gr
import time
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face Hub id of the chatbot checkpoint used for both the
# tokenizer and the causal-LM weights.
_MODEL_ID = "IEEEVITPune-AI-Team/ChatbotAlpha0.7"

# Load tokenizer and model once at import time so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)
9
+ # Define function to generate response
10
+ def generate_response(message, history, system_prompt, tokens):
11
+ # Concatenate system prompt and user message
12
+ input_text = f"{system_prompt} {message}"
13
+ # Tokenize input text
14
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
15
+ # Generate response
16
+ output = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
17
+ response = tokenizer.decode(output[0], skip_special_tokens=True)
18
+ return response
19
+
20
# Define Gradio interface
with gr.Blocks() as demo:
    system_prompt = gr.Textbox("You are helpful AI.", label="System Prompt")
    slider = gr.Slider(10, 100, render=False, label="Number of Tokens")
    # BUG FIX: gr.ChatInterface takes no `inputs=`/`outputs=` keyword
    # arguments (those belong to gr.Interface) — passing them raises
    # TypeError at startup. ChatInterface supplies (message, history)
    # itself; extra components are wired via additional_inputs, matching
    # generate_response's (message, history, system_prompt, tokens)
    # signature.
    gr.ChatInterface(
        generate_response,
        additional_inputs=[system_prompt, slider],
    )

# Launch Gradio interface
demo.launch()