Stevross committed on
Commit
c53c6b7
1 Parent(s): 47446b7

Create app.py

Files changed (1)
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Map of display names to Hugging Face model identifiers
+ models = {
+     "EleutherAI/gpt-neo-2.7B": "EleutherAI/gpt-neo-2.7B",
+     "BlinkDL/rwkv-4-pile-430m": "BlinkDL/rwkv-4-pile-430m",
+     "BlinkDL/rwkv-4-pile-1b5": "BlinkDL/rwkv-4-pile-1b5",
+     "BlinkDL/RWKV-4-Raven": "BlinkDL/RWKV-4-Raven"
+ }
+
+ def generate_text(prompt, model_name):
+     # Load the selected model and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name)
+
+     # Tokenize the input prompt
+     input_ids = tokenizer.encode(prompt, return_tensors="pt")
+
+     # Generate a response
+     output = model.generate(input_ids, max_length=100, num_return_sequences=1)
+
+     # Decode the output tokens back to text
+     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+     return generated_text
+
+ def main():
+     # Build the Gradio interface (gr.inputs/gr.outputs are deprecated; use gr.Dropdown and gr.Textbox)
+     model_dropdown = gr.Dropdown(choices=list(models.keys()), label="Select Model")
+     prompt_input = gr.Textbox(lines=5, placeholder="Enter your prompt here...", label="Prompt")
+     output_text = gr.Textbox(label="Generated Text")
+
+     interface = gr.Interface(
+         fn=generate_text,
+         inputs=[prompt_input, model_dropdown],
+         outputs=output_text,
+         title="Chat-bot using RWKV LLM",
+         description="Select a model and enter a prompt to generate text using the chat-bot."
+     )
+
+     # Launch the interface
+     interface.launch()
+
+ if __name__ == '__main__':
+     main()
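
As written, generate_text downloads and reloads the tokenizer and model on every request, so each prompt pays the full load time. A minimal sketch of one way to cache the loaded objects between calls, assuming the same transformers model identifiers as in the commit (this is not part of the commit itself):

from functools import lru_cache

from transformers import AutoModelForCausalLM, AutoTokenizer

@lru_cache(maxsize=2)
def load_model(model_name):
    # Load each model/tokenizer pair once and reuse it for later prompts
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

def generate_text(prompt, model_name):
    tokenizer, model = load_model(model_name)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=100, num_return_sequences=1)
    return tokenizer.decode(output[0], skip_special_tokens=True)

One caveat: the BlinkDL repositories listed in models publish raw RWKV checkpoints and may not load directly through AutoModelForCausalLM; if loading fails, the transformers-format RWKV conversions on the Hub are one alternative.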