Hunzla committed on
Commit
3318fbd
1 Parent(s): 7040840

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -0
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py — Gradio chat demo around a Llama-2 text-generation pipeline.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load model and tokenizer.
# FIX: the original id "llama-2-7b-chat" is not a valid Hub repo id (it lacks
# the owner namespace), so from_pretrained() would fail to resolve it. The
# canonical chat checkpoint lives under the meta-llama organization.
# NOTE(review): this repo is gated — access must be granted on the Hub and an
# auth token available in the environment; confirm for the deployment target.
model_name = "meta-llama/Llama-2-7b-chat-hf"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Shared text-generation pipeline used by generate_response() below.
chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
def generate_response(prompt):
    """Generate a chatbot reply for *prompt*.

    Runs the module-level text-generation pipeline and returns the
    generated text (capped at max_length=50 tokens) from the first
    candidate sequence.
    """
    outputs = chat_pipeline(prompt, max_length=50)
    return outputs[0]["generated_text"]
# Create the Gradio interface.
# FIX: dropped layout="vertical" — `layout` is not a gr.Interface parameter
# in Gradio 3.x+ (vertical stacking is the default) and passing it raises
# a TypeError on current releases.
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="LLAMA-2-7B Chatbot",
    description="Enter a prompt and get a chatbot response.",
    examples=[["Tell me a joke."]],
)

if __name__ == "__main__":
    interface.launch()