sdafd committed
Commit
c5bae74
1 Parent(s): 2f1655b

Create app.py

Files changed (1)
  app.py +20 -0
app.py ADDED
@@ -0,0 +1,20 @@
+ import gradio as gr
+ import torch
+ from transformers import pipeline
+
+ def generate_text(prompt):
+     messages = [
+         {"role": "system", "content": "You are a code assistant"},
+         {"role": "user", "content": prompt},
+     ]
+     formatted_prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     outputs = pipe(formatted_prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+     generated_text = outputs[0]["generated_text"]
+     return generated_text
+
+ # Load the model pipeline
+ pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
+
+ # Create Gradio interface
+ iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", live=True, title="Chatbot Assistant")
+ iface.launch()