dumb-dev committed
Commit 7746e42
1 Parent(s): 6b7b94c

Update app.py

Files changed (1)
  1. app.py +34 -50
app.py CHANGED
@@ -1,59 +1,43 @@
 import gradio as gr
-from transformers import pipeline, AutoTokenizer
-
-def load_model(model_name):
-    return pipeline("text-generation", model=model_name, device="cpu")
-
-def generate(
-    model_name,
-    template_name,
-    user_input,
-    temperature=0.4,
-    top_p=0.95,
-    top_k=50,
-    max_new_tokens=256,
-):
-    pipe = load_model(model_name)
-    # Need to add additional options later.
-    if template_name == "Falcon 1B Template":
-        message_template = [
-            {"role": "user", "content": "Hello!"},
-            {"role": "assistant", "content": "Hello! How can I assist you today?"},
-            {"role": "user", "content": user_input},
-        ]
-    else:  # Default to "TinyLlama Template"
-        message_template = [
-            {
-                "role": "system",
-                "content": "You are a highly knowledgeable and friendly chatbot equipped with extensive information across various domains. Your goal is to understand and respond to user inquiries with accuracy and clarity. You're adept at providing detailed explanations, concise summaries, and insightful responses. Your interactions are always respectful, helpful, and focused on delivering the most relevant information to the user.",
-            },
-            {"role": "user", "content": user_input},
-        ]
-
-    # Set tokenize correctly. Otherwise ticking the box breaks it.
-    prompt = pipe.tokenizer.apply_chat_template(message_template, tokenize=False, add_generation_prompt=True)
-    outputs = pipe(prompt, max_new_tokens=max_new_tokens, do_sample=True,
-                   temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=1.10)
-    return outputs[0]["generated_text"]
-
-model_choices = ["dumb-dev/TinyLlama-1.1B-Chat-rust-cpp-encodings"]
-template_choices = ["TinyLlama Template"]
-# What are the best options?
-g = gr.Interface(
-    fn=generate,
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+# Load the model and tokenizer
+model_name = "dumb-dev/TinyLlama-1.1B-Chat-rust-cpp-encodings"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
+def generate_text(prompt, top_p, top_k, max_tokens, temperature):
+    inputs = tokenizer(prompt, return_tensors="pt")
+    input_ids = inputs["input_ids"]
+
+    with torch.no_grad():
+        outputs = model.generate(
+            input_ids,
+            do_sample=True,
+            max_length=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            temperature=temperature,
+        )
+
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return generated_text
+
+# Gradio Interface
+interface = gr.Interface(
+    fn=generate_text,
     inputs=[
-        gr.components.Dropdown(choices=model_choices, label="Model", value=model_choices[0], interactive=True),
-        gr.components.Dropdown(choices=template_choices, label="Template", value=template_choices[0], interactive=True),
-        gr.components.Textbox(lines=2, label="Prompt", value="How many planets are in our solar system?"),
-        gr.components.Slider(minimum=0, maximum=1, value=0.4, label="Temperature"),
-        gr.components.Slider(minimum=0, maximum=1, value=0.95, label="Top p"),
-        gr.components.Slider(minimum=0, maximum=100, step=1, value=50, label="Top k"),
-        gr.components.Slider(minimum=1, maximum=1024, step=1, value=256, label="Max tokens"),
+        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
+        gr.Slider(0, 1, step=0.01, value=0.9, label="Top-p"),
+        gr.Slider(0, 100, step=1, value=50, label="Top-k"),
+        gr.Slider(1, 512, step=1, value=100, label="Max tokens"),
+        gr.Slider(0.1, 2, step=0.1, value=1, label="Temperature")
     ],
-    outputs=[gr.Textbox(lines=10, label="Output")],
-    title="Hugging Face Transformers Model",
-    description="A simple interface for using my Model",
-    concurrency_limit=1
+    outputs="text",
+    title="TinyLlama-1.1B Chat",
+    description="Generate text using the TinyLlama-1.1B-Chat model with adjustable parameters."
 )
 
-g.launch(max_threads=2)
+if __name__ == "__main__":
+    interface.launch()
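Note for anyone adapting this commit: the rewrite changes two behaviors relative to the old pipeline-based code. First, model.generate(..., max_length=max_tokens) caps the total sequence length including the prompt, whereas the old code's max_new_tokens capped only the completion. Second, the prompt is now tokenized raw, without the chat template the old apply_chat_template call wrapped it in. Below is a minimal sketch of generate_text with both old behaviors restored; it reuses the names from this commit but is illustrative, not part of the committed file.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "dumb-dev/TinyLlama-1.1B-Chat-rust-cpp-encodings"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    def generate_text(prompt, top_p, top_k, max_tokens, temperature):
        # Wrap the raw prompt in the model's chat template, as the old
        # pipeline-based version did via apply_chat_template.
        messages = [{"role": "user", "content": prompt}]
        text = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        input_ids = tokenizer(text, return_tensors="pt")["input_ids"]

        with torch.no_grad():
            outputs = model.generate(
                input_ids,
                do_sample=True,
                # max_new_tokens counts only generated tokens; the committed
                # code's max_length also counts the prompt tokens.
                max_new_tokens=max_tokens,
                top_p=top_p,
                top_k=top_k,
                temperature=temperature,
            )

        # Decode only the completion, skipping the echoed prompt tokens.
        return tokenizer.decode(
            outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
        )

Either variant plugs into the Gradio interface above unchanged; the difference only shows when the prompt is long (with max_length, a long prompt can leave no room for new tokens) or when the chat-tuned model needs its chat format to answer rather than merely continue the text.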