John6666 committed
Commit c6e6980 • 1 Parent(s): 6502315

Upload 3 files

Files changed (3):
  1. README.md +13 -12
  2. app.py +41 -0
  3. requirements.txt +7 -0
README.md CHANGED
@@ -1,12 +1,13 @@
- ---
- title: Test Qwen2.5 Coder 1.5B Instruct
- emoji: 🐠
- colorFrom: gray
- colorTo: indigo
- sdk: gradio
- sdk_version: 5.6.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: test Qwen2.5-Coder-1.5B-Instruct
+ emoji: 🙄
+ colorFrom: indigo
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.44.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
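The frontmatter above is the Spaces config that tells the Hub how to build the app (Gradio 4.44.0, entry point app.py, MIT license). A minimal sketch for sanity-checking the fields locally before pushing; it assumes README.md is in the current directory and PyYAML is installed (PyYAML is not in requirements.txt):

import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The Spaces config is the YAML block between the first two "---" delimiters.
_, frontmatter, _ = text.split("---", 2)
config = yaml.safe_load(frontmatter)
print(config["sdk"], config["sdk_version"])  # expected: gradio 4.44.0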
app.py ADDED
@@ -0,0 +1,41 @@
+ import os
+ import spaces
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model_id = "Qwen/Qwen2.5-Coder-1.5B-Instruct"
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+ @spaces.GPU(duration=30)
+ def infer(message: str, sysprompt: str, tokens: int = 30):
+     messages = [
+         {"role": "system", "content": sysprompt},
+         {"role": "user", "content": message}
+     ]
+
+     input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     inputs = tokenizer(text=[input_text], return_tensors="pt").to(model.device)
+     generated_ids = model.generate(**inputs, max_new_tokens=tokens)
+     generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, generated_ids)]
+     output_str = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+     print(message)
+     print(output_str)
+
+     return output_str
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         message = gr.Textbox(label="Message", value="", lines=1)
+         sysprompt = gr.Textbox(label="System prompt", value="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.", lines=4)
+     tokens = gr.Slider(label="Max tokens", value=30, minimum=1, maximum=2048, step=1)
+     #image_url = gr.Textbox(label="Image URL", value=url, lines=1)
+     run_button = gr.Button("Run", variant="primary")
+     info_md = gr.Markdown("<br><br><br>")
+
+     run_button.click(infer, [message, sysprompt, tokens], [info_md])
+
+ demo.launch()
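Once the Space is running, the click handler can also be driven programmatically. A minimal sketch using gradio_client; the Space id and the "/infer" endpoint name are assumptions (Gradio derives the endpoint name from the handler function unless api_name is set explicitly):

from gradio_client import Client

client = Client("John6666/test-qwen2.5-coder-1.5b-instruct")  # hypothetical Space id
result = client.predict(
    "Write a Python function that reverses a string.",  # message
    "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",  # sysprompt
    256,  # tokens (max_new_tokens)
    api_name="/infer",
)
print(result)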
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ huggingface_hub>=0.26.1
+ torch
+ transformers>=4.45.0
+ bitsandbytes
+ accelerate>=1.0.1
+ numpy<2
+ datasets>3.0.2
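Note that accelerate is required for the device_map="auto" load in app.py, numpy<2 presumably guards against NumPy 2.x breakage, and bitsandbytes is pinned even though app.py does not use quantization. A quick stdlib sketch to confirm what actually resolved in the environment before pushing:

import importlib.metadata as md

for pkg in ("huggingface_hub", "torch", "transformers", "bitsandbytes",
            "accelerate", "numpy", "datasets"):
    try:
        print(f"{pkg}=={md.version(pkg)}")
    except md.PackageNotFoundError:
        print(f"{pkg}: not installed")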