ariG23498 (HF Staff) committed
Commit 7e2c0f2 · verified · Parent: d1dd758

Upload Qwen_Qwen3-VL-30B-A3B-Instruct_1.py with huggingface_hub

Files changed (1):
  Qwen_Qwen3-VL-30B-A3B-Instruct_1.py (+58 -5)
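
The commit message says the file was pushed with huggingface_hub. For reference, a minimal sketch of such an upload (the exact call the author used is an assumption; the repo_id is inferred from the alert URL inside the script, not confirmed by this page):

# Sketch only: one plausible way this file was uploaded.
# HfApi.upload_file is a real huggingface_hub API; the repo_id below is
# inferred from the alert URL in the script, not confirmed by this page.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via HF_TOKEN or the cached login
api.upload_file(
    path_or_fileobj="Qwen_Qwen3-VL-30B-A3B-Instruct_1.py",
    path_in_repo="Qwen_Qwen3-VL-30B-A3B-Instruct_1.py",
    repo_id="model-metadata/code_execution_files",
    repo_type="dataset",
    commit_message="Upload Qwen_Qwen3-VL-30B-A3B-Instruct_1.py with huggingface_hub",
)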
Qwen_Qwen3-VL-30B-A3B-Instruct_1.py CHANGED

@@ -1,9 +1,18 @@
 # /// script
 # requires-python = ">=3.12"
 # dependencies = [
+#     "numpy",
+#     "einops",
+#     "pandas",
+#     "matplotlib",
+#     "protobuf",
 #     "torch",
+#     "sentencepiece",
 #     "torchvision",
 #     "transformers",
+#     "timm",
+#     "diffusers",
+#     "sentence-transformers",
 #     "accelerate",
 #     "peft",
 #     "slack-sdk",
@@ -11,8 +20,30 @@
 # ///
 
 try:
-    import os
-    os.environ['HF_TOKEN'] = 'YOUR_TOKEN_HERE'
+    # Load model directly
+    from transformers import AutoProcessor, AutoModelForVision2Seq
+
+    processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct")
+    model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct")
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
+                {"type": "text", "text": "What animal is on the candy?"}
+            ]
+        },
+    ]
+    inputs = processor.apply_chat_template(
+        messages,
+        add_generation_prompt=True,
+        tokenize=True,
+        return_dict=True,
+        return_tensors="pt",
+    ).to(model.device)
+
+    outputs = model.generate(**inputs, max_new_tokens=40)
+    print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
     with open('Qwen_Qwen3-VL-30B-A3B-Instruct_1.txt', 'w', encoding='utf-8') as f:
         f.write('Everything was good in Qwen_Qwen3-VL-30B-A3B-Instruct_1.txt')
 except Exception as e:
@@ -20,15 +51,37 @@ except Exception as e:
     from slack_sdk import WebClient
     client = WebClient(token=os.environ['SLACK_TOKEN'])
     client.chat_postMessage(
-        channel='#exp-slack-alerts',
+        channel='#hub-model-metadata-snippets-sprint',
         text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-30B-A3B-Instruct_1.txt|Qwen_Qwen3-VL-30B-A3B-Instruct_1.txt>',
     )
 
     with open('Qwen_Qwen3-VL-30B-A3B-Instruct_1.txt', 'a', encoding='utf-8') as f:
         import traceback
         f.write('''```CODE:
-import os
-os.environ['HF_TOKEN'] = 'YOUR_TOKEN_HERE'
+# Load model directly
+from transformers import AutoProcessor, AutoModelForVision2Seq
+
+processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct")
+model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct")
+messages = [
+    {
+        "role": "user",
+        "content": [
+            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
+            {"type": "text", "text": "What animal is on the candy?"}
+        ]
+    },
+]
+inputs = processor.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    tokenize=True,
+    return_dict=True,
+    return_tensors="pt",
+).to(model.device)
+
+outputs = model.generate(**inputs, max_new_tokens=40)
+print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
 ```
 
 ERROR:
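
A note on the model-loading snippet added in this commit: calling from_pretrained with default arguments materializes the 30B-parameter checkpoint in full precision on the CPU. Since accelerate is already among the script's dependencies, a common variant lets transformers keep the checkpoint's native dtype and place shards across devices automatically. A minimal sketch, assuming a CUDA-capable GPU is available (not part of the committed file):

# Sketch only, not part of the commit. Assumes a CUDA-capable GPU and a
# transformers release recent enough to accept dtype= (older releases spell
# the same option torch_dtype=).
from transformers import AutoModelForVision2Seq

model = AutoModelForVision2Seq.from_pretrained(
    "Qwen/Qwen3-VL-30B-A3B-Instruct",
    dtype="auto",       # keep the checkpoint's native dtype instead of float32
    device_map="auto",  # let accelerate shard the model across available devices
)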