ariG23498 HF Staff committed on
Commit
16dd76d
·
verified ·
1 Parent(s): 53e27c6

Upload Qwen_Qwen2.5-VL-7B-Instruct_1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. Qwen_Qwen2.5-VL-7B-Instruct_1.py +52 -0
Qwen_Qwen2.5-VL-7B-Instruct_1.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "numpy",
#     "einops",
#     "torch",
#     "transformers",
#     "datasets",
#     "accelerate",
#     "timm",
# ]
# ///
"""Smoke-test Qwen/Qwen2.5-VL-7B-Instruct on a single image question.

Runs one vision-language generation, records the outcome (answer or full
traceback) in a status file, and always uploads that file to the
`model-metadata/custom_code_execution_files` dataset repo.
"""

import traceback

try:
    # Load model directly
    from transformers import AutoProcessor, AutoModelForVision2Seq

    processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
    model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")

    # One user turn containing an image URL plus a text question.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
                {"type": "text", "text": "What animal is on the candy?"}
            ]
        },
    ]

    # apply_chat_template tokenizes text and preprocesses the image in one
    # call; move tensors to wherever the model was placed.
    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Slice off the prompt tokens so only the newly generated answer remains;
    # skip_special_tokens drops chat-template control tokens (<|im_end|> etc.).
    answer = processor.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    print(answer)

    # Record success AND the actual model output, so the uploaded status
    # file is useful for inspection (previously the answer was discarded).
    with open('Qwen_Qwen2.5-VL-7B-Instruct_1.txt', 'w') as f:
        f.write('Everything was good in Qwen_Qwen2.5-VL-7B-Instruct_1.txt\n')
        f.write(answer)
except Exception:
    # Boundary handler: persist the full traceback instead of crashing, so
    # the failure reason ships with the uploaded file.
    with open('Qwen_Qwen2.5-VL-7B-Instruct_1.txt', 'w') as f:
        f.write(traceback.format_exc())
finally:
    # Always upload the status file, whether the run succeeded or failed.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='Qwen_Qwen2.5-VL-7B-Instruct_1.txt',
        repo_id='model-metadata/custom_code_execution_files',
        path_in_repo='Qwen_Qwen2.5-VL-7B-Instruct_1.txt',
        repo_type='dataset',
    )