apjanco committed
Commit ec7a981
1 Parent(s): 59f3a7e

first commit

README.md CHANGED
@@ -1,8 +1,8 @@
  ---
- title: Qwen2 Vl Fmb Demo
- emoji: 馃
- colorFrom: green
- colorTo: blue
+ title: Qwen2 Hindi Demo
+ emoji: 馃寲
+ colorFrom: purple
+ colorTo: indigo
  sdk: gradio
  sdk_version: 4.44.0
  app_file: app.py
app.py ADDED
@@ -0,0 +1,138 @@
+ import gradio as gr
+ import spaces
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+ import torch
+ from PIL import Image
+ import subprocess
+ from datetime import datetime
+ import numpy as np
+ import os
+
+
+ # subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+ # models = {
+ #     "Qwen/Qwen2-VL-2B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+
+ # }
+ def array_to_image_path(image_array):
+     # Convert numpy array to PIL Image
+     img = Image.fromarray(np.uint8(image_array))
+
+     # Generate a unique filename using timestamp
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"image_{timestamp}.png"
+
+     # Save the image
+     img.save(filename)
+
+     # Get the full path of the saved image
+     full_path = os.path.abspath(filename)
+
+     return full_path
+
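+ # Load the fine-tuned Qwen2-VL weights once at startup; the model is kept on the GPU in eval mode.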
+ models = {
+     "qwen2-vl-fmb": Qwen2VLForConditionalGeneration.from_pretrained("fmb-quibdo/qwen2-vl-fmb", trust_remote_code=True, torch_dtype="auto").cuda().eval()
+
+ }
+
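+ # The processor (tokenizer and image preprocessing) is reused from the base Qwen/Qwen2-VL-7B-Instruct repo.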
+ processors = {
+     "qwen2-vl-fmb": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)
+ }
+
+ DESCRIPTION = "[Qwen2-VL-2B Spanish Demo](https://huggingface.co/ajanco/qwen2-vl-fmb)"
+
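+ # Note: `kwargs` and the <|user|>/<|assistant|> prompt strings below appear to be unused leftovers;
+ # inference relies on the processor's chat template instead.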
+ kwargs = {}
+ kwargs['torch_dtype'] = torch.bfloat16
+
+ user_prompt = '<|user|>\n'
+ assistant_prompt = '<|assistant|>\n'
+ prompt_suffix = "<|end|>\n"
+
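+ # @spaces.GPU allocates a GPU for the duration of each call when the Space runs on ZeroGPU hardware.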
+ @spaces.GPU
+ def run_example(image, text_input=None, model_id="qwen2-vl-fmb"):
+     image_path = array_to_image_path(image)
+
+     print(image_path)
+     model = models[model_id]
+     processor = processors[model_id]
+
+     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
+     image = Image.fromarray(image).convert("RGB")
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image_path,
+                 },
+                 {"type": "text", "text": text_input},
+             ],
+         }
+     ]
+
+     # Preparation for inference
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: Generation of the output
+     generated_ids = model.generate(**inputs, max_new_tokens=4096)
+     generated_ids_trimmed = [
+         out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )
+
+     return output_text[0]
+
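+ # Note: no component below sets elem_id="output", so this CSS rule currently has no effect.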
+ css = """
+   #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+   }
+ """
+
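+ # Two-column layout: image, model choice, and question on the left; extracted text on the right.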
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Tab(label="Qwen2-VL-7B Input"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="qwen2-vl-fmb")
+                 text_input = gr.Textbox(label="Question", value="extract text in Spanish from the image")
+                 submit_btn = gr.Button(value="Submit")
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text")
+
+         gr.Examples(
+             examples=[
+                 ["assets/test0.png", "extract text in Spanish"],
+                 ["assets/test1.png", "extract text in Spanish"],
+                 ["assets/test2.png", "extract text in Spanish"],
+                 ["assets/test3.png", "extract text in Spanish"],
+                 ["assets/test4.png", "extract text in Spanish"]
+             ],
+             inputs=[input_img, text_input],
+             outputs=[output_text],
+             fn=run_example,
+             cache_examples=True,
+             label="Try examples"
+         )
+         submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
+
+ demo.queue(api_open=False)
+ demo.launch(debug=True)
assets/test0.png ADDED
assets/test1.png ADDED
assets/test2.png ADDED
assets/test3.png ADDED
assets/test4.png ADDED
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ numpy==1.24.4
+ Pillow==10.3.0
+ Requests==2.31.0
+ torch
+ torchvision
+ git+https://github.com/huggingface/transformers.git
+ accelerate
+ qwen-vl-utils