haixuantao committed
Commit be75a6c
1 Parent(s): c826c43

Adding fused_quantized fix

Files changed (2):
  1. operators/utils.py +13 -1
  2. tests/test_idefics2.py +25 -4
operators/utils.py CHANGED
@@ -24,7 +24,7 @@ def speak(text):
 
 speak("hello")
 
-MODE = "quantized"
+MODE = "fused_quantized"
 DEVICE = "cuda"
 PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
 BAD_WORDS_IDS = PROCESSOR.tokenizer(
@@ -70,6 +70,17 @@ else:
     raise ValueError("Unknown mode")
 
 
+def reset_awq_cache(model):
+    """
+    Simple method to reset the AWQ fused modules cache
+    """
+    from awq.modules.fused.attn import QuantAttentionFused
+
+    for name, module in model.named_modules():
+        if isinstance(module, QuantAttentionFused):
+            module.start_pos = 0
+
+
 def ask_vlm(image, instruction):
     prompts = [
         "User:",
@@ -87,6 +98,7 @@ def ask_vlm(image, instruction):
     generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
 
     text = generated_texts[0].split("\nAssistant: ")[1]
+    reset_awq_cache(model)
     speak(text)
     return text
 
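For context: the model-construction branch selected by MODE sits outside these hunks, so the switch to "fused_quantized" shows up here only as the constant change. Below is a minimal sketch of what such a branch could look like using transformers' AwqConfig module fusing; the checkpoint id, bits, and fuse_max_seq_len values are assumptions for illustration, not code from this repo.

# Hypothetical sketch, not part of this commit: a "fused_quantized" branch
# built on transformers' AwqConfig fusing support.
from transformers import AutoModelForVision2Seq, AwqConfig

if MODE == "fused_quantized":
    quantization_config = AwqConfig(
        bits=4,                 # assumed quantization width
        fuse_max_seq_len=4096,  # assumed; sizes the fused attention KV cache
        do_fuse=True,
    )
    model = AutoModelForVision2Seq.from_pretrained(
        "HuggingFaceM4/idefics2-tfrm-compatible-AWQ",  # assumed checkpoint id
        quantization_config=quantization_config,
    ).to(DEVICE)
else:
    raise ValueError("Unknown mode")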
tests/test_idefics2.py CHANGED
@@ -3,10 +3,16 @@ import torch
 from PIL import Image
 from io import BytesIO
 
-from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
+from transformers import (
+    AutoProcessor,
+    AutoModelForVision2Seq,
+    AwqConfig,
+)
 import awq_ext
 
-MODE = "quantized"
+import time
+
+MODE = "fused_quantized"
 DEVICE = "cuda"
 PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
 BAD_WORDS_IDS = PROCESSOR.tokenizer(
@@ -78,7 +84,19 @@ image1 = download_image(
 )
 
 
+def reset_awq_cache(model):
+    """
+    Simple method to reset the AWQ fused modules cache
+    """
+    from awq.modules.fused.attn import QuantAttentionFused
+
+    for name, module in model.named_modules():
+        if isinstance(module, QuantAttentionFused):
+            module.start_pos = 0
+
+
 def ask_vlm(image, instruction):
+    global model
     prompts = [
         "User:",
         image,
@@ -93,17 +111,20 @@ def ask_vlm(image, instruction):
         max_new_tokens=100,
     )
     generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
+    reset_awq_cache(model)
     return generated_texts
 
 
-import time
-
 now = time.time()
+
 print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
 
 print("resp:", time.time() - now)
 import time
 
+
 now = time.time()
 
 print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
+
+print("resp:", time.time() - now)
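Why the reset is needed, as far as this commit shows: AutoAWQ's QuantAttentionFused keeps its own KV cache with a start_pos counter that advances on every forward pass, so a second independent generate() call would otherwise continue from the previous call's stale cache position. Zeroing start_pos after each generation lets the back-to-back timed ask_vlm calls in this test each start clean. A convenience pattern (hypothetical, not part of this commit) is to wrap the reset in a context manager so it runs even if generation raises:

# Hypothetical helper, not part of this commit: always reset the AWQ
# fused-attention cache after a generation, even on error.
from contextlib import contextmanager

@contextmanager
def fresh_awq_cache(model):
    try:
        yield model
    finally:
        reset_awq_cache(model)  # the function added by this commit

# Usage sketch:
# with fresh_awq_cache(model):
#     generated_ids = model.generate(**inputs, max_new_tokens=100)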