from awq          import AutoAWQForCausalLM
from transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
from transformers import AwqConfig

"""Quantize Meta-Llama-3-8B-Instruct to 4-bit with AWQ and save the result."""

model_path   = '/home/yangxianpku/models/NousResearch/Meta-Llama-3-8B-Instruct'
quant_path   = '/home/yangxianpku/models/NousResearch/Meta-Llama-3-8B-Instruct-AWQ-4B'


# AWQ currently supports only 4-bit quantization (w_bit=4).
# GEMM kernels: good throughput for batched inference; zero_point + 128-group
# is the standard AWQ recipe.
quant_config = { "zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM" }   # v1 API takes a plain dict

#! 1. Load the FP16 model and tokenizer.
# low_cpu_mem_usage=True avoids materializing a second full copy of the
# weights in host RAM during loading (important for an 8B model).
model     = AutoAWQForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


#! 2. Quantize the model in place (v1 API: pass the config dict directly).
# The tokenizer is needed to tokenize the calibration dataset.
model.quantize(tokenizer, quant_config=quant_config)


#! 3. Save the quantized weights and the tokenizer side by side.
model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)


#! Inference with the quantized model (kept for reference)
# tokenizer = AutoTokenizer.from_pretrained(quant_path)                       # 5838MB
# model     = AutoModelForCausalLM.from_pretrained(quant_path, 
#                 attn_implementation="flash_attention_2", ).to(0)


# text      = "Hello my name is"
# inputs    = tokenizer(text, return_tensors="pt").to(0)

# out = model.generate(**inputs, max_new_tokens=200)
# print(tokenizer.decode(out[0], skip_special_tokens=True))