from datasets     import load_dataset
from transformers import AutoTokenizer
from transformers import AutoModelForCausalLM
from transformers import GPTQConfig

# Paths: base FP16 checkpoint to quantize, and output directory for the 4-bit weights.
base_model_dir = '/home/yangxianpku/models/NousResearch/Meta-Llama-3-8B-Instruct'
output_dir     = '/home/yangxianpku/models/NousResearch/Meta-Llama-3-8B-Instruct-GPTQ-4B'


#! 1. Load the tokenizer and build the GPTQ quantization config.
tokenizer = AutoTokenizer.from_pretrained(base_model_dir)

calibration_texts = [
    "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm.",
]

# A single short sentence serves as the calibration dataset here.
quant_config = GPTQConfig(bits=4, dataset=calibration_texts, tokenizer=tokenizer)
# quant_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)      # use a datasets-hub corpus instead

#! 2. Quantize: passing a GPTQConfig makes from_pretrained run calibration on load.
model = AutoModelForCausalLM.from_pretrained(
    base_model_dir,
    device_map="auto",
    quantization_config=quant_config,
)

# If the calibration set is too large and causes OOM, offload part of the model to CPU memory:
# model = AutoModelForCausalLM.from_pretrained(base_model_dir, device_map="auto",
#                                 max_memory={0: "20GiB", 1: "20GiB", "cpu": "50GiB"},
#                                 quantization_config=quant_config)


#! 3. Persist the quantized model and its tokenizer.
model.save_pretrained(output_dir, use_safetensors=True)
tokenizer.save_pretrained(output_dir)

#! 4. Inference with the quantized model:
# tokenizer = AutoTokenizer.from_pretrained(output_dir)
# model     = AutoModelForCausalLM.from_pretrained(output_dir).to(0)


# text   = "auto-gptq is"
# inputs = tokenizer(text, return_tensors="pt").to(0)

# out = model.generate(**inputs, max_new_tokens=200)
# print(tokenizer.decode(out[0], skip_special_tokens=True))