import os
from transformers import BitsAndBytesConfig
from transformers import AwqConfig
from transformers import AutoTokenizer
from transformers import AutoModel
from transformers import AutoModelForCausalLM

# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Local checkpoint directories: the original FP16 model plus target
# directories for the quantized exports produced further down this script.
model_path      = "/home/yangxianpku/models/THUDM/chatglm3-6b"      # original ChatGLM3-6B checkpoint
model_path_bnb  = "/home/yangxianpku/models/THUDM/chatglm3-6b-bnb"  # bitsandbytes 4-bit export target
model_path_hf   = "/home/yangxianpku/models/THUDM/chatglm3-6b-hf"   # ChatGLM .quantize(4) export target


#! cd /home/yangxianpku/models/THUDM/chatglm3-6b
#! rm model.safetensors.index.json 
# model  = AutoModel.from_pretrained(model_path, trust_remote_code=True).cuda()  # IDLE:  12172MB



# qconfig = BitsAndBytesConfig(load_in_8bit=False,    # enable LLM.int8()
#                             load_in_4bit=True,      # use bitsandbytes FP4/NF4 in place of linear layers
#                             llm_int8_threshold=6.0,
#                             llm_int8_skip_modules=None,
#                             llm_int8_enable_fp32_cpu_offload=False,
#                             llm_int8_has_fp16_weight=False,
#                             bnb_4bit_compute_dtype=None,
#                             bnb_4bit_quant_type="fp4",
#                             bnb_4bit_use_double_quant=False,
#                             bnb_4bit_quant_storage=None,)

# AWQ 4-bit quantization settings.
# NOTE(review): transformers' AWQ integration is documented as loading
# checkpoints that were already quantized with AutoAWQ — confirm this
# checkpoint is one; otherwise from_pretrained is expected to reject it.
qconfig = AwqConfig(
    bits=4,                       # 4-bit weights
    group_size=128,               # quantization group size
    zero_point=True,              # asymmetric (zero-point) quantization
    do_fuse=None,
    fuse_max_seq_len=4096,        # only relevant when module fusing is enabled
    modules_to_fuse=None,
    modules_to_not_convert=None,
    exllama_config=None,
    # pre_quantized=True,
)

# Load the causal-LM checkpoint onto GPU 1, applying the quantization
# config object directly (instead of the load_in_8bit/load_in_4bit flags).
# Earlier memory measurements: load_in_8bit ≈ 7644MB, load_in_4bit ≈ 4584MB.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="cuda:1",
    quantization_config=qconfig,
    trust_remote_code=True,  # ChatGLM ships custom modeling code
)

# print(qmodel.get_memory_footprint())


# root@server:/home/yangxianpku/models/THUDM# du -h -d 1
# 12G     ./chatglm3-6b
# 4.0G    ./chatglm3-6b-bnb
# model.save_pretrained(model_path_bnb)  
# tokenizer = AutoTokenizer.from_pretrained(model_path)
# tokenizer.save_pretrained(model_path_bnb)

# #! pip install cpm_kernels
# This works, but vLLM does not support the resulting format
# model  = AutoModel.from_pretrained(model_path, trust_remote_code=True, 
#                                             device_map="cuda:1").quantize(4)
# model.save_pretrained(model_path_hf)  

# tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# tokenizer.save_pretrained(model_path_hf)


