# !pip install -q "bitsandbytes>=0.39.0"
# !pip install -q git+https://github.com/huggingface/accelerate.git
# !pip install -q git+https://github.com/huggingface/transformers.git



from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch
# Fix the RNG seed so sampled generations are reproducible across runs.
torch.manual_seed(0)

# Device used for input tensors; the model itself is placed by device_map='auto'.
device = 'cpu'

# Raw string: backslashes in a Windows path would otherwise be read as
# (invalid) escape sequences — a DeprecationWarning since Python 3.12.
# The value is unchanged (\m, \o, \g were passed through literally before).
model_id = r'D:\modelscope\openai-community\gpt2'
# model_id = '/mnt/workspace/gpt2'

# BUG FIX: previously a 4-bit (nf4) config was built and then immediately
# shadowed by the 8-bit config below, leaving dead code and a misleading
# impression that both applied. Keep exactly one active assignment; the
# CPU-oriented 4-bit alternative is preserved here for reference:
#
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",       # CPU requires nf4
#     bnb_4bit_use_double_quant=True,
# )

# 8-bit quantization. Needs a GPU such as an A10 — lower-end cards may not
# support bf16.
quantization_config = BitsAndBytesConfig(
    load_in_8bit=True,
)

# Load the tokenizer and the quantized model; device_map='auto' lets
# accelerate decide the placement of each module.
tokenizer = AutoTokenizer.from_pretrained(model_id)

model_int8 = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map='auto',
    quantization_config=quantization_config,
)

# Generate tokens from a prompt
def generate_text(model, input_text, max_length=50):
    """Sample a continuation of *input_text* from *model*.

    Uses top-k sampling (k=30). Relies on the module-level ``tokenizer``
    and ``device``. Returns the decoded string, prompt included, with
    special tokens stripped.
    """
    encoded = tokenizer.encode(input_text, return_tensors='pt').to(device)
    # Every position is a real token (no padding), so the mask is all ones.
    mask = encoded.new_ones(encoded.shape)
    generated = model.generate(
        inputs=encoded,
        max_length=max_length,
        do_sample=True,
        top_k=30,
        pad_token_id=tokenizer.eos_token_id,
        attention_mask=mask,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)


# Sample one completion from the 8-bit model and print it.
text_int8 = generate_text(model_int8, "I have a dream")

print(f"LLM.int8() model:\n{text_int8}")