import os
import shutil

from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from datasets import load_dataset
from transformers import AutoTokenizer


def run(model_id, dataset_name="neuralmagic/LLM_compression_calibration",
        num_samples=512, max_seq_len=4096, bits=4, group_size=128, desc_act=False,
        damp_percent=0.01):
    """Quantize a causal LM with GPTQ and save the result locally.

    Loads ``model_id``, calibrates on ``num_samples`` examples drawn from
    ``dataset_name``, quantizes the weights to ``bits`` bits, and writes the
    quantized model plus tokenizer to ``"<model-name>-GPTQ-Int<bits>"``.
    If ``model_id`` is a local directory containing a README, the README is
    copied next to the quantized weights.

    Args:
        model_id: HF hub id or local path of the model to quantize.
        dataset_name: Calibration dataset; each row must provide a ``text``
            field consumed by the prompt template below.
        num_samples: Number of calibration examples to draw.
        max_seq_len: Truncation length for tokenized calibration examples.
        bits: Quantization bit width.
        group_size: GPTQ quantization group size.
        desc_act: Whether to use activation-order (desc_act) quantization.
        damp_percent: GPTQ dampening percentage.
    """
    model_file_base_name = model_id.split("/")[-1]
    # Load the tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Wrap each calibration row's "text" in an Alpaca-style instruction preamble.
    def preprocess_fn(example): return {
        "text": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n{text}".format_map(example)
    }

    # Load, subsample, and format the calibration set.
    # NOTE(review): shuffle() is unseeded, so the calibration subset (and thus
    # the quantized weights) differ between runs — pass a seed to reproduce.
    dataset = load_dataset(dataset_name, split="train")
    ds = dataset.shuffle().select(range(num_samples))
    ds = ds.map(preprocess_fn)

    # Tokenize examples one at a time (no padding; truncated to max_seq_len).
    examples = [
        tokenizer(
            example["text"], padding=False, max_length=max_seq_len, truncation=True,
        ) for example in ds
    ]

    # Create quantization configuration; the quantized weights file is always
    # named "model" regardless of the source model's name.
    quantize_config = BaseQuantizeConfig(
        bits=bits,
        group_size=group_size,
        desc_act=desc_act,
        model_file_base_name="model",
        damp_percent=damp_percent,
        static_groups=False,
        sym=True,
        true_sequential=True,
        model_name_or_path=None,
    )

    # Load the model and quantize
    model = AutoGPTQForCausalLM.from_pretrained(
        model_id,
        quantize_config,
        device_map="auto",
    )

    model.quantize(examples)
    new_name = f"{model_file_base_name}-GPTQ-Int{bits}"
    model.save_pretrained(new_name)
    tokenizer.save_pretrained(new_name)

    # Best-effort: copy the source README (either filename casing) alongside
    # the quantized weights. NOTE(review): this only works when model_id is a
    # local directory; for hub ids like "org/name" neither path exists and the
    # copy is silently skipped.
    new_readme = os.path.join(new_name, "README.md")
    for candidate in (os.path.join(model_id, "README.md"),
                      os.path.join(model_id, "README.MD")):
        if os.path.exists(candidate):
            shutil.copy(candidate, new_readme)
            break


# Usage example
# python gptq.py --model_id="Qwen/Qwen2.5-Coder-32B" --bits=4
# python gptq.py --model_id="Qwen2.5-Coder-32B" --bits=4

if __name__ == "__main__":
    # Expose `run` as a CLI; fire is imported lazily so importing this
    # module as a library does not require it.
    import fire

    fire.Fire(run)
