import argparse
from ipex_llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer, AutoProcessor, WhisperProcessor
from transformers import AutoModelForSpeechSeq2Seq

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Apply IPEX-LLM low-bit optimizations (including INT8/INT5/INT4) to save the low-bit model"
    )
    parser.add_argument(
        "--model-path",
        type=str,
        default="Qwen/Qwen2.5-3B-Instruct",
        help="The path to load the local language model or the Hugging Face repo ID for the large language model to be downloaded"
    )
    parser.add_argument(
        "--low-bit",
        type=str,
        default="sym_int4",
        choices=["sym_int4", "asym_int4", "sym_int5", "asym_int5", "sym_int8"],
        help="The quantization type the model will convert to."
    )
    parser.add_argument(
        "--save-path",
        type=str,
        default=None,
        help="The path to save the low-bit model."
    )
    parser.add_argument(
        "--audio-model",
        action="store_true",
        help="Specify if the model is an audio model."
    )

    args = parser.parse_args()
    model_path = args.model_path
    low_bit = args.low_bit
    save_path = args.save_path
    audio_model = args.audio_model

    if audio_model:
        # Speech-to-text (Whisper-style) model: load with the requested
        # low-bit quantization.  Previously this branch hard-coded
        # load_in_4bit=True, which silently ignored --low-bit choices such
        # as sym_int5 / sym_int8.
        model = AutoModelForSpeechSeq2Seq.from_pretrained(
            pretrained_model_name_or_path=model_path,
            load_in_low_bit=low_bit,
            trust_remote_code=True
        )
        # WhisperProcessor bundles the feature extractor + tokenizer; it
        # exposes the same save_pretrained() API as a tokenizer.
        tokenizer = WhisperProcessor.from_pretrained(model_path, trust_remote_code=True)
    else:
        # Causal language model path: quantize to the requested low-bit type.
        model = AutoModelForCausalLM.from_pretrained(
            model_path, load_in_low_bit=low_bit, trust_remote_code=True
        )
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

    if save_path:
        model.save_low_bit(save_path)
        tokenizer.save_pretrained(save_path)
        print(f"Model and tokenizer are saved to {save_path}")
    else:
        # Without --save-path the converted model is discarded; warn the
        # user instead of exiting silently.
        print("Warning: --save-path was not provided, so the low-bit model was not saved.")