from transformers import AutoTokenizer, AutoModelForCausalLM

def download_model(
    model_name: str = "Qwen/Qwen1.5-1.8B-Chat-GPTQ-Int4",
    local_path: str = "D:/demo/gitee/python/models/Qwen1.5-1.8B-Chat-GPTQ-Int4",
    device_map: str = "cpu",
) -> bool:
    """Download a tokenizer and model from the Hugging Face Hub and save them locally.

    Args:
        model_name: Hub repository id to download. Defaults to the GPTQ
            Int4-quantized Qwen1.5-1.8B chat model.
        local_path: Directory where the tokenizer and model files are saved.
        device_map: Device placement passed to ``from_pretrained``. ``"cpu"``
            by default; use ``"auto"`` to let transformers pick the best
            device when a GPU is available.

    Returns:
        True if both downloads and saves succeeded, False otherwise.
    """
    try:
        # Download and persist the tokenizer first (small, fails fast on
        # bad repo id / network issues before the large model download).
        print("开始下载 tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
        )
        tokenizer.save_pretrained(local_path)
        print("tokenizer 下载完成")

        # Download and persist the model weights.
        print("开始下载模型...")
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            trust_remote_code=True,
            device_map=device_map,
        )
        model.save_pretrained(local_path)
        print("模型下载完成")
        return True

    except Exception as e:
        # Network / hub / disk errors are all reported the same way here;
        # the caller only needs to know the download did not complete.
        print(f"下载出错: {e}")
        return False

def load_model(
    local_path: str = "D:/demo/gitee/python/models/Qwen1.5-1.8B-Chat-GPTQ-Int4",
    device_map: str = "cpu",
):
    """Load a previously downloaded tokenizer and model from a local directory.

    Uses ``local_files_only=True`` so no network access is attempted; the
    files must already exist (e.g. produced by :func:`download_model`).

    Args:
        local_path: Directory containing the saved tokenizer and model files.
        device_map: Device placement passed to ``from_pretrained``. ``"cpu"``
            by default; use ``"auto"`` to let transformers pick the best
            device when a GPU is available.

    Returns:
        A ``(tokenizer, model)`` tuple on success, ``(None, None)`` on failure.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            local_path,
            trust_remote_code=True,
            local_files_only=True,
        )
        model = AutoModelForCausalLM.from_pretrained(
            local_path,
            trust_remote_code=True,
            device_map=device_map,
            local_files_only=True,
        )
        return tokenizer, model
    except Exception as e:
        # Missing files, corrupt weights, or a missing auto-gptq backend all
        # land here; callers check for (None, None) rather than catching.
        print(f"运行出错: {e}")
        return None, None

if __name__ == "__main__":
    # Requires the GPTQ backend: pip install auto-gptq
    download_model()

    # After the download finishes, load the model back from the local copy.
    tok, mdl = load_model()
    if tok and mdl:
        print("模型加载成功！")