import torch
from hqq.core.quantize import *
from hqq.engine.hf import HQQModelForCausalLM, AutoTokenizer
from hqq.models.hf.base import AutoHQQHFModel
# Load a 4-bit HQQ-quantized Mistral-7B-Instruct-v0.2 model and its tokenizer
# from the Hugging Face Hub. Requires the `hqq` package and a GPU-capable torch.
compute_dtype = torch.float16
model_id = "uisikdag/Mistral-7B-Instruct-v0.2-4bit-hqq"

# Fix: the original assigned `compute_dtype` but never passed it anywhere, so the
# dequantize/matmul dtype silently fell back to the library default. HQQ's
# `from_quantized` accepts `compute_dtype` — pass it explicitly.
# NOTE(review): confirm against the installed hqq version's signature.
model = HQQModelForCausalLM.from_quantized(model_id, compute_dtype=compute_dtype)
tokenizer = AutoTokenizer.from_pretrained(model_id)
- Downloads last month: 2