Aria-sequential_mlp-FP8-dynamic

FP8-Dynamic quantization of Aria-sequential_mlp, produced with llm-compressor. Inference requires about 30 GB of VRAM.
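
Here, "dynamic" means the FP8 scales for activations are computed on the fly per input, while the weights carry static scales. A toy sketch of that dynamic scaling, assuming only torch (the real compressed-tensors kernels apply scales per channel for weights and per token for activations; the tensor x and the names below are illustrative):

import torch

# Toy illustration of dynamic FP8 (E4M3) quantization; 448 is the largest
# finite value representable in torch.float8_e4m3fn.
x = torch.randn(4, 8, dtype=torch.bfloat16)
scale = x.abs().amax().float() / 448.0                 # fresh scale per input ("dynamic")
x_fp8 = (x.float() / scale).to(torch.float8_e4m3fn)    # quantize
x_deq = x_fp8.float() * scale                          # dequantize to inspect the error
print((x.float() - x_deq).abs().max())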

Installation

pip install transformers==4.45.0 accelerate==0.34.1 sentencepiece==0.2.0 torchvision requests torch Pillow compressed-tensors
pip install flash-attn --no-build-isolation
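
Optionally, a quick sanity check that the environment is ready before loading the 25B checkpoint (a sketch; flash-attn is only exercised if you opt into attn_implementation="flash_attention_2", this just probes the install):

import torch

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
try:
    import flash_attn  # noqa: F401
    print("flash-attn", flash_attn.__version__)
except ImportError:
    print("flash-attn not installed; the default attention implementation will be used")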

Inference

Run this model with:

import requests
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor
torch.cuda.set_device(0)

model_id_or_path = "leon-se/Aria-sequential_mlp-FP8-dynamic"

# the FP8 weights are handled by compressed-tensors; non-quantized modules run in bfloat16
model = AutoModelForCausalLM.from_pretrained(model_id_or_path, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(model_id_or_path, trust_remote_code=True)

image_path = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png"

image = Image.open(requests.get(image_path, stream=True).raw)

messages = [
    {
        "role": "user",
        "content": [
            {"text": None, "type": "image"},
            {"text": "what is the image?", "type": "text"},
        ],
    }
]

text = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=text, images=image, return_tensors="pt")
inputs["pixel_values"] = inputs["pixel_values"].to(model.dtype)
inputs = {k: v.to(model.device) for k, v in inputs.items()}

with torch.inference_mode(), torch.amp.autocast("cuda", dtype=torch.bfloat16):
    output = model.generate(
        **inputs,
        max_new_tokens=500,
        stop_strings=["<|im_end|>"],
        tokenizer=processor.tokenizer,
        do_sample=True,
        temperature=0.9,
    )
    output_ids = output[0][inputs["input_ids"].shape[1]:]
    result = processor.decode(output_ids, skip_special_tokens=True)

print(result)
print(f'Max allocated memory: {torch.cuda.max_memory_allocated(device="cuda") / 1024 ** 3:.3f}GiB')
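
To stream tokens as they are generated instead of waiting for the full completion, transformers' TextStreamer can be passed to generate; a minimal variant reusing model, processor, and inputs from above:

from transformers import TextStreamer

# Prints tokens to stdout as they arrive; skip_prompt suppresses the echoed input.
streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
with torch.inference_mode(), torch.amp.autocast("cuda", dtype=torch.bfloat16):
    model.generate(
        **inputs,
        max_new_tokens=500,
        stop_strings=["<|im_end|>"],
        tokenizer=processor.tokenizer,
        streamer=streamer,
    )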

Quantization

from transformers import AutoProcessor
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers import SparseAutoModelForCausalLM, oneshot

model_name = "rhymes-ai/Aria-sequential_mlp"

model = SparseAutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True)
processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)

recipe = QuantizationModifier(
    targets="Linear",
    scheme="FP8_DYNAMIC",
    # keep the LM head, the multimodal projector, and the vision tower in full precision
    ignore=["re:.*lm_head", "re:multi_modal_projector.*", "re:vision_tower.*"],
)

folder = model_name.split("/")[1] + "-FP8-Dynamic"
oneshot(model=model, recipe=recipe, output_dir=folder)
processor.save_pretrained(folder)
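
As an optional smoke test (a sketch, assuming the oneshot run completed and wrote a quantization_config entry into config.json, as llm-compressor does for compressed-tensors checkpoints), the exported folder can be reloaded with plain transformers:

from transformers import AutoModelForCausalLM

check = AutoModelForCausalLM.from_pretrained(folder, device_map="auto", torch_dtype="auto", trust_remote_code=True)
print(check.config.quantization_config)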

Model details

25.3B parameters (Safetensors); tensor types BF16 and F8_E4M3.