|
import os
import sys

import torch
from datasets import load_dataset
from rkllm.api import RKLLM
from torch import nn
from tqdm import tqdm
from transformers import AutoTokenizer
|
|
|
|
|
modelpath = '.' |
|
|
|
llm = RKLLM() |
|
|
|
|
|
|
|
|
|
ret = llm.load_huggingface(model=modelpath, model_lora = None, device='cpu') |
|
|
|
if ret != 0: |
|
print('Load model failed!') |
|
exit(ret) |
|
|
|
|
|
dataset = "./data_quant.json" |
|
|
|
|
|
|
|
qparams = None |
|
|
|
ret = llm.build(do_quantization=True, optimization_level=1, quantized_dtype='w8a8', |
|
quantized_algorithm='normal', target_platform='rk3588', num_npu_core=3, extra_qparams=qparams) |
|
|
|
if ret != 0: |
|
print('Build model failed!') |
|
exit(ret) |
|
|
|
|
|
ret = llm.export_rkllm("./qwen.rkllm") |
|
if ret != 0: |
|
print('Export model failed!') |
|
exit(ret) |
|
|