package com.yolo.aim.yoloaim.criteria;

import ai.djl.Application;
import ai.djl.Device;
import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.output.DetectedObjects;
import ai.djl.repository.zoo.Criteria;
import ai.djl.training.util.ProgressBar;
import ai.djl.translate.Translator;
import com.yolo.aim.yoloaim.util.YoloV5Translator;

import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Builds the DJL {@link Criteria} used to locate and load the YOLOv5
 * object-detection model (TorchScript / PyTorch engine, CPU device).
 */
public class ModelCriteria {

    /** Default file name of the exported TorchScript model artifact. */
    private static final String DEFAULT_MODEL_NAME = "best.torchscript.pt";

    /**
     * Creates loading criteria for the default TorchScript YOLOv5 model
     * ({@value #DEFAULT_MODEL_NAME}) located under {@code path}.
     *
     * @param path directory containing the model artifacts (model file and synset.txt)
     * @return criteria describing how to locate and load the model
     */
    public Criteria<Image, DetectedObjects> getCriteria(String path) {
        return getCriteria(path, DEFAULT_MODEL_NAME);
    }

    /**
     * Creates loading criteria for a TorchScript YOLOv5 model.
     *
     * @param path      directory containing the model artifacts
     * @param modelName file name of the TorchScript model inside {@code path}
     * @return criteria describing how to locate and load the model
     */
    public Criteria<Image, DetectedObjects> getCriteria(String path, String modelName) {
        // Translator pre-processing arguments. The map is method-local and
        // single-threaded, so a plain HashMap suffices (no need for ConcurrentHashMap).
        Map<String, Object> arguments = new HashMap<>();
        arguments.put("width", 640);
        arguments.put("height", 640);
        arguments.put("resize", true);  // resize the input image to width x height
        arguments.put("rescale", true); // scale pixel values into the 0-1 range
        Translator<Image, DetectedObjects> translator =
                YoloV5Translator.builder(arguments)
                        .optSynsetArtifactName("synset.txt") // class-label file
                        .build();
        return Criteria.builder()
                // FIX: YOLOv5 producing DetectedObjects is object detection,
                // not instance segmentation.
                .optApplication(Application.CV.OBJECT_DETECTION)
                .setTypes(Image.class, DetectedObjects.class)
                .optDevice(Device.cpu())
                .optModelPath(Paths.get(path))
                .optModelName(modelName)
                .optTranslator(translator)
                .optProgress(new ProgressBar())
                .optEngine("PyTorch")
                .build();
    }
}
