File size: 3,019 Bytes
2a4fba6
9421f12
 
 
 
 
 
 
 
 
 
 
1ef8d50
9421f12
 
 
2a4fba6
 
 
 
 
9421f12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9e2f5fe
2a4fba6
 
 
6ed5b70
2a4fba6
 
 
 
9421f12
 
 
 
2a4fba6
9421f12
 
 
 
 
 
 
 
1ef8d50
 
 
 
9421f12
 
 
 
2a4fba6
9421f12
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import { env, AutoModel, AutoProcessor, RawImage } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.1';

// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;

// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');

const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
const IMAGE_SIZE = 640;

// Create a new object detection pipeline
status.textContent = 'Loading model...';
const model_id = 'onnx-community/yolov10n';
const model = await AutoModel.from_pretrained(model_id, {
    quantized: false,    // (Optional) Use unquantized version.
});
const processor = await AutoProcessor.from_pretrained(model_id);
status.textContent = 'Ready';

// Run detection on the bundled example image when the link is clicked.
const onExampleClick = (event) => {
    // Stop the anchor's default navigation before kicking off detection.
    event.preventDefault();
    detect(EXAMPLE_URL);
};
example.addEventListener('click', onExampleClick);

// Run detection on a user-selected image file.
fileUpload.addEventListener('change', (event) => {
    const [selectedFile] = event.target.files;
    if (!selectedFile) {
        return;
    }

    // Read the file as a data URL, then hand it to detect() once loaded.
    const reader = new FileReader();
    reader.onload = (loadEvent) => detect(loadEvent.target.result);
    reader.readAsDataURL(selectedFile);
});


// Detect objects in the image
async function detect(img) {
    imageContainer.innerHTML = '';
    imageContainer.style.backgroundImage = `url(${img})`;

    status.textContent = 'Analysing...';
    const image = await RawImage.read(img);
    const { pixel_values } = await processor(image);
    const { output0 } = await model({ images: pixel_values });
    const predictions = output0.tolist()[0];
    const threshold = 0.3;
    for (const [xmin, ymin, xmax, ymax, score, id] of predictions) {
        if (score < threshold) continue;
        renderBox(xmin, ymin, xmax, ymax, score, model.config.id2label[id]);
    }
    status.textContent = '';
}

// Render a bounding box and label on the image
function renderBox(xmin, ymin, xmax, ymax, score, label) {
    // Generate a random color for the box
    const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);

    // Draw the box
    const boxElement = document.createElement('div');
    boxElement.className = 'bounding-box';
    Object.assign(boxElement.style, {
        borderColor: color,
        left: 100 * xmin / IMAGE_SIZE + '%',
        top: 100 * ymin / IMAGE_SIZE + '%',
        width: 100 * (xmax - xmin) / IMAGE_SIZE + '%',
        height: 100 * (ymax - ymin) / IMAGE_SIZE + '%',
    })

    // Draw label
    const labelElement = document.createElement('span');
    labelElement.textContent = `${label} (${(score * 100).toFixed(2)}%)`;
    labelElement.className = 'bounding-box-label';
    labelElement.style.backgroundColor = color;

    boxElement.appendChild(labelElement);
    imageContainer.appendChild(boxElement);
}