Xenova (HF staff) committed
Commit: c0594c6
Parent: 6ed5b70

Update index.js

Files changed (1):
  1. index.js +34 -23
index.js CHANGED
@@ -1,4 +1,4 @@
-import { env, AutoModel, AutoProcessor, RawImage } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.1';
+import { env, AutoProcessor, AutoModel, RawImage } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.15.1';
 
 // Since we will download the model from the Hugging Face Hub, we can skip the local model check
 env.allowLocalModels = false;
@@ -10,15 +10,13 @@ const imageContainer = document.getElementById('container');
 const example = document.getElementById('example');
 
 const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
-const IMAGE_SIZE = 640;
+const THRESHOLD = 0.25;
 
 // Create a new object detection pipeline
 status.textContent = 'Loading model...';
-const model_id = 'onnx-community/yolov10n';
-const model = await AutoModel.from_pretrained(model_id, {
-    quantized: false, // (Optional) Use unquantized version.
-});
+const model_id = 'onnx-community/yolov10s';
 const processor = await AutoProcessor.from_pretrained(model_id);
+const model = await AutoModel.from_pretrained(model_id);
 status.textContent = 'Ready';
 
 example.addEventListener('click', (e) => {
@@ -42,25 +40,38 @@ fileUpload.addEventListener('change', function (e) {
 
 
 // Detect objects in the image
-async function detect(img) {
+async function detect(url) {
+    // Update UI
     imageContainer.innerHTML = '';
-    imageContainer.style.backgroundImage = `url(${img})`;
+
+    // Read image
+    const image = await RawImage.fromURL(url);
+
+    // Set container width and height depending on the image aspect ratio
+    const ar = image.width / image.height;
+    const [cw, ch] = (ar > 1) ? [640, 640 / ar] : [640 * ar, 640];
+    imageContainer.style.width = `${cw}px`;
+    imageContainer.style.height = `${ch}px`;
+    imageContainer.style.backgroundImage = `url(${url})`;
 
     status.textContent = 'Analysing...';
-    const image = await RawImage.read(img);
-    const { pixel_values } = await processor(image);
-    const { output0 } = await model({ images: pixel_values });
-    const predictions = output0.tolist()[0];
-    const threshold = 0.3;
-    for (const [xmin, ymin, xmax, ymax, score, id] of predictions) {
-        if (score < threshold) continue;
-        renderBox(xmin, ymin, xmax, ymax, score, model.config.id2label[id]);
-    }
+
+    // Preprocess image
+    const inputs = await processor(image);
+
+    // Predict bounding boxes
+    const { output0 } = await model(inputs);
+
     status.textContent = '';
+
+    const sizes = inputs.reshaped_input_sizes[0].reverse();
+    output0.tolist()[0].forEach(x => renderBox(x, sizes));
 }
 
 // Render a bounding box and label on the image
-function renderBox(xmin, ymin, xmax, ymax, score, label) {
+function renderBox([xmin, ymin, xmax, ymax, score, id], [w, h]) {
+    if (score < THRESHOLD) return; // Skip boxes with low confidence
+
     // Generate a random color for the box
     const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
 
@@ -69,15 +80,15 @@ function renderBox(xmin, ymin, xmax, ymax, score, label) {
     boxElement.className = 'bounding-box';
     Object.assign(boxElement.style, {
         borderColor: color,
-        left: 100 * xmin / IMAGE_SIZE + '%',
-        top: 100 * ymin / IMAGE_SIZE + '%',
-        width: 100 * (xmax - xmin) / IMAGE_SIZE + '%',
-        height: 100 * (ymax - ymin) / IMAGE_SIZE + '%',
+        left: 100 * xmin / w + '%',
+        top: 100 * ymin / h + '%',
+        width: 100 * (xmax - xmin) / w + '%',
+        height: 100 * (ymax - ymin) / h + '%',
     })
 
     // Draw label
     const labelElement = document.createElement('span');
-    labelElement.textContent = `${label} (${(score * 100).toFixed(2)}%)`;
+    labelElement.textContent = model.config.id2label[id];
     labelElement.className = 'bounding-box-label';
     labelElement.style.backgroundColor = color;
 
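For reference, here is how detect() and renderBox() read after this commit, assembled from the '+' and context lines above. The creation of boxElement and the final appendChild calls fall between hunks and are not visible in this diff, so those lines are assumptions, marked as such below.

```js
// Detect objects in the image
async function detect(url) {
    // Update UI
    imageContainer.innerHTML = '';

    // Read image
    const image = await RawImage.fromURL(url);

    // Set container width and height depending on the image aspect ratio
    const ar = image.width / image.height;
    const [cw, ch] = (ar > 1) ? [640, 640 / ar] : [640 * ar, 640];
    imageContainer.style.width = `${cw}px`;
    imageContainer.style.height = `${ch}px`;
    imageContainer.style.backgroundImage = `url(${url})`;

    status.textContent = 'Analysing...';

    // Preprocess image
    const inputs = await processor(image);

    // Predict bounding boxes
    const { output0 } = await model(inputs);

    status.textContent = '';

    // Each row of output0 is [xmin, ymin, xmax, ymax, score, class_id]
    const sizes = inputs.reshaped_input_sizes[0].reverse();
    output0.tolist()[0].forEach(x => renderBox(x, sizes));
}

// Render a bounding box and label on the image
function renderBox([xmin, ymin, xmax, ymax, score, id], [w, h]) {
    if (score < THRESHOLD) return; // Skip boxes with low confidence

    // Generate a random color for the box
    const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, '0');

    // Create the box element (assumption: this sits between hunks and is unchanged)
    const boxElement = document.createElement('div');
    boxElement.className = 'bounding-box';
    Object.assign(boxElement.style, {
        borderColor: color,
        left: 100 * xmin / w + '%',
        top: 100 * ymin / h + '%',
        width: 100 * (xmax - xmin) / w + '%',
        height: 100 * (ymax - ymin) / h + '%',
    });

    // Draw label
    const labelElement = document.createElement('span');
    labelElement.textContent = model.config.id2label[id];
    labelElement.className = 'bounding-box-label';
    labelElement.style.backgroundColor = color;

    // Attach label and box to the container (assumption: not visible in this diff)
    boxElement.appendChild(labelElement);
    imageContainer.appendChild(boxElement);
}
```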
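The box coordinates returned by the model are in the processor's resized-input pixel space, and inputs.reshaped_input_sizes[0] stores that size as [height, width], which is why the code reverses it to [width, height] before dividing. A small worked example with hypothetical numbers:

```js
// Hypothetical 1280x720 source image (ar = 16/9 > 1): the container becomes 640x360
const ar = 1280 / 720;
const [cw, ch] = (ar > 1) ? [640, 640 / ar] : [640 * ar, 640];
console.log(cw, ch); // 640 360

// Suppose the processor resized the image to the same 640x360, i.e.
// inputs.reshaped_input_sizes[0] === [360, 640], reversed to [w, h] = [640, 360],
// and the model predicts this box in that pixel space:
const [w, h] = [640, 360];
const [xmin, ymin, xmax, ymax] = [64, 36, 320, 180];

// Percentage styles track the container no matter how it is sized on screen
console.log(100 * xmin / w + '%');          // '10%'
console.log(100 * ymin / h + '%');          // '10%'
console.log(100 * (xmax - xmin) / w + '%'); // '40%'
console.log(100 * (ymax - ymin) / h + '%'); // '40%'
```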
 
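The two entry points visible in the hunk context, example.addEventListener('click', ...) and fileUpload.addEventListener('change', ...), have their bodies outside this diff. A plausible wiring, consistent with detect(url) now taking anything RawImage.fromURL can fetch; both handler bodies below are assumptions, not part of the commit:

```js
// Run detection on the bundled example image (body assumed)
example.addEventListener('click', (e) => {
    e.preventDefault();
    detect(EXAMPLE_URL);
});

// Run detection on an uploaded file by handing detect() a data URL (body assumed)
fileUpload.addEventListener('change', function (e) {
    const file = e.target.files[0];
    if (!file) return;

    const reader = new FileReader();
    reader.onload = (e2) => detect(e2.target.result);
    reader.readAsDataURL(file);
});
```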