Spaces:
Running
Running
Create index.js
Browse files
index.js
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.13.0';
|
2 |
+
|
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;

// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');

// Example image offered by the "example" link below the upload control.
const EXAMPLE_URL = 'https://i.imgur.com/mLvqQws.jpg';

// Create a new image segmentation pipeline.
// Top-level await: the module finishes loading only after the model has
// downloaded, so the listeners below are attached with `segmenter` ready.
status.textContent = 'Loading model...';
const segmenter = await pipeline('image-segmentation', 'Xenova/face-parsing');
status.textContent = 'Ready';
// Segment the bundled example image when its link is clicked.
example.addEventListener('click', (event) => {
    event.preventDefault();
    segment(EXAMPLE_URL);
});
23 |
+
|
// Segment a user-chosen image: read the selected file as a data URL,
// then hand the result to segment().
fileUpload.addEventListener('change', (event) => {
    const [file] = event.target.files;
    if (!file) {
        return;
    }

    const reader = new FileReader();

    // Set up a callback for when the file has been read
    reader.onload = (loadEvent) => segment(loadEvent.target.result);

    reader.readAsDataURL(file);
});
37 |
+
|
// Perform image segmentation on `img` (a URL or data URL) and render one
// coloured mask overlay per predicted region.
async function segment(img) {
    // Drop any previous overlays and show the source image as the background.
    imageContainer.innerHTML = '';
    imageContainer.style.backgroundImage = `url(${img})`;

    status.textContent = 'Analysing...';
    const output = await segmenter(img);
    status.textContent = '';

    // One canvas overlay per predicted mask; the index picks the colour.
    for (const [index, result] of output.entries()) {
        renderMask(result, index);
    }
}
48 |
+
|
// Mapping of label to colour: mask i is tinted with colours[i % colours.length].
const colours = [
    [234, 76, 76], // red
    [28, 180, 129], // sea green
    [234, 155, 21], // orange
    [67, 132, 243], // blue
    [243, 117, 36], // orange-red
    [145, 98, 243], // purple
    [21, 178, 208], // cyan
    [132, 197, 33], // lime
];
60 |
+
|
/**
 * Render a single segmentation mask as a coloured canvas overlay.
 *
 * @param {Object} result - one entry of the pipeline output
 * @param {Object} result.mask - image-like object with `width`, `height` and
 *        `data` (one byte per pixel; non-zero marks the region)
 * @param {string} result.label - class label, stored on the canvas for hover
 * @param {number} i - mask index, used to pick a colour
 */
function renderMask({ mask, label }, i) {
    // Create new canvas sized to the mask
    const canvas = document.createElement('canvas');
    canvas.width = mask.width;
    canvas.height = mask.height;
    canvas.setAttribute('data-label', label);

    // Create context and allocate buffer for pixel data (RGBA: 4 bytes/pixel)
    const context = canvas.getContext('2d');
    const imageData = context.createImageData(canvas.width, canvas.height);
    const pixelData = imageData.data;

    // Choose colour based on index
    const [r, g, b] = colours[i % colours.length];

    // Fill mask with colour. Iterate over the mask pixels (1 byte each), not
    // over pixelData (4 bytes each): the previous bound of pixelData.length
    // read past the end of mask.data, and since `undefined !== 0` is true the
    // resulting out-of-range stores were only harmless because typed arrays
    // silently drop out-of-bounds writes. (Loop variable renamed so it no
    // longer shadows the colour index `i`.)
    for (let p = 0; p < mask.data.length; ++p) {
        if (mask.data[p] !== 0) {
            const offset = 4 * p;
            pixelData[offset] = r; // red
            pixelData[offset + 1] = g; // green
            pixelData[offset + 2] = b; // blue
            pixelData[offset + 3] = 255; // alpha (fully opaque)
        }
    }

    // Draw image data to context
    context.putImageData(imageData, 0, 0);

    // Add canvas to container
    imageContainer.appendChild(canvas);
}
94 |
+
|
/**
 * Clamp a value inside a range [min, max].
 * @param {number} x - value to clamp
 * @param {number} [min=0] - lower bound
 * @param {number} [max=1] - upper bound
 * @returns {number} x limited to [min, max] (NaN passes through unchanged)
 */
function clamp(x, min = 0, max = 1) {
    if (x > max) return max;
    if (x < min) return min;
    return x;
}
99 |
+
|
// Attach hover event to image container: highlight the mask under the cursor
// and show its label, dimming all other masks.
imageContainer.addEventListener('mousemove', e => {
    const canvases = imageContainer.getElementsByTagName('canvas');
    if (canvases.length === 0) return;

    // Get bounding box of the container (the canvases are stretched over it)
    const bb = imageContainer.getBoundingClientRect();

    // Get the mouse coordinates relative to the container, normalised to [0, 1]
    const mouseX = clamp((e.clientX - bb.left) / bb.width);
    const mouseY = clamp((e.clientY - bb.top) / bb.height);

    // Loop over all canvases
    for (const canvas of canvases) {

        // Map the normalised coordinates into this canvas's pixel space
        // (each mask canvas has its own width/height).
        const canvasX = canvas.width * mouseX;
        const canvasY = canvas.height * mouseY;

        // Get the pixel data of the mouse coordinates (1x1 RGBA sample)
        const context = canvas.getContext('2d');
        const pixelData = context.getImageData(canvasX, canvasY, 1, 1).data;

        // Apply hover effect if not fully opaque: renderMask writes alpha 255
        // inside the mask and leaves 0 elsewhere, so alpha < 255 means the
        // cursor is outside this mask.
        if (pixelData[3] < 255) {
            canvas.style.opacity = 0.1;
        } else {
            canvas.style.opacity = 0.8;
            status.textContent = canvas.getAttribute('data-label');
        }
    }
});
131 |
+
|
// Reset canvas opacities and clear the status text when the pointer
// leaves the image container.
imageContainer.addEventListener('mouseleave', () => {
    const canvases = imageContainer.getElementsByTagName('canvas');
    if (canvases.length === 0) return;

    for (const canvas of canvases) {
        canvas.style.opacity = 0.6;
    }
    status.textContent = '';
});