Yang Gu committed on
Commit 80fd73c
1 Parent(s): cec88af

Add gallery mode

Files changed (8)
  1. demos.js +116 -0
  2. demos/ort-sam/ort-sam.mp4 +3 -0
  3. demos/tjs-clip/tjs-clip.mp4 +3 -0
  4. gallery.html +76 -0
  5. index.html +1 -0
  6. main.js +2 -117
  7. util.js +54 -0
  8. video-na.mp4 +3 -0
demos.js ADDED
@@ -0,0 +1,116 @@
+ const categoryDemos = [
+   {
+     name: `ONNX Runtime`,
+     description: `ONNX Runtime`,
+     demos: {
+       'ort-phi3': {
+         name: 'phi3',
+         description: `phi3 from Microsoft`,
+         filename: "ort-phi3",
+       },
+       'ort-sam': {
+         name: 'Segment Anything',
+         description: `Segment Anything from https://github.com/guschmue/ort-webgpu/tree/master/segment-anything`,
+         filename: "ort-sam",
+         video: "ort-sam.mp4",
+       },
+       'ort-sdturbo': {
+         name: 'Stable Diffusion Turbo',
+         description: `Stable Diffusion Turbo from https://github.com/guschmue/ort-webgpu/tree/master/sd-turbo`,
+         filename: "ort-sdturbo",
+       },
+       'ort-tinyllama': {
+         name: 'Tiny Llama',
+         description: `Tiny Llama from https://github.com/guschmue/ort-webgpu/tree/master/chat`,
+         filename: "ort-tinyllama",
+       },
+       'ort-yolo': {
+         name: 'Yolo',
+         description: `Yolo V9 from https://github.com/guschmue/ort-webgpu/tree/master/yolov9`,
+         filename: "ort-yolo",
+       },
+     },
+   },
+   {
+     name: `TFLite`,
+     description: `TFLite`,
+     demos: {
+       'tflite-gemma': {
+         name: 'Gemma',
+         description: `Gemma with TFLite and MediaPipe from https://github.com/googlesamples/mediapipe/tree/main/examples/llm_inference/js, <a href=https://developers.googleblog.com/2024/03/running-large-language-models-on-device-with-mediapipe-andtensorflow-lite.html>more info.</a>`,
+         filename: "tflite-gemma",
+       },
+     },
+   },
+   {
+     name: 'Transformers.js',
+     description: 'Transformers.js',
+     demos: {
+       benchmark: {
+         name: 'Benchmark',
+         description: `Benchmark by Transformers.js`,
+         filename: "https://huggingface.co/spaces/Xenova/webgpu-embedding-benchmark",
+         openInNewTab: true,
+       },
+       'tjs-clip': {
+         name: 'OpenAI Clip',
+         description: `Zero-shot Image Classification with OpenAI's CLIP by Transformers.js`,
+         filename: "https://huggingface.co/spaces/Xenova/webgpu-clip",
+         openInNewTab: true,
+         video: 'tjs-clip.mp4',
+       },
+       depthAnything: {
+         name: 'Depth Anything',
+         description: `Depth Anything by Transformers.js`,
+         filename: "https://huggingface.co/spaces/Xenova/webgpu-depth-anything",
+         openInNewTab: true,
+       },
+       removeImageBackground: {
+         name: 'Remove Image Background',
+         description: `Image Background Removal by Transformers.js`,
+         filename: "https://huggingface.co/spaces/Xenova/remove-background-webgpu",
+         openInNewTab: true,
+       },
+       removeVideoBackground: {
+         name: 'Remove Video Background',
+         description: `Video Background Removal by Transformers.js`,
+         filename: "https://huggingface.co/spaces/Xenova/webgpu-video-background-removal",
+         openInNewTab: true,
+       },
+     },
+   },
+   {
+     name: 'TVM',
+     description: 'TVM',
+     demos: {
+       sd: {
+         name: 'Web Stable Diffusion',
+         description: `Web Stable Diffusion`,
+         filename: "https://websd.mlc.ai/",
+         openInNewTab: true,
+       },
+       llm: {
+         name: 'Web LLM (Gemma/LLama/Mistral/Phi)',
+         description: `Web LLM`,
+         filename: "https://webllm.mlc.ai/",
+         openInNewTab: true,
+       },
+     },
+   },
+   {
+     name: `Developer Only`,
+     description: `Developer Only`,
+     demos: {
+       'ort-phi2-test': {
+         name: 'phi2 test',
+         description: `phi2 from Microsoft`,
+         filename: "ort-phi2-test",
+       },
+       'ort-phi3-test': {
+         name: 'phi3 test',
+         description: `phi3 from Microsoft`,
+         filename: "ort-phi3-test",
+       },
+     },
+   },
+ ];
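
demos.js now holds the registry shared by index.html and gallery.html: an array of categories, each with a demos map keyed by the demo's directory name. filename is either a local directory under demos/ or an absolute URL (paired with openInNewTab), and video names an optional preview clip that gallery.html looks up at demos/<key>/<video>. A minimal sketch of adding one more entry under the same schema; the key, name, URL, and video filename below are hypothetical, not part of this commit:

// Hypothetical entry, following the schema above (not part of this commit).
const tjsCategory = categoryDemos.find((category) => category.name === 'Transformers.js');
tjsCategory.demos['tjs-example'] = {
  name: 'Example Demo',                        // label rendered by gallery.html
  description: `Hypothetical demo used to illustrate the schema`,
  filename: "https://example.com/webgpu-demo", // absolute URL, so it opens in a new tab
  openInNewTab: true,
  video: 'tjs-example.mp4',                    // looked up at demos/tjs-example/tjs-example.mp4; omit to fall back to video-na.mp4
};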
demos/ort-sam/ort-sam.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:263faf257fc41043f809470a42b3b4e33d436162072339c7fd83eb88f0518a51
+ size 356958
demos/tjs-clip/tjs-clip.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0197ad09e1ad13b24bad2faa64368777c472a05e8bbabda8ccfaff5372b17b82
+ size 2723436
gallery.html ADDED
@@ -0,0 +1,76 @@
+ <style>
+   h1 {
+     text-align: center;
+   }
+
+   ul {
+     list-style: none;
+   }
+
+   li {
+     display: inline-block;
+     margin-right: 10px;
+     /* add spacing between items */
+   }
+
+   a {
+     text-align: center;
+     display: block;
+   }
+ </style>
+
+ <body>
+   <h1 align="center">WebAI Demos (Gallery Mode)</h1>
+   <script src="demos.js"></script>
+   <script>
+     "use strict";
+
+     const demosElem = document.createElement('div');
+     document.body.appendChild(demosElem);
+     // style.width/height expect CSS strings; bare numbers are ignored.
+     demosElem.style.width = `${screen.width}px`;
+     demosElem.style.height = `${screen.height}px`;
+
+     for (const { name, description, demos } of categoryDemos) {
+       const ul = document.createElement('ul');
+       const h2 = document.createElement('h2');
+       h2.textContent = name;
+       ul.appendChild(h2);
+       for (const [key, demoInfo] of Object.entries(demos)) {
+         const li = document.createElement('li');
+         const video = document.createElement('video');
+         video.width = 600;
+         video.height = 600;
+         if (demoInfo.video) {
+           video.src = `demos/${key}/${demoInfo.video}`;
+         } else {
+           video.src = 'video-na.mp4';
+         }
+
+         video.autoplay = true;
+         video.muted = true;
+         video.loop = true;
+         video.controls = true;
+
+         const a = document.createElement('a');
+         a.href = `demos/${demoInfo.filename}`;
+         a.textContent = demoInfo.name || key;
+         a.style.fontSize = '28px';
+         if (demoInfo.openInNewTab) {
+           a.target = '_blank';
+         } else {
+           // setSampleIFrameURL is defined in main.js, which this page does not load.
+           a.onclick = (e) => {
+             setSampleIFrameURL(e, demoInfo);
+           };
+         }
+
+         li.appendChild(video);
+         li.appendChild(a);
+         ul.appendChild(li);
+       }
+       demosElem.appendChild(ul);
+     }
+   </script>
+ </body>
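
One dependency worth flagging: gallery.html loads only demos.js, while setSampleIFrameURL is defined in main.js. For entries without openInNewTab, the click handler above would therefore throw a ReferenceError (the browser then simply follows the plain href). A small defensive sketch, assuming the intended fallback is ordinary navigation to demos/<filename>; wireDemoLink is a hypothetical helper name:

// Sketch: intercept the click only when the helper from main.js is actually present;
// otherwise let the browser follow the link's href (demos/<filename>).
function wireDemoLink(a, demoInfo) {
  if (typeof setSampleIFrameURL === 'function') {
    a.onclick = (e) => setSampleIFrameURL(e, demoInfo);
  }
}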
index.html CHANGED
@@ -2,6 +2,7 @@
   <title>WebAI Demos</title>
   <link href="css/styles.css" rel="stylesheet">
   </head>
+  <script src="demos.js"></script>
   <script defer type="module" src="main.js"></script>
   <body>
   <div class="wrapper">
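
demos.js is loaded here as a classic script ahead of the deferred main.js module, so categoryDemos is exposed as a global that both main.js and gallery.html can read. If stricter scoping were ever wanted, an ES-module variant would also work; this is a sketch only, not what this commit does:

// demos.js — hypothetical ES-module variant (not this commit's approach)
export const categoryDemos = [
  { name: 'ONNX Runtime', description: 'ONNX Runtime', demos: {} },
];

// main.js — would then import the registry instead of reading a global
import { categoryDemos } from './demos.js';
console.log(categoryDemos.length);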
main.js CHANGED
@@ -49,121 +49,6 @@ function createElem(tag, attrs = {}, children = []) {
    return elem;
  }

- const demoCategories = [
-   {
-     name: `ONNX Runtime`,
-     description: `ONNX Runtime`,
-     demos: {
-       'ort-phi3': {
-         name: 'phi3',
-         description: `phi3 from Microsoft`,
-         filename: "ort-phi3",
-       },
-       'ort-sam': {
-         name: 'Segment Anything',
-         description: `Segment Anything from https://github.com/guschmue/ort-webgpu/tree/master/segment-anything`,
-         filename: "ort-sam",
-       },
-       'ort-sdturbo': {
-         name: 'Stable Diffusion Turbo',
-         description: `Stable Diffusion Turbo from https://github.com/guschmue/ort-webgpu/tree/master/sd-turbo`,
-         filename: "ort-sdturbo",
-       },
-       'ort-tinyllama': {
-         name: 'Tiny Llama',
-         description: `Tiny Llama from https://github.com/guschmue/ort-webgpu/tree/master/chat`,
-         filename: "ort-tinyllama",
-       },
-       'ort-yolo': {
-         name: 'Yolo',
-         description: `Yolo V9 from https://github.com/guschmue/ort-webgpu/tree/master/yolov9`,
-         filename: "ort-yolo",
-       },
-     },
-   },
-   {
-     name: `TFLite`,
-     description: `TFLite`,
-     demos: {
-       'tflite-gemma': {
-         name: 'Gemma',
-         description: `Gemma with TFLite and MediaPipe from https://github.com/googlesamples/mediapipe/tree/main/examples/llm_inference/js, <a href=https://developers.googleblog.com/2024/03/running-large-language-models-on-device-with-mediapipe-andtensorflow-lite.html>more info.</a>`,
-         filename: "tflite-gemma",
-       },
-     },
-   },
-   {
-     name: 'Transformers.js',
-     description: 'Transformers.js',
-     demos: {
-       benchmark: {
-         name: 'Benchmark',
-         description: `Benchmark by Transformers.js`,
-         filename: "https://huggingface.co/spaces/Xenova/webgpu-embedding-benchmark",
-         openInNewTab: true,
-       },
-       clip: {
-         name: 'OpenAI Clip',
-         description: `Zero-shot Image Classification with OpenAI's CLIP by Transformers.js`,
-         filename: "https://huggingface.co/spaces/Xenova/webgpu-clip",
-         openInNewTab: true,
-       },
-       depthAnything: {
-         name: 'Depth Anything',
-         description: `Depth Anything by Transformers.js`,
-         filename: "https://huggingface.co/spaces/Xenova/webgpu-depth-anything",
-         openInNewTab: true,
-       },
-       removeImageBackground: {
-         name: 'Remove Image Background',
-         description: `Image Background Removal by Transformers.js`,
-         filename: "https://huggingface.co/spaces/Xenova/remove-background-webgpu",
-         openInNewTab: true,
-       },
-       removeVideoBackground: {
-         name: 'Remove Video Background',
-         description: `Video Background Removal by Transformers.js`,
-         filename: "https://huggingface.co/spaces/Xenova/webgpu-video-background-removal",
-         openInNewTab: true,
-       },
-     },
-   },
-   {
-     name: 'TVM',
-     description: 'TVM',
-     demos: {
-       sd: {
-         name: 'Web Stable Diffusion',
-         description: `Web Stable Diffusion`,
-         filename: "https://websd.mlc.ai/",
-         openInNewTab: true,
-       },
-       llm: {
-         name: 'Web LLM (Gemma/LLama/Mistral/Phi)',
-         description: `Web LLM`,
-         filename: "https://webllm.mlc.ai/",
-         openInNewTab: true,
-       },
-     },
-   },
-   {
-     name: `Developer Only`,
-     description: `Developer Only`,
-     demos: {
-       'ort-phi2-test': {
-         name: 'phi2 test',
-         description: `phi2 from Microsoft`,
-         filename: "ort-phi2-test",
-       },
-       'ort-phi3-test': {
-         name: 'phi3 test',
-         description: `phi3 from Microsoft`,
-         filename: "ort-phi3-test",
-       },
-     },
-   },
- ];
-
  function _extends() {
    _extends = Object.assign ? Object.assign.bind() : function (target) {
      for (var i = 1; i < arguments.length; i++) {
@@ -32456,7 +32341,7 @@ function setSampleIFrame(demoInfo, search = '') {
    // Replace the iframe because changing src adds to the user's history.
    demoContainerElem.innerHTML = '';
    if (filename) {
-     const src = url || `demo/${filename}${search}/index.html`;
+     const src = url || `demos/${filename}${search}/index.html`;
      if (window.location.origin.includes("hf")) {
        demoContainerElem.appendChild(createElem('iframe', { src: `/${src}` }));
      } else {
@@ -32489,7 +32374,7 @@ function setSampleIFrameURL(e, demoInfo) {
    // from those keys to each demo.
    const samplesByKey = new Map();
    // Generate the list of demos
-   for (const { name, description, demos } of demoCategories) {
+   for (const { name, description, demos } of categoryDemos) {
      for (const [key, demoInfo] of Object.entries(demos)) {
        samplesByKey.set(key, demoInfo);
      }
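
The functional changes to main.js are small: the inlined demoCategories array moves to demos.js as categoryDemos, and the iframe path gains the missing "s" so it matches the demos/ directory used elsewhere in this commit. A worked sketch of the corrected path construction, with illustrative values for the variables visible in the hunk (url is assumed unset for local demos):

// Worked example of the src built in setSampleIFrame after this change.
const filename = 'ort-sam';   // demoInfo.filename for a local demo
const search = '';            // the function's default
const url = undefined;        // assumed: only external demos supply an absolute URL
const src = url || `demos/${filename}${search}/index.html`;
console.log(src);             // "demos/ort-sam/index.html" (previously "demo/ort-sam/index.html")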
util.js CHANGED
@@ -1,3 +1,5 @@
+ "use strict";
+
  function updateGetModelProgress(name, loaded, total) {
    const progressElement = document.getElementById('model-progress');
    if (total === 0) {
@@ -134,3 +136,55 @@ async function loadScript(url) {
      document.body.append(script);
    })
  }
+
+ /*
+ Expected setup:
+ const webgpuCanvas = new OffscreenCanvas(canvasWidth, canvasHeight);
+ const webglCanvas = new OffscreenCanvas(canvasWidth, canvasHeight);
+ const gl = webglCanvas.getContext('webgl2');
+ const webglTexture = gl.createTexture();
+ */
+ function readGPUBufferSync(buffer, byteOffset, webgpuCanvas, gl, webglTexture, device) {
+   const bufferSize = buffer.size;
+   const dataBytes = bufferSize - byteOffset;
+
+   const canvasWidth = 8192;
+   const bytesPerRow = canvasWidth * 4;
+   // Assumes dataBytes is a multiple of bytesPerRow, so the copy covers whole rows.
+   const canvasHeight = dataBytes / bytesPerRow;
+
+   // Copy the WebGPU buffer into the canvas's current texture.
+   const webgpuContext = webgpuCanvas.getContext('webgpu');
+   webgpuContext.configure({
+     device: device,
+     format: 'rgba8unorm',
+     usage: GPUTextureUsage.COPY_DST,
+     alphaMode: 'premultiplied',
+   });
+   const webgpuTexture = webgpuContext.getCurrentTexture();
+   const commandEncoder = device.createCommandEncoder();
+   commandEncoder.copyBufferToTexture(
+     { buffer, offset: byteOffset, bytesPerRow },
+     { texture: webgpuTexture },
+     { width: canvasWidth, height: canvasHeight });
+   device.queue.submit([commandEncoder.finish()]);
+
+   // Read the WebGPU canvas back through WebGL.
+   gl.bindTexture(gl.TEXTURE_2D, webglTexture);
+   gl.pixelStorei(gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, true);
+   gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA8, gl.RGBA, gl.UNSIGNED_BYTE, webgpuCanvas);
+   gl.bindFramebuffer(gl.FRAMEBUFFER, gl.createFramebuffer());
+   gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, webglTexture, 0);
+   const pixels = new Uint8Array(dataBytes);
+   gl.readPixels(0, 0, canvasWidth, canvasHeight, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
+
+   return pixels;
+ }
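
readGPUBufferSync stages a GPUBuffer into the canvas's current WebGPU texture and reads it back through a WebGL2 framebuffer, presumably to get the bytes without awaiting mapAsync. A hedged usage sketch built from the setup comment above; the gpuBuffer name is a placeholder, the buffer must have been created with GPUBufferUsage.COPY_SRC, and its size is assumed to be a multiple of 8192 * 4 bytes so the copy covers whole rows:

// Usage sketch (assumes an existing GPUDevice `device` and a GPUBuffer `gpuBuffer`
// created with GPUBufferUsage.COPY_SRC; size assumed to be a multiple of 8192 * 4).
const canvasWidth = 8192;
const canvasHeight = gpuBuffer.size / (canvasWidth * 4);
const webgpuCanvas = new OffscreenCanvas(canvasWidth, canvasHeight);
const webglCanvas = new OffscreenCanvas(canvasWidth, canvasHeight);
const gl = webglCanvas.getContext('webgl2');
const webglTexture = gl.createTexture();

// Read the whole buffer back synchronously.
const bytes = readGPUBufferSync(gpuBuffer, 0, webgpuCanvas, gl, webglTexture, device);
const floats = new Float32Array(bytes.buffer); // reinterpret if the buffer held f32 data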
video-na.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d676f789a59ee5572bda9f12501f750e8b9f71282f3174b14249c82177e5b280
+ size 10492