Update index.js
Browse files
index.js
CHANGED
@@ -1,4 +1,6 @@
|
|
1 |
-
import {
|
|
|
|
|
2 |
import * as ort from "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.20.0/dist/ort.webgpu.mjs";
|
3 |
|
4 |
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
|
@@ -10,8 +12,8 @@ const fileUpload = document.getElementById('upload');
|
|
10 |
const imageContainer = document.getElementById('container');
|
11 |
const example = document.getElementById('example');
|
12 |
|
13 |
-
const EXAMPLE_URL = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
|
14 |
-
const INPUT_IMAGE_SIZE = [960, 960];
|
15 |
const HEIGHT_FACTOR = 10;
|
16 |
const WIDTH_FACTOR = 10;
|
17 |
const IMAGE_EMBED_SIZE = WIDTH_FACTOR * HEIGHT_FACTOR;
|
@@ -59,14 +61,14 @@ async function detect(img) {
|
|
59 |
}
|
60 |
|
61 |
|
62 |
-
export async function
|
63 |
-
imagePath,
|
64 |
-
query,
|
65 |
vision = true
|
66 |
) {
|
67 |
const suffix = QUANTIZATION ? `_${QUANTIZATION}` : "";
|
68 |
|
69 |
-
const config = (await getModelJSON(BASE_MODEL, "config.json"));
|
70 |
|
71 |
const prompt_head_len = new Tensor("int64", new BigInt64Array([5n]), [1]);
|
72 |
|
|
|
1 |
+
import { env, AutoTokenizer, RawImage } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.2';
|
2 |
+
import { getModelJSON } from "https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.2/src/utils/hub.js";
|
3 |
+
import { Tensor } from "https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.2/src/utils/tensor.js";
|
4 |
import * as ort from "https://cdn.jsdelivr.net/npm/onnxruntime-web@1.20.0/dist/ort.webgpu.mjs";
|
5 |
|
6 |
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
|
|
|
12 |
const imageContainer = document.getElementById('container');
|
13 |
const example = document.getElementById('example');
|
14 |
|
15 |
+
const EXAMPLE_URL = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg";
|
16 |
+
const INPUT_IMAGE_SIZE = [960, 960];
|
17 |
const HEIGHT_FACTOR = 10;
|
18 |
const WIDTH_FACTOR = 10;
|
19 |
const IMAGE_EMBED_SIZE = WIDTH_FACTOR * HEIGHT_FACTOR;
|
|
|
61 |
}
|
62 |
|
63 |
|
64 |
+
export async function simplifiedLLMVision(
|
65 |
+
imagePath,
|
66 |
+
query,
|
67 |
vision = true
|
68 |
) {
|
69 |
const suffix = QUANTIZATION ? `_${QUANTIZATION}` : "";
|
70 |
|
71 |
+
const config = await getModelJSON(BASE_MODEL, "config.json");
|
72 |
|
73 |
const prompt_head_len = new Tensor("int64", new BigInt64Array([5n]), [1]);
|
74 |
|