a414166402 committed on
Commit
1465445
1 Parent(s): e9d8568

first commit

Browse files
README.md CHANGED
@@ -1,3 +1,52 @@
1
  ---
2
- license: unknown
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ library_name: transformers.js
3
+ pipeline_tag: depth-estimation
4
  ---
5
+
6
+ https://huggingface.co/LiheYoung/depth-anything-small-hf with ONNX weights to be compatible with Transformers.js.
7
+
8
+ ## Usage (Transformers.js)
9
+
10
+ If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@xenova/transformers) using:
11
+ ```bash
12
+ npm i @xenova/transformers
13
+ ```
14
+
15
+ **Example:** Depth estimation with `Xenova/depth-anything-small-hf`.
16
+
17
+ ```js
18
+ import { pipeline } from '@xenova/transformers';
19
+
20
+ // Create depth-estimation pipeline
21
+ const depth_estimator = await pipeline('depth-estimation', 'Xenova/depth-anything-small-hf');
22
+
23
+ // Predict depth map for the given image
24
+ const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/bread_small.png';
25
+ const output = await depth_estimator(url);
26
+ // {
27
+ // predicted_depth: Tensor {
28
+ // dims: [350, 518],
29
+ // type: 'float32',
30
+ // data: Float32Array(181300) [...],
31
+ // size: 181300
32
+ // },
33
+ // depth: RawImage {
34
+ // data: Uint8Array(271360) [...],
35
+ // width: 640,
36
+ // height: 424,
37
+ // channels: 1
38
+ // }
39
+ // }
40
+ ```
41
+
42
+ You can visualize the output with:
43
+
44
+ ```js
45
+ output.depth.save('depth.png');
46
+ ```
47
+
48
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/61b253b7ac5ecaae3d1efe0c/Zj77mcNlZS3TmlT5wKaAO.png)
49
+
50
+ ---
51
+
52
+ Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
config.json ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_commit_hash": "9103bc8e2d785bcfd024e4bfd7e7c4593bc50bba",
3
+ "_name_or_path": "LiheYoung/depth-anything-small-hf",
4
+ "architectures": [
5
+ "DepthAnythingForDepthEstimation"
6
+ ],
7
+ "backbone": null,
8
+ "backbone_config": {
9
+ "architectures": [
10
+ "Dinov2Model"
11
+ ],
12
+ "hidden_size": 384,
13
+ "image_size": 518,
14
+ "model_type": "dinov2",
15
+ "num_attention_heads": 6,
16
+ "out_features": [
17
+ "stage9",
18
+ "stage10",
19
+ "stage11",
20
+ "stage12"
21
+ ],
22
+ "out_indices": [
23
+ 9,
24
+ 10,
25
+ 11,
26
+ 12
27
+ ],
28
+ "patch_size": 14,
29
+ "reshape_hidden_states": false,
30
+ "torch_dtype": "float32"
31
+ },
32
+ "fusion_hidden_size": 64,
33
+ "head_hidden_size": 32,
34
+ "head_in_index": -1,
35
+ "initializer_range": 0.02,
36
+ "model_type": "depth_anything",
37
+ "neck_hidden_sizes": [
38
+ 48,
39
+ 96,
40
+ 192,
41
+ 384
42
+ ],
43
+ "patch_size": 14,
44
+ "reassemble_factors": [
45
+ 4,
46
+ 2,
47
+ 1,
48
+ 0.5
49
+ ],
50
+ "reassemble_hidden_size": 384,
51
+ "transformers_version": null,
52
+ "use_pretrained_backbone": false
53
+ }
gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c222151fa1a941b7a075efdee44a85bb22e1bc07dce3659a03cb3f4eb53dd213
3
+ size 99170955
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4e2ffe9b0396ebd4a0c09b855762b186aab338e79f76531b09777aafc91db84
3
+ size 27524771
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_pad": false,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "ensure_multiple_of": 14,
7
+ "image_mean": [
8
+ 0.485,
9
+ 0.456,
10
+ 0.406
11
+ ],
12
+ "image_processor_type": "DPTImageProcessor",
13
+ "image_std": [
14
+ 0.229,
15
+ 0.224,
16
+ 0.225
17
+ ],
18
+ "keep_aspect_ratio": true,
19
+ "resample": 3,
20
+ "rescale_factor": 0.00392156862745098,
21
+ "size": {
22
+ "height": 518,
23
+ "width": 518
24
+ },
25
+ "size_divisor": null
26
+ }
quantize_config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "per_channel": true,
3
+ "reduce_range": true,
4
+ "per_model_config": {
5
+ "model": {
6
+ "op_types": [
7
+ "Constant",
8
+ "Gather",
9
+ "MatMul",
10
+ "Transpose",
11
+ "Equal",
12
+ "Expand",
13
+ "Concat",
14
+ "Sqrt",
15
+ "Pow",
16
+ "Relu",
17
+ "Cast",
18
+ "Conv",
19
+ "ConvTranspose",
20
+ "Slice",
21
+ "Unsqueeze",
22
+ "Reshape",
23
+ "Softmax",
24
+ "Erf",
25
+ "Sub",
26
+ "Identity",
27
+ "Where",
28
+ "ConstantOfShape",
29
+ "Squeeze",
30
+ "Div",
31
+ "ReduceMean",
32
+ "Shape",
33
+ "Resize",
34
+ "Mul",
35
+ "Add"
36
+ ],
37
+ "weight_type": "QUInt8"
38
+ }
39
+ }
40
+ }