Upload .mlpackage and .mlmodelc files

- instruct_pix2pix/Resources/TextEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- instruct_pix2pix/Resources/TextEncoder.mlmodelc/coremldata.bin +3 -0
- instruct_pix2pix/Resources/TextEncoder.mlmodelc/metadata.json +84 -0
- instruct_pix2pix/Resources/TextEncoder.mlmodelc/model.mil +0 -0
- instruct_pix2pix/Resources/TextEncoder.mlmodelc/weights/weight.bin +3 -0
- instruct_pix2pix/Resources/Unet.mlmodelc/analytics/coremldata.bin +3 -0
- instruct_pix2pix/Resources/Unet.mlmodelc/coremldata.bin +3 -0
- instruct_pix2pix/Resources/Unet.mlmodelc/metadata.json +105 -0
- instruct_pix2pix/Resources/Unet.mlmodelc/model.mil +0 -0
- instruct_pix2pix/Resources/Unet.mlmodelc/weights/weight.bin +3 -0
- instruct_pix2pix/Resources/VAEDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- instruct_pix2pix/Resources/VAEDecoder.mlmodelc/coremldata.bin +3 -0
- instruct_pix2pix/Resources/VAEDecoder.mlmodelc/metadata.json +77 -0
- instruct_pix2pix/Resources/VAEDecoder.mlmodelc/model.mil +0 -0
- instruct_pix2pix/Resources/VAEDecoder.mlmodelc/weights/weight.bin +3 -0
- instruct_pix2pix/Resources/VAEEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- instruct_pix2pix/Resources/VAEEncoder.mlmodelc/coremldata.bin +3 -0
- instruct_pix2pix/Resources/VAEEncoder.mlmodelc/metadata.json +77 -0
- instruct_pix2pix/Resources/VAEEncoder.mlmodelc/model.mil +0 -0
- instruct_pix2pix/Resources/VAEEncoder.mlmodelc/weights/weight.bin +3 -0
- instruct_pix2pix/Resources/merges.txt +0 -0
- instruct_pix2pix/Resources/vocab.json +0 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage/Manifest.json +18 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage/Manifest.json +18 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage/Manifest.json +18 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage/Manifest.json +18 -0
instruct_pix2pix/Resources/TextEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de7688e74b9b3ed9f537ad428f3475fca55c944266269736ae94c8871b9bb081
+size 243
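Like the other binary files in this commit, coremldata.bin is stored as a Git LFS pointer: three lines giving the pointer spec version, a sha256 object id, and the payload size in bytes. As an illustrative sketch (not part of this commit), such a pointer can be parsed with a few lines of Python; the path below is simply the file shown above and assumes the repo was cloned without `git lfs pull`, so the on-disk file is still the pointer rather than the real payload:

```python
# Illustrative only: parse a Git LFS pointer file into a dict like
# {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": 243}.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    fields["size"] = int(fields["size"])
    return fields

print(parse_lfs_pointer(
    "instruct_pix2pix/Resources/TextEncoder.mlmodelc/analytics/coremldata.bin"))
```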
instruct_pix2pix/Resources/TextEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0256603dd4f30fc696ea8de7cc930cb54dc83c5bf16dc72427fbea8e1914f3fe
+size 887
instruct_pix2pix/Resources/TextEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,84 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 77 × 768)",
+        "shortDescription" : "The token embeddings as encoded by the Transformer model",
+        "shape" : "[1, 77, 768]",
+        "name" : "last_hidden_state",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 768)",
+        "shortDescription" : "The version of the `last_hidden_state` output after pooling",
+        "shape" : "[1, 768]",
+        "name" : "pooled_outputs",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "timbrooks\/instruct-pix2pix",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/timbrooks\/instruct-pix2pix",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Ios16.cast" : 3,
+      "Ios16.mul" : 36,
+      "Ios16.layerNorm" : 25,
+      "Stack" : 1,
+      "Transpose" : 48,
+      "Ios16.sigmoid" : 12,
+      "Ios16.linear" : 72,
+      "Ios16.add" : 37,
+      "Ios16.matmul" : 24,
+      "Ios16.softmax" : 12,
+      "Ios16.gatherNd" : 1,
+      "Ios16.gather" : 1,
+      "Ios16.reshape" : 120,
+      "Ios16.reduceArgmax" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 77)",
+        "shortDescription" : "The token ids that represent the input text",
+        "shape" : "[1, 77]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.version" : "7.2",
+      "com.github.apple.coremltools.source" : "torch==2.2.0"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_timbrooks_instruct_pix2pix_text_encoder",
+    "method" : "predict"
+  }
+]
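Per the schema above, the text encoder takes `input_ids` of shape [1, 77] (Float32) and returns `last_hidden_state` [1, 77, 768] plus `pooled_outputs` [1, 768]. A minimal coremltools sketch for exercising that contract on macOS 13+ might look like the following; the tokenizer step is omitted and a dummy id sequence is used instead (an assumption for illustration, not part of this upload):

```python
import coremltools as ct
import numpy as np

# Load the uncompiled .mlpackage from this upload (relative path assumed).
text_encoder = ct.models.MLModel(
    "instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage"
)

# Dummy prompt: BOS token followed by EOS/pad ids. A real pipeline would
# produce these with the CLIP tokenizer using merges.txt / vocab.json
# from Resources/.
input_ids = np.zeros((1, 77), dtype=np.float32)
input_ids[0, 0] = 49406   # CLIP BOS id
input_ids[0, 1:] = 49407  # CLIP EOS / pad id

outputs = text_encoder.predict({"input_ids": input_ids})
print(outputs["last_hidden_state"].shape)  # (1, 77, 768)
print(outputs["pooled_outputs"].shape)     # (1, 768)
```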
instruct_pix2pix/Resources/TextEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.
instruct_pix2pix/Resources/TextEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1ce471b7de94fedc369b5e12332e452ac9f18a30a8a309a5818afe3312a1e3b
+size 246145536
instruct_pix2pix/Resources/Unet.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdfd07100b67fed8bff118e981b8764f416ac3941c0722f4ba3471ec50d9ced7
+size 243
instruct_pix2pix/Resources/Unet.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:975107912287f2e7c9b64db85a3862b45d0bb5ef3ed4406b7e53a654a076fbb4
+size 1320
instruct_pix2pix/Resources/Unet.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,105 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 2 × 4 × 64 × 64)",
+        "shortDescription" : "Same shape and dtype as the `sample` input. The predicted noise to facilitate the reverse diffusion (denoising) process",
+        "shape" : "[2, 4, 64, 64]",
+        "name" : "noise_pred",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "timbrooks\/instruct-pix2pix",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/timbrooks\/instruct-pix2pix",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Transpose" : 32,
+      "UpsampleNearestNeighbor" : 3,
+      "Ios16.reduceMean" : 122,
+      "Ios16.sin" : 1,
+      "Ios16.softmax" : 896,
+      "Split" : 16,
+      "Ios16.add" : 169,
+      "Concat" : 206,
+      "Ios16.realDiv" : 61,
+      "Ios16.square" : 61,
+      "ExpandDims" : 3,
+      "Ios16.sub" : 61,
+      "Ios16.cast" : 1,
+      "Ios16.conv" : 282,
+      "Ios16.einsum" : 1792,
+      "Ios16.gelu" : 16,
+      "Ios16.layerNorm" : 48,
+      "Ios16.batchNorm" : 61,
+      "Ios16.reshape" : 154,
+      "Ios16.silu" : 47,
+      "Ios16.sqrt" : 61,
+      "SliceByIndex" : 1570,
+      "Ios16.mul" : 913,
+      "Ios16.cos" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 8 × 64 × 64)",
+        "shortDescription" : "The low resolution latent feature maps being denoised through reverse diffusion",
+        "shape" : "[2, 8, 64, 64]",
+        "name" : "sample",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2)",
+        "shortDescription" : "A value emitted by the associated scheduler object to condition the model on a given noise schedule",
+        "shape" : "[2]",
+        "name" : "timestep",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 768 × 1 × 77)",
+        "shortDescription" : "Output embeddings from the associated text_encoder model to condition to generated image on text. A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. Shorter text does not reduce computation.",
+        "shape" : "[2, 768, 1, 77]",
+        "name" : "encoder_hidden_states",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.ml-stable-diffusion.version" : "1.1.0",
+      "com.github.apple.coremltools.source" : "torch==2.2.0",
+      "com.github.apple.coremltools.version" : "7.2",
+      "com.github.apple.coremltools.source_dialect" : "TorchScript"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_timbrooks_instruct_pix2pix_unet",
+    "method" : "predict"
+  }
+]
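The UNet schema above expects a batch of 2 with an 8-channel `sample` (for InstructPix2Pix the noisy latent channels are concatenated with the encoded input-image latents), a `timestep` of shape [2], and `encoder_hidden_states` of shape [2, 768, 1, 77]. A hedged sketch of a single denoising call with random stand-in tensors (shapes and names taken from the schema, values meaningless, path assumed):

```python
import coremltools as ct
import numpy as np

unet = ct.models.MLModel(
    "instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage"
)

# Random stand-ins with the shapes/dtypes declared in metadata.json.
sample = np.random.randn(2, 8, 64, 64).astype(np.float16)
timestep = np.array([981, 981], dtype=np.float16)
encoder_hidden_states = np.random.randn(2, 768, 1, 77).astype(np.float16)

noise_pred = unet.predict({
    "sample": sample,
    "timestep": timestep,
    "encoder_hidden_states": encoder_hidden_states,
})["noise_pred"]
print(noise_pred.shape)  # (2, 4, 64, 64)
```

In a real pipeline this call sits inside the scheduler loop, with `timestep` supplied by the scheduler and `encoder_hidden_states` coming from the text encoder above.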
instruct_pix2pix/Resources/Unet.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.
instruct_pix2pix/Resources/Unet.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9d1b0bba308dacbed05d740bcf00d00c1c2ce47a7dd069e5e78c3ed74e9ceb5
+size 1719140736
instruct_pix2pix/Resources/VAEDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8d02fda30d4948e8f512e2ee0cf498b5b6df7db1404df55f693db0a4f356bff
+size 243
instruct_pix2pix/Resources/VAEDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e8f97979b4cc07496fd855d437d56cd59c5adc04c2af28c60773eca8825092e
+size 814
instruct_pix2pix/Resources/VAEDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,77 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 3 × 512 × 512)",
+        "shortDescription" : "Generated image normalized to range [-1, 1]",
+        "shape" : "[1, 3, 512, 512]",
+        "name" : "image",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "timbrooks\/instruct-pix2pix",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/timbrooks\/instruct-pix2pix",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Ios16.cast" : 1,
+      "Ios16.mul" : 2,
+      "Ios16.sqrt" : 30,
+      "Ios16.sub" : 30,
+      "Transpose" : 6,
+      "UpsampleNearestNeighbor" : 3,
+      "Ios16.conv" : 36,
+      "Ios16.add" : 46,
+      "Ios16.linear" : 4,
+      "Ios16.matmul" : 2,
+      "Ios16.realDiv" : 30,
+      "Ios16.reduceMean" : 60,
+      "Ios16.softmax" : 1,
+      "Ios16.batchNorm" : 29,
+      "Ios16.square" : 30,
+      "Ios16.reshape" : 65,
+      "Ios16.silu" : 29
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 4 × 64 × 64)",
+        "shortDescription" : "The denoised latent embeddings from the unet model after the last step of reverse diffusion",
+        "shape" : "[1, 4, 64, 64]",
+        "name" : "z",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.2.0",
+      "com.github.apple.coremltools.version" : "7.2"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_timbrooks_instruct_pix2pix_vae_decoder",
+    "method" : "predict"
+  }
+]
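The decoder maps a single [1, 4, 64, 64] latent `z` back to a [1, 3, 512, 512] image in the [-1, 1] range, so turning its output into an 8-bit image is a one-line rescale. An illustrative sketch (the path and the final uint8 conversion are my assumptions, not taken from this commit):

```python
import coremltools as ct
import numpy as np

vae_decoder = ct.models.MLModel(
    "instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage"
)

# `z` would normally be the final denoised latent from the UNet loop;
# a random tensor is used here only to exercise the declared shapes.
z = np.random.randn(1, 4, 64, 64).astype(np.float16)
image = vae_decoder.predict({"z": z})["image"]  # (1, 3, 512, 512), values in [-1, 1]

# Map [-1, 1] -> [0, 255] uint8, channel-last, for saving or display.
rgb = ((image[0].transpose(1, 2, 0) + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
print(rgb.shape)  # (512, 512, 3)
```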
instruct_pix2pix/Resources/VAEDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.
instruct_pix2pix/Resources/VAEDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27f7d0987e01a1f44d8ab5afd8d47761b4a34379eddcf01239bfd737d0236509
+size 98993280
instruct_pix2pix/Resources/VAEEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acae2034a37f207e277feb0668f3a6b4ba215706379457fd2f3386f396e494f7
+size 243
instruct_pix2pix/Resources/VAEEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f9cf6eeee65bdfdbbaffd1186c92336309cde0cb09e48a729f650f37dadc09f
+size 818
instruct_pix2pix/Resources/VAEEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,77 @@
+[
+  {
+    "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 8 × 64 × 64)",
+        "shortDescription" : "The latent embeddings from the unet model from the input image.",
+        "shape" : "[1, 8, 64, 64]",
+        "name" : "latent",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "timbrooks\/instruct-pix2pix",
+    "modelParameters" : [
+
+    ],
+    "author" : "Please refer to the Model Card available at huggingface.co\/timbrooks\/instruct-pix2pix",
+    "specificationVersion" : 7,
+    "storagePrecision" : "Float16",
+    "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
+    "mlProgramOperationTypeHistogram" : {
+      "Pad" : 3,
+      "Ios16.cast" : 1,
+      "Ios16.mul" : 2,
+      "Ios16.sqrt" : 22,
+      "Ios16.sub" : 22,
+      "Transpose" : 6,
+      "Ios16.conv" : 28,
+      "Ios16.add" : 34,
+      "Ios16.linear" : 4,
+      "Ios16.matmul" : 2,
+      "Ios16.realDiv" : 22,
+      "Ios16.reduceMean" : 44,
+      "Ios16.softmax" : 1,
+      "Ios16.batchNorm" : 21,
+      "Ios16.square" : 22,
+      "Ios16.reshape" : 49,
+      "Ios16.silu" : 21
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 512 × 512)",
+        "shortDescription" : "The input image to base the initial latents on normalized to range [-1, 1]",
+        "shape" : "[1, 3, 512, 512]",
+        "name" : "x",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.2.0",
+      "com.github.apple.coremltools.version" : "7.2"
+    },
+    "generatedClassName" : "Stable_Diffusion_version_timbrooks_instruct_pix2pix_vae_encoder",
+    "method" : "predict"
+  }
+]
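The encoder goes the other way: an input image `x` of shape [1, 3, 512, 512], normalized to [-1, 1], produces an 8-channel [1, 8, 64, 64] `latent` tensor for the UNet's image-conditioning channels. A sketch with the normalization step written out and a random image standing in for a real one (path and preprocessing are assumptions for illustration):

```python
import coremltools as ct
import numpy as np

vae_encoder = ct.models.MLModel(
    "instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage"
)

# Random RGB stand-in; a real image would be resized to 512x512 and scaled
# to [0, 1] before being mapped to [-1, 1] as the input description requires.
img01 = np.random.rand(1, 3, 512, 512)
x = (img01 * 2.0 - 1.0).astype(np.float16)

latent = vae_encoder.predict({"x": x})["latent"]
print(latent.shape)  # (1, 8, 64, 64)
```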
instruct_pix2pix/Resources/VAEEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.
instruct_pix2pix/Resources/VAEEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01d11125a1ba31ba6531f60fa74faee46928aa15284cb4465205e6c31bb5801c
+size 68338112
instruct_pix2pix/Resources/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
instruct_pix2pix/Resources/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ae0d8c1abeb046cd0b7e4df11991811bde818ea0ae3a7d2d9f3e82fb61680d
+size 163300
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1ce471b7de94fedc369b5e12332e452ac9f18a30a8a309a5818afe3312a1e3b
+size 246145536
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "D0DAB02F-226F-4609-98E1-0409129CD233": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        },
+        "DAE9CCC7-E517-4AD6-9841-C49B9F66CEA1": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        }
+    },
+    "rootModelIdentifier": "DAE9CCC7-E517-4AD6-9841-C49B9F66CEA1"
+}
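Each .mlpackage Manifest.json is a small index: `itemInfoEntries` maps UUIDs to the items stored under Data/, and `rootModelIdentifier` names the entry that is the model specification itself. A quick, illustrative way to resolve the root spec path (using the text-encoder package from this upload as the example):

```python
import json
from pathlib import Path

pkg = Path(
    "instruct_pix2pix/"
    "Stable_Diffusion_version_timbrooks_instruct-pix2pix_text_encoder.mlpackage"
)
manifest = json.loads((pkg / "Manifest.json").read_text())

# Follow rootModelIdentifier to the entry describing model.mlmodel.
root_entry = manifest["itemInfoEntries"][manifest["rootModelIdentifier"]]
spec_path = pkg / "Data" / root_entry["path"]
print(root_entry["name"], "->", spec_path)
```

The three remaining manifests below follow the same structure, differing only in their UUIDs.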
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d1f041b5f1a704f1bab882b53fbfef956450ac7db4c3c72db8e063b610c87d5
+size 2641580
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9d1b0bba308dacbed05d740bcf00d00c1c2ce47a7dd069e5e78c3ed74e9ceb5
+size 1719140736
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_unet.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "1338721F-80E9-47A4-854B-BAB2BA2CD95D": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "3371F923-4983-43F2-82AA-88F37B4D5A48": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "1338721F-80E9-47A4-854B-BAB2BA2CD95D"
+}
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07c421625537597168ec8074f87888c615cf6d944b886ba6c5195351ff678a96
+size 161897
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27f7d0987e01a1f44d8ab5afd8d47761b4a34379eddcf01239bfd737d0236509
+size 98993280
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_decoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "B14F4E05-8A81-4952-BE0E-D8F8E4CC2CE5": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        },
+        "DF00656B-5B35-4BC6-9599-A5E4BC8784AB": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        }
+    },
+    "rootModelIdentifier": "DF00656B-5B35-4BC6-9599-A5E4BC8784AB"
+}
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63b48a46531a685332b0fbcf5de32f5121fba71620e42ba5f7d1aa1c8a8ff8aa
+size 123918
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01d11125a1ba31ba6531f60fa74faee46928aa15284cb4465205e6c31bb5801c
+size 68338112
instruct_pix2pix/Stable_Diffusion_version_timbrooks_instruct-pix2pix_vae_encoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "5C7B6E51-814D-4ECC-B75E-2B13FDD4F902": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "CC69BF93-DE16-4BB8-955B-372F3719535F": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "5C7B6E51-814D-4ECC-B75E-2B13FDD4F902"
+}