[
{
"shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.",
"metadataOutputVersion" : "3.0",
"outputSchema" : [
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float32",
"formattedType" : "MultiArray (Float32)",
"shortDescription" : "Identical to the input `images`. If safety checker detected any sensitive content, the corresponding image is replaced with a blank image (zeros)",
"shape" : "[]",
"name" : "filtered_images",
"type" : "MultiArray"
},
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float32",
"formattedType" : "MultiArray (Float32)",
"shortDescription" : "Indicates whether the safety checker model found any sensitive content in the given image",
"shape" : "[]",
"name" : "has_nsfw_concepts",
"type" : "MultiArray"
},
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float32",
"formattedType" : "MultiArray (Float32)",
"shortDescription" : "Concept scores are the scores before thresholding at zero yields the `has_nsfw_concepts` output. These scores can be used to tune the `adjustment` input",
"shape" : "[]",
"name" : "concept_scores",
"type" : "MultiArray"
}
],
"version" : "CompVis\/stable-diffusion-v1-4",
"modelParameters" : [
],
"author" : "Please refer to the Model Card available at huggingface.co\/CompVis\/stable-diffusion-v1-4",
"specificationVersion" : 7,
"storagePrecision" : "Float16",
"license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)",
"mlProgramOperationTypeHistogram" : {
"Transpose" : 121,
"Ios16.scatterNd" : 1,
"Ios16.softmax" : 24,
"Ios16.linear" : 147,
"Ios16.add" : 51,
"Concat" : 1,
"Ios16.realDiv" : 1,
"Ios16.sigmoid" : 24,
"Ios16.reduceSum" : 3,
"Tile" : 4,
"Ios16.greater" : 4,
"Shape" : 1,
"ExpandDims" : 5,
"Ios16.cast" : 9,
"Ios16.clip" : 1,
"Ios16.conv" : 1,
"Ios16.abs" : 1,
"Ios16.layerNorm" : 50,
"Ios16.matmul" : 48,
"Ios16.pow" : 2,
"Ios16.reshape" : 193,
"SliceByIndex" : 2,
"Ios16.mul" : 73,
"NonZero" : 1
},
"computePrecision" : "Mixed (Float32, Float16, Int32)",
"isUpdatable" : "0",
"availability" : {
"macOS" : "13.0",
"tvOS" : "16.0",
"watchOS" : "9.0",
"iOS" : "16.0",
"macCatalyst" : "16.0"
},
"modelType" : {
"name" : "MLModelType_mlProgram"
},
"inputSchema" : [
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float16",
"formattedType" : "MultiArray (Float16 1 × 3 × 224 × 224)",
"shortDescription" : "The normalized image input tensor resized to (224x224) in channels-first (BCHW) format",
"shape" : "[1, 3, 224, 224]",
"name" : "clip_input",
"type" : "MultiArray"
},
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float16",
"formattedType" : "MultiArray (Float16 1 × 512 × 512 × 3)",
"shortDescription" : "Output of the vae_decoder (512x512) in channels-last (BHWC) format",
"shape" : "[1, 512, 512, 3]",
"name" : "images",
"type" : "MultiArray"
},
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float16",
"formattedType" : "MultiArray (Float16 1)",
"shortDescription" : "Bias added to the concept scores to trade off increased recall for reduce precision in the safety checker classifier",
"shape" : "[1]",
"name" : "adjustment",
"type" : "MultiArray"
}
],
"userDefinedMetadata" : {
"com.github.apple.coremltools.version" : "6.1",
"com.github.apple.coremltools.source" : "torch==1.13.0"
},
"generatedClassName" : "Stable_Diffusion_version_CompVis_stable_diffusion_v1_4_safety_checker",
"method" : "predict"
}
]
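
For reference, a minimal sketch of how a model with this input/output schema could be invoked from Python with coremltools, using the names documented above (clip_input, images, adjustment; filtered_images, has_nsfw_concepts, concept_scores). The package path and the placeholder tensors are assumptions for illustration, not part of this metadata; real code would feed the vae_decoder output and a properly resized, CLIP-normalized copy of it.

import numpy as np
import coremltools as ct

# Assumption: the compiled safety checker is saved at this path (not specified in the metadata).
model = ct.models.MLModel("safety_checker.mlpackage")

# "images": vae_decoder output, shape (1, 512, 512, 3), channels-last (BHWC).
# A random placeholder stands in for a real decoded image here.
images = np.random.rand(1, 512, 512, 3).astype(np.float16)

# "clip_input": the same image resized to 224x224, normalized, channels-first (BCHW).
# Real code would resize `images` and apply CLIP normalization; a placeholder is used here.
clip_input = np.random.rand(1, 3, 224, 224).astype(np.float16)

# "adjustment": bias added to the concept scores; 0.0 keeps the default threshold.
adjustment = np.zeros((1,), dtype=np.float16)

outputs = model.predict({
    "clip_input": clip_input,
    "images": images,
    "adjustment": adjustment,
})

print(outputs["has_nsfw_concepts"])    # per-image sensitive-content flags
print(outputs["concept_scores"])       # raw scores, useful for tuning `adjustment`
filtered = outputs["filtered_images"]  # input images, with flagged ones replaced by zeros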