
This is a fine-tuned version of patrickjohncyh/fashion-clip, trained on a dataset of fashion product pattern images.

Dataset link: https://huggingface.co/datasets/yainage90/fashion-pattern-images

```python
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

ckpt = "yainage90/fashion-pattern-clip"

processor = CLIPProcessor.from_pretrained(ckpt)
model = CLIPModel.from_pretrained(ckpt)

image = Image.open("<path/to/image>")

# Pattern labels the model was fine-tuned on.
labels = [
    "gradient",
    "snow_flake",
    "camouflage",
    "dot",
    "zebra",
    "leopard",
    "lettering",
    "snake_skin",
    "geometric",
    "muji",
    "floral",
    "zigzag",
    "graphic",
    "paisley",
    "tropical",
    "checked",
    "houndstooth",
    "argyle",
    "stripe",
]

# Encode the image and all candidate labels in a single batch.
inputs = processor(text=labels, images=image, padding=True, return_tensors="pt")
outputs = model(**inputs)

# Image-text similarity logits -> probabilities over the label set.
probs = outputs.logits_per_image.softmax(dim=-1).squeeze()
sorted_indices = probs.argsort(dim=-1, descending=True)

# Print the labels from most to least likely.
for i in sorted_indices.tolist():
    pattern = labels[i]
    prob = probs[i]
    print(f"{pattern}: {prob:.3f}")
```