# Hugging Face Spaces app (page header "Spaces: Sleeping" from the scraped listing removed)
import os
import re
import shutil

import gradio as gr
import pandas as pd
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
# Load the BLIP image-captioning model once at import time.
# NOTE(review): no .to(device) call — the model stays on the default device.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
# Keyword vocabularies used to tag BLIP captions.
# All entries are lowercase single words; matching is done against caption text.
shirt_keywords = ["shirt", "hoodie", "sweatshirt", "jacket", "tee", "top"]
bottom_keywords = ["pants", "shorts", "jeans", "skirt", "trousers", "bottom"]
boy_keywords = ["boy", "boys", "male", "mens"]
girl_keywords = ["girl", "girls", "female", "womens"]
color_keywords = [
    "black", "white", "red", "orange", "yellow", "green", "blue", "purple",
    "brown", "gray", "pink", "beige", "tan", "navy", "olive", "teal", "burgundy",
    "lavender", "peach", "mint", "charcoal", "gold", "silver",
]
def tag_and_sort_images(images):
    """Caption uploaded images with BLIP, tag them, sort them, and zip the result.

    For each uploaded file: copy it into uploaded_images/, caption it with the
    module-level BLIP ``processor``/``model``, then derive garment type, gender
    and color tags from the caption. Tags are written to
    ``blip2_image_tags_clean.csv`` and each image is copied into
    ``sorted_images/<garment>/<gender>/<color>/``.

    Args:
        images: list of uploaded file objects, each exposing a ``.name`` path
            (as supplied by ``gr.File(file_count="multiple")``).

    Returns:
        str: path to the ``sorted_images.zip`` archive of the sorted tree.
    """
    os.makedirs("uploaded_images", exist_ok=True)
    os.makedirs("sorted_images", exist_ok=True)

    results = []
    for image in images:
        image_path = os.path.join("uploaded_images", os.path.basename(image.name))
        shutil.copy2(image.name, image_path)

        raw_image = Image.open(image_path).convert("RGB")
        inputs = processor(raw_image, return_tensors="pt")
        out = model.generate(**inputs)
        caption = processor.decode(out[0], skip_special_tokens=True)

        results.append({
            "filename": os.path.basename(image.name),
            "caption": caption,
        })

    # Explicit columns so an empty upload list still yields the expected
    # schema (df[["filename", ...]] below would otherwise raise KeyError).
    df = pd.DataFrame(results, columns=["filename", "caption"])

    def caption_words(text):
        # Tokenize into whole lowercase words. Whole-word matching avoids the
        # substring false positives of `k in text` (e.g. "mens" inside
        # "womens", "tan" inside "tank", "top" inside "laptop").
        return set(re.findall(r"[a-z]+", text.lower()))

    def matches(words, keywords):
        # Whole-word match, also accepting a simple trailing-"s" plural so
        # e.g. "shirts" still matches the keyword "shirt".
        return any(
            w in keywords or (w.endswith("s") and w[:-1] in keywords)
            for w in words
        )

    def detect_garment_type(text):
        words = caption_words(text)
        if matches(words, shirt_keywords):
            return "Shirt"
        if matches(words, bottom_keywords):
            return "Bottom"
        return "Unknown"

    def detect_gender(text):
        words = caption_words(text)
        if matches(words, boy_keywords):
            return "Boys"
        if matches(words, girl_keywords):
            return "Girls"
        return "Unisex"

    def extract_colors(text):
        words = caption_words(text)
        # color_keywords is already unique, so iterating it both dedups and
        # gives a stable base before sorting.
        return ", ".join(sorted(c for c in color_keywords if c in words))

    df["garment_type"] = df["caption"].apply(detect_garment_type)
    df["gender"] = df["caption"].apply(detect_gender)
    df["color_group"] = df["caption"].apply(extract_colors)
    df = df[["filename", "caption", "garment_type", "gender", "color_group"]]
    df.to_csv("blip2_image_tags_clean.csv", index=False)

    # Copy each tagged image into sorted_images/<garment>/<gender>/<color>/.
    for _, row in df.iterrows():
        garment = row["garment_type"] or "Unsorted"
        gender = row["gender"] or "Unsorted"
        color = row["color_group"] or "Unknown"
        target_dir = os.path.join("sorted_images", garment, gender, color)
        os.makedirs(target_dir, exist_ok=True)
        src_path = os.path.join("uploaded_images", row["filename"])
        if os.path.exists(src_path):
            shutil.copy2(src_path, os.path.join(target_dir, row["filename"]))

    return shutil.make_archive("sorted_images", "zip", "sorted_images")
# Gradio UI: multiple image files in, one zip of the sorted tree out.
demo = gr.Interface(
    fn=tag_and_sort_images,
    inputs=gr.File(file_types=["image"], file_count="multiple", label="Upload JPG or PNG Images"),
    outputs=gr.File(label="Download Sorted ZIP"),
    title="🧠 Auto Tag & Sort Images",
    description="Upload fashion images. AI will tag them (garment, gender, color), sort them into folders, and give you a download link.",
)

demo.launch()