fcakyon commited on
Commit
515afcc
1 Parent(s): 6d3074f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -0
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import open_clip
import torch
from PIL import Image

# Load model and tokenizer (explicitly on CPU; no GPU is assumed at runtime).
model, preprocess = open_clip.create_model_from_pretrained('hf-hub:woweenie/open-clip-vit-h-nsfw-finetune', device='cpu')
tokenizer = open_clip.get_tokenizer('hf-hub:woweenie/open-clip-vit-h-nsfw-finetune')

# Define labels for each classification axis.
# NOTE: each list must be duplicate-free — duplicate prompts split softmax
# probability mass across identical labels, and the result dict comprehension
# in process_image would silently drop one of the colliding entries.
type_labels = ['2.5d render', '3d render', 'photograph', 'anime drawing', 'drawing', 'illustration', 'painting', 'pre-raphaelite painting', 'concept artwork', 'screenshot']
scene_labels = ['in an airport', 'in the bath', 'on a bed', 'in bed', 'in a bedroom', 'at the beach', 'on a boat', 'in a tent', 'in a car', 'on a chair', 'in the city', 'in a dressing room', 'on the floor', 'at the gym', 'in a hotel room', 'in a kitchen', 'in a living room', 'in an office', 'by a harbor', 'on a bench', 'in a park', 'by a piano', 'on a forest road', 'in a forest', 'in a garden', 'at a lake', 'on the grass', 'on the ground', 'on a paved surface', 'outdoors, on a rock', 'outdoors, on a rug', 'outdoors, on a towel', 'in a photo studio', 'at the pool', 'at a river', 'on a road', 'by the sea', 'showering', 'in the shower', 'on a stool', 'on a rug', 'on a rock', 'on a sofa', 'on a table', 'at a table', 'in a store', 'on snow', 'by a waterfall', 'with a water feature', 'on a windowsill']
# Fixed: 'scared' appeared twice in the original list.
expression_labels = ['scared', 'annoyed', 'aroused', 'bored', 'confident', 'distracted', 'dominating', 'embarrassed', 'laughing', 'shy', 'orgasm']
# Fixed: 'a t-shirt' appeared twice in the original list.
clothing_labels = ['a bikini that is too small', 'bikini bottoms', 'a bikini top', 'a bikini', 'a bodysuit', 'a bra', 'a crop top', 'a dress', 'garters', 'glasses', 'goggles', 'gym shorts', 'a halter top', 'a hat', 'a handbra', 'a hoodie', 'a jacket', 'jeans', 'a jumper', 'a gown', 'a lace-up top', 'leggings', 'lingerie', 'a long sleeved top', 'a off-shoulder top', 'a nightgown', 'a coat', 'overalls', 'pink pajamas', 'pajamas', 'panties', 'pantyhose', 'a t-shirt', 'a robe', 'a bathrobe', 'a piece of fabric', 'a scarf', 'a shirt', 'shorts', 'a skirt', 'a sleeveless top', 'a slip', 'sneakers', 'tube socks', 'a sports bra', 'sunglasses', 'sweatpants', 'a one piece swimsuit', 'a tank top', 'a tied shirt', 'a top', 'long pants', 'a wetsuit', 'a backpack', 'high hem', 'see-through', 'short', 'tight', 'visible nipples']
# Prefix every clothing label with 'wearing ' to form full CLIP prompts.
clothing_labels = ['wearing ' + label for label in clothing_labels]
17
def process_image(image):
    """Run zero-shot CLIP classification of *image* against the four label sets.

    Args:
        image: a PIL image (Gradio supplies one via ``gr.Image(type="pil")``).

    Returns:
        A 4-tuple of dicts ``(type, scene, expression, clothing)``, each
        mapping label string -> softmax probability (float in [0, 1]).
    """
    # Preprocess image into a (1, C, H, W) tensor batch.
    image_tensor = preprocess(image).unsqueeze(0)

    # The model is loaded on CPU (see module setup), so CUDA autocast is
    # inapplicable — the original torch.cuda.amp.autocast() context was a
    # no-op on CPU and that entry point is deprecated; no_grad() suffices.
    with torch.no_grad():
        # Encode and L2-normalize the image embedding once, reused below.
        image_features = model.encode_image(image_tensor)
        image_features /= image_features.norm(dim=-1, keepdim=True)

        # Encode each label group, normalize, and take cosine-similarity
        # softmax (scaled by 100.0, the standard CLIP logit scale).
        results = []
        for labels in (type_labels, scene_labels, expression_labels, clothing_labels):
            text_features = model.encode_text(tokenizer(labels))
            text_features /= text_features.norm(dim=-1, keepdim=True)
            probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
            results.append({label: float(probs[0][i]) for i, label in enumerate(labels)})

    type_results, scene_results, expression_results, clothing_results = results
    return type_results, scene_results, expression_results, clothing_results
56
# Build the Gradio UI: one image input, four ranked-label panels (top 3 each).
_categories = ("Type", "Scene", "Expression", "Clothing")

iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[
        gr.Label(label=f"{category} Classification", num_top_classes=3)
        for category in _categories
    ],
    title="Image Content Moderation",
    description="Upload an image to analyze its content across multiple categories.",
)

if __name__ == "__main__":
    # Start the local Gradio server when run as a script.
    iface.launch()