yfyangd committed
Commit faf1772 · 1 Parent(s): e6e530b

Update app.py

Files changed (1)
  1. app.py +117 -4
app.py CHANGED
@@ -25,8 +25,121 @@ blip_model = blip_decoder(pretrained=blip_model_url, image_size=blip_image_eval_
 blip_model.eval()
 blip_model = blip_model.to(device)
 
-def greet(name):
-    return "hi " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch(share=True)
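+# Generate a caption for the input image with the BLIP model loaded above.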
+def generate_caption(pil_image):
+    gpu_image = transforms.Compose([
+        transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
+        transforms.ToTensor(),
+        transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
+    ])(pil_image).unsqueeze(0).to(device)
+
+    with torch.no_grad():
+        caption = blip_model.generate(gpu_image, sample=False, num_beams=3, max_length=20, min_length=5)
+    return caption[0]
+
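+# Read a newline-separated term list from a text file.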
+def load_list(filename):
+    with open(filename, 'r', encoding='utf-8', errors='replace') as f:
+        items = [line.strip() for line in f.readlines()]
+    return items
+
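+# Score a list of text terms against the image features with CLIP; return the top matches with confidences.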
+def rank(model, image_features, text_array, top_count=1):
+    top_count = min(top_count, len(text_array))
+    text_tokens = clip.tokenize([text for text in text_array]).cuda()
+    with torch.no_grad():
+        text_features = model.encode_text(text_tokens).float()
+    text_features /= text_features.norm(dim=-1, keepdim=True)
+
+    similarity = torch.zeros((1, len(text_array))).to(device)
+    for i in range(image_features.shape[0]):
+        similarity += (100.0 * image_features[i].unsqueeze(0) @ text_features.T).softmax(dim=-1)
+    similarity /= image_features.shape[0]
+
+    top_probs, top_labels = similarity.cpu().topk(top_count, dim=-1)
+    return [(text_array[top_labels[0][i].numpy()], (top_probs[0][i].numpy()*100)) for i in range(top_count)]
+
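+# Build the final description: BLIP caption plus the best-matching medium, artist, site, movement and flavor terms.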
+def interrogate(cover):
+    image = Image.fromarray(cover)
+    #image = cover
+    models = models1
+    #caption = generate_caption(Image.fromarray(cover))
+    caption = generate_caption(image)
+    if len(models) == 0:
+        #print(f"\n\n{caption}")
+        return
+
+    table = []
+    bests = [[('',0)]]*5
+    for model_name in models:
+        #print(f"Interrogating with {model_name}...")
+        model, preprocess = clip.load(model_name)
+        model.cuda().eval()
+
+        images = preprocess(image).unsqueeze(0).cuda()
+        with torch.no_grad():
+            image_features = model.encode_image(images).float()
+        image_features /= image_features.norm(dim=-1, keepdim=True)
+
+        ranks = [
+            rank(model, image_features, mediums),
+            rank(model, image_features, ["by "+artist for artist in artists]),
+            rank(model, image_features, trending_list),
+            rank(model, image_features, movements),
+            rank(model, image_features, flavors, top_count=3)
+        ]
+
+        for i in range(len(ranks)):
+            confidence_sum = 0
+            for ci in range(len(ranks[i])):
+                confidence_sum += ranks[i][ci][1]
+            if confidence_sum > sum(bests[i][t][1] for t in range(len(bests[i]))):
+                bests[i] = ranks[i]
+
+        row = [model_name]
+        for r in ranks:
+            row.append(', '.join([f"{x[0]} ({x[1]:0.1f}%)" for x in r]))
+
+        table.append(row)
+
+        del model
+        gc.collect()
+    #display(pd.DataFrame(table, columns=["Model", "Medium", "Artist", "Trending", "Movement", "Flavors"]))
+
+    flaves = ', '.join([f"{x[0]}" for x in bests[4]])
+    medium = bests[0][0][0]
+    if caption.startswith(medium):
+        return(f"{caption} {bests[1][0][0]}, {bests[2][0][0]}, {bests[3][0][0]}, {flaves}")
+        #print(f"{caption} {bests[3][0][0]}, {flaves}")
+    else:
+        return(f"{caption}, {medium} {bests[1][0][0]}, {bests[2][0][0]}, {bests[3][0][0]}, {flaves}")
+        #print(f"{caption} {bests[3][0][0]}, {flaves}")
+
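+# Term lists bundled with clip-interrogator.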
+data_path = "/clip-interrogator/data/"
+
+artists = load_list(os.path.join(data_path, 'artists.txt'))
+flavors = load_list(os.path.join(data_path, 'flavors.txt'))
+mediums = load_list(os.path.join(data_path, 'mediums.txt'))
+movements = load_list(os.path.join(data_path, 'movements.txt'))
+
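+# Art-sharing sites, expanded with "trending on" / "featured on" / "contest winner" variants.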
+sites = ['Artstation', 'behance', 'cg society', 'cgsociety', 'deviantart', 'dribble', 'flickr', 'instagram', 'pexels', 'pinterest', 'pixabay', 'pixiv', 'polycount', 'reddit', 'shutterstock', 'tumblr', 'unsplash', 'zbrush central']
+trending_list = [site for site in sites]
+trending_list.extend(["trending on "+site for site in sites])
+trending_list.extend(["featured on "+site for site in sites])
+trending_list.extend([site+" contest winner" for site in sites])
+
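+# CLIP model(s) to interrogate with.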
+models1 = ['ViT-B/32']
+
+width = 130
+height = 180
+
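+# Gradio interface: cover image in, predicted description out.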
+cover = gr.inputs.Image(shape=(width, height), label='Upload cover image to classify')
+label = gr.outputs.Label(label='Model prediction')
+
+examples=["00064.jpg","00068.jpg", "00069.jpg"]
+#gr.Interface(fn=interrogate,inputs=[gr.Image()],output_label,examples=examples).launch()
+#gr.Interface(fn=interrogate,inputs=gr.Image(),outputs=gr.outputs.Label(),examples=examples).launch()
+gr.Interface(fn=interrogate,inputs=cover,outputs=label,examples=examples).launch(share=True)
+
+#def greet(name):
+#    return "hi " + name + "!!"
+
+#iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+#iface.launch(share=True)