sonoisa committed
Commit 3e88ed5
1 Parent(s): 4725c68

Add app.py

Files changed (1)
  1. app.py +371 -0
app.py ADDED
@@ -0,0 +1,371 @@
+ from __future__ import unicode_literals
+ import os
+ import re
+ import unicodedata
+ import torch
+ from torch import nn
+ import streamlit as st
+ import pandas as pd
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ import numpy as np
+ import scipy.spatial
+ import pyminizip
+ import transformers  # used below via transformers.CLIPVisionModel / CLIPFeatureExtractor
+ from transformers import AutoModel, AutoTokenizer
+ from huggingface_hub import hf_hub_download, snapshot_download
+ from PIL import Image
+
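+ # Streamlit app: zero-shot search over "Irasutoya" illustrations.
+ # The query sentence is normalized, embedded with a Japanese CLIP text encoder
+ # (sonoisa/clip-vit-b-32-japanese-v1), and ranked against precomputed image
+ # vectors by cosine distance; the closest illustrations are listed with links.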
+
+ def unicode_normalize(cls, s):
+     pt = re.compile("([{}]+)".format(cls))
+
+     def norm(c):
+         return unicodedata.normalize("NFKC", c) if pt.match(c) else c
+
+     s = "".join(norm(x) for x in re.split(pt, s))
+     s = re.sub("－", "-", s)
+     return s
+
+
+ def remove_extra_spaces(s):
+     s = re.sub("[ 　]+", " ", s)
+     blocks = "".join(
+         (
+             "\u4E00-\u9FFF",  # CJK UNIFIED IDEOGRAPHS
+             "\u3040-\u309F",  # HIRAGANA
+             "\u30A0-\u30FF",  # KATAKANA
+             "\u3000-\u303F",  # CJK SYMBOLS AND PUNCTUATION
+             "\uFF00-\uFFEF",  # HALFWIDTH AND FULLWIDTH FORMS
+         )
+     )
+     basic_latin = "\u0000-\u007F"
+
+     def remove_space_between(cls1, cls2, s):
+         p = re.compile("([{}]) ([{}])".format(cls1, cls2))
+         while p.search(s):
+             s = p.sub(r"\1\2", s)
+         return s
+
+     s = remove_space_between(blocks, blocks, s)
+     s = remove_space_between(blocks, basic_latin, s)
+     s = remove_space_between(basic_latin, blocks, s)
+     return s
+
+
+ def normalize_neologd(s):
+     s = s.strip()
+     s = unicode_normalize("０-９Ａ-Ｚａ-ｚ｡-ﾟ", s)
+
+     def maketrans(f, t):
+         return {ord(x): ord(y) for x, y in zip(f, t)}
+
+     s = re.sub("[˗֊‐‑‒–⁃⁻₋−]+", "-", s)  # normalize hyphens
+     s = re.sub("[﹣－ｰ—―─━ー]+", "ー", s)  # normalize choonpus
+     s = re.sub("[~∼∾〜〰～]+", "〜", s)  # normalize tildes (modified by Isao Sonobe)
+     s = s.translate(
+         maketrans(
+             "!\"#$%&'()*+,-./:;<=>?@[¥]^_`{|}~。、・「」",
+             "！”＃＄％＆’（）＊＋，－．／：；＜＝＞？＠［￥］＾＿｀｛｜｝〜。、・「」",
+         )
+     )
+
+     s = remove_extra_spaces(s)
+     s = unicode_normalize("！”＃＄％＆’（）＊＋，－．／：；＜＞？＠［￥］＾＿｀｛｜｝〜", s)  # keep ＝,・,「,」
+     s = re.sub("[’]", "'", s)
+     s = re.sub("[”]", '"', s)
+     s = s.lower()
+     return s
+
+
+ def normalize_text(text):
+     return normalize_neologd(text)
+
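+ # Illustrative example of the normalization above: full-width alphanumerics are
+ # folded to ASCII, spaces are collapsed, and the result is lower-cased, e.g.
+ #   normalize_neologd("ＡＢＣ　ｄｅｆ")  ->  "abc def"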
+
+
+ class ClipTextModel(nn.Module):
+     def __init__(self, model_name_or_path, device=None):
+         super(ClipTextModel, self).__init__()
+
+         if os.path.exists(model_name_or_path):
+             # load from file system
+             output_linear_state_dict = torch.load(os.path.join(model_name_or_path, "output_linear.bin"))
+         else:
+             # download from the Hugging Face model hub
+             filename = hf_hub_download(repo_id=model_name_or_path, filename="output_linear.bin")
+             output_linear_state_dict = torch.load(filename)
+
+         self.model = AutoModel.from_pretrained(model_name_or_path)
+         config = self.model.config
+
+         self.max_cls_depth = 6
+
+         sentence_vector_size = output_linear_state_dict["bias"].shape[0]
+         self.sentence_vector_size = sentence_vector_size
+         self.output_linear = nn.Linear(self.max_cls_depth * config.hidden_size, sentence_vector_size)
+         # self.output_linear = nn.Linear(3 * config.hidden_size, sentence_vector_size)
+         self.output_linear.load_state_dict(output_linear_state_dict)
+
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,
+                                                        is_fast=True, do_lower_case=True)
+
+         self.eval()
+
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+         self.device = torch.device(device)
+         self.to(self.device)
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         token_type_ids=None,
+     ):
+         output_states = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=None,
+             head_mask=None,
+             inputs_embeds=None,
+             output_attentions=None,
+             output_hidden_states=True,
+             return_dict=True,
+         )
+         token_embeddings = output_states[0]
+         input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+         hidden_states = output_states["hidden_states"]
+
+         output_vectors = []
+
+         for i in range(1, self.max_cls_depth + 1):
+             cls_token = hidden_states[-1 * i][:, 0]
+             output_vectors.append(cls_token)
+
+         output_vector = torch.cat(output_vectors, dim=1)
+         logits = self.output_linear(output_vector)
+
+         output = (logits,) + output_states[2:]
+         return output
+
+     @torch.no_grad()
+     def encode_text(self, texts, batch_size=8, max_length=64):
+         self.eval()
+         all_embeddings = []
+         iterator = range(0, len(texts), batch_size)
+         for batch_idx in iterator:
+             batch = texts[batch_idx:batch_idx + batch_size]
+
+             encoded_input = self.tokenizer.batch_encode_plus(
+                 batch, max_length=max_length, padding="longest",
+                 truncation=True, return_tensors="pt").to(self.device)
+             model_output = self(**encoded_input)
+             text_embeddings = model_output[0].cpu()
+
+             all_embeddings.extend(text_embeddings)
+
+         # return torch.stack(all_embeddings).numpy()
+         return torch.stack(all_embeddings)
+
+     def save(self, output_dir):
+         self.model.save_pretrained(output_dir)
+         self.tokenizer.save_pretrained(output_dir)
+         torch.save(self.output_linear.state_dict(), os.path.join(output_dir, "output_linear.bin"))
+
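+ # Usage sketch (illustrative; assumes the checkpoint stores output_linear.bin at
+ # the repo root, as the loader above expects):
+ #   text_model = ClipTextModel("sonoisa/clip-vit-b-32-japanese-v1")
+ #   vectors = text_model.encode_text([normalize_text("猫のイラスト")])
+ #   # -> tensor of shape (1, sentence_vector_size)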
+
+ class ClipVisionModel(nn.Module):
+     def __init__(self, model_name_or_path, device=None):
+         super(ClipVisionModel, self).__init__()
+
+         if os.path.exists(model_name_or_path):
+             # load from file system
+             visual_projection_state_dict = torch.load(os.path.join(model_name_or_path, "visual_projection.bin"))
+         else:
+             # download from the Hugging Face model hub
+             filename = hf_hub_download(repo_id=model_name_or_path, filename="visual_projection.bin")
+             visual_projection_state_dict = torch.load(filename)
+
+         self.model = transformers.CLIPVisionModel.from_pretrained(model_name_or_path)
+         config = self.model.config
+
+         self.feature_extractor = transformers.CLIPFeatureExtractor.from_pretrained(model_name_or_path)
+
+         vision_embed_dim = config.hidden_size
+         projection_dim = 512
+
+         self.visual_projection = nn.Linear(vision_embed_dim, projection_dim, bias=False)
+         self.visual_projection.load_state_dict(visual_projection_state_dict)
+
+         self.eval()
+
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+         self.device = torch.device(device)
+         self.to(self.device)
+
+     def forward(
+         self,
+         pixel_values=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None,
+     ):
+         output_states = self.model(
+             pixel_values=pixel_values,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         image_embeds = self.visual_projection(output_states[1])
+
+         return image_embeds
+
+     @torch.no_grad()
+     def encode_image(self, images, batch_size=8):
+         all_embeddings = []
+         iterator = range(0, len(images), batch_size)
+         for batch_idx in iterator:
+             batch = images[batch_idx:batch_idx + batch_size]
+
+             encoded_input = self.feature_extractor(batch, return_tensors="pt").to(self.device)
+             model_output = self(**encoded_input)
+             image_embeddings = model_output.cpu()
+
+             all_embeddings.extend(image_embeddings)
+
+         # return torch.stack(all_embeddings).numpy()
+         return torch.stack(all_embeddings)
+
+     @staticmethod
+     def remove_alpha_channel(image):
+         image.convert("RGBA")
+         alpha = image.convert('RGBA').split()[-1]
+         background = Image.new("RGBA", image.size, (255, 255, 255))
+         background.paste(image, mask=alpha)
+         image = background.convert("RGB")
+         return image
+
+     def save(self, output_dir):
+         self.model.save_pretrained(output_dir)
+         self.feature_extractor.save_pretrained(output_dir)
+         torch.save(self.visual_projection.state_dict(), os.path.join(output_dir, "visual_projection.bin"))
+
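+ # Usage sketch (illustrative, given a ClipVisionModel instance `vision_model`;
+ # "sample.png" is a placeholder filename):
+ #   image = ClipVisionModel.remove_alpha_channel(Image.open("sample.png"))
+ #   vectors = vision_model.encode_image([image])  # -> tensor of shape (1, 512)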
+
+ class ClipModel(nn.Module):
+     def __init__(self, model_name_or_path, device=None):
+         super(ClipModel, self).__init__()
+
+         if os.path.exists(model_name_or_path):
+             # load from file system
+             repo_dir = model_name_or_path
+         else:
+             # download from the Hugging Face model hub
+             repo_dir = snapshot_download(model_name_or_path)
+
+         self.text_model = ClipTextModel(repo_dir, device=device)
+         self.vision_model = ClipVisionModel(os.path.join(repo_dir, "vision_model"), device=device)
+
+         with torch.no_grad():
+             logit_scale = nn.Parameter(torch.ones([]) * 2.6592)
+             logit_scale.set_(torch.load(os.path.join(repo_dir, "logit_scale.bin")).clone().cpu())
+             self.logit_scale = logit_scale
+
+         self.eval()
+
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+         self.device = torch.device(device)
+         self.to(self.device)
+
+     def forward(self, pixel_values, input_ids, attention_mask, token_type_ids):
+         image_features = self.vision_model(pixel_values=pixel_values)
+         text_features = self.text_model(input_ids=input_ids,
+                                         attention_mask=attention_mask,
+                                         token_type_ids=token_type_ids)[0]
+
+         image_features = image_features / image_features.norm(dim=-1, keepdim=True)
+         text_features = text_features / text_features.norm(dim=-1, keepdim=True)
+
+         logit_scale = self.logit_scale.exp()
+         logits_per_image = logit_scale * image_features @ text_features.t()
+         logits_per_text = logits_per_image.t()
+
+         return logits_per_image, logits_per_text
+
+     def save(self, output_dir):
+         torch.save(self.logit_scale, os.path.join(output_dir, "logit_scale.bin"))
+         self.text_model.save(output_dir)
+         self.vision_model.save(os.path.join(output_dir, "vision_model"))
+
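+ # Note: the search below uses the encoders directly. The query is embedded with
+ # text_model.encode_text and compared by cosine distance against the precomputed
+ # "image_vector" column; ClipModel.forward's logits are not used for ranking.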
+
+ def encode_text(text, model):
+     text = normalize_text(text)
+     text_embedding = model.text_model.encode_text([text]).numpy()
+     return text_embedding
+
+
+ def encode_image(image_filename, model):
+     image = Image.open(image_filename)
+     image_embedding = model.vision_model.encode_image([image]).numpy()
+     return image_embedding
+
+
+ st.title("いらすと検索(日本語CLIP ゼロショット)")
+ description_text = st.empty()
+
+ if "model" not in st.session_state:
+     description_text.text("...日本語CLIPモデル読み込み中...")
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     model = ClipModel("sonoisa/clip-vit-b-32-japanese-v1", device=device)
+     st.session_state.model = model
+
+     pyminizip.uncompress(
+         "clip_zeroshot_irasuto_items_20210224.pq.zip", st.secrets["ZIP_PASSWORD"], None, 1
+     )
+
+     df = pq.read_table("clip_zeroshot_irasuto_items_20210224.parquet").to_pandas()
+     st.session_state.df = df
+
+     sentence_vectors = np.stack(df["sentence_vector"])
+     image_vectors = np.stack(df["image_vector"])
+     st.session_state.sentence_vectors = sentence_vectors
+     st.session_state.image_vectors = image_vectors
+
+ model = st.session_state.model
+ df = st.session_state.df
+ sentence_vectors = st.session_state.sentence_vectors
+ image_vectors = st.session_state.image_vectors
+
+ description_text.text("日本語CLIPモデル(ゼロショット)を用いて、説明文の意味が近い「いらすとや」画像を検索します。\nキーワードを列挙するよりも、自然な文章を入力した方が精度よく検索できます。\n画像は必ずリンク先の「いらすとや」さんのページを開き、そこからダウンロードしてください。")
+
+ def clear_result():
+     result_text.text("")
+
+ prev_query = ""
+ query_input = st.text_input(label="説明文", value="", on_change=clear_result)
+
+ closest_n = st.number_input(label="検索数", min_value=1, value=10, max_value=100)
+
+ search_button = st.button("検索")
+
+ result_text = st.empty()
+
+ if search_button or prev_query != query_input:
+     prev_query = query_input
+     query_embedding = encode_text(query_input, model)
+
+     distances = scipy.spatial.distance.cdist(
+         query_embedding, image_vectors, metric="cosine"
+     )[0]
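+     # scipy's cosine distance lies in [0, 2]; the listing below reports
+     # distance / 2, i.e. a score in [0, 1] where smaller means more similar.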
+
+     results = zip(range(len(distances)), distances)
+     results = sorted(results, key=lambda x: x[1])
+
+     md_content = ""
+     for i, (idx, distance) in enumerate(results[0:closest_n]):
+         page_url = df.iloc[idx]["page"]
+         desc = df.iloc[idx]["description"]
+         img_url = df.iloc[idx]["image_url"]
+         md_content += f"1. <div><a href='{page_url}' target='_blank' rel='noopener noreferrer'><img src='{img_url}' width='100'>{distance / 2:.4f}: {desc}</a></div>\n"
+
+     result_text.markdown(md_content, unsafe_allow_html=True)
+