jiaqingj committed on
Commit aa6ef1d · 1 Parent(s): 151b464

Delete clip

Files changed (3)
  1. clip/build_text_index.py +0 -105
  2. clip/clip.py +0 -146
  3. clip/clipretrieval.py +0 -135
clip/build_text_index.py DELETED
@@ -1,105 +0,0 @@
- import sys
- import torch
- import numpy as np
- import progressbar
- import os
-
- def parse_config():
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--clip_name", type=str, default="openai/clip-vit-base-patch32")
-     parser.add_argument("--text_file_path", type=str)
-     # save configuration
-     parser.add_argument("--save_index_prefix", type=str, help='where to save the mips index')
-     parser.add_argument("--save_index_name", type=str)
-     parser.add_argument("--save_mapping_dict_name", type=str,
-         help="a json file that stores a dictory. The dictory contains mapping between mips index and caption text")
-     # inference configuration
-     parser.add_argument("--batch_size", type=int, help="the batch size used to conduct inference with CLIP")
-     return parser.parse_args()
-
- def load_batch_text(text_file_path, batch_size):
-     import json
-     with open(text_file_path) as f:
-         item_list = json.load(f)
-
-     text_list = []
-     for item in item_list:
-         captions = item["captions"]
-         for cap in captions:
-             text_list.append(cap)
-     print ('Number of text instances is {}'.format(len(text_list)))
-
-     data_num = len(text_list)
-     batch_num = data_num // batch_size
-     batch_text_list = []
-     s_idx, e_idx = 0, batch_size
-     for p_idx in range(batch_num):
-         one_batch_text_list = []
-         for idx in range(s_idx, e_idx):
-             one_batch_text_list.append(text_list[idx])
-         batch_text_list.append(one_batch_text_list)
-     return batch_text_list
-
-
- import argparse
- if __name__ == '__main__':
-     if torch.cuda.is_available():
-         print ('Cuda is available.')
-     cuda_available = torch.cuda.is_available()
-     args = parse_config()
-     device = torch.device('cuda')
-
-     import os
-     if os.path.exists(args.save_index_prefix):
-         pass
-     else: # recursively construct directory
-         os.makedirs(args.save_index_prefix, exist_ok=True)
-
-     print ('Loading CLIP...')
-     from clip import CLIP
-     model = CLIP(args.clip_name)
-     if cuda_available:
-         model = model.cuda(device)
-     model.eval()
-     print ('CLIP loaded!')
-
-     print ('Loading text data...')
-     batch_text_list = load_batch_text(args.text_file_path, args.batch_size)
-     print ('Text data loaded.')
-
-     res_text_vec_list, res_text_list = [], []
-     batch_num = len(batch_text_list)
-     print ('Number of batches is {}'.format(batch_num))
-     print ('Start inference...')
-     p = progressbar.ProgressBar(batch_num)
-     p.start()
-     with torch.no_grad():
-         for p_idx in range(batch_num):
-             p.update(p_idx)
-             one_text_batch = batch_text_list[p_idx]
-             one_batch_vec = model.compute_batch_index_text_representation(one_text_batch).detach().cpu()
-             one_batch_vec_list = one_batch_vec.unbind(dim=0)
-             bsz = len(one_batch_vec_list)
-             for k in range(bsz):
-                 res_text_vec_list.append(one_batch_vec_list[k].numpy())
-                 res_text_list.append(one_text_batch[k])
-     p.finish()
-     assert len(res_text_vec_list) == len(res_text_list)
-     print ('Inference completed!')
-
-     index_text_mapping_dict = {}
-     for k in range(len(res_text_list)):
-         index_text_mapping_dict[k] = res_text_list[k]
-     mapping_list_save_path = args.save_index_prefix + '/' + args.save_mapping_dict_name
-     import json
-     with open(mapping_list_save_path, 'w') as outfile:
-         json.dump(index_text_mapping_dict, outfile, indent=4)
-     print ('Mapping dictionary saved!')
-
-     print ('Start buiding index...')
-     index_save_path = args.save_index_prefix + '/' + args.save_index_name
-     with open(index_save_path, 'w', encoding = 'utf8') as o:
-         for vec in res_text_vec_list:
-             one_text = ' '.join([str(num) for num in vec]).strip()
-             o.writelines(one_text + '\n')
-     print ('Index completed!')
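
For reference, a minimal sketch of the caption JSON that load_batch_text above expects; the file name and captions below are hypothetical placeholders.

import json

# Each item carries a "captions" list; build_text_index.py flattens all captions
# into one text list before batching them for CLIP inference.
example_items = [
    {"captions": ["a dog runs across the grass", "a brown dog playing outside"]},
    {"captions": ["a plate of food on a wooden table"]},
]
with open("example_captions.json", "w") as f:
    json.dump(example_items, f, indent=4)
# The deleted script would then be pointed at this file via --text_file_path example_captions.json
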
clip/clip.py DELETED
@@ -1,146 +0,0 @@
- import torch
- import requests
- from torch import nn
- from PIL import Image
-
- class CLIP(nn.Module):
-     def __init__(self, model_name):
-         super(CLIP, self).__init__()
-         # model name: e.g. openai/clip-vit-base-patch32
-         print ('Initializing CLIP model...')
-         from transformers import CLIPProcessor, CLIPModel
-         self.model = CLIPModel.from_pretrained(model_name)
-         self.model.eval()
-         self.processor = CLIPProcessor.from_pretrained(model_name)
-         from transformers import CLIPTokenizer
-         self.tokenizer = CLIPTokenizer.from_pretrained(model_name)
-         self.cuda_has_been_checked = False
-         print ('CLIP model initialized.')
-
-     def check_cuda(self):
-         self.cuda_available = next(self.model.parameters()).is_cuda
-         self.device = next(self.model.parameters()).get_device()
-         if self.cuda_available:
-             print ('Cuda is available.')
-             print ('Device is {}'.format(self.device))
-         else:
-             print ('Cuda is not available.')
-             print ('Device is {}'.format(self.device))
-
-     @torch.no_grad()
-     def compute_image_representation_from_image_path(self, image_path):
-         if not self.cuda_has_been_checked:
-             self.check_cuda()
-             self.cuda_has_been_checked = True
-         else:
-             pass
-         # image_path: the path of the image
-         image = Image.open(image_path)
-         inputs = self.processor(images=image, return_tensors="pt")
-         pixel_values = inputs['pixel_values']
-         if self.cuda_available:
-             pixel_values = pixel_values.cuda(self.device)
-         visual_outputs = self.model.vision_model(pixel_values=pixel_values)
-         image_embeds = visual_outputs[1]
-         image_embeds = self.model.visual_projection(image_embeds) # [1 x embed_dim]
-         return image_embeds
-
-     def compute_image_representation_from_image_instance(self, image):
-         if not self.cuda_has_been_checked:
-             self.check_cuda()
-             self.cuda_has_been_checked = True
-         else:
-             pass
-         # image_path: the path of the image
-         inputs = self.processor(images=image, return_tensors="pt")
-         pixel_values = inputs['pixel_values']
-         if self.cuda_available:
-             pixel_values = pixel_values.cuda(self.device)
-         visual_outputs = self.model.vision_model(pixel_values=pixel_values)
-         image_embeds = visual_outputs[1]
-         image_embeds = self.model.visual_projection(image_embeds) # [1 x embed_dim]
-         return image_embeds
-
-     def compute_text_representation(self, text_list):
-         if not self.cuda_has_been_checked:
-             self.check_cuda()
-             self.cuda_has_been_checked = True
-         else:
-             pass
-         # text_list: a list of text
-         text_inputs = self.tokenizer(text_list, padding=True, return_tensors="pt",
-             max_length=self.tokenizer.max_len_single_sentence + 2, truncation=True)
-         # self.tokenizer.max_len_single_sentence + 2 = 77
-         input_ids, attention_mask = text_inputs['input_ids'], text_inputs['attention_mask']
-         if self.cuda_available:
-             input_ids = input_ids.cuda(self.device)
-             attention_mask = attention_mask.cuda(self.device)
-         text_outputs = self.model.text_model(
-             input_ids=input_ids,
-             attention_mask=attention_mask
-         )
-         text_embeds = text_outputs[1]
-         text_embeds = self.model.text_projection(text_embeds)
-         return text_embeds
-
-     def compute_image_text_similarity_via_embeddings(self, image_embeds, text_embeds):
-         '''
-             image_embeds: 1 x embed_dim
-             text_embeds: len(text_list) x embed_dim
-         '''
-         image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
-         text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
-         logit_scale = self.model.logit_scale.exp()
-         logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
-         logits_per_image = logits_per_text.T
-         return logits_per_image.softmax(dim=1), logits_per_image/logit_scale # 1 x len(text_list)
-
-     def compute_image_text_similarity_via_raw_text(self, image_embeds, text_list):
-         text_embeds = self.compute_text_representation(text_list)
-         return self.compute_image_text_similarity_via_embeddings(image_embeds, text_embeds)
-
-     ### -------------------- functions for building index ---------------------- ###
-     def compute_batch_index_image_features(self, image_list):
-         '''
-             # list of image instances
-         '''
-         if not self.cuda_has_been_checked:
-             self.check_cuda()
-             self.cuda_has_been_checked = True
-         else:
-             pass
-         # image_path: the path of the image
-         inputs = self.processor(images=image_list, return_tensors="pt")
-         pixel_values = inputs['pixel_values']
-         if self.cuda_available:
-             pixel_values = pixel_values.cuda(self.device)
-         visual_outputs = self.model.vision_model(pixel_values=pixel_values)
-         image_embeds = visual_outputs[1]
-         image_embeds = self.model.visual_projection(image_embeds) # [1 x embed_dim]
-         return image_embeds # len(image_list) x embed_dim
-
-     def compute_batch_index_text_representation(self, text_list):
-         if not self.cuda_has_been_checked:
-             self.check_cuda()
-             self.cuda_has_been_checked = True
-         else:
-             pass
-         # text_list: a list of text
-         #text_inputs = self.tokenizer(text_list, padding=True, return_tensors="pt")
-         text_inputs = self.tokenizer(text_list, padding=True, return_tensors="pt",
-             max_length=self.tokenizer.max_len_single_sentence + 2, truncation=True)
-         input_ids, attention_mask = text_inputs['input_ids'], text_inputs['attention_mask']
-         if self.cuda_available:
-             input_ids = input_ids.cuda(self.device)
-             attention_mask = attention_mask.cuda(self.device)
-         text_outputs = self.model.text_model(
-             input_ids=input_ids,
-             attention_mask=attention_mask
-         )
-         text_embeds = text_outputs[1]
-         text_embeds = self.model.text_projection(text_embeds)
-         return text_embeds
-         #logit_scale = self.model.logit_scale.exp()
-         #text_embeds = text_embeds * logit_scale
-         #return text_embeds
-
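
For reference, a minimal usage sketch of the CLIP wrapper deleted above; the image path and candidate captions are hypothetical, and the model name is the default used in build_text_index.py.

import torch
from clip import CLIP  # the wrapper module deleted above

model = CLIP("openai/clip-vit-base-patch32")
if torch.cuda.is_available():
    model = model.cuda(torch.device('cuda'))
model.eval()

with torch.no_grad():
    # 1 x embed_dim image embedding from a (hypothetical) image file
    image_embeds = model.compute_image_representation_from_image_path("example.jpg")
    # softmax scores and unscaled logits over the candidate captions, each of shape 1 x 2
    probs, logits = model.compute_image_text_similarity_via_raw_text(
        image_embeds, ["a dog on the grass", "a plate of food"])
    print(probs)
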
clip/clipretrieval.py DELETED
@@ -1,135 +0,0 @@
- import json
- import copy
- import torch
- import progressbar
- import numpy as np
- from PIL import Image
-
- class CLIPIndex:
-     def __init__(self, index_matrix_path, mapping_dict_path, clip):
-         '''
-             index_path: the pre-trained index
-             mapping_dict_path: the pre-indexed mapping dictionary
-             clip: the pre-trained clip model
-         '''
-         print ('Loading index...')
-         self.index_matrix = self.normalization(self.load_matrix(index_matrix_path))
-         print ('Index loaded.')
-         print (self.index_matrix.shape)
-         with open(mapping_dict_path) as f:
-             self.mapping_dict = json.load(f)
-         self.clip = clip
-
-     def load_matrix(self, in_f):
-         matrix_list = []
-         with open(in_f, 'r', encoding = 'utf8') as i:
-             lines = i.readlines()
-             for l in lines:
-                 one_vec = [float(num) for num in l.strip('\n').split()]
-                 matrix_list.append(one_vec)
-         return np.array(matrix_list)
-
-     def normalization(self, matrix):
-         '''
-             matrix: num_instance x num_feature
-         '''
-         return matrix / np.linalg.norm(matrix, axis=1, keepdims=True)
-
-     def get_image_representation(self, image_path):
-         image_instance = Image.open(image_path)
-         image_vec = self.clip.compute_batch_index_image_features([image_instance]).detach().cpu().numpy()
-         image_vec = self.normalization(image_vec)
-         return image_vec
-
-     def search_text(self, image_path):
-         image_vec = self.get_image_representation(image_path)
-         sort_idx_list = np.matmul(image_vec, self.index_matrix.transpose())[0].argsort()[::-1]
-         top_idx = sort_idx_list[0]
-         return self.mapping_dict[str(top_idx)]
-
-
- def parse_config():
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--clip_name", type=str)
-     parser.add_argument("--test_image_prefix_path", type=str, help="the folder that stores all test images")
-     parser.add_argument("--test_path", type=str)
-     # index configuration
-     parser.add_argument("--index_matrix_path", type=str)
-     parser.add_argument("--mapping_dict_path", type=str)
-     # save configuration
-     parser.add_argument("--save_path_prefix", type=str, help="save the result in which directory")
-     parser.add_argument("--save_name", type=str, help="the name of the saved file")
-     return parser.parse_args()
-
- import argparse
- if __name__ == '__main__':
-     if torch.cuda.is_available():
-         print ('Cuda is available.')
-     cuda_available = torch.cuda.is_available()
-     args = parse_config()
-     device = torch.device('cuda')
-
-     save_path_prefix = args.save_path_prefix
-     import os
-     if os.path.exists(save_path_prefix):
-         pass
-     else: # recursively construct directory
-         os.makedirs(save_path_prefix, exist_ok=True)
-     # parse save name
-     save_name = args.save_name
-     full_save_path = save_path_prefix + '/' + save_name
-     print ('full save path is {}'.format(full_save_path))
-
-     print ('Loading CLIP...')
-     from clip import CLIP
-     clip = CLIP(args.clip_name)
-     if cuda_available:
-         clip = clip.cuda(device)
-     clip.eval()
-     print ('CLIP loaded!')
-
-     clipindex = CLIPIndex(args.index_matrix_path, args.mapping_dict_path, clip)
-
-     print ('Loading data...')
-     import json
-     with open(args.test_path) as f:
-         item_list = json.load(f)
-     print ('Data loaded.')
-     print ('Number of test instances is {}'.format(len(item_list)))
-
-     result_list = []
-     invalid_num = 0
-     print ('----------------------------------------------------------------')
-     with torch.no_grad():
-         test_num = len(item_list)
-         #test_num = 10
-         print ('Number of inference instances is {}'.format(test_num))
-         p = progressbar.ProgressBar(test_num)
-         p.start()
-         for p_idx in range(test_num):
-             p.update(p_idx)
-             one_test_dict = item_list[p_idx]
-
-             one_res_dict = {
-                 'split':one_test_dict['split'],
-                 'image_name':one_test_dict['image_name'],
-                 #'file_path':one_test_dict['file_path'],
-                 'captions':one_test_dict['captions']
-             }
-
-             image_full_path = args.test_image_prefix_path + '/' + one_test_dict['image_name']
-             try:
-                 output_text = clipindex.search_text(image_full_path)
-                 one_res_dict['prediction'] = output_text
-                 result_list.append(one_res_dict)
-             except:
-                 invalid_num += 1
-                 print ('invalid number is {}'.format(invalid_num))
-                 continue
-         p.finish()
-     print ('Inference completed!')
-
-     import json
-     with open(full_save_path, 'w') as outfile:
-         json.dump(result_list, outfile, indent=4)
-
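
For reference, a minimal sketch of how the retrieval pipeline deleted above was presumably wired together; all file paths are hypothetical placeholders, and the two index files are the ones written by build_text_index.py.

import torch
from clip import CLIP
from clipretrieval import CLIPIndex  # the module deleted above

clip = CLIP("openai/clip-vit-base-patch32")
if torch.cuda.is_available():
    clip = clip.cuda(torch.device('cuda'))
clip.eval()

# index_matrix.txt: one whitespace-separated caption vector per line
# mapping_dict.json: maps each row index to its caption text
clipindex = CLIPIndex("index/index_matrix.txt", "index/mapping_dict.json", clip)

with torch.no_grad():
    # returns the stored caption whose CLIP embedding best matches the image
    caption = clipindex.search_text("example.jpg")
print(caption)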