QingyiSi committed on
Commit
44ddffd
1 Parent(s): d762820

Upload 14 files

code/compute_score.py ADDED
@@ -0,0 +1,107 @@
1
+ import json
2
+ import string
3
+ import regex
4
+
5
+ #Normalization from SQuAD evaluation script https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
6
+ def normalize_answer(s):
7
+ def remove_articles(text):
8
+ return regex.sub(r'\b(a|an|the)\b', ' ', text)
9
+
10
+ def white_space_fix(text):
11
+ return ' '.join(text.split())
12
+
13
+ def remove_punc(text):
14
+ exclude = set(string.punctuation)
15
+ return ''.join(ch for ch in text if ch not in exclude)
16
+
17
+ def lower(text):
18
+ return text.lower()
19
+
20
+ return white_space_fix(remove_articles(remove_punc(lower(s))))
21
+
22
+
23
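+ # VQA-style soft accuracy: a prediction that matches k of the annotated answers scores min(k/3, 1).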
+ def cal_acc_multi(ground_truth, preds, return_id = False):
24
+ all_num = len(ground_truth)
25
+ acc_num = 0
26
+ ids = []
27
+ temp = []
28
+ for i, answer_id in enumerate(ground_truth):
29
+ pred = preds[i]
30
+ cnt = 0
31
+ for aid in answer_id:
32
+ if pred == aid:
33
+ cnt += 1
34
+ if cnt ==1:
35
+ acc_num += 1/3
36
+
37
+ elif cnt == 2:
38
+ acc_num += 2/3
39
+
40
+ elif cnt > 2:
41
+ acc_num += 1
42
+
43
+
44
+ if return_id:
45
+ return acc_num / all_num, ids
46
+ else:
47
+ return acc_num, all_num
48
+
49
+
50
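+ # Majority vote over the candidate predictions; iterating the reversed list breaks ties in favour of the later prediction.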
+ def ensemble(a):
51
+ return max(a[::-1], key = a.count)
52
+
53
+ # Ground Truth Answers
54
+ f=open("/root/okvqa/data/okvqa_val.json", "r")
55
+ answer_dict=json.load(f)
56
+ f.close()
57
+ for k in answer_dict.keys():
58
+ for a_ind, a in enumerate(answer_dict[k]['multi_answers']):
59
+ answer_dict[k]['multi_answers'][a_ind] = normalize_answer(answer_dict[k]['multi_answers'][a_ind])
60
+
61
+
62
+ # Load Predictions (for example, ensemble of three models' predictions)
63
+ f1=open("/mnt/bn/qingyi-hl/finetunedModelonOKVQA/1e-41e-5FTwiki25-From-1e-41e-5PretrainWiki25Epo0/FTwiki25FromPretrainWiki25Epo0-1e41e5/predictions.json", "r")
64
+ predict0_dict=json.load(f1)
65
+ for p in predict0_dict.keys():
66
+ predict0_dict[p]=normalize_answer(predict0_dict[p])
67
+ f1.close()
68
+ f2=open("/mnt/bn/qingyi-hl/finetunedModelonOKVQA/1e-41e-5FTwiki25-From-1e-41e-5PretrainWiki25Epo1/predictions.json", "r")
69
+ predict1_dict=json.load(f2)
70
+ for p in predict1_dict.keys():
71
+ predict1_dict[p]=normalize_answer(predict1_dict[p])
72
+ f2.close()
73
+ f3=open("/mnt/bn/qingyi-hl/finetunedModelonOKVQA/1e-41e-5FTwiki25-From-1e-41e-5PretrainWiki25Epo2/predictions.json", "r")
74
+ predict2_dict=json.load(f3)
75
+ for p in predict2_dict.keys():
76
+ predict2_dict[p]=normalize_answer(predict2_dict[p])
77
+ f3.close()
78
+
79
+
80
+
81
+ answer_list=[]
82
+ predict0_list=[]
83
+ predict1_list=[]
84
+ predict2_list=[]
85
+ emsemble_predict=[]
86
+ for k in answer_dict.keys():
87
+ answer_list.append( answer_dict[k]['multi_answers'])
88
+ predict0_list.append( predict0_dict[k])
89
+ predict1_list.append( predict1_dict[k])
90
+ predict2_list.append( predict2_dict[k])
91
+
92
+ emsemble_predict.append(ensemble([predict0_dict[k], predict1_dict[k], predict2_dict[k]]))
93
+
94
+
95
+
96
+ acc_n0,all_n0=cal_acc_multi(answer_list,predict0_list)
97
+ acc_n1,all_n1=cal_acc_multi(answer_list,predict1_list)
98
+ acc_n2,all_n2=cal_acc_multi(answer_list,predict2_list)
99
+
100
+ acc_ens,all_ens=cal_acc_multi(answer_list,emsemble_predict)
101
+
102
+ print("0-accuracy",acc_n0/all_n0)
103
+ print("1-accuracy",acc_n1/all_n1)
104
+ print("2-accuracy",acc_n2/all_n2)
105
+
106
+
107
+ print("ensemble-accuracy",acc_ens/all_ens)
code/config4LXMT5_DDP.py ADDED
@@ -0,0 +1,37 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding:utf-8 -*-
3
+ import argparse
4
+
5
+ parser = argparse.ArgumentParser()
6
+ # parser.add_argument("--inference", action="store_true", help='complete dataset or not')
7
+ parser.add_argument("--pretrain", default=False, action="store_true", help='use vqa2.0 or not')
8
+ parser.add_argument("--gpt3", default=False, action="store_true", help='use gpt3 to train on okvqa')
9
+ parser.add_argument("--visualBERT", default=False, action="store_true", help='use visualBERT, if false use LXMERT')
10
+
11
+ parser.add_argument('--batch_size', type=int, default=128,
12
+ help='minibatch size')
13
+ parser.add_argument('--seed', type=int, default=4,
14
+ help='random seed!')
15
+ parser.add_argument('--num_wiki', type=int, default=25,
16
+ help='the number of wiki passages')
17
+ parser.add_argument('--num_epochs', type=int, default=40,
18
+ help='number of epochs')
19
+ parser.add_argument('--learning_rate', type=float, default=0.0001,
20
+ help='LR')
21
+ parser.add_argument('--learning_rate_LXM', type=float, default=0.00001,
22
+ help='LR_LXM')
23
+ parser.add_argument('--model_dir', type=str, default='xxx/',
24
+ help='model file path')
25
+ parser.add_argument('--input_type', type=int, default=1,#200,
26
+ help='input types: 1==Q-OFA-C-L-O; 2==Q-C-L-O; 3==Q-OFA-L-O; 4==Q-OFA-C-O; 5==Q-OFA-C-L')
27
+ parser.add_argument('--describe', type=str, default='',
28
+ help='the model description used as the saved-model name')
29
+ parser.add_argument("--load_pthpath", default="",
30
+ help="To continue training, path to .pth file of saved checkpoint.")
31
+ parser.add_argument("--validate", default='True', action="store_true", help="Whether to validate on val split after every epoch.")
32
+ parser.add_argument("--dataset", default="okvqa", help="dataset that model training on")
33
+ parser.add_argument("--ofa", default="normal", help="normal or finetune --- load the knowledge from the normal OFA or the vqav2-finetuned OFA")
+ parser.add_argument("--allAns", default=False, action="store_true", help="train against all annotated answers instead of only the first one (flag referenced by dataset4LXMT5.py; default assumed False)")
34
+ parser.add_argument('--local_rank', default=-1, type=int,
35
+ help='node rank for distributed training')
36
+ args = parser.parse_args()
37
+ print(args)
code/dataset4LXMT5.py ADDED
@@ -0,0 +1,551 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding:utf-8 -*-
3
+ import collections
4
+ import json
5
+
6
+
7
+ import string
8
+
9
+ import numpy as np
10
+ from model_LXM2T5 import T5tokenizer, LXMT52T5, LXMtokenizer
11
+ import pickle
12
+ import torch
13
+ from torch.utils.data import Dataset
14
+
15
+ from config4LXMT5_DDP import args
16
+ print('dataset4T5',args)
17
+ from random import sample
18
+
19
+
20
+ def normalize_wiki(s):
21
+ stopwords=['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
22
+
23
+
24
+ def white_space_fix(text):
25
+ return ' '.join(text.split())
26
+
27
+ def remove_punc(text):
28
+ exclude = set(string.punctuation)
29
+ return ''.join(ch for ch in text if ch not in exclude)
30
+
31
+ def lower(text):
32
+ return text.lower()
33
+
34
+ def remove_stop_w(text):
35
+ to_be_removed = set(stopwords)
36
+ text_list = text.split(' ')
37
+ text_list = [item for item in text_list if item not in to_be_removed]
38
+ return ' '.join(text_list)
39
+
40
+ return white_space_fix(remove_stop_w(remove_punc(lower(s))))
41
+
42
+
43
+
44
+ if args.dataset == 'okvqa':
45
+ with open('../data/image_features/vqa_img_feature_train.pickle', 'rb') as f:
46
+ pretrain_feature = pickle.load(f)
47
+ if args.pretrain:
48
+ with open('../data/pretrain/vqa_train_filter.json','r') as f:
49
+ vqa2 = json.load(f)
50
+ train_row = vqa2
51
+ else:
52
+ with open('../data/finetune/okvqa_train.json','r') as f:
53
+ train_row = json.load(f)
54
+
55
+ if args.pretrain:
56
+ with open('../data/pretrain/caption_predict_vqav2train.json', 'r') as f:
57
+ captions_train = json.load(f)
58
+ with open('../data/pretrain/labeling_predict_vqav2train.json', 'r') as f:
59
+ labelings_train = json.load(f)
60
+ with open('../data/pretrain/ocr_predict_vqav2train.json', 'r') as f:
61
+ ocrs_train = json.load(f)
62
+
63
+ with open('../data/pretrain/wiki_100sim_train.json', 'r') as f:
64
+ wikis_train = json.load(f)
65
+
66
+ else:
67
+ with open('../data/finetune/caption_predict_train.json', 'r') as f:
68
+ captions_train = json.load(f)
69
+ with open('../data/finetune/labeling_predict_train.json', 'r') as f:
70
+ labelings_train = json.load(f)
71
+ with open('../data/finetune/ocr_predict_train.json', 'r') as f:
72
+ ocrs_train = json.load(f)
73
+ if args.ofa=="normal":
74
+ with open('../data/finetune/ofa_predictions/OFA_zerorate_predict_train.json', 'r') as f:
75
+ ofas_train = json.load(f) # keys are integers
76
+ with open('../data/finetune/ofa_predictions/OFA_zerorate_evidence_train.json', 'r') as f:
77
+ evid_train = json.load(f) # keys are strings
78
+ elif args.ofa=="finetune":
79
+ with open('../data/finetune/ofa_predictions/OFAvqa_zerorate_answer_train.json', 'r') as f:
80
+ ofas_train = json.load(f) # keys are strings
81
+ with open('../data/finetune/ofa_predictions/OFAvqa_zerorate_evidence_train.json', 'r') as f:
82
+ evid_train = json.load(f) # keys are strings
83
+ else:
84
+ assert 0==1
85
+ with open("../data/finetune/gpt3_okvqa_train2014_answers.pkl", 'rb') as f:
86
+ gpt3_train = pickle.load(f)
87
+ with open('../data/finetune/wiki_100sim_train.json', 'r') as f:
88
+ wikis_train = json.load(f)
89
+
90
+ else:
91
+ assert 0==1
92
+
93
+
94
+
95
+ def plural(word):
96
+ if word.endswith('y'):
97
+ return word[:-1] + 'ies'
98
+ elif word[-1] in 'sxo' or word[-2:] in ['sh', 'ch']:
99
+ return word + 'es'
100
+ elif word.endswith('an'):
101
+ return word[:-2] + 'en'
102
+ else:
103
+ return word + 's'
104
+
105
+ image_ids = []
106
+ qids = []
107
+ questions = []
108
+ answers = []
109
+ labels = []
110
+ objects = []
111
+ answer_ids = []
112
+ answers_lists = []
113
+ question_lengths = []
114
+ answers_most = []
115
+ neg_answer = []
116
+
117
+
118
+ train_captions = {}
119
+ for item in captions_train:
120
+ if item['image_id'] in train_captions.keys():
121
+ print("IMG caption REPEATED!")
122
+ assert 0==1
123
+ train_captions[item['image_id']] = item['caption']
124
+
125
+ train_labelings = {}
126
+ for item in labelings_train:
127
+ if item['image_id'] in train_labelings.keys():
128
+ print("IMG labelings REPEATED!")
129
+ assert 0==1
130
+ train_labelings[str(item['image_id'])] = item['labeling']
131
+ print("labeling number:", len(train_labelings.keys()))
132
+
133
+ train_ocrs = {}
134
+ for item in ocrs_train:
135
+ if item['image_id'] in train_ocrs.keys():
136
+ print("IMG ocrs REPEATED!")
137
+ assert 0==1
138
+ train_ocrs[str(item['image_id'])] = item['ocr']
139
+
140
+
141
+ if not args.pretrain:
142
+ train_ofas = {}
143
+ if args.ofa=="normal":
144
+ for item in ofas_train:
145
+ if item['question_id'] in train_ofas.keys():
146
+ print("IMG ofas REPEATED!")
147
+ assert 0==1
148
+ train_ofas[str(item['question_id'])] = item['OFA_answer']+", "+evid_train[str(item['question_id'])]
149
+ elif args.ofa=="finetune":
150
+ for k in evid_train.keys():
151
+ train_ofas[k] = ofas_train[k]+", "+evid_train[k]
152
+ else:
153
+ assert 0==1
154
+
155
+ train_gpt3 = {}
156
+ for k in gpt3_train.keys():
157
+ qid = k.split("#")[1]
158
+
159
+ train_gpt3[str(qid)] = ", ".join(gpt3_train[k][0])#[(ans, evid)]
160
+
161
+
162
+ train_wikis = wikis_train
163
+
164
+
165
+ if args.pretrain:
166
+ if args.num_wiki > 51:
167
+ for key in train_wikis.keys():
168
+ for i in range(args.num_wiki):
169
+ train_wikis[key][i]=normalize_wiki(train_wikis[key][i])
170
+
171
+
172
+
173
+
174
+ n = 0
175
+
176
+
177
+ for qid, item in train_row.items():
178
+ img_id = str(item['image_id'])
179
+ image_ids.append(img_id)
180
+ qids.append(qid)
181
+ question_clean = item['question']# + answer_sentence
182
+ questions.append(question_clean)
183
+
184
+
185
+
186
+ # multi-answer
187
+ if args.dataset == 'okvqa':
188
+ answers.append(item['multi_answers'])
189
+ # m_ans_id = [a_dic.get(i, 0) for i in item['multi_answers']]
190
+ # most_answer_ids.append(m_ans_id)
191
+
192
+
193
+ #single answer
194
+ else:
195
+ answers.append(item['answer'])
196
+
197
+
198
+
199
+
200
+ def _create_gpt3_entry(imgage_ids, q_ids, questions, answer, captions,labelings, ocrs,ofas, gpt3, wikis,final_txt):
201
+
202
+ if not args.pretrain:
203
+ entry = {
204
+ 'img_id': imgage_ids,
205
+ 'qid': q_ids,
206
+ 'question': questions,
207
+ 'answer': answer,
208
+ 'caption': captions,
209
+ 'labeling':labelings,
210
+ 'ocr': ocrs,
211
+ 'ofa':ofas,
212
+ 'gpt3':gpt3,
213
+ 'wiki':wikis,
214
+ 'final_txt':final_txt}
215
+
216
+ return entry
217
+
218
+
219
+
220
+ def _create_entry(imgage_ids, q_ids, questions, answer, captions,labelings, ocrs,ofas, wikis,final_txt):
221
+ if not args.pretrain:
222
+ entry = {
223
+ 'img_id': imgage_ids,
224
+ 'qid': q_ids,
225
+ 'question': questions,
226
+ 'answer': answer,
227
+ 'caption': captions,
228
+ 'labeling':labelings,
229
+ 'ocr': ocrs,
230
+ 'ofa':ofas,
231
+ 'wiki':wikis,
232
+ 'final_txt':final_txt}
233
+ return entry
234
+
235
+
236
+ def _create_vqav2_entry(imgage_ids, q_ids, questions, answer, captions,labelings, ocrs,wikis,final_txt):
237
+ if args.pretrain:
238
+ entry = {
239
+ 'img_id': imgage_ids,
240
+ 'qid': q_ids,
241
+ 'question': questions,
242
+ 'answer': answer,
243
+ 'caption': captions,
244
+ 'labeling':labelings,
245
+ 'ocr': ocrs,
246
+ 'wiki':wikis,
247
+ 'final_txt':final_txt}
248
+ # else:
249
+ return entry
250
+
251
+
252
+ def _load_dataset(train_row):
253
+ entries=[]
254
+ for qid, item in train_row.items():
255
+ qid = str(qid)
256
+ img_id = str(item['image_id'])
257
+ question = item['question']
258
+
259
+
260
+
261
+ # multi-answer
262
+ if args.dataset == 'okvqa':
263
+ answers=item['multi_answers']
264
+
265
+
266
+ #single answer
267
+ else:
268
+ answers=item['answer']
269
+
270
+ caption=train_captions[img_id]
271
+ labeling=train_labelings[img_id]
272
+ ocr_list=train_ocrs[img_id]
273
+ ocr = ", ".join(str(i) for i in ocr_list)
274
+ if not args.pretrain:
275
+ ofa=train_ofas[qid]
276
+ gpt3=train_gpt3[qid]
277
+ wiki=train_wikis[qid]
278
+
279
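+ # Build the T5 source text: the question followed by the selected knowledge sources (OFA answer+evidence, GPT-3 answers, caption, labels, OCR, Wiki passages), joined with [SEP].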
+ if args.pretrain:
280
+ if args.num_wiki > 51:
281
+ final_txt = [question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
282
+ else:
283
+ final_txt = [question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
284
+ else:
285
+ if args.seed > 1000:
286
+ print("seed > 1000 denotes that ablation study on 2 encoders")
287
+ assert args.input_type==0
288
+ if args.gpt3:
289
+ if args.input_type==0:
290
+
291
+ if args.num_wiki > 51:
292
+ # When there are a large number of Wiki passages, to save on GPU memory usage, Wiki passages are processed.
293
+ final_txt = [question + " [SEP] " + ofa + " " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
294
+ else:
295
+ final_txt = [question + " [SEP] " + ofa + " " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
296
+ elif args.input_type==1:
297
+ final_txt = question + " [SEP] " + ofa + " " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
298
+ elif args.input_type==2:
299
+ if args.num_wiki > 51:
300
+ final_txt = [question + " [SEP] " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
301
+ else:
302
+ final_txt = [question + " [SEP] " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
303
+ elif args.input_type==3:
304
+ final_txt = question + " [SEP] " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
305
+ else:
306
+ print('choose input-type in [0,1,2,3]')
307
+ assert 0==1
308
+
309
+
310
+ else:
311
+ if args.input_type==0:
312
+
313
+ if args.num_wiki > 51:
314
+ final_txt = [question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
315
+ else:
316
+ final_txt = [question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
317
+ elif args.input_type==1:
318
+ final_txt = question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
319
+ elif args.input_type==2:
320
+ if args.num_wiki > 51:
321
+ final_txt = [question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
322
+ else:
323
+ final_txt = [question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
324
+ elif args.input_type==3:
325
+ final_txt = question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
326
+ else:
327
+ print('choose input-type in [0,1,2,3]')
328
+ assert 0==1
329
+
330
+
331
+
332
+
333
+ if args.pretrain:
334
+ entries.append(_create_vqav2_entry(img_id, qid, question, answers, caption,labeling, ocr, wiki, final_txt))
335
+ else:
336
+ if args.gpt3:
337
+ entries.append(_create_gpt3_entry(img_id, qid, question, answers, caption,labeling, ocr,ofa,gpt3, wiki, final_txt))
338
+ else:
339
+ entries.append(_create_entry(img_id, qid, question, answers, caption,labeling, ocr,ofa, wiki, final_txt))
340
+
341
+ return entries
342
+
343
+
344
+
345
+
346
+
347
+ def _create_pretrain_entry(imgage_ids, q_ids, questions, answer):#, captions,labelings, ocrs,ofas,final_txt):
348
+ entry = {
349
+ 'img_id': imgage_ids,
350
+ 'qid': q_ids,
351
+ 'question': questions,
352
+ 'answer': answer}#,
353
+ return entry
354
+
355
+ def _load_pretrain_dataset(train_row):
356
+ entries=[]
357
+ for qid, item in train_row.items():
358
+ qid = str(qid)
359
+
360
+ img_id = str(item['image_id'])
361
+ question = item['question']
362
+
363
+
364
+ # multi-answer
365
+ if args.dataset == 'okvqa':
366
+ answers=item['multi_answers']
367
+ # answers.append(item['multi_answers'])
368
+ # m_ans_id = [a_dic.get(i, 0) for i in item['multi_answers']]
369
+ # most_answer_ids.append(m_ans_id)
370
+
371
+
372
+ #single answer
373
+ else:
374
+ answers=item['answer']
375
+
376
+ entries.append(_create_pretrain_entry(img_id, qid, question, answers))
377
+ return entries
378
+
379
+
380
+
381
+ class KgDataset(Dataset):
382
+ def __init__(self, val=False, val_test=False):
383
+ self.entries = _load_dataset(train_row)
384
+ self.tokenize()
385
+
386
+ def __len__(self):
387
+ return len(self.entries)
388
+ def tokenize(self):
389
+ if args.input_type==0:
390
+ if args.num_wiki > 51:
391
+ max_source_length=200
392
+ else:
393
+ max_source_length=250 #300
394
+ else:
395
+ max_source_length=128
396
+ max_target_length=5
397
+ max_que_length=16
398
+ for entry in self.entries:
399
+ T5_input_seq, T5_input_ids, T5_input_masks = self.tokenizer_func( T5tokenizer, entry['final_txt'], max_length=max_source_length)
400
+ LXM_input_seq, LXM_input_ids, LXM_input_masks = self.tokenizer_func( LXMtokenizer, entry['question'], max_length=max_que_length)
401
+
402
+
403
+ all_Ans_T5_target_seq = []
404
+ all_Ans_T5_target_ids = []
405
+ all_Ans_T5_target_masks = []
406
+ if args.allAns:
407
+ for i in range(10):
408
+ if i%2==0:
409
+ T5_target_seq, T5_target_ids, T5_target_masks = self.tokenizer_func( T5tokenizer, entry['answer'][i], max_length=max_target_length)
410
+ all_Ans_T5_target_seq.append(T5_target_seq)
411
+ all_Ans_T5_target_ids.append(torch.from_numpy(np.array(T5_target_ids)))
412
+ all_Ans_T5_target_masks.append(torch.from_numpy(np.array(T5_target_masks)))
413
+ # print()
414
+ all_Ans_T5_target_ids=torch.stack(all_Ans_T5_target_ids)
415
+ all_Ans_T5_target_masks=torch.stack(all_Ans_T5_target_masks)
416
+
417
+ entry['T5_target_seq']=all_Ans_T5_target_seq
418
+ entry['T5_target_ids']=all_Ans_T5_target_ids
419
+ entry['T5_target_masks']=all_Ans_T5_target_masks
420
+
421
+ else:
422
+ T5_target_seq, T5_target_ids, T5_target_masks = self.tokenizer_func( T5tokenizer, entry['answer'][0], max_length=max_target_length)
423
+ entry['T5_target_seq']=T5_target_seq#torch.from_numpy(np.array(T5_target_seq))
424
+ entry['T5_target_ids']=torch.from_numpy(np.array(T5_target_ids))
425
+ entry['T5_target_masks']=torch.from_numpy(np.array(T5_target_masks))
426
+ entry['T5_input_seq']=T5_input_seq#torch.from_numpy(np.array(T5_input_seq))
427
+ entry['T5_input_ids']=torch.from_numpy(np.array(T5_input_ids))
428
+ entry['T5_input_masks']=torch.from_numpy(np.array(T5_input_masks))
429
+ entry['LXM_input_seq']=LXM_input_seq#torch.from_numpy(np.array(LXM_input_seq))
430
+ entry['LXM_input_ids']=torch.from_numpy(np.array(LXM_input_ids))
431
+ entry['LXM_input_masks']=torch.from_numpy(np.array(LXM_input_masks))
432
+
433
+
434
+
435
+ def tokenizer_func(self, tokenizer, text, max_length=0):
436
+ if max_length==0:
437
+ print('plz set the max length of input sequence!')
438
+ assert 1==2
439
+
440
+ out_seq = tokenizer(
441
+ text,
442
+ # batch_data['final_txt'],
443
+ padding='max_length',
444
+ max_length=max_length,
445
+ truncation=True,
446
+ # return_tensors="pt",
447
+ )
448
+
449
+ tokens=out_seq.input_ids #['input_ids']
450
+ masks=out_seq.attention_mask
451
+ length = len(tokens)
452
+
453
+ return out_seq, tokens, masks
454
+
455
+ def __getitem__(self, index):
456
+
457
+ entry = self.entries[index]
458
+ qid=entry['qid']
459
+ question=entry['question']
460
+ answer=entry['answer']
461
+ img_id=entry['img_id']
462
+ image_feature = pretrain_feature[img_id]['feats']
463
+
464
+ image_caption = entry['caption']
465
+ image_labeling = entry['labeling']
466
+ image_ocr_list = entry['ocr']
467
+ image_ocr = ", ".join(str(i) for i in image_ocr_list)
468
+ if not args.pretrain:
469
+ ofa = entry['ofa']
470
+ if args.gpt3:
471
+ gpt3 = entry['gpt3']
472
+ wiki = entry['wiki']
473
+ final_txt = entry['final_txt']
474
+
475
+
476
+ spatial_feature = pretrain_feature[img_id]['sp_feats']
477
+
478
+ T5_input_seq, T5_input_ids, T5_input_masks = entry['T5_input_seq'], entry['T5_input_ids'], entry['T5_input_masks']#self.tokenizer_func( T5tokenizer, final_txt, max_length=max_source_length)
479
+
480
+ LXM_input_seq, LXM_input_ids, LXM_input_masks = entry['LXM_input_seq'], entry['LXM_input_ids'], entry['LXM_input_masks']
481
+
482
+ LXM_token_type_ids = torch.from_numpy(np.array(LXM_input_seq['token_type_ids']))#.to(device)
483
+
484
+ T5_target_seq, T5_target_ids, T5_target_masks=entry['T5_target_seq'],entry['T5_target_ids'],entry['T5_target_masks']
485
+
486
+ if not args.pretrain:
487
+ if not args.gpt3:
488
+ return qid, question, answer, image_feature, spatial_feature, image_caption, image_labeling, image_ocr, ofa, wiki, final_txt, T5_input_seq,T5_input_ids,T5_input_masks,LXM_input_ids,LXM_input_masks,LXM_token_type_ids,T5_target_seq,T5_target_ids,T5_target_masks
489
+ elif args.gpt3:
490
+ return qid, question, answer, image_feature, spatial_feature, image_caption, image_labeling, image_ocr, ofa, gpt3, wiki, final_txt, T5_input_seq,T5_input_ids,T5_input_masks,LXM_input_ids,LXM_input_masks,LXM_token_type_ids,T5_target_seq,T5_target_ids,T5_target_masks
491
+ else:
492
+ return qid, question, answer, image_feature, spatial_feature, image_caption, image_labeling, image_ocr, wiki, final_txt, T5_input_seq,T5_input_ids,T5_input_masks,LXM_input_ids,LXM_input_masks,LXM_token_type_ids,T5_target_seq,T5_target_ids,T5_target_masks
493
+
494
+ def my_collate(batch):
495
+ batch = list(zip(*batch))
496
+ if not args.pretrain:
497
+ if not args.gpt3:
498
+ res = {'id': batch[0], 'ques': batch[1], 'ans': batch[2],
499
+ 'img': batch[3], 'spatial': batch[4],
500
+ 'caption': batch[5], 'labeling': batch[6], 'ocr': batch[7], 'ofa': batch[8], 'wiki': batch[9], 'final_txt': batch[10],
501
+ 'T5_input_seq': batch[11], 'T5_input_ids': batch[12],'T5_input_masks': batch[13],'LXM_input_ids':batch[14], 'LXM_input_masks':batch[15], 'LXM_token_type_ids':batch[16], 'T5_target_seq':batch[17],'T5_target_ids':batch[18],'T5_target_masks':batch[19]}
502
+ elif args.gpt3:
503
+ res = {'id': batch[0], 'ques': batch[1], 'ans': batch[2],
504
+ 'img': batch[3], 'spatial': batch[4],
505
+ 'caption': batch[5], 'labeling': batch[6], 'ocr': batch[7], 'ofa': batch[8], 'gpt3': batch[9], 'wiki': batch[10], 'final_txt': batch[11],
506
+ 'T5_input_seq': batch[12], 'T5_input_ids': batch[13],'T5_input_masks': batch[14],'LXM_input_ids':batch[15], 'LXM_input_masks':batch[16], 'LXM_token_type_ids':batch[17], 'T5_target_seq':batch[18],'T5_target_ids':batch[19],'T5_target_masks':batch[20]}
507
+
508
+
509
+ else:
510
+ res = {'id': batch[0], 'ques': batch[1], 'ans': batch[2],
511
+ 'img': batch[3], 'spatial': batch[4],
512
+ 'caption': batch[5], 'labeling': batch[6], 'ocr': batch[7], 'wiki': batch[8], 'final_txt': batch[9],
513
+ 'T5_input_seq': batch[10], 'T5_input_ids': batch[11],'T5_input_masks': batch[12],'LXM_input_ids':batch[13], 'LXM_input_masks':batch[14], 'LXM_token_type_ids':batch[15], 'T5_target_seq':batch[16],'T5_target_ids':batch[17],'T5_target_masks':batch[18]}
514
+
515
+
516
+ del batch
517
+ return res
518
+
519
+ def my_val_collate(batch):
520
+ batch = list(zip(*batch))
521
+ if 1:
522
+ res = {'id': batch[0], 'ques': batch[1], 'ans': batch[2],
523
+ 'img': batch[3], 'spatial': batch[4],
524
+ 'caption': batch[5], 'labeling': batch[6], 'ocr': batch[7], 'ofa': batch[8], 'wiki': batch[9], 'final_txt': batch[10],
525
+ 'T5_input_seq': batch[11], 'T5_input_ids': batch[12],'T5_input_masks': batch[13],'LXM_input_ids':batch[14], 'LXM_input_masks':batch[15], 'LXM_token_type_ids':batch[16], 'T5_target_seq':batch[17],'T5_target_ids':batch[18],'T5_target_masks':batch[19]}
526
+ del batch
527
+ return res
528
+
529
+
530
+
531
+
532
+
533
+ def my_gpt3_collate(batch):
534
+ batch = list(zip(*batch))
535
+ if 1:
536
+ res = {'id': batch[0], 'ques': batch[1], 'ans': batch[2],
537
+ 'img': batch[3], 'spatial': batch[4],
538
+ 'caption': batch[5], 'labeling': batch[6], 'ocr': batch[7], 'ofa': batch[8],'gpt3': batch[9], 'wiki': batch[10], 'final_txt': batch[11],
539
+ 'T5_input_seq': batch[12], 'T5_input_ids': batch[13],'T5_input_masks': batch[14],'LXM_input_ids':batch[15], 'LXM_input_masks':batch[16], 'LXM_token_type_ids':batch[17], 'T5_target_seq':batch[18],'T5_target_ids':batch[19],'T5_target_masks':batch[20]}
540
+ del batch
541
+ return res
542
+
543
+ def my_val_gpt3_collate(batch):
544
+ batch = list(zip(*batch))
545
+ if 1:
546
+ res = {'id': batch[0], 'ques': batch[1], 'ans': batch[2],
547
+ 'img': batch[3], 'spatial': batch[4],
548
+ 'caption': batch[5], 'labeling': batch[6], 'ocr': batch[7], 'ofa': batch[8],'gpt3': batch[9], 'wiki': batch[10], 'final_txt': batch[11],
549
+ 'T5_input_seq': batch[12], 'T5_input_ids': batch[13],'T5_input_masks': batch[14],'LXM_input_ids':batch[15], 'LXM_input_masks':batch[16], 'LXM_token_type_ids':batch[17], 'T5_target_seq':batch[18],'T5_target_ids':batch[19],'T5_target_masks':batch[20]}
550
+ del batch
551
+ return res
code/dataset_val4LXMT5.py ADDED
@@ -0,0 +1,346 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding:utf-8 -*-
3
+ import collections
4
+ import pickle
5
+ from model_LXM2T5 import T5tokenizer, LXMT52T5, LXMtokenizer
6
+
7
+ from torch.utils.data import Dataset
8
+ import json
9
+ import pickle
10
+ import numpy as np
11
+ import torch
12
+ import string
13
+
14
+
15
+ from config4LXMT5_DDP import args
16
+ print('dataset_val4T5',args)
17
+ from random import sample
18
+
19
+
20
+ def normalize_wiki(s):
21
+ stopwords=['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
22
+ # def remove_articles(text):
23
+ # return regex.sub(r'\b(a|an|the)\b', ' ', text)
24
+
25
+ def white_space_fix(text):
26
+ return ' '.join(text.split())
27
+
28
+ def remove_punc(text):
29
+ exclude = set(string.punctuation)
30
+ return ''.join(ch for ch in text if ch not in exclude)
31
+
32
+ def lower(text):
33
+ return text.lower()
34
+
35
+ def remove_stop_w(text):
36
+ to_be_removed = set(stopwords)
37
+ text_list = text.split(' ')
38
+ text_list = [item for item in text_list if item not in to_be_removed]
39
+ return ' '.join(text_list)
40
+
41
+ return white_space_fix(remove_stop_w(remove_punc(lower(s))))
42
+
43
+ if args.dataset == 'okvqa':
44
+ with open('../data/validate/okvqa_val.json','r') as f:
45
+ val_row = json.load(f)
46
+ with open('../data/image_features/vqa_img_feature_val.pickle', 'rb') as f:
47
+ pretrain_feature = pickle.load(f)
48
+ with open('../data/validate/caption_predict_val.json', 'r') as f:
49
+ captions_val = json.load(f)
50
+ with open('../data/validate/labeling_predict_val.json', 'r') as f:
51
+ labelings_val = json.load(f)
52
+ with open('../data/validate/ocr_predict_val.json', 'r') as f:
53
+ ocrs_val = json.load(f)
54
+
55
+ if args.ofa=="normal":
56
+ with open('../data/validate/ofa_predictions/OFA_zerorate_predict_val.json', 'r') as f:
57
+ ofas_val = json.load(f)
58
+ with open('../data/validate/ofa_predictions/OFA_zerorate_evidence_val.json', 'r') as f:
59
+ evid_val = json.load(f)
60
+ elif args.ofa=="finetune":
61
+ with open('../data/validate/ofa_predictions/OFAvqa_zerorate_answer_val.json', 'r') as f:
62
+ ofas_val = json.load(f)
63
+ with open('../data/validate/ofa_predictions/OFAvqa_zerorate_evidence_val.json', 'r') as f:
64
+ evid_val = json.load(f)
65
+ else:
66
+ assert 0==1
67
+ with open("../data/validate/gpt3_okvqa_val2014_answers.pkl", 'rb') as f:
68
+ gpt3_val = pickle.load(f)
69
+ with open('../data/validate/wiki_100sim_val.json', 'r') as f:
70
+ wikis_val = json.load(f)
71
+
72
+
73
+ def plural(word):
74
+ if word.endswith('y'):
75
+ return word[:-1] + 'ies'
76
+ elif word[-1] in 'sxo' or word[-2:] in ['sh', 'ch']:
77
+ return word + 'es'
78
+ elif word.endswith('an'):
79
+ return word[:-2] + 'en'
80
+ else:
81
+ return word + 's'
82
+
83
+ image_ids = []
84
+ qids = []
85
+ questions = []
86
+ answers = []
87
+ labels = []
88
+ objects = []
89
+ answer_ids = []
90
+ answers_lists = []
91
+ question_lengths = []
92
+ most_answer = []
93
+ neg_answer = []
94
+
95
+ val_captions = {}
96
+ for item in captions_val:
97
+ if item['image_id'] in val_captions.keys():
98
+ print("IMG caption REPEATED!")
99
+ assert 0==1
100
+ val_captions[item['image_id']] = item['caption']
101
+
102
+ val_labelings = {}
103
+ for item in labelings_val:
104
+ if item['image_id'] in val_labelings.keys():
105
+ print("IMG labelings REPEATED!")
106
+ assert 0==1
107
+ val_labelings[str(item['image_id'])] = item['labeling']
108
+
109
+ val_ocrs = {}
110
+ for item in ocrs_val:
111
+ if item['image_id'] in val_ocrs.keys():
112
+ print("IMG ocrs REPEATED!")
113
+ assert 0==1
114
+ val_ocrs[str(item['image_id'])] = item['ocr']
115
+
116
+
117
+ val_ofas = {}
118
+
119
+ if args.ofa=="normal":
120
+ for item in ofas_val:
121
+ if item['question_id'] in val_ofas.keys():
122
+ print("IMG ofas REPEATED!")
123
+ assert 0==1
124
+ val_ofas[str(item['question_id'])] = item['OFA_answer']+", "+evid_val[str(item['question_id'])]
125
+ elif args.ofa=="finetune":
126
+ for k in evid_val.keys():
127
+ val_ofas[k] = ofas_val[k]+", "+evid_val[k]
128
+ else:
129
+ assert 0==1
130
+
131
+
132
+
133
+ val_gpt3 = {}
134
+ for k in gpt3_val.keys():
135
+ qid = k.split("#")[1]
136
+
137
+ val_gpt3[str(qid)] = ", ".join(gpt3_val[k][0]) #[(ans, evid)]
138
+
139
+
140
+ val_wikis = wikis_val
141
+
142
+
143
+ for qid, item in val_row.items():
144
+ img_id = str(item['image_id'])
145
+ image_ids.append(img_id)
146
+ qids.append(qid)
147
+
148
+ question_clean = item['question'] # + answer_sentence
149
+ questions.append(question_clean)
150
+ if args.dataset == 'okvqa' or args.dataset == 'vqav2':
151
+ answers.append(item['multi_answers'])
152
+ if args.dataset == 'okvqa':
153
+ objects.append(item['label'])
154
+ else:
155
+ answers.append(item['answer'])
156
+
157
+
158
+
159
+ def _create_gpt3_entry(imgage_ids, q_ids, questions, answer, captions,labelings, ocrs,ofas,gpt3, wikis, final_txt):
160
+ entry = {
161
+ 'img_id': imgage_ids,
162
+ 'qid': q_ids,
163
+ 'question': questions,
164
+ 'answer': answer,
165
+ 'caption': captions,
166
+ 'labeling':labelings,
167
+ 'ocr': ocrs,
168
+ 'ofa':ofas,
169
+ 'gpt3':gpt3,
170
+ 'wiki': wikis,
171
+ 'final_txt':final_txt}
172
+ return entry
173
+
174
+
175
+ def _create_entry(imgage_ids, q_ids, questions, answer, captions,labelings, ocrs,ofas, wikis, final_txt):
176
+ entry = {
177
+ 'img_id': imgage_ids,
178
+ 'qid': q_ids,
179
+ 'question': questions,
180
+ 'answer': answer,
181
+ 'caption': captions,
182
+ 'labeling':labelings,
183
+ 'ocr': ocrs,
184
+ 'ofa':ofas,
185
+ 'wiki': wikis,
186
+ 'final_txt':final_txt}
187
+ return entry
188
+
189
+ def _load_dataset(val_row):
190
+ entries=[]
191
+ for qid, item in val_row.items():
192
+ qid = str(qid)
193
+ img_id = str(item['image_id'])
194
+ question = item['question']# + answer_sentence
195
+
196
+ if args.dataset == 'okvqa':
197
+ answers=item['multi_answers']
198
+
199
+
200
+ else:
201
+ answers=item['answer']
202
+ caption=val_captions[img_id]
203
+ labeling=val_labelings[img_id]
204
+ ocr_list=val_ocrs[img_id]
205
+ ocr = ", ".join(str(i) for i in ocr_list)
206
+ ofa=val_ofas[qid]
207
+ gpt3=val_gpt3[qid]
208
+ wiki=val_wikis[qid]
209
+
210
+ if args.seed > 1000:
211
+ print("seed > 1000 denotes that ablation study on 2 encoders")
212
+ assert args.input_type==0
213
+
214
+ if args.gpt3:
215
+ if args.input_type==0:
216
+ if args.num_wiki > 51:
217
+ final_txt = [question + " [SEP] " + ofa + " " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
218
+ else:
219
+ final_txt = [question + " [SEP] " + ofa + " " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
220
+ elif args.input_type==1:
221
+ final_txt = question + " [SEP] " + ofa + " " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
222
+ elif args.input_type==2:
223
+ if args.num_wiki > 51:
224
+ final_txt = [question + " [SEP] " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
225
+ else:
226
+ final_txt = [question + " [SEP] " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
227
+ elif args.input_type==3:
228
+ final_txt = question + " [SEP] " + gpt3 + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
229
+ else:
230
+ print('choose input-type in [0,1,2,3]')
231
+ assert 0==1
232
+
233
+ entries.append(_create_gpt3_entry(img_id, qid, question, answers, caption,labeling, ocr,ofa,gpt3, wiki, final_txt))
234
+
235
+ else:
236
+ if args.input_type==0:
237
+
238
+ if args.num_wiki > 51:
239
+ final_txt = [question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
240
+ else:
241
+ final_txt = [question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
242
+ elif args.input_type==1:
243
+ final_txt = question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
244
+ elif args.input_type==2:
245
+ if args.num_wiki > 51:
246
+ final_txt = [question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + normalize_wiki(x) for x in wiki[:args.num_wiki]]
247
+ else:
248
+ final_txt = [question + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr + " [SEP] " + x for x in wiki[:args.num_wiki]]
249
+ elif args.input_type==3: # add no extra knowledge; knowledge-ablation setting 4 (without pretraining): no knowledge, only the visual attributes
250
+ final_txt = question + " [SEP] " + ofa + " [SEP] " + caption + " [SEP] " + labeling + " [SEP] " + ocr
251
+ else:
252
+ print('choose input-type in [0,1,2,3]')
253
+ assert 0==1
254
+
255
+ entries.append(_create_entry(img_id, qid, question, answers, caption,labeling, ocr,ofa, wiki, final_txt))
256
+ return entries
257
+
258
+
259
+
260
+
261
+
262
+
263
+ class KgDatasetVal(Dataset):
264
+ def __init__(self, val=False, val_test=False):
265
+ self.entries = _load_dataset(val_row)
266
+ self.tokenize()
267
+
268
+
269
+ def __len__(self):
270
+ return len(self.entries)
271
+ def tokenize(self):
272
+ if args.input_type%2==0 : # when input_type is 0 or 2, Wiki passages are included, so the source sequence needs to be longer
273
+ if args.num_wiki > 51:
274
+ max_source_length=200
275
+ else:
276
+ max_source_length=250 #300
277
+ else:
278
+ max_source_length=128
279
+ max_target_length=5
280
+ max_que_length=16
281
+ for entry in self.entries:
282
+ T5_input_seq, T5_input_ids, T5_input_masks = self.tokenizer_func( T5tokenizer, entry['final_txt'], max_length=max_source_length)
283
+ LXM_input_seq, LXM_input_ids, LXM_input_masks = self.tokenizer_func( LXMtokenizer, entry['question'], max_length=max_que_length)
284
+ T5_target_seq, T5_target_ids, T5_target_masks = self.tokenizer_func( T5tokenizer, entry['answer'][0], max_length=max_target_length)
285
+ entry['T5_input_seq']=T5_input_seq#torch.from_numpy(np.array(T5_input_seq))
286
+ entry['T5_input_ids']=torch.from_numpy(np.array(T5_input_ids))
287
+ entry['T5_input_masks']=torch.from_numpy(np.array(T5_input_masks))
288
+ entry['LXM_input_seq']=LXM_input_seq#torch.from_numpy(np.array(LXM_input_seq))
289
+ entry['LXM_input_ids']=torch.from_numpy(np.array(LXM_input_ids))
290
+ entry['LXM_input_masks']=torch.from_numpy(np.array(LXM_input_masks))
291
+ entry['T5_target_seq']=T5_target_seq#torch.from_numpy(np.array(T5_target_seq))
292
+ entry['T5_target_ids']=torch.from_numpy(np.array(T5_target_ids))
293
+ entry['T5_target_masks']=torch.from_numpy(np.array(T5_target_masks))
294
+
295
+ def tokenizer_func(self, tokenizer, text, max_length=0):
296
+ if max_length==0:
297
+ print('plz set the max length of input sequence!')
298
+ assert 1==2
299
+
300
+ out_seq = tokenizer(
301
+ text,
302
+ padding='max_length',
303
+ max_length=max_length,
304
+ truncation=True,
305
+ # return_tensors="pt",
306
+ )
307
+
308
+ tokens=out_seq.input_ids #['input_ids']
309
+ masks=out_seq.attention_mask
310
+ length = len(tokens)
311
+ return out_seq, tokens, masks
312
+
313
+ def __getitem__(self, index):
314
+ entry = self.entries[index]
315
+ qid=entry['qid']
316
+ question=entry['question']
317
+ answer=entry['answer']
318
+ img_id=entry['img_id']
319
+
320
+ image_feature = pretrain_feature[img_id]['feats']
321
+
322
+ image_caption = entry['caption']
323
+ image_labeling = entry['labeling']
324
+ image_ocr_list = entry['ocr']
325
+ image_ocr = ", ".join(str(i) for i in image_ocr_list)
326
+ ofa = entry['ofa']
327
+ if args.gpt3:
328
+ gpt3 = entry['gpt3']
329
+ wiki = entry['wiki']
330
+ final_txt = entry['final_txt']
331
+
332
+
333
+ spatial_feature = pretrain_feature[img_id]['sp_feats']
334
+ T5_input_seq, T5_input_ids, T5_input_masks = entry['T5_input_seq'], entry['T5_input_ids'], entry['T5_input_masks']#self.tokenizer_func( T5tokenizer, final_txt, max_length=max_source_length)
335
+ LXM_input_seq, LXM_input_ids, LXM_input_masks = entry['LXM_input_seq'], entry['LXM_input_ids'], entry['LXM_input_masks']
336
+ LXM_token_type_ids = torch.from_numpy(np.array(LXM_input_seq['token_type_ids']))#.to(device)
337
+ T5_target_seq, T5_target_ids, T5_target_masks=entry['T5_target_seq'],entry['T5_target_ids'],entry['T5_target_masks']
338
+
339
+
340
+
341
+ if args.gpt3:
342
+ return qid, question, answer, image_feature, spatial_feature, image_caption, image_labeling, image_ocr, ofa, gpt3, wiki, final_txt, T5_input_seq,T5_input_ids,T5_input_masks,LXM_input_ids,LXM_input_masks,LXM_token_type_ids,T5_target_seq,T5_target_ids,T5_target_masks
343
+ elif not args.gpt3:
344
+ return qid, question, answer, image_feature, spatial_feature, image_caption, image_labeling, image_ocr, ofa, wiki, final_txt, T5_input_seq,T5_input_ids,T5_input_masks,LXM_input_ids,LXM_input_masks,LXM_token_type_ids,T5_target_seq,T5_target_ids,T5_target_masks
345
+
346
+
code/dist_train.py ADDED
@@ -0,0 +1,75 @@
1
+ import io
2
+ import os
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+
7
+
8
+ _print = print
9
+
10
+
11
+ def get_world_size(): return int(os.getenv('WORLD_SIZE', 1))
12
+ def get_rank(): return int(os.getenv('RANK', 0))
13
+ def get_local_rank(): return int(os.getenv('LOCAL_RANK', 0))
14
+
15
+
16
+ def is_dist():
17
+ return dist.is_available() and dist.is_initialized() and get_world_size() > 1
18
+
19
+
20
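+ # Override the built-in print: in distributed runs only local rank 0 prints (unless all=True), and output is prefixed with the global rank.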
+ def print(*argc, all=False, **kwargs):
21
+ if not is_dist():
22
+ _print(*argc, **kwargs)
23
+ return
24
+
25
+ if not all and get_local_rank() != 0:
26
+ return
27
+
28
+ output = io.StringIO()
29
+ kwargs['end'] = ''
30
+ kwargs['file'] = output
31
+ kwargs['flush'] = True
32
+ _print(*argc, **kwargs)
33
+
34
+ s = output.getvalue()
35
+ output.close()
36
+
37
+ s = '[rank {}] {}'.format(dist.get_rank(), s)
38
+ _print(s)
39
+
40
+
41
+ def reduce_mean(tensor, nprocs=None):
42
+ if not is_dist():
43
+ return tensor
44
+ if not isinstance(tensor, torch.Tensor):
45
+ device = torch.cuda.current_device()
46
+ rt = torch.tensor(tensor, device=device)
47
+ else:
48
+ rt = tensor.clone()
49
+ dist.all_reduce(rt, op=dist.ReduceOp.SUM)
50
+ nprocs = nprocs if nprocs else dist.get_world_size()
51
+ rt = rt / nprocs
52
+ if not isinstance(tensor, torch.Tensor):
53
+ rt = rt.item()
54
+ return rt
55
+
56
+
57
+
58
+ def reduce_sum(tensor):
59
+ if not is_dist():
60
+ return tensor
61
+ if not isinstance(tensor, torch.Tensor):
62
+ device = torch.cuda.current_device()
63
+ rt = torch.tensor(tensor, device=device)
64
+ else:
65
+ rt = tensor.clone()
66
+ dist.all_reduce(rt, op=dist.ReduceOp.SUM)
67
+ if not isinstance(tensor, torch.Tensor):
68
+ rt = rt.item()
69
+ return rt
70
+
71
+
72
+ def barrier():
73
+ if not is_dist():
74
+ return
75
+ dist.barrier()
code/model_LXM2T5.py ADDED
@@ -0,0 +1,95 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch.nn import init
5
+ import copy
6
+ from config4LXMT5_DDP import args
7
+ import collections
8
+ from transformers import LxmertConfig, LxmertTokenizer, LxmertModel,BertTokenizer#,BaseModelOutputWithPastAndCrossAttentions
9
+ from transformers import T5Tokenizer, T5Model, T5Config, T5ForConditionalGeneration
10
+ from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
11
+ T5tokenizer = T5Tokenizer.from_pretrained("../model/t5-large")#"t5-large")
12
+ LXMtokenizer = BertTokenizer.from_pretrained('../model/bert-base-uncased/vocab.txt')
13
+ T5config = T5Config.from_pretrained('../model/t5-large')
14
+
15
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
18
+
19
+
20
+
21
+
22
+ class LXMT52T5(nn.Module):
23
+ def __init__(self):
24
+ super(LXMT52T5, self).__init__()
25
+ self.T5model = T5ForConditionalGeneration.from_pretrained("../model/t5-large").to(device)
26
+ self.LXMmodel = LxmertModel.from_pretrained('../model/lxmert-base-uncased').to(device)
27
+ self.mapping = torch.nn.Sequential(
28
+ torch.nn.Linear(768, 1024),
29
+ torch.nn.ReLU(inplace=True),
30
+ torch.nn.Linear(1024, 1024)
31
+ )
32
+
33
+
34
+
35
+ def LXMT5end2T5dec(self, train=None, LXM_source_ids=None, LXM_source_masks=None,T5_source_ids=None, T5_source_masks=None,token_type_ids=None, visual_features=None, spatial_features=None,T5_target_ids=None,T5_target_masks=None):
36
+
37
+ if 1:
38
+ LXM_encoder_output_seq = self.LXMmodel(input_ids=LXM_source_ids, attention_mask=LXM_source_masks, token_type_ids=token_type_ids, visual_feats=visual_features, visual_pos=spatial_features)
39
+ LXM_lang_enc_out = LXM_encoder_output_seq.language_output
40
+ LXM_visual_enc_out = LXM_encoder_output_seq.vision_output
41
+
42
+ LXM_VL_encoder_output_seq = torch.cat((LXM_lang_enc_out, LXM_visual_enc_out),1)
+ final_LXM_encoder_output_seq = self.mapping(LXM_VL_encoder_output_seq) # project the 768-d LXMERT states to the 1024-d T5 hidden size (used below; mirrors model_ViB2T5.py)
43
+
44
+
45
+
46
+
47
+
48
+
49
+ #if 1: # (w/o wiki passages)
50
+ # T5_encoder_output_seq = self.T5model.encoder(input_ids=T5_source_ids, attention_mask=T5_source_masks)
51
+ # final_encoder_output_seq = torch.cat((final_LXM_encoder_output_seq, T5_encoder_output_seq["last_hidden_state"]),1)
52
+
53
+
54
+ if 1: # (w/ wiki passages)
55
+ final_encoder_output_seq_list = []
56
+ final_T5_encoder_output_seq_list = []
57
+
58
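+ # Encode each Wiki passage with the T5 encoder, prepend the mapped LXMERT multimodal states to each, and concatenate everything along the sequence dimension as the encoder states fed to the T5 decoder.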
+ for ind in range(args.num_wiki):
59
+ T5_encoder_output_seq = self.T5model.encoder(input_ids=T5_source_ids[:,ind,:], attention_mask=T5_source_masks[:,ind,:])
60
+ #if 1: #(T5 encoder only)
61
+ # final_T5_encoder_output_seq_list.append(T5_encoder_output_seq["last_hidden_state"])
62
+ tmp_encoder_output_seq = torch.cat((final_LXM_encoder_output_seq, T5_encoder_output_seq["last_hidden_state"]),1)
63
+ final_encoder_output_seq_list.append(tmp_encoder_output_seq)
64
+ final_encoder_output_seq = torch.cat(final_encoder_output_seq_list,1)
65
+
66
+ # ablation study on two encoders
67
+ # LXMERTenc-T5dec
68
+ final_encoder_output_seq = final_LXM_encoder_output_seq
69
+ # T5enc-T5dec
70
+ final_encoder_output_seq = torch.cat(final_T5_encoder_output_seq_list,1)
71
+
72
+
73
+
74
+
75
+
76
+
77
+ my_order_dict=T5_encoder_output_seq
78
+ # replace the origin order_dict with our designed final_encoder_output_seq
79
+ my_order_dict.last_hidden_state=final_encoder_output_seq
80
+
81
+ if train:
82
+ if args.allAns:
83
+ outputs = self.T5model(encoder_outputs=my_order_dict, labels=T5_target_ids, decoder_attention_mask=T5_target_masks)
84
+ else:
85
+ outputs = self.T5model(encoder_outputs=my_order_dict, labels=T5_target_ids, decoder_attention_mask=T5_target_masks)
86
+ return outputs
87
+ else:
88
+ if torch.cuda.device_count() > 1:
89
+ pred = self.T5model.generate(encoder_outputs=my_order_dict)
90
+ else:
91
+ pred = self.T5model.generate(encoder_outputs=my_order_dict)
92
+ return pred
93
+
94
+ def forward(self, train=None, LXM_source_ids=None, LXM_source_masks=None,T5_source_ids=None, T5_source_masks=None,token_type_ids=None, visual_features=None, spatial_features=None,T5_target_ids=None,T5_target_masks=None):
95
+ return self.LXMT5end2T5dec(train, LXM_source_ids, LXM_source_masks, T5_source_ids, T5_source_masks, token_type_ids, visual_features, spatial_features, T5_target_ids, T5_target_masks)
code/model_ViB2T5.py ADDED
@@ -0,0 +1,68 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch.nn import init
5
+ import copy
6
+ from config4LXMT5_DDP import args
7
+ import collections
8
+ from transformers import LxmertConfig, LxmertTokenizer, LxmertModel,BertTokenizer
9
+ from transformers import T5Tokenizer, T5Model, T5Config, T5ForConditionalGeneration
10
+ from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
11
+ T5tokenizer = T5Tokenizer.from_pretrained("../model/t5-large")#"t5-large")
12
+ LXMtokenizer = BertTokenizer.from_pretrained('../model/bert-base-uncased/vocab.txt')
13
+ T5config = T5Config.from_pretrained('../model/t5-large')
14
+ from transformers import VisualBertConfig, VisualBertModel
15
+
16
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
17
+
18
+ class ViBT52T5(nn.Module):
19
+ def __init__(self):
20
+ super(ViBT52T5, self).__init__()
21
+ self.T5model = T5ForConditionalGeneration.from_pretrained("../model/t5-large").to(device)
22
+ self.ViBmodel = VisualBertModel.from_pretrained('../model/visualBERT').to(device)
23
+ self.mapping = torch.nn.Sequential(
24
+ torch.nn.Linear(768, 1024),
25
+ torch.nn.ReLU(inplace=True),
26
+ torch.nn.Linear(1024, 1024)
27
+ )
28
+
29
+
30
+ def LXMT5end2T5dec(self, train=None, LXM_source_ids=None, LXM_source_masks=None,T5_source_ids=None, T5_source_masks=None,token_type_ids=None, visual_features=None, spatial_features=None,T5_target_ids=None,T5_target_masks=None):
31
+ if 1:
32
+
33
+ ViB_encoder_output_seq = self.ViBmodel(input_ids=LXM_source_ids, attention_mask=LXM_source_masks,token_type_ids=token_type_ids, visual_embeds=visual_features)
34
+ ViB_VL_encoder_output_seq = ViB_encoder_output_seq[0]
35
+ final_ViB_encoder_output_seq = self.mapping(ViB_VL_encoder_output_seq)
36
+
37
+
38
+
39
+
40
+ # w/o wiki passages
41
+ #T5_encoder_output_seq = self.T5model.encoder(input_ids=T5_source_ids, attention_mask=T5_source_masks)
42
+ #final_encoder_output_seq = torch.cat((final_ViB_encoder_output_seq, T5_encoder_output_seq["last_hidden_state"]),1)
43
+
44
+
45
+ # w/ wiki passages
46
+ if 1:
47
+ final_encoder_output_seq_list = []
48
+ for ind in range(args.num_wiki):
49
+ T5_encoder_output_seq = self.T5model.encoder(input_ids=T5_source_ids[:,ind,:], attention_mask=T5_source_masks[:,ind,:])
50
+ tmp_encoder_output_seq = torch.cat((final_ViB_encoder_output_seq, T5_encoder_output_seq["last_hidden_state"]),1)
51
+ final_encoder_output_seq_list.append(tmp_encoder_output_seq)
52
+ final_encoder_output_seq = torch.cat(final_encoder_output_seq_list,1)
53
+ my_order_dict=T5_encoder_output_seq
54
+ my_order_dict.last_hidden_state=final_encoder_output_seq
55
+
56
+ if train:
57
+ outputs = self.T5model(encoder_outputs=my_order_dict, labels=T5_target_ids, decoder_attention_mask=T5_target_masks)
58
+ return outputs
59
+ else:
60
+ if torch.cuda.device_count() > 1:
61
+ pred = self.T5model.generate(encoder_outputs=my_order_dict)
62
+ else:
63
+ pred = self.T5model.generate(encoder_outputs=my_order_dict)
64
+ return pred
65
+
66
+
67
+ def forward(self, train=None, LXM_source_ids=None, LXM_source_masks=None,T5_source_ids=None, T5_source_masks=None,token_type_ids=None, visual_features=None, spatial_features=None,T5_target_ids=None,T5_target_masks=None):
68
+ return self.LXMT5end2T5dec(train, LXM_source_ids, LXM_source_masks, T5_source_ids, T5_source_masks, token_type_ids, visual_features, spatial_features, T5_target_ids, T5_target_masks)
code/run_DDP_finetune.sh ADDED
@@ -0,0 +1,62 @@
1
+ #!/bin/bash
2
+ #!/usr/bin/env bash
3
+ #!/bin/sh
4
+ export load_pthpath=${10}
5
+ export pre_epo=${11}
6
+ export load_pthmodel=$load_pthpath/model_for_epoch_$pre_epo.pth
7
+
8
+ export NCCL_P2P_LEVEL=NVL
9
+ cd /opt/tiger/okvqa
10
+ export dataset=$1
11
+
12
+ export model_dir=$2
13
+ mkdir $model_dir
14
+ mkdir $load_pthpath
15
+
16
+
17
+ echo "$1, $2, $3, $4, $5, $6, $7, $8, $9, ${10}, ${11}, ${12}"
18
+ echo "dataset $1, model dir $2, input type $3, describe $4, lr $5, lr_LXM $6, batch_size $7, wiki num $8, gpu_num $9, load path ${10}, pre_epo ${11}, seed ${12}"
19
+
20
+
21
+ export input_type=$3
22
+ export describe=$4
23
+ export lr=$5
24
+ export lr_LXM=$6
25
+ export batch_size=$7
26
+ export wiki_num=$8
27
+ export gpu_num=$9
28
+ export seed=${12}
29
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
30
+ port=${ports[0]}
31
+
32
+ echo "total workers: ${ARNOLD_WORKER_NUM}"
33
+ echo "cur worker id: ${ARNOLD_ID}"
34
+ echo "gpus per worker: ${ARNOLD_WORKER_GPU}"
35
+ echo "master ip: ${METIS_WORKER_0_HOST}"
36
+ echo "master port: ${port}"
37
+
38
+
39
+
40
+ export OMP_NUM_THREADS=8
41
+ export NCCL_IB_DISABLE=0
42
+ export NCCL_IB_GID_INDEX=3
43
+ export NCCL_IB_HCA=${ARNOLD_RDMA_DEVICE}
44
+ export NCCL_SOCKET_IFNAME=eth0
45
+
46
+
47
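+ # Launch one training process per GPU on every worker; the ARNOLD_*/METIS_* variables are provided by the cluster scheduler environment.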
+ python3 -m torch.distributed.launch --nproc_per_node $gpu_num \
48
+ --nnodes=${ARNOLD_WORKER_NUM} --node_rank=${ARNOLD_ID} --master_addr=${METIS_WORKER_0_HOST} --master_port ${port} \
49
+ train4LXMT5_DDP.py \
50
+ --dataset $dataset \
51
+ --model_dir $model_dir \
52
+ --input_type $input_type \
53
+ --describe $describe \
54
+ --learning_rate $lr \
55
+ --learning_rate_LXM $lr_LXM \
56
+ --validate \
57
+ --gpt3 \
58
+ --ofa finetune \
59
+ --batch_size $batch_size \
60
+ --load_pthpath $load_pthmodel \
61
+ --num_wiki $wiki_num \
62
+ --seed $seed
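The twelve positional arguments map exactly to the names echoed above (dataset, model dir, input type, describe, lr, lr_LXM, batch size, wiki num, gpus per node, pre-trained checkpoint dir, pre-training epoch, seed). As a rough sketch only (the dataset name, paths, and hyper-parameter values below are illustrative assumptions, and the METIS_* / ARNOLD_* variables are expected to be supplied by the cluster environment), a single-worker launch could look like:

bash run_DDP_finetune.sh okvqa /path/to/finetune_output 0 ft_gpt3_wiki 1e-4 1e-5 4 25 8 /path/to/pretrain_output 0 42

Argument ${10} is the directory holding the model_for_epoch_${11}.pth checkpoint written by a pre-training run; run_DDP_finetune_visualBERT.sh below takes the same twelve arguments.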
code/run_DDP_finetune_visualBERT.sh ADDED
@@ -0,0 +1,66 @@
+ #!/bin/bash
+ export load_pthpath=${10}
+ export pre_epo=${11}
+ export load_pthmodel=$load_pthpath/model_for_epoch_$pre_epo.pth
+
+ export NCCL_P2P_LEVEL=NVL
+ cd /opt/tiger/okvqa
+ export dataset=$1
+
+ export model_dir=$2
+ mkdir $model_dir
+ mkdir $load_pthpath
+
+ echo "hdfs done"
+ echo "$1, $2, $3, $4, $5, $6, $7, $8, $9, ${10}, ${11}, ${12}"
+ echo "dataset $1, model dir $2, input type $3, describe $4, lr $5, lr_LXM $6, batch_size $7, wiki num $8, gpu_num $9, load path ${10}, pre_epo ${11}, seed ${12}"
+
+ export input_type=$3
+ # model name to save
+ export describe=$4
+ export lr=$5
+ export lr_LXM=$6
+
+ export batch_size=$7
+ export wiki_num=$8
+ export gpu_num=$9
+ export seed=${12}
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
+ port=${ports[0]}
+
+ echo "total workers: ${ARNOLD_WORKER_NUM}"
+ echo "cur worker id: ${ARNOLD_ID}"
+ echo "gpus per worker: ${ARNOLD_WORKER_GPU}"
+ echo "master ip: ${METIS_WORKER_0_HOST}"
+ echo "master port: ${port}"
+
+ export OMP_NUM_THREADS=8
+ export NCCL_IB_DISABLE=0
+ export NCCL_IB_GID_INDEX=3
+ export NCCL_IB_HCA=${ARNOLD_RDMA_DEVICE}
+ export NCCL_SOCKET_IFNAME=eth0
+
+ python3 -m torch.distributed.launch --nproc_per_node $gpu_num \
+     --nnodes=${ARNOLD_WORKER_NUM} --node_rank=${ARNOLD_ID} --master_addr=${METIS_WORKER_0_HOST} --master_port ${port} \
+     train4LXMT5_jiqun_wiki_DDP_multiVal_GPT3.py \
+     --dataset $dataset \
+     --model_dir $model_dir \
+     --input_type $input_type \
+     --describe $describe \
+     --learning_rate $lr \
+     --learning_rate_LXM $lr_LXM \
+     --validate \
+     --gpt3 \
+     --ofa finetune \
+     --batch_size $batch_size \
+     --load_pthpath $load_pthmodel \
+     --num_wiki $wiki_num \
+     --seed $seed \
+     --visualBERT
+
code/run_DDP_pretrain.sh ADDED
@@ -0,0 +1,49 @@
+ #!/bin/bash
+
+ export NCCL_P2P_LEVEL=NVL
+ echo "dataset $1, model dir $2, input type $3, describe $4, lr $5, lr_LXM $6, batch size $7, wiki num $8, gpu_num $9 "
+
+ export dataset=$1
+ export model_dir=$2
+ mkdir $model_dir
+ export input_type=$3
+ # model name to save
+ export describe=$4
+ export lr=$5
+ export lr_LXM=$6
+ export batch_size=$7
+ # export port=$7
+ export wiki_num=$8
+ export gpu_num=$9
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
+ port=${ports[0]}
+
+ echo "total workers: ${ARNOLD_WORKER_NUM}"
+ echo "cur worker id: ${ARNOLD_ID}"
+ echo "gpus per worker: ${ARNOLD_WORKER_GPU}"
+ echo "master ip: ${METIS_WORKER_0_HOST}"
+ echo "master port: ${port}"
+
+ export OMP_NUM_THREADS=8
+ export NCCL_IB_DISABLE=0
+ export NCCL_IB_GID_INDEX=3
+ export NCCL_IB_HCA=${ARNOLD_RDMA_DEVICE}
+ export NCCL_SOCKET_IFNAME=eth0
+
+ python3 -m torch.distributed.launch --nproc_per_node $gpu_num \
+     --nnodes=${ARNOLD_WORKER_NUM} --node_rank=${ARNOLD_ID} --master_addr=${METIS_WORKER_0_HOST} --master_port ${port} \
+     train4LXMT5_DDP.py \
+     --dataset $dataset \
+     --model_dir $model_dir \
+     --input_type $input_type \
+     --describe $describe \
+     --learning_rate $lr \
+     --learning_rate_LXM $lr_LXM \
+     --validate \
+     --batch_size $batch_size \
+     --num_wiki $wiki_num \
+     --pretrain
+
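The pre-training launcher takes nine positional arguments in the order echoed above. A hedged example invocation (the dataset placeholder and all numeric values are illustrative assumptions, not taken from the repository):

bash run_DDP_pretrain.sh <dataset> /path/to/pretrain_output 0 pretrain_wiki 1e-4 1e-5 4 25 8

Each epoch writes $2/model_for_epoch_<epoch>.pth, which is the checkpoint the fine-tuning scripts later point their ${10}/${11} arguments at; run_DDP_pretrain_visualBERT.sh below accepts the same nine arguments and only adds the --visualBERT flag.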
code/run_DDP_pretrain_visualBERT.sh ADDED
@@ -0,0 +1,47 @@
+ #!/bin/bash
+
+ export NCCL_P2P_LEVEL=NVL
+ echo "dataset $1, model dir $2, input type $3, describe $4, lr $5, lr_LXM $6, batch size $7, wiki num $8, gpu_num $9 "
+ export dataset=$1
+ export model_dir=$2
+ mkdir $model_dir
+ export input_type=$3
+ # model name to save
+ export describe=$4
+ export lr=$5
+ export lr_LXM=$6
+ export batch_size=$7
+ # export port=$7
+ export wiki_num=$8
+ export gpu_num=$9
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
+ port=${ports[0]}
+
+ echo "total workers: ${ARNOLD_WORKER_NUM}"
+ echo "cur worker id: ${ARNOLD_ID}"
+ echo "gpus per worker: ${ARNOLD_WORKER_GPU}"
+ echo "master ip: ${METIS_WORKER_0_HOST}"
+ echo "master port: ${port}"
+
+ export OMP_NUM_THREADS=8
+ export NCCL_IB_DISABLE=0
+ export NCCL_IB_GID_INDEX=3
+ export NCCL_IB_HCA=${ARNOLD_RDMA_DEVICE}
+ export NCCL_SOCKET_IFNAME=eth0
+
+ python3 -m torch.distributed.launch --nproc_per_node $gpu_num \
+     --nnodes=${ARNOLD_WORKER_NUM} --node_rank=${ARNOLD_ID} --master_addr=${METIS_WORKER_0_HOST} --master_port ${port} \
+     train4LXMT5_DDP.py \
+     --dataset $dataset \
+     --model_dir $model_dir \
+     --input_type $input_type \
+     --describe $describe \
+     --learning_rate $lr \
+     --learning_rate_LXM $lr_LXM \
+     --validate \
+     --batch_size $batch_size \
+     --num_wiki $wiki_num \
+     --visualBERT \
+     --pretrain
code/test4LXMT5.py ADDED
@@ -0,0 +1,53 @@
+ import json
+ import numpy as np
+ import torch
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+ # args and the validation collate function come from the shared config/dataset modules used by the training scripts
+ from config4LXMT5_DDP import args
+ from dataset4LXMT5 import my_val_collate
+ from dataset_val4LXMT5 import KgDatasetVal
+ from model_LXM2T5 import T5tokenizer, LXMT52T5, LXMtokenizer
+
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ # load a fine-tuned checkpoint (path left as a placeholder)
+ model = LXMT52T5()
+ model.load_state_dict(torch.load("xxxx.pth", map_location=device))
+ model = model.to(device)
+
+ test_dataset = KgDatasetVal(val=False)
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
+                              num_workers=0, collate_fn=my_val_collate)
+
+ model.eval()
+ preds_list = []
+ answers_list = []
+ id2pred_list = {}
+ for batch_data in tqdm(test_dataloader):
+     with torch.no_grad():
+         val_T5_input_id = torch.stack(batch_data['T5_input_ids']).to(device)
+         val_T5_input_mask = torch.stack(batch_data['T5_input_masks']).to(device)
+         val_visual_faetures = torch.tensor(np.array(batch_data['img'])).float().to(device)
+         val_spatial_features = torch.tensor(np.array(batch_data['spatial'])).float().to(device)
+
+         val_LXM_input_id = torch.stack(batch_data['LXM_input_ids']).to(device)
+         val_LXM_input_mask = torch.stack(batch_data['LXM_input_masks']).to(device)
+         val_LXM_token_type_ids = torch.stack(batch_data['LXM_token_type_ids']).to(device)
+
+         val_outputs = model(train=False, LXM_source_ids=val_LXM_input_id, LXM_source_masks=val_LXM_input_mask, T5_source_ids=val_T5_input_id, T5_source_masks=val_T5_input_mask, token_type_ids=val_LXM_token_type_ids, visual_features=val_visual_faetures, spatial_features=val_spatial_features, T5_target_ids=None)
+
+         val_list_predict = T5tokenizer.batch_decode(val_outputs, skip_special_tokens=True)
+
+         for i, pre in enumerate(batch_data['ans']):
+             preds_list.append(val_list_predict[i])
+             answers_list.append(batch_data['ans'][i])
+             id2pred_list[str(batch_data['id'][i])] = val_list_predict[i]
+
+ f = open("file_to_save.json", 'w')
+ json.dump(id2pred_list, f)
+ f.close()
+
code/train4LXMT5_DDP.py ADDED
@@ -0,0 +1,470 @@
1
+ #!user/bin/env python
2
+ # -*- coding:utf-8 -*-
3
+ import argparse
4
+ import json
5
+ import os
6
+ import datetime
7
+ import pickle
8
+ import random
9
+ import torch
10
+ import torch.backends.cudnn as cudnn
11
+ import torch.distributed as dist
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ import torch.optim as optim
15
+ from bisect import bisect
16
+ from math import fabs
17
+ from torch.optim import lr_scheduler
18
+ from torch.utils.data import DataLoader
19
+ from tqdm import tqdm
20
+ from transformers import LxmertTokenizer
21
+ from dist_train import get_world_size, get_rank, get_local_rank, barrier, reduce_sum
22
+
23
+ import numpy as np
24
+ from transformers.tokenization_utils_base import ENCODE_KWARGS_DOCSTRING
25
+ from config4LXMT5_DDP import args
26
+
27
+ from dataset4LXMT5 import KgDataset, my_collate, my_val_gpt3_collate, my_val_collate
28
+ from dataset_val4LXMT5 import KgDatasetVal
29
+
30
+ if args.visualBERT:
31
+ from model_ViB2T5 import T5tokenizer, ViBT52T5, LXMtokenizer
32
+ else:
33
+ from model_LXM2T5 import T5tokenizer, LXMT52T5, LXMtokenizer
34
+
35
+ from transformers import get_linear_schedule_with_warmup
36
+ from transformers import LxmertConfig, LxmertTokenizer, LxmertModel,BertTokenizer
37
+
38
+ dist.init_process_group(backend='nccl',timeout=datetime.timedelta(seconds=5400))
39
+ torch.cuda.set_device(args.local_rank)
40
+
41
+
42
+ # LR = 1e-5
43
+ LR = args.learning_rate
44
+ LR_LXM = args.learning_rate_LXM
45
+ # LR = 1e-4
46
+
47
+ torch.multiprocessing.set_sharing_strategy('file_system')
48
+
49
+ torch.cuda.set_device(get_local_rank())
50
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
51
+
52
+ def reduce_tensor(tensor: torch.Tensor):
53
+ rt = tensor.clone().float()
54
+ dist.all_reduce(rt,op=dist.ReduceOp.SUM)
55
+ rt /= dist.get_world_size()#.float()
56
+ return rt
57
+
58
+ def set_seed(rank):
59
+ random.seed(args.seed+rank)
60
+ np.random.seed(args.seed+rank)
61
+ torch.manual_seed(args.seed+rank)
62
+ torch.cuda.manual_seed(args.seed+rank)
63
+ torch.cuda.manual_seed_all(args.seed+rank)
64
+ torch.backends.cudnn.deterministic = True
65
+
66
+ set_seed(get_rank())
67
+
68
+
69
+
70
+
71
+
72
+ def cal_acc_multi(ground_truth, preds, return_id = False):
73
+ all_num = len(ground_truth)
74
+ acc_num = 0
75
+ ids = []
76
+ temp = []
77
+ for i, answer_id in enumerate(ground_truth):
78
+ pred = preds[i]
79
+ # ids.append([i, int(pred)])
80
+ cnt = 0
81
+ for aid in answer_id:
82
+ if pred == aid:
83
+ cnt += 1
84
+ if cnt ==1:
85
+ acc_num += 1/3
86
+ elif cnt == 2:
87
+ acc_num += 2/3
88
+ elif cnt > 2:
89
+ acc_num += 1
90
+
91
+ if return_id:
92
+ return acc_num / all_num, ids
93
+ else:
94
+ return acc_num, all_num
95
+
96
+ def cal_acc(ground_truth, preds, return_id = False):
97
+ all_num = len(ground_truth)
98
+ acc_num = 0
99
+ ids = []
100
+ temp = []
101
+ for i, answer_id in enumerate(ground_truth):
102
+ pred = preds[i]
103
+ # ids.append([i, int(pred)])
104
+ cnt = 0
105
+ for aid in answer_id:
106
+ if pred == aid:
107
+ acc_num += 1
108
+ if return_id:
109
+ return acc_num / all_num, ids
110
+ else:
111
+ return acc_num, all_num
112
+
113
+
114
+ def train():
115
+ if not args.describe:
116
+ print('please set the description for the saved-model name! use --describe !')
117
+ assert 1==0
118
+ else:
119
+ model_name=args.describe
120
+ if not args.pretrain:
121
+ train_dataset = KgDataset(val=False)
122
+ train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
123
+ train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, sampler=train_sampler,#shuffle=True,
124
+ num_workers=0, collate_fn=my_collate)#, pin_memory=True)
125
+
126
+ if args.validate:
127
+ test_dataset = KgDatasetVal(val=False)
128
+ if args.gpt3:
129
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
130
+ num_workers=0, collate_fn=my_val_gpt3_collate)
131
+ elif not args.gpt3:
132
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
133
+ num_workers=0, collate_fn=my_val_collate)
134
+ else:
135
+ train_dataset = KgDataset(val=False)
136
+
137
+ train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
138
+ train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,#pin_memory=True,
139
+ num_workers=0, collate_fn=my_collate, sampler=train_sampler)
140
+ if args.validate:
141
+ test_dataset = KgDatasetVal(val=False)
142
+ if args.gpt3:
143
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size,
144
+ num_workers=0, collate_fn=my_val_gpt3_collate, shuffle=False)#sampler=test_sampler)
145
+ elif not args.gpt3:
146
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
147
+ num_workers=0, collate_fn=my_val_collate)
148
+
149
+ if args.pretrain:
150
+ if get_rank() == 0:
151
+ print('pre-training!')
152
+ if args.visualBERT:
153
+ model= ViBT52T5()
154
+ else:
155
+ model = LXMT52T5()
156
+ else:
157
+ if get_rank() == 0:
158
+ print('fine-tuning!')
159
+ if args.visualBERT:
160
+ model = ViBT52T5()
161
+ else:
162
+ model = LXMT52T5()
163
+
164
+ model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
165
+ model = model.to(device)
166
+ if get_world_size() > 1:
167
+ if get_rank() == 0:
168
+ print("Let's use", get_world_size(), "GPUs!")
169
+ model = nn.parallel.DistributedDataParallel(model, device_ids=[get_local_rank()], output_device=get_local_rank(),find_unused_parameters=True)
170
+
171
+ print(model.named_modules)
172
+ if get_world_size() > 1:
173
+ if args.visualBERT:
174
+ optimizer = optim.AdamW([
175
+ {'params': model.module.T5model.parameters(), 'lr': LR},
176
+ {'params': model.module.ViBmodel.parameters(), 'lr': LR_LXM},
177
+ {'params': model.module.mapping.parameters(), 'lr': LR_LXM},
178
+ ])
179
+ else:
180
+ optimizer = optim.AdamW([
181
+ {'params': model.module.T5model.parameters(), 'lr': LR},
182
+ {'params': model.module.LXMmodel.parameters(), 'lr': LR_LXM},
183
+ {'params': model.module.mapping.parameters(), 'lr': LR_LXM},
184
+
185
+ ])
186
+ else:
187
+ if args.visualBERT:
188
+ optimizer = optim.AdamW([
189
+ {'params': model.T5model.parameters(), 'lr': LR},
190
+ {'params': model.ViBmodel.parameters(), 'lr': LR_LXM},
191
+ {'params': model.mapping.parameters(), 'lr': LR_LXM},
192
+ ])
193
+ else:
194
+ optimizer = optim.AdamW([
195
+ {'params': model.T5model.parameters(), 'lr': LR},
196
+ {'params': model.LXMmodel.parameters(), 'lr': LR_LXM},
197
+ {'params': model.mapping.parameters(), 'lr': LR_LXM},
198
+ ])
199
+
200
+ if args.pretrain:
201
+ steps_num = 100000 # batch_size should be set small
202
+ else:
203
+ steps_num = 4000
204
+
205
+
206
+
207
+ args.num_epochs = steps_num // (len(train_dataset) / (args.batch_size * get_world_size())) \
208
+ if len(train_dataset) % args.batch_size == 0 \
209
+ else (steps_num // (len(train_dataset) / (args.batch_size * get_world_size())) )+1
210
+ args.num_epochs = int(args.num_epochs)
211
+
212
+ if get_rank() == 0:
213
+ print('total_epoch', args.num_epochs)
214
+ print('total_steps', "we set steps=",steps_num)
215
+ print('warmup_steps', int(steps_num/10)) #0.05*total_steps)
216
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(steps_num/10), #0.01 * total_steps,
217
+ num_training_steps=steps_num)
218
+
219
+
220
+ if args.load_pthpath == "":
221
+ start_epoch = 0
222
+ else:
223
+ if get_rank() == 0:
224
+ print('load model')
225
+ start_epoch = 0
226
+
227
+ if get_world_size() > 1:
228
+ model.module.load_state_dict(torch.load(args.load_pthpath))
229
+ else:
230
+ model.load_state_dict(torch.load(args.load_pthpath))
231
+
232
+
233
+ best_acc_t = 0
234
+ best_epoch_t = 0
235
+ best_acc_t3 = 0
236
+ step_ind = 0
237
+
238
+ for epoch in range(start_epoch, args.num_epochs):
239
+ train_preds_trip = []
240
+ train_sampler.set_epoch(epoch)
241
+ train_answers_trip = []
242
+ s=0
243
+ for batch_data in tqdm(train_dataloader):
244
+ step_ind+=1
245
+ if get_rank()==0:
246
+ print("step_ind",step_ind)
247
+ s=s+1
248
+ visual_faetures = torch.from_numpy(np.array(batch_data['img'], dtype=float)).float().to(device)
249
+ spatial_features = torch.tensor(np.array(batch_data['spatial'])).float().to(device)
250
+ if 1:
251
+ T5_input_id = torch.stack(batch_data['T5_input_ids']).to(device)
252
+ T5_input_mask = torch.stack(batch_data['T5_input_masks']).to(device)
253
+
254
+
255
+ LXM_input_id = torch.stack(batch_data['LXM_input_ids']).to(device)
256
+ LXM_input_mask = torch.stack(batch_data['LXM_input_masks']).to(device)
257
+ LXM_token_type_ids = torch.stack(batch_data['LXM_token_type_ids']).to(device)
258
+
259
+ T5_target_id = torch.stack(batch_data['T5_target_ids']).to(device)
260
+
261
+ neg100 = torch.ones_like(T5_target_id)*(-100)
262
+ T5_target_id = torch.where(T5_target_id==T5tokenizer.pad_token_id,neg100, T5_target_id)
263
+
264
+
265
+
266
+
267
+ model.zero_grad()
268
+
269
+
270
+ optimizer.zero_grad()
271
+ if args.pretrain:
272
+ outputs = model(train=True, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)#,T5_target_masks=None
273
+ else:
274
+ outputs = model(train=True, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)#,T5_target_masks=None
275
+ loss = outputs.loss
276
+
277
+ loss_stat = torch.mean(loss.detach()).item()
278
+
279
+ if get_rank() == 0:
280
+ print("loss on GPU0", loss_stat)
281
+ loss.sum().backward()
282
+ optimizer.step()
283
+ scheduler.step()
284
+ model.eval()
285
+ with torch.no_grad():
286
+ if args.pretrain:
287
+ eval_outputs = model(train=False, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)
288
+ else:
289
+ eval_outputs = model(train=False, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)
290
+ trip_predict = T5tokenizer.batch_decode(eval_outputs, skip_special_tokens=True)
291
+ if get_rank() == 0:
292
+ print('epoch', epoch, 'step', s, '>>>', '\tans:', batch_data['ans'][0], 'pred:', trip_predict[0])
293
+ for i, pre in enumerate(batch_data['ans']):
294
+ train_answers_trip.append(batch_data['ans'][i])
295
+ train_preds_trip.append(trip_predict[i])
296
+
297
+ model.train()
298
+ barrier()
299
+
300
+
301
+
302
+
303
+
304
+
305
+
306
+
307
+
308
+ barrier()
309
+
310
+ if 1:
311
+ train_acc_1_num, train_total_1_num = cal_acc_multi(train_answers_trip, train_preds_trip)
312
+
313
+ train_reduce_acc_num=reduce_tensor(torch.tensor(train_acc_1_num).cuda(args.local_rank)).item()
314
+ train_reduce_total_num=reduce_tensor(torch.tensor(train_total_1_num).cuda(args.local_rank)).item()
315
+ train_acc_1_trip = train_reduce_acc_num/train_reduce_total_num
316
+ if get_rank() == 0:
317
+ print('epoch %d train_loss of GPU0= %.1f, acc_trip on all GPUs= %.4f' % (epoch, loss_stat,
318
+ train_acc_1_trip))
319
+ if args.validate:
320
+ model.eval()
321
+ answers = [] # [batch_answers,...]
322
+ preds = [] # [batch_preds,...]
323
+ preds_trip = []
324
+ preds_trip_3 = []
325
+ answers_trip = []
326
+ id2pred_trip = {}
327
+ print(f"\nValidation after epoch {epoch}:")
328
+ for i, batch_data in enumerate(tqdm(test_dataloader)):
329
+ with torch.no_grad():
330
+ val_T5_input_id = torch.stack(batch_data['T5_input_ids']).to(device)
331
+ val_T5_input_mask = torch.stack(batch_data['T5_input_masks']).to(device)
332
+
333
+ val_visual_faetures = torch.tensor(np.array(batch_data['img'])).float().to(device)
334
+ val_spatial_features = torch.tensor(np.array(batch_data['spatial'])).float().to(device)
335
+
336
+ val_LXM_input_id = torch.stack(batch_data['LXM_input_ids']).to(device)
337
+ val_LXM_input_mask = torch.stack(batch_data['LXM_input_masks']).to(device)
338
+ val_LXM_token_type_ids = torch.stack(batch_data['LXM_token_type_ids']).to(device)
339
+
340
+
341
+ if args.pretrain:
342
+ val_outputs = model(train=False, LXM_source_ids=val_LXM_input_id, LXM_source_masks=val_LXM_input_mask,T5_source_ids=val_T5_input_id, T5_source_masks=val_T5_input_mask,token_type_ids=val_LXM_token_type_ids, visual_features=val_visual_faetures, spatial_features=val_spatial_features,T5_target_ids=None)
343
+ else:
344
+ val_outputs = model(train=False, LXM_source_ids=val_LXM_input_id, LXM_source_masks=val_LXM_input_mask,T5_source_ids=val_T5_input_id, T5_source_masks=val_T5_input_mask,token_type_ids=val_LXM_token_type_ids, visual_features=val_visual_faetures, spatial_features=val_spatial_features,T5_target_ids=None)
345
+
346
+
347
+ val_trip_predict = T5tokenizer.batch_decode(val_outputs, skip_special_tokens=True)
348
+
349
+
350
+
351
+ for i, pre in enumerate(batch_data['ans']):
352
+ preds_trip.append(val_trip_predict[i])
353
+ answers_trip.append(batch_data['ans'][i])
354
+
355
+ id2pred_trip[str(batch_data['id'][i])]=val_trip_predict[i]
356
+
357
+
358
+ if args.dataset == 'krvqa':
359
+ acc_1_num, total_1_num = cal_acc(answers_trip, preds_trip)
360
+ reduce_acc_num=reduce_tensor(torch.tensor(acc_1_num).cuda(args.local_rank)).item()
361
+ reduce_total_num=reduce_tensor(torch.tensor(total_1_num).cuda(args.local_rank)).item()
362
+ acc_1_trip = reduce_acc_num/reduce_total_num
363
+ if get_rank() == 0:
364
+ print('epoch %d , acc_trip on all GPUs= %.4f' % (epoch, acc_1_trip))
365
+
366
+ else:
367
+ acc_1_num, total_1_num = cal_acc_multi(answers_trip, preds_trip)
368
+ reduce_acc_num=reduce_tensor(torch.tensor(acc_1_num).cuda(args.local_rank)).item()
369
+ reduce_total_num=reduce_tensor(torch.tensor(total_1_num).cuda(args.local_rank)).item()
370
+ acc_1_trip = reduce_acc_num/reduce_total_num
371
+ if get_rank() == 0:
372
+ print('epoch %d , acc_trip on all GPUs= %.4f' % (epoch, acc_1_trip))
373
+
374
+ if acc_1_trip > best_acc_t:
375
+
376
+ best_acc_t = acc_1_trip
377
+ best_epoch_t = epoch
378
+ if not args.pretrain:
379
+ if get_rank() == 0:
380
+ f=open(args.model_dir+"/predictions.json", 'w')
381
+ json.dump(id2pred_trip, f)
382
+ f.close()
383
+
384
+ """
385
+ # ablations on two encoders
386
+ # LXMERTenc-T5dec
387
+ if args.load_pthpath == "":
388
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/C1_LXMERTencOnly_noPre_predictions.json", 'w') #GPT-noPre
389
+ else:
390
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/C3_LXMERTencOnly_predictions.json", 'w') #GPT
391
+
392
+ json.dump(id2pred_trip, fx)
393
+ fx.close()
394
+ # T5enc-T5dec
395
+ if args.load_pthpath == "":
396
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/C2_T5encOnly_noPre_predictions.json", 'w') #GPT-noPre
397
+ else:
398
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/C4_T5encOnly_predictions.json", 'w') #GPT
399
+
400
+ json.dump(id2pred_trip, fx)
401
+ fx.close()
402
+ """
403
+ """
404
+ # ablations on Knowledge types
405
+ if args.gpt3:
406
+ if args.input_type==0 and args.load_pthpath == "":
407
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/A2_noPre_predictions.json", 'w') #GPT-noPre
408
+ json.dump(id2pred_trip, fx)
409
+ fx.close()
410
+ elif args.input_type==1 and (args.load_pthpath != ""):
411
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/A3_noWiki_predictions.json", 'w') #GPT
412
+ json.dump(id2pred_trip, fx)
413
+ fx.close()
414
+ elif args.input_type==2 and (args.load_pthpath != ""):
415
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/A4_noOFA_predictions.json", 'w') #GPT
416
+ json.dump(id2pred_trip, fx)
417
+ fx.close()
418
+ elif args.input_type==3 and (args.load_pthpath == ""):
419
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/B1_onlyGPT3_predictions.json", 'w') #GPT-noPre
420
+ json.dump(id2pred_trip, fx)
421
+ fx.close()
422
+ else:
423
+ if args.input_type==0 and args.load_pthpath != "":
424
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/A5_noGPT3_predictions.json", 'w') #noGPT
425
+ json.dump(id2pred_trip, fx)
426
+ fx.close()
427
+ elif args.input_type==0 and args.load_pthpath == "":
428
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/A6_noGPT3noPre_predictions.json", 'w') #noGPT
429
+ json.dump(id2pred_trip, fx)
430
+ fx.close()
431
+
432
+ elif args.input_type==1 and (args.load_pthpath == ""):
433
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/B2_onlyOFA_predictions.json", 'w') #noGPT-noPre
434
+ json.dump(id2pred_trip, fx)
435
+ fx.close()
436
+ elif args.input_type==2 and (args.load_pthpath == ""):
437
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/B3_onlyWiki_predictions.json", 'w') #noGPT-noPre
438
+ json.dump(id2pred_trip, fx)
439
+ fx.close()
440
+ elif args.input_type==3 and (args.load_pthpath == ""):
441
+ fx=open("/mnt/bn/qingyi-bn-lq/okvqa-output/B4_onlyVisualNoKnowledge_predictions.json", 'w') #noGPT-noPre
442
+ json.dump(id2pred_trip, fx)
443
+ fx.close()
444
+
445
+ """
446
+
447
+
448
+ print('saving model at epoch', epoch, '!!')
449
+ if get_world_size() > 1:
450
+ torch.save(model.module.state_dict(), args.model_dir+'/best_finetuned_model_'+model_name+'.pth')
451
+ else:
452
+ torch.save(model.state_dict(), args.model_dir+'/best_finetuned_model_'+model_name+'.pth')
453
+
454
+ if get_rank() == 0:
455
+ print("best_acc@1t={:.2%}, epoch{}\n\n".format(best_acc_t, best_epoch_t))
456
+
457
+ model.train()
458
+ if args.pretrain:
459
+ if get_rank() == 0:
460
+ if get_world_size() > 1:
461
+ torch.save(model.module.state_dict(), args.model_dir+ '/model_for_epoch_%d.pth' % epoch)
462
+ else:
463
+ torch.save(model.state_dict(), args.model_dir+ '/model_for_epoch_%d.pth' % epoch)
464
+
465
+ barrier()
466
+
467
+
468
+ dist.destroy_process_group()
469
+ if __name__ == "__main__":
470
+ train()
code/train4LXMT5_DDP_original.py ADDED
@@ -0,0 +1,435 @@
1
+ #!user/bin/env python
2
+ # -*- coding:utf-8 -*-
3
+ import argparse
4
+ import json
5
+ import os
6
+ import datetime
7
+ import pickle
8
+ import random
9
+ import torch
10
+ import torch.backends.cudnn as cudnn
11
+ import torch.distributed as dist
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ import torch.optim as optim
15
+ from bisect import bisect
16
+ from math import fabs
17
+ from torch.optim import lr_scheduler
18
+ from torch.utils.data import DataLoader
19
+ from tqdm import tqdm
20
+ from transformers import LxmertTokenizer
21
+ from dist_train import get_world_size, get_rank, get_local_rank, barrier, reduce_sum
22
+
23
+ import numpy as np
24
+ from transformers.tokenization_utils_base import ENCODE_KWARGS_DOCSTRING
25
+ from config4LXMT5_DDP import args
26
+
27
+ from dataset4LXMT5 import KgDataset,my_collate,my_val_collate
28
+ from dataset_val4LXMT5 import KgDatasetVal
29
+
30
+ if args.visualBERT:
31
+ from model_ViB2T5 import T5tokenizer, ViBT52T5, LXMtokenizer
32
+ else:
33
+ from model_LXM2T5 import T5tokenizer, LXMT52T5, LXMtokenizer
34
+
35
+ from transformers import get_linear_schedule_with_warmup
36
+ from transformers import LxmertConfig, LxmertTokenizer, LxmertModel,BertTokenizer
37
+
38
+ dist.init_process_group(backend='nccl',timeout=datetime.timedelta(seconds=5400))
39
+ torch.cuda.set_device(args.local_rank)
40
+
41
+
42
+ # LR = 1e-5
43
+ LR = args.learning_rate
44
+ LR_LXM = args.learning_rate_LXM
45
+ # LR = 1e-4
46
+
47
+ torch.multiprocessing.set_sharing_strategy('file_system')
48
+
49
+ torch.cuda.set_device(get_local_rank())
50
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
51
+
52
+ def reduce_tensor(tensor: torch.Tensor):
53
+ rt = tensor.clone().float()
54
+ dist.all_reduce(rt,op=dist.ReduceOp.SUM)
55
+ rt /= dist.get_world_size()#.float()
56
+ return rt
57
+
58
+ def set_seed(rank):
59
+ random.seed(args.seed+rank)
60
+ np.random.seed(args.seed+rank)
61
+ torch.manual_seed(args.seed+rank)
62
+ torch.cuda.manual_seed(args.seed+rank)
63
+ torch.cuda.manual_seed_all(args.seed+rank)
64
+ torch.backends.cudnn.deterministic = True
65
+
66
+ set_seed(get_rank())
67
+
68
+
69
+
70
+
71
+
72
+ def cal_acc_multi(ground_truth, preds, return_id = False):
73
+ all_num = len(ground_truth)
74
+ acc_num = 0
75
+ ids = []
76
+ temp = []
77
+ for i, answer_id in enumerate(ground_truth):
78
+ pred = preds[i]
79
+ # ids.append([i, int(pred)])
80
+ cnt = 0
81
+ for aid in answer_id:
82
+ if pred == aid:
83
+ cnt += 1
84
+ if cnt ==1:
85
+ acc_num += 1/3
86
+ elif cnt == 2:
87
+ acc_num += 2/3
88
+ elif cnt > 2:
89
+ acc_num += 1
90
+ if return_id:
91
+ return acc_num / all_num, ids
92
+ else:
93
+ return acc_num, all_num
94
+
95
+ def cal_acc(ground_truth, preds, return_id = False):
96
+ all_num = len(ground_truth)
97
+ acc_num = 0
98
+ ids = []
99
+ temp = []
100
+ for i, answer_id in enumerate(ground_truth):
101
+ pred = preds[i]
102
+ # ids.append([i, int(pred)])
103
+ cnt = 0
104
+ for aid in answer_id:
105
+ if pred == aid:
106
+ acc_num += 1
107
+ if return_id:
108
+ return acc_num / all_num, ids
109
+ else:
110
+ return acc_num, all_num
111
+
112
+
113
+ def train():
114
+ if not args.describe:
115
+ print('please set the description for the saved-model name! use --describe !')
116
+ assert 1==0
117
+ else:
118
+ model_name=args.describe
119
+ if not args.pretrain:
120
+ train_dataset = KgDataset(val=False)
121
+ train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
122
+ train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, sampler=train_sampler,#shuffle=True,
123
+ num_workers=0, collate_fn=my_collate)#, pin_memory=True)
124
+
125
+ if args.validate:
126
+ test_dataset = KgDatasetVal(val=False)
127
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
128
+ num_workers=0, collate_fn=my_val_collate)
129
+ else:
130
+ train_dataset = KgDataset(val=False)
131
+
132
+ train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
133
+ train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,#pin_memory=True,
134
+ num_workers=0, collate_fn=my_collate, sampler=train_sampler)#shuffle=True,
135
+ # num_workers=0, collate_fn=my_collate_pretrain, sampler=train_sampler)#shuffle=True,
136
+ if args.validate:
137
+ test_dataset = KgDatasetVal(val=False)
138
+ test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size,
139
+ num_workers=0, collate_fn=my_val_collate, shuffle=False)#sampler=test_sampler)
140
+ if args.pretrain:
141
+ if get_rank() == 0:
142
+ print('pre-training!')
143
+ if args.visualBERT:
144
+ model= ViBT52T5()
145
+ else:
146
+ model = LXMT52T5()
147
+ else:
148
+ if get_rank() == 0:
149
+ print('fine-tuning!')
150
+ if args.visualBERT:
151
+ model= ViBT52T5()
152
+ else:
153
+ model = LXMT52T5()
154
+
155
+ model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
156
+ model = model.to(device)
157
+
158
+ if get_world_size() > 1:
159
+ if get_rank() == 0:
160
+
161
+ print("Let's use", get_world_size(), "GPUs!")
162
+
163
+ model = nn.parallel.DistributedDataParallel(model, device_ids=[get_local_rank()], output_device=get_local_rank(),find_unused_parameters=True)
164
+
165
+ print(model.named_modules)
166
+ if get_world_size() > 1:
167
+ if args.visualBERT:
168
+ optimizer = optim.AdamW([
169
+ {'params': model.module.T5model.parameters(), 'lr': LR},
170
+ {'params': model.module.ViBmodel.parameters(), 'lr': LR_LXM},
171
+ {'params': model.module.mapping.parameters(), 'lr': LR_LXM},
172
+ ])
173
+ else:
174
+ optimizer = optim.AdamW([
175
+ {'params': model.module.T5model.parameters(), 'lr': LR},
176
+ {'params': model.module.LXMmodel.parameters(), 'lr': LR_LXM},
177
+ {'params': model.module.mapping.parameters(), 'lr': LR_LXM},
178
+
179
+ ])
180
+ else:
181
+ if args.visualBERT:
182
+ optimizer = optim.AdamW([
183
+ {'params': model.T5model.parameters(), 'lr': LR},
184
+ {'params': model.ViBmodel.parameters(), 'lr': LR_LXM},
185
+ {'params': model.mapping.parameters(), 'lr': LR_LXM},
186
+ ])
187
+ else:
188
+ optimizer = optim.AdamW([
189
+ {'params': model.T5model.parameters(), 'lr': LR},
190
+ {'params': model.LXMmodel.parameters(), 'lr': LR_LXM},
191
+ {'params': model.mapping.parameters(), 'lr': LR_LXM},
192
+ ])
193
+
194
+
195
+
196
+ if args.pretrain:
197
+ steps_num = 100000
198
+ else:
199
+ steps_num = 20000
200
+
201
+
202
+
203
+
204
+ args.num_epochs = steps_num // (len(train_dataset) / (args.batch_size * get_world_size())) \
205
+ if len(train_dataset) % args.batch_size == 0 \
206
+ else (steps_num // (len(train_dataset) / (args.batch_size * get_world_size())) )+1
207
+
208
+ args.num_epochs = int(args.num_epochs)
209
+
210
+ if get_rank() == 0:
211
+ print('total_epoch', args.num_epochs)
212
+ print('total_steps', "we set steps=",steps_num)
213
+ print('warmup_steps', int(steps_num/10)) #0.05*total_steps)
214
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(steps_num/10), #0.01 * total_steps,
215
+ num_training_steps=steps_num)
216
+
217
+
218
+ if args.load_pthpath == "":
219
+ start_epoch = 0
220
+ else:
221
+ if get_rank() == 0:
222
+ print('load model')
223
+ start_epoch = 0
224
+
225
+
226
+ if get_world_size() > 1:
227
+ model.module.load_state_dict(torch.load(args.load_pthpath))
228
+ else:
229
+ model.load_state_dict(torch.load(args.load_pthpath))
230
+
231
+
232
+
233
+ best_acc = 0
234
+ best_epoch = 0
235
+ best_acc_t = 0
236
+ best_epoch_t = 0
237
+ best_acc_t3 = 0
238
+ step_ind = 0
239
+
240
+ for epoch in range(start_epoch, args.num_epochs):
241
+ train_preds_trip = []
242
+ train_sampler.set_epoch(epoch)
243
+ train_answers_trip = []
244
+ s=0
245
+ for batch_data in tqdm(train_dataloader):
246
+ step_ind+=1
247
+ if get_rank()==0:
248
+ print("step_ind",step_ind)
249
+ s=s+1
250
+
251
+ visual_faetures = torch.from_numpy(np.array(batch_data['img'], dtype=float)).float().to(device)
252
+ spatial_features = torch.tensor(np.array(batch_data['spatial'])).float().to(device)
253
+
254
+ if 1:
255
+
256
+ T5_input_id = torch.stack(batch_data['T5_input_ids']).to(device)
257
+ T5_input_mask = torch.stack(batch_data['T5_input_masks']).to(device)
258
+
259
+
260
+ LXM_input_id = torch.stack(batch_data['LXM_input_ids']).to(device)
261
+ LXM_input_mask = torch.stack(batch_data['LXM_input_masks']).to(device)
262
+ LXM_token_type_ids = torch.stack(batch_data['LXM_token_type_ids']).to(device)
263
+
264
+
265
+ T5_target_id = torch.stack(batch_data['T5_target_ids']).to(device)
266
+
267
+ neg100 = torch.ones_like(T5_target_id)*(-100)
268
+ T5_target_id = torch.where(T5_target_id==T5tokenizer.pad_token_id,neg100, T5_target_id)
269
+
270
+
271
+
272
+
273
+ model.zero_grad()
274
+
275
+
276
+ optimizer.zero_grad()
277
+ if args.pretrain:
278
+ outputs = model(train=True, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)#,T5_target_masks=None
279
+
280
+
281
+ else:
282
+ outputs = model(train=True, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)#,T5_target_masks=None
283
+ loss = outputs.loss
284
+
285
+ loss_stat = torch.mean(loss.detach()).item()
286
+
287
+ if get_rank() == 0:
288
+ print("loss on GPU0", loss_stat)
289
+
290
+ loss.sum().backward()
291
+ optimizer.step()
292
+ scheduler.step()
293
+
294
+ with torch.no_grad():
295
+
296
+ if args.pretrain:
297
+ eval_outputs = model(train=False, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)#,T5_target_masks=None
298
+
299
+ else:
300
+ eval_outputs = model(train=False, LXM_source_ids=LXM_input_id, LXM_source_masks=LXM_input_mask,T5_source_ids=T5_input_id, T5_source_masks=T5_input_mask,token_type_ids=LXM_token_type_ids, visual_features=visual_faetures, spatial_features=spatial_features,T5_target_ids=T5_target_id)#,T5_target_masks=None
301
+
302
+ trip_predict = T5tokenizer.batch_decode(eval_outputs, skip_special_tokens=True)
303
+ if get_rank() == 0:
304
+ print('epoch', epoch, 'step', s, '>>>', '\tans:', batch_data['ans'][0], 'pred:', trip_predict[0])
305
+
306
+ for i, pre in enumerate(batch_data['ans']):
307
+ train_answers_trip.append(batch_data['ans'][i])
308
+ train_preds_trip.append(trip_predict[i])
309
+
310
+ barrier()
311
+ barrier()
312
+
313
+
314
+ if args.dataset == 'krvqa':
315
+ train_acc_1_num, train_total_1_num = cal_acc(train_answers_trip, train_preds_trip)
316
+
317
+ train_reduce_acc_num=reduce_tensor(torch.tensor(train_acc_1_num).cuda(args.local_rank)).item()
318
+ train_reduce_total_num=reduce_tensor(torch.tensor(train_total_1_num).cuda(args.local_rank)).item()
319
+ train_acc_1_trip = train_reduce_acc_num/train_reduce_total_num
320
+
321
+ if get_rank() == 0:
322
+ # print("train_acc_1_trip all GPUs:", train_acc_1_trip)
323
+ print('epoch %d train_loss = %.1f, acc_trip = %.4f' % (epoch, loss_stat,train_acc_1_trip))
324
+ else:
325
+
326
+ train_acc_1_num, train_total_1_num = cal_acc_multi(train_answers_trip, train_preds_trip)
327
+
328
+ train_reduce_acc_num=reduce_tensor(torch.tensor(train_acc_1_num).cuda(args.local_rank)).item()
329
+ train_reduce_total_num=reduce_tensor(torch.tensor(train_total_1_num).cuda(args.local_rank)).item()
330
+ train_acc_1_trip = train_reduce_acc_num/train_reduce_total_num
331
+ if get_rank() == 0:
332
+
333
+ print('epoch %d train_loss of GPU0= %.1f, acc_trip on all GPUs= %.4f' % (epoch, loss_stat,
334
+ train_acc_1_trip))
335
+
336
+ barrier()
337
+ if args.validate:
338
+ model.eval()
339
+ answers = [] # [batch_answers,...]
340
+ preds = [] # [batch_preds,...]
341
+ preds_trip = []
342
+ preds_trip_3 = []
343
+ answers_trip = []
344
+ id2pred_trip = {}
345
+ print(f"\nValidation after epoch {epoch}:")
346
+ for i, batch_data in enumerate(tqdm(test_dataloader)):
347
+ with torch.no_grad():
348
+ val_T5_input_id = torch.stack(batch_data['T5_input_ids']).to(device)
349
+ val_T5_input_mask = torch.stack(batch_data['T5_input_masks']).to(device)
350
+
351
+
352
+ val_visual_faetures = torch.tensor(np.array(batch_data['img'])).float().to(device)
353
+
354
+ val_spatial_features = torch.tensor(np.array(batch_data['spatial'])).float().to(device)
355
+
356
+
357
+
358
+ val_LXM_input_id = torch.stack(batch_data['LXM_input_ids']).to(device)
359
+ val_LXM_input_mask = torch.stack(batch_data['LXM_input_masks']).to(device)
360
+ val_LXM_token_type_ids = torch.stack(batch_data['LXM_token_type_ids']).to(device)
361
+
362
+
363
+
364
+ if args.pretrain:
365
+ val_outputs = model(train=False, LXM_source_ids=val_LXM_input_id, LXM_source_masks=val_LXM_input_mask,T5_source_ids=val_T5_input_id, T5_source_masks=val_T5_input_mask,token_type_ids=val_LXM_token_type_ids, visual_features=val_visual_faetures, spatial_features=val_spatial_features,T5_target_ids=None)#,T5_target_masks=None
366
+
367
+ else:
368
+ val_outputs = model(train=False, LXM_source_ids=val_LXM_input_id, LXM_source_masks=val_LXM_input_mask,T5_source_ids=val_T5_input_id, T5_source_masks=val_T5_input_mask,token_type_ids=val_LXM_token_type_ids, visual_features=val_visual_faetures, spatial_features=val_spatial_features,T5_target_ids=None)#,T5_target_masks=None
369
+
370
+
371
+
372
+ val_trip_predict = T5tokenizer.batch_decode(val_outputs, skip_special_tokens=True)
373
+
374
+
375
+
376
+
377
+ for i, pre in enumerate(batch_data['ans']):
378
+ preds_trip.append(val_trip_predict[i])
379
+ answers_trip.append(batch_data['ans'][i])
380
+
381
+ id2pred_trip[str(batch_data['id'][i])]=val_trip_predict[i]
382
+
383
+
384
+ if args.dataset == 'krvqa':
385
+ acc_1_num, total_1_num = cal_acc(answers_trip, preds_trip)
386
+ reduce_acc_num=reduce_tensor(torch.tensor(acc_1_num).cuda(args.local_rank)).item()
387
+ reduce_total_num=reduce_tensor(torch.tensor(total_1_num).cuda(args.local_rank)).item()
388
+ acc_1_trip = reduce_acc_num/reduce_total_num
389
+ if get_rank() == 0:
390
+ print('epoch %d , acc_trip on all GPUs= %.4f' % (epoch, acc_1_trip))
391
+
392
+ else:
393
+ acc_1_num, total_1_num = cal_acc_multi(answers_trip, preds_trip)
394
+ reduce_acc_num=reduce_tensor(torch.tensor(acc_1_num).cuda(args.local_rank)).item()
395
+ reduce_total_num=reduce_tensor(torch.tensor(total_1_num).cuda(args.local_rank)).item()
396
+ acc_1_trip = reduce_acc_num/reduce_total_num
397
+ if get_rank() == 0:
398
+ print('epoch %d , acc_trip on all GPUs= %.4f' % (epoch, acc_1_trip))
399
+
400
+ if acc_1_trip > best_acc_t:
401
+
402
+ best_acc_t = acc_1_trip
403
+ best_epoch_t = epoch
404
+ if not args.pretrain:
405
+ if get_rank() == 0:
406
+ f=open(args.model_dir+"/predictions.json", 'w')
407
+ json.dump(id2pred_trip, f)
408
+ f.close()
409
+ print('saving model at epoch', epoch, '!!')
410
+
411
+ if get_world_size() > 1:
412
+ torch.save(model.module.state_dict(), args.model_dir+'/best_finetuned_model_'+model_name+'.pth')
413
+ else:
414
+ torch.save(model.state_dict(), args.model_dir+'/best_finetuned_model_'+model_name+'.pth')
415
+
416
+
417
+
418
+ if get_rank() == 0:
419
+ print("best_acc@1t={:.2%}, epoch{}\n\n".format(best_acc_t, best_epoch_t))
420
+ model.train()
421
+ if args.pretrain:
422
+
423
+ if get_rank() == 0: # for pre-training, save the model after every epoch so that a suitable checkpoint can be selected or analysed later
424
+ if get_world_size() > 1:
425
+ torch.save(model.module.state_dict(), args.model_dir+ '/model_for_epoch_%d.pth' % epoch)
426
+ else:
427
+ torch.save(model.state_dict(), args.model_dir+ '/model_for_epoch_%d.pth' % epoch)
428
+
429
+
430
+ barrier()
431
+
432
+
433
+ dist.destroy_process_group()
434
+ if __name__ == "__main__":
435
+ train()