Karroyan committed
Commit 322bf9d · verified · 1 Parent(s): 431055f

Upload data_preprocess.py with huggingface_hub

Files changed (1)
  1. data_preprocess.py +429 -0
data_preprocess.py ADDED
@@ -0,0 +1,429 @@
+import json
+import random
+import jsonlines
+import os
+
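+# Preprocessing utilities for pairwise meme reward-model data: raw comparison
+# records are converted into conversation-style JSONL files (two <image>
+# placeholders per prompt, a 0/1 preference label as the answer) plus small
+# meta JSON files describing each split.
+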
+def load_data_jsonl(data_path):
+    """Load a JSONL file into a list of records."""
+    data = []
+    with open(data_path, "r", encoding="utf8") as f:
+        for item in jsonlines.Reader(f):
+            data.append(item)
+
+    return data
+
+def load_data(data_path):
+    """Load a single JSON file."""
+    with open(data_path, 'r') as f:
+        data = json.load(f)
+
+    return data
+
+def ensure_dir_exists(path):
+    """Create the parent directory of `path` if it doesn't exist."""
+    directory = os.path.dirname(path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+        print(f"Created directory: {directory}")
+
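+# Expected fields of each raw train/test record, as inferred from the code in
+# this file:
+#   {"image_list": [img_a, img_b], "label": 0 or 1, "text_lengths": [len_a, len_b]}
+# "label" is the index of the original (preferred) image in "image_list";
+# "text_lengths" (overlaid-text length per image) is only needed by the
+# *_cross builders.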
+def build_dataset(data_list, path):
+    """Write a plain pairwise dataset: the gpt answer is the record's preference label."""
+    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
+        PROMPT = f.read()
+
+    dict_list = []
+    for idx, d in enumerate(data_list):
+        data_json = {'id': idx,
+                     'image': d["image_list"],
+                     'conversations': [
+                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},  # f'<image>{replace_with_zh(PROMPT, True)}
+                         {'from': 'gpt', 'value': d["label"]}
+                     ]}
+        dict_list.append(data_json)
+    with open(path, 'w', encoding='utf-8') as file:
+        for entry in dict_list:
+            json.dump(entry, file)
+            file.write('\n')
+    return len(dict_list)
+
+def build_dataset_multihead(data_list, path, mask):
+    """Same as build_dataset, but the answer is [[label, label], mask] for the two-head (humor/relate) reward model."""
+    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
+        PROMPT = f.read()
+
+    dict_list = []
+    for idx, d in enumerate(data_list):
+        data_json = {'id': idx,
+                     'image': d["image_list"],
+                     'conversations': [
+                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},  # f'<image>{replace_with_zh(PROMPT, True)}
+                         {'from': 'gpt', 'value': [[d["label"]] * 2, mask]}
+                     ]}
+        dict_list.append(data_json)
+    with open(path, 'w', encoding='utf-8') as file:
+        for entry in dict_list:
+            json.dump(entry, file)
+            file.write('\n')
+    return len(dict_list)
+
+def build_dataset_cross(data_list, path, TYPE):
+    """Re-pair each original image with a randomly chosen boring image from other records.
+
+    `TYPE` is only used by the commented-out split sanity check below.
+    """
+    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
+        PROMPT = f.read()
+
+    dict_list = []
+    origin_image_list = []
+    boring_image_list = []
+    origin_text_lengths = []
+    boring_text_lengths = []
+    for idx, d in enumerate(data_list):
+        if d["label"] == 0:
+            origin_image_list.append(d["image_list"][0])
+            boring_image_list.append(d["image_list"][1])
+            origin_text_lengths.append(d["text_lengths"][0])
+            boring_text_lengths.append(d["text_lengths"][1])
+        elif d["label"] == 1:
+            origin_image_list.append(d["image_list"][1])
+            boring_image_list.append(d["image_list"][0])
+            origin_text_lengths.append(d["text_lengths"][1])
+            boring_text_lengths.append(d["text_lengths"][0])
+        else:
+            raise ValueError("Wrong label")
+
+    # for origin, boring in zip(origin_image_list, boring_image_list):
+    #     if 'origin' not in origin or TYPE[:-4] not in boring:
+    #         raise ValueError("Wrong split")
+
+    print('sorting the boring images')
+    # Create pairs of boring images with their text lengths and sort once
+    boring_with_lengths = list(zip(boring_image_list, boring_text_lengths))
+    boring_with_lengths.sort(key=lambda x: x[1])  # Sort by text length (ascending)
+
+    print('generating the pairs')
+    for idx, origin in enumerate(origin_image_list):
+        original_length = origin_text_lengths[idx]
+
+        # Find the index where boring text lengths become longer than original
+        # (only needed by the disabled length-biased sampling below)
+        longer_idx = 0
+        while longer_idx < len(boring_with_lengths) and boring_with_lengths[longer_idx][1] <= original_length:
+            longer_idx += 1
+
+        # Length-biased sampling (70% preference for longer-text boring images)
+        # is disabled here; see build_dataset_cross_multihead for the active version.
+        # if longer_idx < len(boring_with_lengths) and random.random() < 0.7:
+        #     # Sample from longer text images
+        #     boring = random.choice(boring_with_lengths[longer_idx:])[0]
+        # else:
+        #     # Sample from shorter text images, or all if none are longer
+        #     if longer_idx > 0:
+        #         boring = random.choice(boring_with_lengths[:longer_idx])[0]
+        #     else:
+        #         boring = random.choice(boring_with_lengths)[0]
+
+        # Pair with a uniformly random boring image instead.
+        boring = random.choice(boring_with_lengths)[0]
+
+        # Randomize which side the original lands on; the label is its index.
+        pos_neg = random.choice(["pos", "neg"])
+        if pos_neg == 'pos':
+            data_json = {'id': idx,
+                         'image': [origin, boring],
+                         'conversations': [
+                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
+                             {'from': 'gpt', 'value': 0}
+                         ]}
+            dict_list.append(data_json)
+        else:
+            data_json = {'id': idx,
+                         'image': [boring, origin],
+                         'conversations': [
+                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
+                             {'from': 'gpt', 'value': 1}
+                         ]}
+            dict_list.append(data_json)
+    with open(path, 'w', encoding='utf-8') as file:
+        for entry in dict_list:
+            json.dump(entry, file)
+            file.write('\n')
+    return len(dict_list)
+
+def build_dataset_cross_multihead(data_list, path, TYPE, mask):
+    """Cross-pairing variant for the two-head reward model: answers are [[label, label], mask]."""
+    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
+        PROMPT = f.read()
+
+    dict_list = []
+    origin_image_list = []
+    boring_image_list = []
+    origin_text_lengths = []
+    boring_text_lengths = []
+    for idx, d in enumerate(data_list):
+        if d["label"] == 0:
+            origin_image_list.append(d["image_list"][0])
+            boring_image_list.append(d["image_list"][1])
+            origin_text_lengths.append(d["text_lengths"][0])
+            boring_text_lengths.append(d["text_lengths"][1])
+        elif d["label"] == 1:
+            origin_image_list.append(d["image_list"][1])
+            boring_image_list.append(d["image_list"][0])
+            origin_text_lengths.append(d["text_lengths"][1])
+            boring_text_lengths.append(d["text_lengths"][0])
+        else:
+            raise ValueError("Wrong label")
+
+    # for origin, boring in zip(origin_image_list, boring_image_list):
+    #     if 'origin' not in origin or TYPE[:-4] not in boring:
+    #         raise ValueError("Wrong split")
+
+    print('sorting the boring images')
+    # Create pairs of boring images with their text lengths and sort once
+    boring_with_lengths = list(zip(boring_image_list, boring_text_lengths))
+    boring_with_lengths.sort(key=lambda x: x[1])  # Sort by text length (ascending)
+
+    print('generating the pairs')
+    for idx, origin in enumerate(origin_image_list):
+        original_length = origin_text_lengths[idx]
+
+        # Find the index where boring text lengths become longer than original
+        longer_idx = 0
+        while longer_idx < len(boring_with_lengths) and boring_with_lengths[longer_idx][1] <= original_length:
+            longer_idx += 1
+
+        # With 70% probability, choose a boring image with longer text if available
+        if longer_idx < len(boring_with_lengths) and random.random() < 0.7:
+            # Sample from longer text images
+            boring = random.choice(boring_with_lengths[longer_idx:])[0]
+        else:
+            # Sample from shorter text images, or all if none are longer
+            if longer_idx > 0:
+                boring = random.choice(boring_with_lengths[:longer_idx])[0]
+            else:
+                boring = random.choice(boring_with_lengths)[0]
+
+        # Randomize which side the original lands on; the label is its index.
+        pos_neg = random.choice(["pos", "neg"])
+        if pos_neg == 'pos':
+            data_json = {'id': idx,
+                         'image': [origin, boring],
+                         'conversations': [
+                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
+                             {'from': 'gpt', 'value': [[0] * 2, mask]}
+                         ]}
+            dict_list.append(data_json)
+        else:
+            data_json = {'id': idx,
+                         'image': [boring, origin],
+                         'conversations': [
+                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
+                             {'from': 'gpt', 'value': [[1] * 2, mask]}
+                         ]}
+            dict_list.append(data_json)
+    with open(path, 'w', encoding='utf-8') as file:
+        for entry in dict_list:
+            json.dump(entry, file)
+            file.write('\n')
+    return len(dict_list)
+
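+# The meta entries written by build_json ("root", "annotation", "data_augment",
+# "repeat_time", "length") follow the meta-file convention expected by the
+# downstream training code (presumably an InternVL/LLaVA-style data loader).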
+def build_json(dataset_path_list, length_list, name_list, json_path):
+    """Write one meta entry per dataset (one JSON object per line)."""
+    dict_list = []
+    for dataset_path, length, name in zip(dataset_path_list, length_list, name_list):
+        meta_entry = {
+            f"{name}": {
+                "root": "",
+                "annotation": dataset_path,
+                "data_augment": False,
+                "repeat_time": 1,
+                "length": length
+            }
+        }
+        dict_list.append(meta_entry)
+
+    with open(json_path, 'w', encoding='utf-8') as file:
+        for meta_entry in dict_list:
+            json.dump(meta_entry, file)
+            file.write('\n')
+
+def split_train_test(data, train_path, test_path):
+    """Shuffle and split data 90/10 into train/test JSON files."""
+    random.shuffle(data)
+
+    selected_items = data[:int(len(data) * 0.9)]
+    unselected_items = data[int(len(data) * 0.9):]
+
+    with open(train_path, 'w') as f:
+        json.dump(selected_items, f)
+
+    with open(test_path, 'w') as f:
+        json.dump(unselected_items, f)
+
+    return selected_items, unselected_items
+
+def split_train_test_original(original_dataset):
+    """Split the original dataset 90/10 and save the image filenames of each split."""
+    # First, load and split the original dataset to get the indices
+    original_data = load_data(original_dataset)
+    random.shuffle(original_data)
+
+    # Split the original data
+    train_data_original = original_data[:int(len(original_data) * 0.9)]
+    test_data_original = original_data[int(len(original_data) * 0.9):]
+
+    # Extract image IDs from filenames (assuming filenames are like "image_xxx.jpg")
+    train_image_ids = []
+    for item in train_data_original:
+        # Extract ID from original_image filename
+        filename = item["original_image"].split("/")[-1]  # Get just the filename
+        train_image_ids.append(filename)
+
+    test_image_ids = []
+    for item in test_data_original:
+        # Extract ID from original_image filename
+        filename = item["original_image"].split("/")[-1]  # Get just the filename
+        test_image_ids.append(filename)
+
+    # Note: json.dump writes each list as a single line, so load_data_jsonl()
+    # later returns it wrapped in a one-element list.
+    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_train_ids.jsonl', 'w') as f:
+        json.dump(train_image_ids, f)
+
+    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_test_ids.jsonl', 'w') as f:
+        json.dump(test_image_ids, f)
+
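+# Driver: for each dataset NAME and pairing TYPE, read the pre-built raw
+# splits ({NAME}/raw_data/train.json and test.json must already exist; the
+# block that regenerates them from the original annotations is commented out
+# below) and emit the conversation JSONL plus its meta JSON file.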
+if __name__ == '__main__':
+    NAME_list = ['object_add']  # 'text_replaced', 'lowperformancememe', 'irrelevantmeme', 'boringmeme', 'boring_detailed'
+    TYPE_list = ['cross', '']
+
+    # Per-head label masks, only used by the *_multihead TYPEs.
+    mask_dict = {  # 0: mask, 1: no mask, first: humor, second: relate
+        'text_replaced': [1, 1],  # text replaced, both humor and relate no mask
+        'lowperformancememe': [1, 0],  # low performance meme, humor no mask, relate mask
+        'irrelevantmeme': [0, 1],  # irrelevant meme, humor mask, relate no mask
+        'boringmeme': [1, 0]  # boring meme, humor no mask, relate mask
+    }
+
+    for NAME in NAME_list:
+        for TYPE in TYPE_list:
+            if NAME == 'lowperformancememe':
+                dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/{NAME}.jsonl'
+            elif NAME == 'text_replaced' or NAME == 'boring_detailed':
+                dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/Eimages_{NAME}.json'
+            else:
+                # dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/{NAME}.json'
+                dataset = "/fs-computility/niuyazhe/shared/meme/data/meme/Eimages/Eimages_object_2.jsonl"
+
+            original_dataset = '/fs-computility/niuyazhe/lixueyan/jmj/DIlab/meme/memetrash/processed_dections_Eimage_UPDATED.json'
+            train_image_ids = load_data_jsonl('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_train_ids.jsonl')
+            test_image_ids = load_data_jsonl('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_test_ids.jsonl')
+
+            # split_train_test_original(original_dataset)
+
+            if TYPE != '':
+                dataset_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/Ejson/{NAME}_{TYPE}_train.jsonl'
+                dataset_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/Ejson/{NAME}_{TYPE}_test.jsonl'
+                json_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/{NAME}_{TYPE}_train.jsonl'
+                json_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/{NAME}_{TYPE}_test.jsonl'
+
+            else:
+                dataset_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/Ejson/{NAME}_train.jsonl'
+                dataset_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/Ejson/{NAME}_test.jsonl'
+                json_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/{NAME}_train.jsonl'
+                json_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/{NAME}_test.jsonl'
+
+            train_path = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/raw_data/train.json'
+            test_path = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/raw_data/test.json'
+
+            ensure_dir_exists(dataset_path_train)
+            ensure_dir_exists(dataset_path_test)
+            ensure_dir_exists(json_path_train)
+            ensure_dir_exists(json_path_test)
+            ensure_dir_exists(train_path)
+            ensure_dir_exists(test_path)
+
+            # # Now load the current dataset
+            # if NAME == 'object_add':
+            #     data = load_data_jsonl(dataset)
+            # else:
+            #     data = load_data(dataset)
+
+            # # Process train data based on original split
+            # train_data_list = []
+            # test_data_list = []
+
+            # for d in data:
+            #     pos_neg = random.choice(["pos", "neg"])
+
+            #     # Extract text lengths
+            #     original_image_length = 0
+            #     new_image_length = 0
+
+            #     # Calculate text length for new image from detections
+            #     if "detections" in d:
+            #         for detection in d["detections"]:
+            #             if "text" in detection:
+            #                 new_image_length += len(detection["text"])
+
+            #     # Find original image in original dataset to get its text length
+            #     original_filename = d["original_image"].split("/")[-1]
+            #     for orig_item in load_data(original_dataset):
+            #         if orig_item["image_path"].split("/")[-1] == original_filename:
+            #             if "detections" in orig_item:
+            #                 for detection in orig_item["detections"]:
+            #                     if "text" in detection:
+            #                         original_image_length += len(detection["text"])
+            #             break
+
+            #     # Create data dictionary with text lengths
+            #     if pos_neg == "pos":
+            #         data_dict = {"image_list": [d["original_image"], d["new_image"]],
+            #                      "label": 0,
+            #                      "text_lengths": [original_image_length, new_image_length]}
+            #     else:
+            #         data_dict = {"image_list": [d["new_image"], d["original_image"]],
+            #                      "label": 1,
+            #                      "text_lengths": [new_image_length, original_image_length]}
+
+            #     # Get the filename from the original image path
+            #     filename = d["original_image"].split("/")[-1]
+
+            #     # only for object changed
+            #     filename = filename.replace('(', '').replace(')', '').replace(' ', '')
+
+            #     # breakpoint()
+
+            #     # Assign to train or test based on the original split
+            #     if filename in train_image_ids[0]:
+            #         train_data_list.append(data_dict)
+            #     else:
+            #         test_data_list.append(data_dict)
+
+            # print(len(train_data_list), len(test_data_list))
+            # # Save processed data
+            # with open(train_path, 'w') as f:
+            #     json.dump(train_data_list, f)
+
+            # with open(test_path, 'w') as f:
+            #     json.dump(test_data_list, f)
+
+            # exit()
+
+            # Build datasets
+            train_data = load_data(train_path)
+            test_data = load_data(test_path)
+
+            # Drop the trailing 'meme' from names like 'boringmeme' for the meta key.
+            if 'meme' in NAME:
+                name = NAME[:-4]
+            else:
+                name = NAME
+
+            if TYPE == '':
+                length_train = build_dataset(train_data, dataset_path_train)
+                build_json([dataset_path_train], [length_train], [name], json_path_train)
+                length_test = build_dataset(test_data, dataset_path_test)
+                build_json([dataset_path_test], [length_test], [name], json_path_test)
+
+            elif TYPE == 'cross':
+                length_train = build_dataset_cross(train_data, dataset_path_train, NAME)
+                build_json([dataset_path_train], [length_train], [name + '_' + TYPE], json_path_train)
+                length_test = build_dataset_cross(test_data, dataset_path_test, NAME)
+                build_json([dataset_path_test], [length_test], [name + '_' + TYPE], json_path_test)
+
+            elif TYPE == 'align_multihead':
+                length_train = build_dataset_multihead(train_data, dataset_path_train, mask_dict[NAME])
+                build_json([dataset_path_train], [length_train], [name], json_path_train)
+                length_test = build_dataset_multihead(test_data, dataset_path_test, mask_dict[NAME])
+                build_json([dataset_path_test], [length_test], [name], json_path_test)
+            elif TYPE == 'cross_multihead':
+                length_train = build_dataset_cross_multihead(train_data, dataset_path_train, NAME, mask_dict[NAME])
+                build_json([dataset_path_train], [length_train], [name + '_' + TYPE], json_path_train)
+                length_test = build_dataset_cross_multihead(test_data, dataset_path_test, NAME, mask_dict[NAME])
+                build_json([dataset_path_test], [length_test], [name + '_' + TYPE], json_path_test)
+
+            print(f'Done {NAME} {TYPE}')