|
import os |
|
import json |
|
import datasets |
|
from datasets import Dataset, DatasetDict, load_dataset, Features, Value, Image, ClassLabel |
|
|
|
|
|
|
|
# Root directory that each JSONL record's 'img_path' is resolved against.
image_root_dir = "./"

# Per-split annotation files: one JSON object per line with keys
# 'id', 'text', 'img_path', 'class_label' (see load_armeme_split below).
train_jsonl_file_path = "arabic_memes_categorization_train.jsonl"

dev_jsonl_file_path = "arabic_memes_categorization_dev.jsonl"

test_jsonl_file_path = "arabic_memes_categorization_test.jsonl"
|
|
|
|
|
|
|
|
|
def load_armeme_split(jsonl_file_path, image_root_dir):
    """Load one ArMeme split from a JSONL annotation file into a `Dataset`.

    Each line of the file must be a JSON object with the keys
    'id', 'text', 'img_path', and 'class_label'.

    Args:
        jsonl_file_path: Path to the split's JSONL annotation file.
        image_root_dir: Directory that each record's 'img_path' is
            relative to; the two are joined to form the full image path.

    Returns:
        A `datasets.Dataset` with columns 'id', 'text', 'image',
        'img_path', and 'class_label'. The 'image' column holds the same
        file paths as 'img_path' but is typed as `Image()`, so the
        library decodes the actual image lazily on access.

    Raises:
        FileNotFoundError: If `jsonl_file_path` does not exist.
        json.JSONDecodeError: If a line is not valid JSON.
    """
    ids = []
    texts = []
    image_file_paths = []
    class_labels = []

    # Explicit UTF-8: the annotations contain Arabic text, and relying on
    # the platform default encoding breaks on non-UTF-8 locales.
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        for line in f:
            item = json.loads(line)
            ids.append(item['id'])
            texts.append(item['text'])
            image_file_paths.append(os.path.join(image_root_dir, item['img_path']))
            class_labels.append(item['class_label'])

    # 'image' and 'img_path' deliberately share the same path list: the
    # Image feature decodes the former, the latter keeps the raw path string.
    data_dict = {
        'id': ids,
        'text': texts,
        'image': image_file_paths,
        'img_path': image_file_paths,
        'class_label': class_labels,
    }

    features = Features({
        'id': Value('string'),
        'text': Value('string'),
        'image': Image(),
        'img_path': Value('string'),
        'class_label': ClassLabel(names=['not_propaganda', 'propaganda', 'not-meme', 'other']),
    })

    return Dataset.from_dict(data_dict, features=features)
|
|
|
|
|
# Load each split with the shared loader; all image paths are resolved
# against the same root directory.
train_dataset = load_armeme_split(train_jsonl_file_path, image_root_dir)

dev_dataset = load_armeme_split(dev_jsonl_file_path, image_root_dir)

test_dataset = load_armeme_split(test_jsonl_file_path, image_root_dir)


# Assemble the splits. NOTE: the validation split is named 'dev' here,
# not the more common 'validation' — downstream code must use that key.
dataset_dict = DatasetDict({

    'train': train_dataset,

    'dev': dev_dataset,

    'test': test_dataset

})


# Network side effect: uploads the dataset to the Hugging Face Hub under
# the QCRI org (assumes the environment is already authenticated — verify).
dataset_dict.push_to_hub("QCRI/ArMeme")
|
|