ArMeme / armeme_loader.py
import os
import json
import datasets
from datasets import Dataset, DatasetDict, load_dataset, Features, Value, Image, ClassLabel
# Define the paths to your dataset
image_root_dir = "./"
train_jsonl_file_path = "arabic_memes_categorization_train.jsonl"
dev_jsonl_file_path = "arabic_memes_categorization_dev.jsonl"
test_jsonl_file_path = "arabic_memes_categorization_test.jsonl"
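# Each JSONL line is expected to carry the four fields the loader reads below
# (id, text, img_path, class_label). A hypothetical, illustrative record:
# {"id": "0001", "text": "...", "img_path": "images/0001.jpg", "class_label": "not_propaganda"}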
# Function to load each dataset split
def load_armeme_split(jsonl_file_path, image_root_dir):
    texts = []
    images = []
    ids = []
    class_labels = []
    image_file_paths = []

    # Load the JSONL file and collect one entry per example
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        for line in f:
            item = json.loads(line)
            ids.append(item['id'])
            texts.append(item['text'])
            image_file_path = os.path.join(image_root_dir, item['img_path'])
            images.append(image_file_path)
            image_file_paths.append(image_file_path)
            class_labels.append(item['class_label'])

    # Create a dictionary to match the dataset structure
    data_dict = {
        'id': ids,
        'text': texts,
        'image': images,
        'img_path': image_file_paths,
        'class_label': class_labels
    }

    # Define the features
    features = Features({
        'id': Value('string'),
        'text': Value('string'),
        'image': Image(),
        'img_path': Value('string'),
        'class_label': ClassLabel(names=['not_propaganda', 'propaganda', 'not-meme', 'other'])
    })

    # Create a Hugging Face dataset from the dictionary
    dataset = Dataset.from_dict(data_dict, features=features)
    return dataset
# Load each split
train_dataset = load_armeme_split(train_jsonl_file_path, image_root_dir)
dev_dataset = load_armeme_split(dev_jsonl_file_path, image_root_dir)
test_dataset = load_armeme_split(test_jsonl_file_path, image_root_dir)
# Create a DatasetDict
dataset_dict = DatasetDict({
    'train': train_dataset,
    'dev': dev_dataset,
    'test': test_dataset
})
# Push the dataset to Hugging Face Hub
dataset_dict.push_to_hub("QCRI/ArMeme")
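
# Usage sketch (illustrative addition, not part of the original upload step):
# once the push above completes, the same splits can be reloaded directly
# from the Hub under the repository id used in push_to_hub.
reloaded = load_dataset("QCRI/ArMeme")
print(reloaded)                              # DatasetDict with train/dev/test splits
print(reloaded["train"][0]["class_label"])   # integer label id after ClassLabel encoding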