# DKK-nli / get_all.py
from datasets import load_dataset, Dataset, DatasetDict, concatenate_datasets
import pandas as pd

def load_klue():
    # KLUE NLI ships integer labels; keep only 0/1/2 and map them to strings.
    dataset = load_dataset('klue', 'nli')
    dataset = dataset.filter(lambda row: row['label'] in [0, 1, 2])

    def label_map(row):
        labels = [
            'entailment',
            'neutral',
            'contradiction',
        ]
        # Write to a temporary column ('labell') so map() can drop the
        # integer 'label' column first; it is renamed back below.
        row['labell'] = list(map(lambda x: labels[x], row['label']))
        return row

    dataset = dataset.map(label_map, batched=True, remove_columns=['label'])
    dataset = dataset.rename_column('labell', 'label')
    return dataset.select_columns(['premise', 'hypothesis', 'label'])
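# e.g. (illustrative) a KLUE row with label 0 comes back with label 'entailment'.
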
def load_dacon():
    # DACON Korean NLI competition CSVs, expected under data/.
    dataset = load_dataset('csv', data_files={'train': ['data/dacon_train_data.csv'], 'validation': 'data/dacon_test_data.csv'})
    return dataset.select_columns(['premise', 'hypothesis', 'label'])

def load_kakao():
    # KorNLI (Kakao Brain): translated SNLI plus XNLI dev for training,
    # XNLI test for validation.
    kakao_snli = pd.read_csv('data/snli_1.0_train.ko.tsv', sep='\t', encoding='utf-8')
    kakao_dev = pd.read_csv('data/xnli.dev.ko.tsv', sep='\t', encoding='utf-8')
    kakao_train = pd.concat([kakao_dev, kakao_snli])
    kakao_train.rename(columns={'sentence1': 'premise', 'sentence2': 'hypothesis', 'gold_label': 'label'}, inplace=True)
    kakao_train = kakao_train[['premise', 'hypothesis', 'label']]
    kakao_train.reset_index(drop=True, inplace=True)

    kakao_test = pd.read_csv('data/xnli.test.ko.tsv', sep='\t', encoding='utf-8')
    kakao_test.rename(columns={'sentence1': 'premise', 'sentence2': 'hypothesis', 'gold_label': 'label'}, inplace=True)
    kakao_test = kakao_test[['premise', 'hypothesis', 'label']]
    kakao_test.reset_index(drop=True, inplace=True)

    train_ds = Dataset.from_pandas(kakao_train)
    test_ds = Dataset.from_pandas(kakao_test)
    return DatasetDict({
        'train': train_ds,
        'validation': test_ds,
    })

def drop_na(example):
    # True only if no column of the example is NaN/None.
    na = False
    for column in example.keys():
        na = na or pd.isna(example[column])
    return not na
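# e.g. (hypothetical call) drop_na({'premise': 'a', 'hypothesis': None, 'label': 'neutral'})
# returns False, since pd.isna(None) is True, so that row is filtered out below.
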
datasets = {}
datasets['klue'] = load_klue()
datasets['dacon'] = load_dacon()
datasets['kakao'] = load_kakao()
trains, tests = zip(*[
    [ds_dict['train'], ds_dict['validation']]
    for ds_dict in datasets.values()
])

datasets = DatasetDict({
    'train': concatenate_datasets(trains),
    'validation': concatenate_datasets(tests),
})
# Drop any example with a missing premise, hypothesis, or label.
datasets = datasets.filter(drop_na)
datasets.push_to_hub("seongs1024/DKK-nli", private=True)
# datasets = load_dataset('seongs1024/DKK-nli')
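
# Quick sanity check after reloading (a minimal sketch, assuming the push above
# succeeded and you are authenticated with access to the private repo):
# print(datasets)                           # expected splits: train / validation
# print(datasets['train'].unique('label'))  # expected: entailment / neutral / contradiction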