Seongsu Park committed on
Commit
ebb1d8b
1 Parent(s): 63212b9

create DKK-nli

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. get_all.py +59 -0
  3. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .venv/
get_all.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import load_dataset, load_metric, ClassLabel, Sequence, Dataset, DatasetDict, concatenate_datasets
2
+ import pandas as pd
3
+
4
def load_klue():
    """Load the KLUE NLI dataset with integer labels converted to strings.

    Returns a DatasetDict whose splits carry exactly the columns
    premise / hypothesis / label, where label is one of
    'entailment', 'neutral', 'contradiction'.
    """
    label_names = [
        'entailment',
        'neutral',
        'contradiction',
    ]
    ds = load_dataset('klue', 'nli')
    # Keep only rows whose label is one of the three known classes.
    ds = ds.filter(lambda example: example['label'] in [0, 1, 2])

    def to_names(batch):
        # Write string labels into a temporary column; map() drops the
        # integer 'label' column, and we rename the new one back below.
        batch['labell'] = [label_names[i] for i in batch['label']]
        return batch

    ds = ds.map(to_names, batched=True, remove_columns=['label'])
    ds = ds.rename_column('labell', 'label')
    return ds.select_columns(['premise', 'hypothesis', 'label'])
18
+
19
def load_dacon():
    """Load the Dacon NLI CSV files as a train/validation DatasetDict.

    Expects data/dacon_train_data.csv and data/dacon_test_data.csv to
    exist; only the premise / hypothesis / label columns are kept.
    """
    data_files = {
        'train': ['data/dacon_train_data.csv'],
        'validation': 'data/dacon_test_data.csv',
    }
    ds = load_dataset('csv', data_files=data_files)
    return ds.select_columns(['premise', 'hypothesis', 'label'])
22
+
23
def load_kakao():
    """Load the Kakao Korean NLI TSV files into a DatasetDict.

    Train split = xnli.dev + snli train translations (dev rows first,
    matching the original concatenation order); validation split =
    xnli.test. Columns are normalized to premise / hypothesis / label.

    Returns:
        DatasetDict with 'train' and 'validation' splits.
    """
    def _tidy(df):
        # Normalize column names, keep only the three NLI columns, and
        # drop the old index so Dataset.from_pandas gets a clean frame.
        df = df.rename(columns={
            'sentence1': 'premise',
            'sentence2': 'hypothesis',
            'gold_label': 'label',
        })
        df = df[['premise', 'hypothesis', 'label']]
        return df.reset_index(drop=True)

    kakao_snli = pd.read_csv('data/snli_1.0_train.ko.tsv', sep='\t', encoding='utf-8')
    kakao_dev = pd.read_csv('data/xnli.dev.ko.tsv', sep='\t', encoding='utf-8')
    kakao_train = _tidy(pd.concat([kakao_dev, kakao_snli]))
    kakao_test = _tidy(pd.read_csv('data/xnli.test.ko.tsv', sep='\t', encoding='utf-8'))
    return DatasetDict({
        'train': Dataset.from_pandas(kakao_train),
        'validation': Dataset.from_pandas(kakao_test),
    })
40
+
41
+
42
# Assemble the combined DKK-nli corpus from the three sources.
sources = {}
sources['klue'] = load_klue()
sources['dacon'] = load_dacon()
sources['kakao'] = load_kakao()

# Collect the matching splits from every source before concatenating.
train_splits = [ds_dict['train'] for ds_dict in sources.values()]
valid_splits = [ds_dict['validation'] for ds_dict in sources.values()]

datasets = DatasetDict({
    'train': concatenate_datasets(train_splits),
    'validation': concatenate_datasets(valid_splits),
})

# datasets.push_to_hub("seongs1024/DKK-nli", private=True)

# datasets = load_dataset('seongs1024/DKK-nli')
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ datasets==2.10.0
2
+ torch==1.9.0
3
+ transformers==4.26.0