KHLim commited on
Commit
82f167c
1 Parent(s): 14c66c0
Files changed (3) hide show
  1. README.md +27 -0
  2. boost_camp.py +84 -0
  3. dummy/mydata/0.0.0/dummy_data.zip +3 -0
README.md ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-nc-2.0
3
+ dataset_info:
4
+ config_name: mydata
5
+ features:
6
+ - name: id
7
+ dtype: string
8
+ - name: source
9
+ dtype: string
10
+ - name: sentence_1
11
+ dtype: string
12
+ - name: sentence_2
13
+ dtype: string
14
+ - name: label
15
+ dtype: float32
16
+ - name: binary-label
17
+ dtype: float32
18
+ splits:
19
+ - name: train
20
+ num_bytes: 1675390
21
+ num_examples: 9324
22
+ - name: validation
23
+ num_bytes: 97959
24
+ num_examples: 550
25
+ download_size: 1657856
26
+ dataset_size: 1773349
27
+ ---
boost_camp.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import datasets
3
+ from datasets.download.download_manager import DownloadManager
4
+
5
+ _CITATION = """"""
6
+ _DESCRIPTION = "hugging face dataset 공유해보기 연습"
7
+ _LICENSE = "CC-BY-SA-4.0"
8
+ _URL = "https://github.com/KyungHyunLim/hf_dataset_test"
9
+ _DATA_URLS = {
10
+ "train": "https://huggingface.co/datasets/KHLim/boost_camp/resolve/main/train.csv",
11
+ "valid": "https://huggingface.co/datasets/KHLim/boost_camp/resolve/main/dev.csv"
12
+ }
13
+
14
+ _VERSION = "0.0.0"
15
+
16
class MyDataConfig(datasets.BuilderConfig):
    """BuilderConfig that carries the download URL(s) for this dataset.

    Args:
        data_url: Mapping of split name to remote file URL (or a single URL),
            stored on the config for use by the builder's split generators.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (name, description, ...).
    """

    def __init__(self, data_url, **kwargs):
        # Pin the config to the module-level builder version.
        config_version = datasets.Version(_VERSION)
        super().__init__(version=config_version, **kwargs)
        self.data_url = data_url
20
+
21
class MYData(datasets.GeneratorBasedBuilder):
    """Dataset builder for a Korean sentence-pair similarity dataset.

    Downloads train/validation CSV files (columns: id, source, sentence_1,
    sentence_2, label, binary-label) from the URLs configured in
    ``MyDataConfig.data_url`` and yields one example per CSV row.
    """

    DEFAULT_CONFIG_NAME = "mydata"
    BUILDER_CONFIGS = [
        MyDataConfig(
            name="mydata",
            data_url=_DATA_URLS,
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        """Return the dataset metadata: features, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "sentence_1": datasets.Value("string"),
                    "sentence_2": datasets.Value("string"),
                    # "float" is not a valid dtype name for datasets.Value;
                    # the README card declares float32 for both label columns,
                    # so use the explicit alias.
                    "label": datasets.Value("float32"),
                    "binary-label": datasets.Value("float32"),
                }
            ),
            homepage=_URL,
            license=_LICENSE,
            citation=_CITATION,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download the split CSVs and declare the train/validation splits.

        Args:
            dl_manager: Download manager used to fetch ``self.config.data_url``.

        Returns:
            A list with one ``SplitGenerator`` per split; each passes the local
            file path and split name to ``_generate_examples``.
        """
        data_file = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": data_file['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": data_file['valid'],
                    "split": "valid",
                },
            ),
        ]

    def _generate_examples(self, data_file: str, split: str):
        """Yield ``(index, example)`` pairs from one downloaded CSV file.

        The first row is the header and is skipped. ``label`` and
        ``binary-label`` are cast to float eagerly so a malformed row fails
        here with a clear error instead of later during Arrow encoding.
        """
        with open(data_file, newline='', encoding='utf-8') as f:
            reader = csv.reader(f, delimiter=',')
            next(reader)  # skip the header row (it was read but unused before)
            for idx, row in enumerate(reader):
                yield idx, {
                    "id": row[0],
                    "source": row[1],
                    "sentence_1": row[2],
                    "sentence_2": row[3],
                    "label": float(row[4]),
                    "binary-label": float(row[5]),
                }
dummy/mydata/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37d2d53b94777551d18c345efcba70e0e2339e91fdd19651e82e2eed660e11e7
3
+ size 1185