dongju.park committed on
Commit
9d9b575
1 Parent(s): b1c07d1

Initial commit

Browse files
.gitattributes CHANGED
@@ -14,3 +14,6 @@
14
  *.pb filter=lfs diff=lfs merge=lfs -text
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
 
 
 
14
  *.pb filter=lfs diff=lfs merge=lfs -text
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
17
+ data/train.tsv filter=lfs diff=lfs merge=lfs -text
18
+ data/test.tsv filter=lfs diff=lfs merge=lfs -text
19
+ data/valid.tsv filter=lfs diff=lfs merge=lfs -text
data/label.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
1
+ 매우 긍정
2
+ 다소 긍정
3
+ 중립
4
+ 다소 부정
5
+ 매우 부정
6
+ 모름
data/test.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b508c2654230f5f945a6da908a939c1aaac4a19c59d589acadf414f904475b6d
3
+ size 256110
data/train.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8722da721f809b4d7cec831fcdc97625c7eceb60c617e7e3ba6cf3c5bb8f7f3a
3
+ size 1601405
data/valid.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbf3cc0adbedde75edf9037889805925944d329fefbfcef8c6ee9ddc5915c826
3
+ size 205479
talktalk-sentiment-210713-multi-singleturn-custom-multiturn.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from __future__ import absolute_import, division, print_function
import datasets
import json

# Directory (relative to the dataset repository root) that holds the TSV splits.
_URL = "data/"
# One TSV file per split, all located under _URL.
_URLs = {split: _URL + split + ".tsv" for split in ("train", "valid", "test")}
class TalktalkSentiment_210713_multi_singleturn_custom_multiturn(
        datasets.GeneratorBasedBuilder):
    """TalkTalk sentiment-classification builder (210713 multiturn version).

    Each TSV row is ``text<TAB>label``. Rows whose text field is wrapped in
    square brackets are treated as multiturn/custom entries and surfaced via
    ``text_2``/``label_2`` (with ``label`` fixed to '모름'); all other rows
    populate ``text``/``label`` directly.
    """

    def _info(self):
        """Return dataset metadata: the feature schema and description."""
        return datasets.DatasetInfo(
            description="TalkTalk Sentiment Classification Multiturn Dataset (210713 Version)",
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Five sentiment grades plus '모름' ("unknown").
                    "label": datasets.features.ClassLabel(
                        names=['매우 긍정', '다소 긍정', '중립', '다소 부정', '매우 부정', '모름']),
                    "text_2": datasets.Value("string"),
                    "label_2": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            license="",
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download the three TSV files and map them to train/valid/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": downloaded_files[key]},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "valid"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs from one TSV split file.

        Fixes over the initial version:
        - ``label`` is stripped in BOTH branches; previously the bracketed
          branch kept the trailing newline in ``label_2``, so the two
          branches produced inconsistently-formatted labels.
        - ``startswith``/``endswith`` replace ``text[0]``/``text[-1]``,
          which raised IndexError when the text field was empty.
        - A row without exactly one tab is skipped with a clear intent,
          instead of raising an unpacking ValueError mid-generation.
        """
        with open(filepath, "r", encoding='UTF-8') as f:
            for idx, line in enumerate(f):
                parts = line.split("\t")
                if len(parts) != 2:
                    # Malformed row (missing or extra tab): skip rather than crash.
                    continue
                text, label = parts[0].strip(), parts[1].strip()
                if text.startswith('[') and text.endswith(']'):
                    # Bracketed text marks a multiturn/custom entry.
                    yield idx, {"text": '', "label": '모름',
                                "text_2": text, "label_2": label}
                else:
                    yield idx, {"text": text, "label": label,
                                "text_2": '', "label_2": ''}