albertvillanova HF staff committed on
Commit
c2dbfb3
1 Parent(s): 2c133fc

Convert dataset to Parquet (#4)

Browse files

- Convert dataset to Parquet (b066c54ccc7c8c45b9ecf414ecf53152160c1cdc)
- Delete loading script (d4db534c9b05e7f2dae82653517a5cdb69b38a97)

README.md CHANGED
@@ -35,10 +35,15 @@ dataset_info:
35
  '4': '-2'
36
  splits:
37
  - name: train
38
- num_bytes: 829569
39
  num_examples: 9008
40
- download_size: 690873
41
- dataset_size: 829569
 
 
 
 
 
42
  ---
43
 
44
  # Dataset Card for DanishPoliticalComments
 
35
  '4': '-2'
36
  splits:
37
  - name: train
38
+ num_bytes: 829561
39
  num_examples: 9008
40
+ download_size: 512677
41
+ dataset_size: 829561
42
+ configs:
43
+ - config_name: default
44
+ data_files:
45
+ - split: train
46
+ path: data/train-*
47
  ---
48
 
49
  # Dataset Card for DanishPoliticalComments
danish_political_comments.py DELETED
@@ -1,72 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- import datasets
18
-
19
-
20
- _DESCRIPTION = """\
21
- The dataset consists of 9008 sentences that are labelled with fine-grained polarity in the range from -2 to 2 (negative to postive). The quality of the fine-grained is not cross validated and is therefore subject to uncertainties; however, the simple polarity has been cross validated and therefore is considered to be more correct.
22
- """
23
- _HOMEPAGE_URL = "https://github.com/steffan267/Sentiment-Analysis-on-Danish-Social-Media"
24
- _URL = (
25
- "https://raw.githubusercontent.com/steffan267/Sentiment-Analysis-on-Danish-Social-Media/master/all_sentences.tsv"
26
- )
27
- _CITATION = "https://github.com/lucaspuvis/SAM/blob/master/Thesis.pdf"
28
-
29
-
30
- class DanishPoliticalComments(datasets.GeneratorBasedBuilder):
31
- VERSION = datasets.Version("0.9.1")
32
-
33
- def _info(self):
34
- return datasets.DatasetInfo(
35
- description=_DESCRIPTION,
36
- features=datasets.Features(
37
- {
38
- "id": datasets.Value("string"),
39
- "sentence": datasets.Value("string"),
40
- "target": datasets.features.ClassLabel(names=["2", "1", "0", "-1", "-2"]),
41
- },
42
- ),
43
- supervised_keys=None,
44
- homepage=_HOMEPAGE_URL,
45
- citation=_CITATION,
46
- )
47
-
48
- def _split_generators(self, dl_manager):
49
- path = dl_manager.download_and_extract(_URL)
50
- return [
51
- datasets.SplitGenerator(
52
- name=datasets.Split.TRAIN,
53
- gen_kwargs={"datapath": path},
54
- )
55
- ]
56
-
57
- def _generate_examples(self, datapath):
58
- sentence_counter = 0
59
- with open(datapath, encoding="utf-8") as f:
60
- for row in f:
61
- row = row.strip()
62
- target, sentence = row.split("\t")
63
- result = (
64
- sentence_counter,
65
- {
66
- "id": str(sentence_counter),
67
- "sentence": sentence,
68
- "target": target,
69
- },
70
- )
71
- sentence_counter += 1
72
- yield result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e82ab96f17c171ad0e1a0bad8fffa1ea3d37f1799e280806c7aaa181ded91ca7
3
+ size 512677