pere committed
Commit 1975941 • 1 Parent(s): 992e870
data/train-shard-0001-of-0001.json.gz CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e732886098a487ee1ab4e61fd33ddd8ec56da48fa282711867cc5bee6b1e546
- size 13382966
+ oid sha256:9b1e508953ad5e9a812b5bf58f84fb7f2bd9afd21ebd984948d0e3bd0d038a1e
+ size 16302399
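The data shards are tracked with Git LFS, so each file above is a small pointer recording the payload's SHA-256 (`oid`) and its size in bytes. After downloading the resolved shard, one can sanity-check it against the new pointer; the snippet below is a minimal sketch, assuming the shard was downloaded locally under its name in `data/`.

```python
import hashlib

# Expected values copied from the updated LFS pointer above.
EXPECTED_OID = "9b1e508953ad5e9a812b5bf58f84fb7f2bd9afd21ebd984948d0e3bd0d038a1e"
EXPECTED_SIZE = 16302399

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Recompute size and SHA-256 of a downloaded file and compare to the pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return size == expected_size and sha.hexdigest() == expected_oid

# Assumes the resolved shard sits in the current directory (hypothetical path).
print(verify_lfs_object("train-shard-0001-of-0001.json.gz", EXPECTED_OID, EXPECTED_SIZE))
```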
data/{unlabelled_1341057.json → unlabelled_670529.json} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d0c13fda2f429b6264fa8290f6b267f76c64d185a503b11f80fa0d36cd789e5e
- size 125996673
+ oid sha256:0582c89e3a0878f9d66553d8f8b76d48f69a83404c9f7e429187367213540dbd
+ size 122773501
data/validation-shard-0001-of-0001.json.gz CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1a031ce6bc725f03446c1392dda9740c9fbb01ffa16bb1e39e638c0512aa9e10
- size 3086100
+ oid sha256:dc3913da7f010dedcee0a0ab0201ebd62b70ebbb2bc50fd417ddf11469ba083c
+ size 10433051
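Both splits ship as gzipped JSON shards. Assuming a JSON-lines layout (one tweet record per line; the actual schema lives in the loading script's `_info`/`_generate_examples`, which this commit does not show), a downloaded shard can be peeked at with a short sketch like this:

```python
import gzip
import json

# Hypothetical local path; assumes the shard was already downloaded from the repo
# and that it is JSON-lines (one JSON object per line).
with gzip.open("validation-shard-0001-of-0001.json.gz", "rt", encoding="utf-8") as f:
    for i, line in enumerate(f):
        record = json.loads(line)
        print(record)
        if i == 2:  # look at the first three records only
            break
```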
italian_tweets_1M.py → italian_tweets_500k.py RENAMED
@@ -5,13 +5,13 @@ import datasets
  
  logger = datasets.logging.get_logger(__name__)
  _DESCRIPTION = """\\nItalian tweets."""
- _DATA_URL = "https://huggingface.co/datasets/pere/italian_tweets_1M/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
+ _DATA_URL = "https://huggingface.co/datasets/pere/italian_tweets_500k/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
  _N_SHARDS_PER_SPLIT = {
      "train": 1, "validation": 1
  }
  
  
- class italian_tweets_1MConfig(datasets.BuilderConfig):
+ class italian_tweets_500kConfig(datasets.BuilderConfig):
      """BuilderConfig for NbNn."""
  
      def __init__(self, *args, **kwargs):
@@ -21,15 +21,15 @@ class italian_tweets_1MConfig(datasets.BuilderConfig):
          """
          super().__init__(
              *args,
-             name="italian_tweets_1M",
+             name="italian_tweets_500k",
              **kwargs,
          )
  
  
- class italian_tweets_1M(datasets.GeneratorBasedBuilder):
+ class italian_tweets_500k(datasets.GeneratorBasedBuilder):
      """Norwegian Colossal Corpus v2."""
-     BUILDER_CONFIGS = [italian_tweets_1MConfig()]
+     BUILDER_CONFIGS = [italian_tweets_500kConfig()]
-     BUILDER_CONFIG_CLASS = italian_tweets_1MConfig
+     BUILDER_CONFIG_CLASS = italian_tweets_500kConfig
  
      def _info(self):
          return datasets.DatasetInfo(
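The script change mirrors the repo rename: the builder class, its config, and `_DATA_URL` all switch from `italian_tweets_1M` to `italian_tweets_500k`. `_DATA_URL` is a format template that, combined with `_N_SHARDS_PER_SPLIT`, yields one URL per split and shard. Below is a minimal sketch of that expansion and of loading the renamed dataset through the hub; depending on the installed `datasets` version, `trust_remote_code=True` may be required for script-based datasets.

```python
import datasets

# URL template and shard counts as defined in italian_tweets_500k.py.
_DATA_URL = "https://huggingface.co/datasets/pere/italian_tweets_500k/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
_N_SHARDS_PER_SPLIT = {"train": 1, "validation": 1}

# Expand the template once per shard; the results match the files under data/.
for split, n_shards in _N_SHARDS_PER_SPLIT.items():
    for index in range(1, n_shards + 1):
        print(_DATA_URL.format(split_suffix=split, index=index, n_shards=n_shards))
# .../data/train-shard-0001-of-0001.json.gz
# .../data/validation-shard-0001-of-0001.json.gz

# Load through the hub using the renamed loading script.
ds = datasets.load_dataset("pere/italian_tweets_500k")
print(ds)
```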