saied committed
Commit d61cfc9
1 Parent(s): a710a60

adding info

Files changed (2):
  1. dataset_info.json +1 -0
  2. persian_blog.py +4 -4
dataset_info.json ADDED
@@ -0,0 +1 @@
+{"description": "persian_blog is a dataset consisting of 400K blog posts from various websites, covering a range of tones.\nThis dataset can be used in different NLG tasks; as a show-case, it is used in training reformer-persian.", "citation": "\nhttps://saied71.github.io/RohanAiLab/,\n author={Saied Alimoradi},\n year={2021}\n}\n", "homepage": "", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "persian_blog", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2262319474, "num_examples": 385900, "dataset_name": "persian_blog"}}, "download_checksums": {"blogs.zip": {"num_bytes": 591517924, "checksum": "2ade6eb60013b1cd50168061ba4656b92b87f29c285091c0a673bd27ec353fcc"}}, "download_size": 591517924, "post_processing_size": null, "dataset_size": 2262319474, "size_in_bytes": 2853837398}
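The metadata added above can be sanity-checked in a few lines. This is a minimal sketch assuming a local clone of the repo with dataset_info.json at its root; the field names come directly from the JSON shown above.

```python
import json

# Assumes dataset_info.json from this commit sits in the current directory.
with open("dataset_info.json", encoding="utf-8") as f:
    info = json.load(f)

train = info["splits"]["train"]
print(train["num_examples"])            # 385900 blog posts in the train split
print(info["download_size"] / 2**30)    # ~0.55 GiB for the blogs.zip archive
print(info["dataset_size"] / 2**30)     # ~2.11 GiB of extracted text
```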
persian_blog.py CHANGED
@@ -4,7 +4,8 @@ import os
 import sys
 csv.field_size_limit(sys.maxsize)
 
-_DESCRIPTION = """"""
+_DESCRIPTION = """persian_blog is a dataset consisting of 400K blog posts from various websites, covering a range of tones.
+This dataset can be used in different NLG tasks; as a show-case, it is used in training reformer-persian."""
 _PROJECT_URL = """"""
 
 
@@ -15,8 +16,7 @@ https://saied71.github.io/RohanAiLab/,
 }
 """
 
-# _URL = "blogs.zip"
-_URL = "sample_lit.zip"
+_URL = "blogs.zip"
 
 
 class persian_blog(datasets.GeneratorBasedBuilder):
@@ -38,7 +38,7 @@ class persian_blog(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "sample_lit.csv")
+        data_dir = os.path.join(dl_dir, "blogs.csv")
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
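The diff stops short of `_generate_examples`, so the sketch below is only an assumption about how the generator consumes `data_dir`: a plain `csv` read of blogs.csv yielding the single `text` feature declared in dataset_info.json. The `filepath` keyword and the header-skipping line are hypothetical; the actual method in the script may differ.

```python
    # Sketch of the generator that would sit inside the persian_blog class above,
    # assuming _split_generators passes gen_kwargs={"filepath": data_dir}.
    def _generate_examples(self, filepath):
        """Yields (key, example) pairs from the extracted blogs.csv (sketch)."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f)
            next(reader)  # assumed header row; drop this line if the CSV has none
            for idx, row in enumerate(reader):
                yield idx, {"text": row[0]}
```

With the script pointing at blogs.zip, the dataset would typically be consumed through the standard API, e.g. `datasets.load_dataset("RohanAiLab/persian_blog", split="train")` (repo id assumed from the project URL).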