Datasets:

Tasks:
Other
Languages:
Arabic
Multilinguality:
monolingual
Size Categories:
1M<n<10M
Language Creators:
found
Annotations Creators:
no-annotation
Source Datasets:
original
ArXiv:
Tags:
data-mining
albertvillanova HF staff committed on
Commit
5b609e6
1 Parent(s): f0059d7

Update script to iterate data files for all dates

Browse files
Files changed (1) hide show
  1. ar_cov19.py +4 -3
ar_cov19.py CHANGED
@@ -121,14 +121,15 @@ class ArCov19(datasets.GeneratorBasedBuilder):
121
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
122
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
123
  data_dir = dl_manager.download_and_extract(_URL)
124
- return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir})]
 
125
 
126
- def _generate_examples(self, data_dir):
127
  """Yields examples."""
128
  # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
129
  # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
130
  # The key is not important, it's more here for legacy reason (legacy from tfds)
131
- for fname in sorted(glob.glob(os.path.join(data_dir, "dataset-all_tweets", "2020-*"))):
132
 
133
  df = pd.read_csv(fname, names=["tweetID"])
134
  for id_, record in df.iterrows():
 
121
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
122
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
123
  data_dir = dl_manager.download_and_extract(_URL)
124
+ data_files = dl_manager.iter_files(data_dir)
125
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_files": data_files})]
126
 
127
+ def _generate_examples(self, data_files):
128
  """Yields examples."""
129
  # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
130
  # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
131
  # The key is not important, it's more here for legacy reason (legacy from tfds)
132
+ for fname in data_files:
133
 
134
  df = pd.read_csv(fname, names=["tweetID"])
135
  for id_, record in df.iterrows():