Datasets: ar_cov19
Tasks: Other
Languages: Arabic
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
ArXiv:
Tags: data-mining
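
For reference, loading this dataset through the datasets library would look roughly like the snippet below. This is the standard load_dataset pattern, not something shown on this page, and the split name is an assumption:

    from datasets import load_dataset

    # Runs the ar_cov19.py loading script, which downloads and extracts the
    # archive pointed to by _URL and yields one example per tweet ID.
    dataset = load_dataset("ar_cov19", split="train")
    print(dataset[0])  # expected shape: {"tweetID": ...}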
albertvillanova (HF staff) committed
Commit: f18514c
Parent(s): 0f26a0c

Update loading script

Files changed (1):
  1. ar_cov19.py +2 -2
ar_cov19.py CHANGED
@@ -48,7 +48,7 @@ _HOMEPAGE = "https://gitlab.com/bigirqu/ArCOV-19"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "https://gitlab.com/bigirqu/ArCOV-19/-/archive/master/ArCOV-19-master.zip"
+_URL = "dataset-all_tweets.zip"
 # _URL="https://gitlab.com/bigirqu/ArCOV-19/-/archive/master/ArCOV-19-master.zip?path=dataset/all_tweets"
 
 
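The change above swaps the remote GitLab archive for a relative path, so the zip is now resolved within the dataset repository itself. For context, here is a minimal sketch of how _URL is typically consumed in _split_generators, assuming the standard GeneratorBasedBuilder pattern (the actual method body is not part of this diff):

    import datasets

    _URL = "dataset-all_tweets.zip"


    class ArCov19(datasets.GeneratorBasedBuilder):
        def _split_generators(self, dl_manager):
            # With a relative _URL, download_and_extract resolves the archive
            # against the dataset repo instead of fetching it from GitLab.
            data_dir = dl_manager.download_and_extract(_URL)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_dir": data_dir},
                )
            ]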
@@ -128,7 +128,7 @@ class ArCov19(datasets.GeneratorBasedBuilder):
         # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
         # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
         # The key is not important, it's more here for legacy reason (legacy from tfds)
-        for fname in sorted(glob.glob(os.path.join(data_dir, "ArCOV-19-master/dataset/all_tweets/2020-*"))):
+        for fname in sorted(glob.glob(os.path.join(data_dir, "dataset-all_tweets", "2020-*"))):
 
             df = pd.read_csv(fname, names=["tweetID"])
             for id_, record in df.iterrows():
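
Putting the pieces together, a hedged standalone sketch of the generation loop (the glob pattern and read_csv call are confirmed by the diff; the keying scheme and the yielded dict are assumptions based on the "tweetID" column):

    import glob
    import os

    import pandas as pd


    def generate_examples(data_dir):
        """Yield (key, example) pairs from the per-day tweet-ID files."""
        # The extracted archive contains one headerless, single-column CSV
        # per day (files named 2020-01-27, 2020-01-28, ...).
        for fname in sorted(glob.glob(os.path.join(data_dir, "dataset-all_tweets", "2020-*"))):
            df = pd.read_csv(fname, names=["tweetID"])
            for id_, record in df.iterrows():
                # Hypothetical key: file name plus row index, unique across files.
                yield f"{os.path.basename(fname)}_{id_}", {"tweetID": record["tweetID"]}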