Datasets: saudinewsnet
Update files from the datasets library (from 1.7.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.7.0
- README.MD +5 -4
- saudinewsnet.py +2 -2
README.MD CHANGED

@@ -17,6 +17,7 @@ task_categories:
 - sequence-modeling
 task_ids:
 - language-modeling
+paperswithcode_id: null
 ---
 
 # Dataset Card for "saudinewsnet"
@@ -24,12 +25,12 @@ task_ids:
 ## Table of Contents
 - [Dataset Description](#dataset-description)
   - [Dataset Summary](#dataset-summary)
-  - [Supported Tasks](#supported-tasks)
+  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
   - [Languages](#languages)
 - [Dataset Structure](#dataset-structure)
   - [Data Instances](#data-instances)
   - [Data Fields](#data-fields)
-  - [Data Splits
+  - [Data Splits](#data-splits)
 - [Dataset Creation](#dataset-creation)
   - [Curation Rationale](#curation-rationale)
   - [Source Data](#source-data)
@@ -76,7 +77,7 @@ The dataset currently contains **31,030** Arabic articles (with a total number o
 - [Arreyadi](http://www.arreyadi.com.sa/) (133 articles)
 - [Arreyadiyah](http://www.arreyadiyah.com/) (52 articles)
 
-### Supported Tasks
+### Supported Tasks and Leaderboards
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
@@ -121,7 +122,7 @@ The data fields are the same among all splits.
 - **`author`** (str): The author of the article. Contains missing values that were replaced with an empty string.
 - **`content`** (str): The content of the article.
 
-### Data Splits
+### Data Splits
 
 | name |train|
 |-------|----:|
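The card change is confined to the YAML front matter (the new `paperswithcode_id: null` field) and two renamed section headings. That front matter is the machine-readable metadata the Hub indexes; below is a minimal sketch of reading it back, assuming the updated card is published under the `saudinewsnet` dataset id (note that `DatasetCard` comes from the separate `huggingface_hub` package, not from this `datasets` release):

```python
from huggingface_hub import DatasetCard

# Fetch the dataset repo's README and parse its YAML front matter.
card = DatasetCard.load("saudinewsnet")
meta = card.data.to_dict()

print(meta.get("task_ids"))           # e.g. ['language-modeling']
print(meta.get("paperswithcode_id"))  # the field added by this update (None/null)
```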
saudinewsnet.py CHANGED

@@ -124,7 +124,7 @@ class Saudinewsnet(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, filepath, split):
         """Generates examples"""
-        for path in filepath:
+        for file_idx, path in enumerate(filepath):
             with open(path, encoding="utf-8") as f:
                 articles = json.load(f)
                 for _id, article in enumerate(articles):
@@ -135,7 +135,7 @@ class Saudinewsnet(datasets.GeneratorBasedBuilder):
                     author = article.get("author", "").strip(" ")
                     content = article["content"].strip("/n")
 
-                    yield _id, {
+                    yield f"{file_idx}_{_id}", {
                         "title": title,
                         "source": source,
                         "date_extracted": dt,
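The script change prefixes each example key with the index of the JSON file being read, presumably so that keys stay unique across files that each restart their enumeration at 0 (later versions of `datasets` check for duplicate keys). For reference, a minimal usage sketch, assuming the updated script is published on the Hub under the `saudinewsnet` id and exposes the single `train` split listed in the card:

```python
from datasets import load_dataset

# Downloads and runs the updated saudinewsnet.py loading script.
ds = load_dataset("saudinewsnet", split="train")

print(ds.num_rows)                      # the card reports 31,030 articles
print(ds[0]["title"], ds[0]["source"])  # fields yielded by _generate_examples
```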