s-jse committed
Commit: b67421f
Parent: 7b86964

Update README.md

Files changed (1): README.md (+119, -3)
README.md CHANGED
@@ -115,6 +115,113 @@ configs:
       - "2017_part_03.jsonl.gz"
       - "2017_part_04.jsonl.gz"
       - "2017_part_05.jsonl.gz"
+  - config_name: "2018"
+    data_files:
+      - "2018_part_00.jsonl.gz"
+      - "2018_part_01.jsonl.gz"
+      - "2018_part_02.jsonl.gz"
+      - "2018_part_03.jsonl.gz"
+      - "2018_part_04.jsonl.gz"
+      - "2018_part_05.jsonl.gz"
+      - "2018_part_06.jsonl.gz"
+      - "2018_part_07.jsonl.gz"
+      - "2018_part_08.jsonl.gz"
+  - config_name: "2019"
+    data_files:
+      - "2019_part_00.jsonl.gz"
+      - "2019_part_01.jsonl.gz"
+      - "2019_part_02.jsonl.gz"
+      - "2019_part_03.jsonl.gz"
+      - "2019_part_04.jsonl.gz"
+      - "2019_part_05.jsonl.gz"
+      - "2019_part_06.jsonl.gz"
+      - "2019_part_07.jsonl.gz"
+      - "2019_part_08.jsonl.gz"
+      - "2019_part_09.jsonl.gz"
+      - "2019_part_10.jsonl.gz"
+  - config_name: "2020"
+    data_files:
+      - "2020_part_00.jsonl.gz"
+      - "2020_part_01.jsonl.gz"
+      - "2020_part_02.jsonl.gz"
+      - "2020_part_03.jsonl.gz"
+      - "2020_part_04.jsonl.gz"
+      - "2020_part_05.jsonl.gz"
+      - "2020_part_06.jsonl.gz"
+      - "2020_part_07.jsonl.gz"
+      - "2020_part_08.jsonl.gz"
+      - "2020_part_09.jsonl.gz"
+      - "2020_part_10.jsonl.gz"
+      - "2020_part_11.jsonl.gz"
+      - "2020_part_12.jsonl.gz"
+      - "2020_part_13.jsonl.gz"
+      - "2020_part_14.jsonl.gz"
+      - "2020_part_15.jsonl.gz"
+  - config_name: "2021"
+    data_files:
+      - "2021_part_00.jsonl.gz"
+      - "2021_part_01.jsonl.gz"
+      - "2021_part_02.jsonl.gz"
+      - "2021_part_03.jsonl.gz"
+      - "2021_part_04.jsonl.gz"
+      - "2021_part_05.jsonl.gz"
+      - "2021_part_06.jsonl.gz"
+      - "2021_part_07.jsonl.gz"
+      - "2021_part_08.jsonl.gz"
+      - "2021_part_09.jsonl.gz"
+      - "2021_part_10.jsonl.gz"
+      - "2021_part_11.jsonl.gz"
+      - "2021_part_12.jsonl.gz"
+      - "2021_part_13.jsonl.gz"
+      - "2021_part_14.jsonl.gz"
+      - "2021_part_15.jsonl.gz"
+  - config_name: "2022"
+    data_files:
+      - "2022_part_00.jsonl.gz"
+      - "2022_part_01.jsonl.gz"
+      - "2022_part_02.jsonl.gz"
+      - "2022_part_03.jsonl.gz"
+      - "2022_part_04.jsonl.gz"
+      - "2022_part_05.jsonl.gz"
+      - "2022_part_06.jsonl.gz"
+      - "2022_part_07.jsonl.gz"
+      - "2022_part_08.jsonl.gz"
+      - "2022_part_09.jsonl.gz"
+      - "2022_part_10.jsonl.gz"
+      - "2022_part_11.jsonl.gz"
+      - "2022_part_12.jsonl.gz"
+      - "2022_part_13.jsonl.gz"
+      - "2022_part_14.jsonl.gz"
+      - "2022_part_15.jsonl.gz"
+      - "2022_part_16.jsonl.gz"
+  - config_name: "2023"
+    data_files:
+      - "2023_part_00.jsonl.gz"
+      - "2023_part_01.jsonl.gz"
+      - "2023_part_02.jsonl.gz"
+      - "2023_part_03.jsonl.gz"
+      - "2023_part_04.jsonl.gz"
+      - "2023_part_05.jsonl.gz"
+      - "2023_part_06.jsonl.gz"
+      - "2023_part_07.jsonl.gz"
+      - "2023_part_08.jsonl.gz"
+      - "2023_part_09.jsonl.gz"
+      - "2023_part_10.jsonl.gz"
+      - "2023_part_11.jsonl.gz"
+      - "2023_part_12.jsonl.gz"
+      - "2023_part_13.jsonl.gz"
+      - "2023_part_14.jsonl.gz"
+      - "2023_part_15.jsonl.gz"
+  - config_name: "2024"
+    data_files:
+      - "2024_part_00.jsonl.gz"
+      - "2024_part_01.jsonl.gz"
+      - "2024_part_02.jsonl.gz"
+      - "2024_part_03.jsonl.gz"
+      - "2024_part_04.jsonl.gz"
+      - "2024_part_05.jsonl.gz"
+      - "2024_part_06.jsonl.gz"
+
 ---
 
 This dataset is the result of processing all WARC files in the [CCNews Corpus](https://commoncrawl.org/blog/news-dataset-available), from the beginning (2016) to June of 2024.
@@ -127,17 +234,26 @@ Sample Python code to explore this dataset:
 
 ```python
 from datasets import load_dataset
+from tqdm import tqdm
 
-# Load the dataset in streaming mode
-dataset = load_dataset("stanford-oval/ccnews", streaming=True)
+# Load the news articles for the year 2016, in streaming mode
+dataset = load_dataset("stanford-oval/ccnews", "2016", streaming=True)
 
 # Print information about the dataset
 print(dataset)
 
-# To demonstrate streaming, we can iterate over a few examples
+# Iterate over a few examples
 print("\nFirst few examples:")
 for i, example in enumerate(dataset["train"].take(5)):
     print(f"Example {i + 1}:")
     print(example)
     print()
+
+# Count the number of articles (in 2016)
+row_count = 0
+for _ in tqdm(dataset["train"], desc="Counting rows", unit=" rows", unit_scale=True, unit_divisor=1000):
+    row_count += 1
+
+# Print the number of rows
+print(f"\nTotal number of articles: {row_count}")
 ```
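
The streaming example above covers the `datasets` API. For a quick look at the raw data, a single shard can also be fetched and read directly. The following is a minimal sketch, not part of the README diff: it assumes the part files listed in the configs (e.g. `2018_part_00.jsonl.gz`) are gzipped, line-delimited JSON, and it uses `hf_hub_download` from `huggingface_hub` to download one shard. The article schema is not shown in this diff, so the example only prints the field names of the first record.

```python
import gzip
import json

from huggingface_hub import hf_hub_download

# Download a single shard; "2018_part_00.jsonl.gz" is one of the files
# listed under the "2018" config above.
path = hf_hub_download(
    repo_id="stanford-oval/ccnews",
    filename="2018_part_00.jsonl.gz",
    repo_type="dataset",
)

# Each line is assumed to be one JSON-encoded article (per the .jsonl.gz naming).
with gzip.open(path, "rt", encoding="utf-8") as f:
    first_record = json.loads(next(f))

# The exact fields are not documented in this diff, so just list whatever is there.
print(sorted(first_record.keys()))
```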