Fraser committed
Commit 81484f7
1 Parent(s): 1069607

loading code

Files changed (3):
  1. .gitignore +3 -0
  2. README.md +7 -0
  3. wiki_sentences.py +38 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .vscode
+ *.pyc
+ venv
README.md ADDED
@@ -0,0 +1,7 @@
+ # Wiki Sentences
+
+ A dataset of all the sentences in Wikipedia.
+
+ Filtered to include only sentences of 64 characters or fewer.
+
+ Taken from the OPTIMUS project: https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md
wiki_sentences.py ADDED
@@ -0,0 +1,42 @@
+ from __future__ import absolute_import, division, print_function
+
+ from typing import List
+
+ import datasets
+
+
+ # Reuse the README as the description shown in the dataset info.
+ with open('README.md', 'r') as readme_file:
+     DESCRIPTION = readme_file.read()
+
+
+ DOWNLOAD_URL = "https://textae.blob.core.windows.net/optimus/data/datasets/wikipedia.segmented.nltk.txt"
+
+
+ class WikiSentences(datasets.GeneratorBasedBuilder):
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     'text': datasets.Value("string"),
+                 }
+             ),
+             homepage="https://github.com/ChunyuanLI/Optimus/blob/master/download_datasets.md",
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         # Download the pre-segmented Wikipedia text; all sentences go into a single train split.
+         path = dl_manager.download(DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         # Yield one example per line, keeping only sentences of 64 characters or fewer.
+         with open(filepath, encoding="utf-8") as txt_file:
+             for i, line in enumerate(txt_file):
+                 line = line.strip()
+                 if len(line) <= 64:
+                     yield i, {"text": line}
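
For reference, a minimal sketch of how this loading script might be used, assuming the `datasets` library is installed and the script is run from the repository root so that README.md resolves. Loading by local script path is an assumption based on the file added in this commit, not something the commit itself documents:

from datasets import load_dataset

# Point load_dataset at the local loading script added in this commit (assumed path).
dataset = load_dataset("wiki_sentences.py", split="train")

# Each example is a dict with a single "text" field of at most 64 characters.
print(dataset[0])

The split name "train" and the "text" feature match what the script's _split_generators and _info define.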