Duy Huynh committed
Commit a973aa2
1 Parent(s): 47a7bd0

Initialize

OV_Text.py ADDED
@@ -0,0 +1,83 @@
+ import os
+
+ import datasets
+
+ _DESCRIPTION = """\
+ OVText
+ """
+
+ _CITATION = """\
+ """
+
+ _BASE_URL = "https://huggingface.co/datasets/duyhngoc/OV_Text/resolve/main/data/"
+ TRAIN_FILE = "train.txt"
+ VALIDATION_FILE = "validation.txt"
+ TEST_FILE = "test.txt"
+
+
+ class UTSTextConfig(datasets.BuilderConfig):
+     """BuilderConfig"""
+
+     def __init__(self, **kwargs):
+         super(UTSTextConfig, self).__init__(**kwargs)
+
+
+ class UTSText(datasets.GeneratorBasedBuilder):
+     """OV_Text dataset builder"""
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         UTSTextConfig(
+             name="small", version=VERSION, description="OV_Text Small"),
+         UTSTextConfig(
+             name="base", version=VERSION, description="OV_Text Base"),
+         UTSTextConfig(
+             name="large", version=VERSION, description="OV_Text Large")
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=None,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         subset_folder = self.config.name
+         train_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, TRAIN_FILE))
+         validation_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, VALIDATION_FILE))
+         test_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, TEST_FILE))
+
+         splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": train_file}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": validation_file}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": test_file}
+             ),
+         ]
+         return splits
+
+     def _generate_examples(self, filepath):
+         """Yields one example per non-empty line of the text file."""
+         guid = 0
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 if line.strip() != "":
+                     item = {
+                         "text": line.strip()
+                     }
+                     yield guid, item
+                     guid += 1
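For context, here is a minimal usage sketch of the loading script above. Only the repository id (from `_BASE_URL`) and the config names (from `BUILDER_CONFIGS`) are taken from the script; everything else is a plain `datasets` call.

```python
from datasets import load_dataset

# Pick one of the subsets defined in BUILDER_CONFIGS: "small", "base", or "large".
# Recent versions of the datasets library may also require trust_remote_code=True,
# since this repository ships its own loading script.
dataset = load_dataset("duyhngoc/OV_Text", "base")

print(dataset)                       # DatasetDict with train / validation / test splits
print(dataset["train"][0]["text"])   # each example has a single "text" field
```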
README.md CHANGED
@@ -1,3 +1,88 @@
  ---
- license: apache-2.0
+ annotations_creators:
+ - no-annotation
+ language:
+ - vi
+ license:
+ - apache-2.0
+ multilinguality:
+ - monolingual
+ pretty_name: OV_Text
+ size_categories:
+ - 10K<n<100K
+ task_categories:
+ - text-generation
  ---
+
+ # Dataset Card for OV_Text
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+ - [Additional Information](#additional-information)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ The OV_Text dataset is a collection of 100,000 Vietnamese sentences sourced from various news articles.
+
+ Of the 10,000 sentences in the base subset, 5,000 are between 50 and 150 characters long, while the other 5,000 are between 20 and 50 characters long. This distribution of sentence lengths provides a diverse range of text samples for training and evaluating natural language processing models.
+
+ ### Dataset Summary
+
+ ### Supported Tasks and Leaderboards
+
+
+ ### Languages
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ ### Data Fields
+
+
+ ### Data Splits
+
+ | name  |  train | validation |  test |
+ |-------|-------:|-----------:|------:|
+ | small |   1600 |        200 |   200 |
+ | base  |   8000 |       1000 |  1000 |
+ | large |  95000 |       2500 |  2500 |
+
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+
+ ### Source Data
+
+
+ ### Annotations
+
+
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ The dataset is released under Apache 2.0.
+
+ ### Citation Information
+
+
+ ### Contributions
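As a sanity check on the Data Splits table above, a small hedged sketch that loads each config and compares split sizes with the README numbers; the expected counts are copied verbatim from the table, and the config names come from OV_Text.py.

```python
from datasets import load_dataset

# Expected split sizes, copied from the Data Splits table in the README.
expected = {
    "small": {"train": 1600, "validation": 200, "test": 200},
    "base": {"train": 8000, "validation": 1000, "test": 1000},
    "large": {"train": 95000, "validation": 2500, "test": 2500},
}

for name, sizes in expected.items():
    ds = load_dataset("duyhngoc/OV_Text", name)
    for split, n in sizes.items():
        assert ds[split].num_rows == n, f"{name}/{split}: {ds[split].num_rows} != {n}"
```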
data/base/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/base/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/base/validation.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/large/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/large/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/large/validation.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/small/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/small/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/small/validation.txt ADDED
The diff for this file is too large to render. See raw diff
 
eval.py ADDED
@@ -0,0 +1,18 @@
+ from datasets import load_dataset
+ import matplotlib.pyplot as plt
+ from os import makedirs
+ from os.path import dirname, join
+
+ dataset = load_dataset("duyhngoc/OV_Text", "base")  # a subset ("small", "base" or "large") must be chosen
+ sentences = dataset["train"]["text"]
+
+ # compute histogram of sentence lengths (in characters) with bin size = 10
+ pwd = dirname(__file__)
+ tmp = join(pwd, "tmp")
+ makedirs(tmp, exist_ok=True)  # make sure the output folder exists
+ lengths = [len(s) for s in sentences]
+ plt.hist(lengths, bins=range(0, max(lengths) + 10, 10))
+ # add title of plot
+ plt.title("Histogram of sentence lengths")
+ plt.savefig(join(tmp, "histogram.png"))
+ plt.show()
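To complement the histogram, a short sketch that prints per-split summary statistics of sentence lengths. Lengths are character counts, mirroring `len(s)` in eval.py; the `base` subset is used here purely as an example.

```python
from datasets import load_dataset

dataset = load_dataset("duyhngoc/OV_Text", "base")

for split in ("train", "validation", "test"):
    # Plain Python on purpose: the stdlib `statistics` module would be shadowed
    # by this repository's own statistics.py when run from the repo root.
    lengths = sorted(len(s) for s in dataset[split]["text"])
    n = len(lengths)
    print(
        f"{split}: n={n}, min={lengths[0]}, max={lengths[-1]}, "
        f"mean={sum(lengths) / n:.1f}, median={lengths[n // 2]}"
    )
```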
statistics.py ADDED
@@ -0,0 +1,26 @@
+ from os.path import dirname, join
+
+
+ def count_lines(filepath):
+     with open(filepath, encoding="utf-8") as f:
+         lines = f.read().splitlines()
+     return len(lines)
+
+
+ def statistic(subset_folder):
+     print("Train\t\t", count_lines(join(subset_folder, "train.txt")))
+     print("Validation\t", count_lines(join(subset_folder, "validation.txt")))
+     print("Test\t\t", count_lines(join(subset_folder, "test.txt")))
+
+
+ pwd = dirname(__file__)
+ dataset_folder = join(pwd, "data")
+
+ print("Subset Small")
+ statistic(join(dataset_folder, "small"))
+
+ print("Subset Base")
+ statistic(join(dataset_folder, "base"))
+
+ print("Subset Large")
+ statistic(join(dataset_folder, "large"))
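Assuming the local data/ folders match the Data Splits table in the README, this script should report 1600/200/200 lines for small, 8000/1000/1000 for base, and 95000/2500/2500 for large; any other counts would mean the text files and the dataset card are out of sync.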