rain1024 committed
Commit b7cf6a5
2 Parent(s): fb03fda c1ebcdb
README.md CHANGED
@@ -1,3 +1,83 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9153f692584fe7ad587cdf6ab168be0785ede5db07f7f31a7ed40bd969b2ff40
- size 1789
+ ---
+ annotations_creators:
+ - no-annotation
+ language:
+ - vi
+ license:
+ - apache-2.0
+ multilinguality:
+ - monolingual
+ pretty_name: UTS_Text_v1
+ size_categories:
+ - 1K<n<10K
+ task_categories:
+ - text-generation
+ ---
+
+ # Dataset Card for UTS_Text_v1
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+ - [Additional Information](#additional-information)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ The UTS_Text_v1 dataset is a collection of 10,000 sentences sourced from various news articles.
+
+ Of the 10,000 sentences, 5,000 are between 50 and 150 characters long, and the other 5,000 are between 20 and 50 characters long (the thresholds applied in generate_dataset.py). This mix of sentence lengths provides a diverse range of text samples for training and evaluating natural language processing models.
+
+ ### Dataset Summary
+
+ ### Supported Tasks and Leaderboards
+
+
+ ### Languages
+
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ ### Data Fields
+
+
+ ### Data Splits
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+
+ ### Source Data
+
+
+ ### Annotations
+
+
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ The dataset is released under Apache 2.0.
+
+ ### Citation Information
+
+
+ ### Contributions
+
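The card above claims an even split between short (20 to 50 characters) and long (50 to 150 characters) sentences. A minimal sketch to spot-check that claim, assuming the `datasets` library and the Hub repo id used in `eval.py` later in this commit:

```python
from datasets import load_dataset

# Load the train split from the Hub.
dataset = load_dataset("undertheseanlp/UTS_Text_v1")
sentences = dataset["train"]["text"]

# Count sentences in the two character-length bands from the card;
# a sentence of exactly 50 characters is counted in the short band here.
n_short = sum(1 for s in sentences if 20 <= len(s) <= 50)
n_long = sum(1 for s in sentences if 50 < len(s) <= 150)
print(f"total={len(sentences)} short={n_short} long={n_long}")
```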
UTS_Text_v1.py CHANGED
@@ -1,3 +1,54 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1bcf654c0b873b2ec89b8af9fc2560c28893d683dc824691efd3b8d556b0a01f
- size 1717
+ import os
+ import datasets
+
+ _DESCRIPTION = """\
+ UTS_Text_v1
+ """
+
+ _CITATION = """\
+ """
+
+ _BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_Text_v1/raw/main/data/"
+ TRAIN_FILE = "UTS_Text_v1.txt"
+
+
+ class UTS_Text_v1(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=None,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         train_file = dl_manager.download(os.path.join(_BASE_URL, TRAIN_FILE))
+         # dev_file = dl_manager.download(os.path.join(DATA_PATH, "dev.txt"))
+         # test_file = dl_manager.download(os.path.join(DATA_PATH, "test.txt"))
+         data_files = {
+             "train": train_file,
+             # "dev": dev_file,
+             # "test": test_file,
+         }
+         splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}
+             ),
+             # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
+             # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
+         ]
+         return splits
+
+     def _generate_examples(self, filepath):
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             for line in f:
+                 text = line.strip()
+                 yield guid, {"text": text}
+                 guid += 1
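For local development, the loader above can be exercised directly from the script file. A minimal sketch, assuming a `datasets` version that still supports script-based loaders (newer versions may additionally require `trust_remote_code=True`):

```python
from datasets import load_dataset

# Point load_dataset at the local loader script instead of the Hub repo id.
ds = load_dataset("./UTS_Text_v1.py", split="train")

print(ds)             # Dataset with a single "text" feature
print(ds[0]["text"])  # first sentence of the corpus
```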
data/UTS_Text_v1.txt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e9dcc7579094881397a2b49b7a92f88c8a201104b82660ae4eaa63da620a56f2
- size 905068
data/small/validation.txt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7827ab20b575c6c3736ec5b00c2689d54c880bcf8453269260aff780fc7c18b5
- size 24631
eval.py CHANGED
@@ -1,3 +1,20 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3383996333606968884d9705d06807f095a6d8e4de04170dc9e1439bfb4cdc51
- size 501
+ import os
+ from os.path import dirname, join
+
+ import matplotlib.pyplot as plt
+ from datasets import load_dataset
+
+ dataset = load_dataset("undertheseanlp/UTS_Text_v1")
+ sentences = dataset["train"]["text"]
+
+ pwd = dirname(__file__)
+ tmp = join(pwd, "tmp")
+ os.makedirs(tmp, exist_ok=True)  # the original assumed tmp/ already exists
+
+ # Histogram of sentence lengths (in characters) with bin size = 10; extend
+ # the range by one bin so the longest sentence still falls inside a bin.
+ lengths = [len(s) for s in sentences]
+ plt.hist(lengths, bins=range(0, max(lengths) + 10, 10))
+ plt.title("Histogram of sentence lengths")
+ plt.savefig(join(tmp, "histogram.png"))
+ plt.show()
generate_dataset.py CHANGED
@@ -1,3 +1,60 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9de029f85cabeed131dcdcb77ea883ad63673b3646af90b6a5b8bacaaaddea57
- size 2403
+ import random
+ from os.path import dirname, join
+
+ from underthesea.file_utils import DATASETS_FOLDER
+
+ random.seed(10)
+ text_file = join(DATASETS_FOLDER, "VNESES", "VNESEScorpus.txt")
+ with open(text_file, encoding="utf-8") as f:
+     lines = f.read().splitlines()
+
+ # Length thresholds are measured in characters, despite the TOKEN naming.
+ NUM_LONG_TOKENS = 50
+ NUM_SHORT_TOKENS = 20
+ NUM_MAX_TOKENS = 150
+
+
+ def longline_conditions(line):
+     # Long sentences: 50-150 characters, start uppercase, end with a period.
+     if len(line) < NUM_LONG_TOKENS or len(line) > NUM_MAX_TOKENS:
+         return False
+     if not (line[0].isupper() and line[-1] == "."):
+         return False
+     return True
+
+
+ long_lines = [line for line in lines if longline_conditions(line)]
+ # sample 5,000 long lines at random (seeded above for reproducibility)
+ random_long_lines = random.sample(long_lines, 5000)
+ for line in random_long_lines[:20]:
+     print(line)
+
+
+ def shortline_conditions(line):
+     # Short sentences: 20-50 characters, start uppercase.
+     if len(line) < NUM_SHORT_TOKENS:
+         return False
+     if len(line) > NUM_LONG_TOKENS:
+         return False
+     if not line[0].isupper():
+         return False
+     return True
+
+
+ short_lines = [line for line in lines if shortline_conditions(line)]
+ # sample 5,000 short lines at random
+ random_short_lines = random.sample(short_lines, 5000)
+ for line in random_short_lines[:20]:
+     print(line)
+
+ print("Long lines", len(random_long_lines))
+ print("Short lines", len(random_short_lines))
+
+ pwd = dirname(__file__)
+ data_folder = join(pwd, "data")
+ corpus_file = join(data_folder, "UTS_Text_v1.txt")
+
+ with open(corpus_file, "w", encoding="utf-8") as f:
+     selected_lines = random_long_lines + random_short_lines
+     content = "\n".join(selected_lines)
+     f.write(content)
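A quick sanity check on the corpus file written above (a sketch using only the standard library; it assumes it is run from the repository root so `data/UTS_Text_v1.txt` resolves):

```python
# Recount the two length bands in the generated corpus.
with open("data/UTS_Text_v1.txt", encoding="utf-8") as f:
    lines = f.read().splitlines()

n_long = sum(1 for l in lines if 50 <= len(l) <= 150 and l[0].isupper() and l.endswith("."))
n_short = sum(1 for l in lines if 20 <= len(l) <= 50 and l[0].isupper())

print(len(lines))       # expected: 10000
print(n_long, n_short)  # roughly 5,000 each; length-50 lines can match both bands
```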
statistics.py CHANGED
@@ -1,3 +1,13 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4ff4285ba8a2a4ef12590894ae22a52a7499b684f54ca579892d07ed41715d32
- size 775
+ from os.path import dirname, join
+
+ def count_lines(filepath):
+     with open(filepath, encoding="utf-8") as f:
+         lines = f.read().splitlines()
+     return len(lines)
+
+ pwd = dirname(__file__)
+ folder = join(pwd, "data", "base")
+
+ print(count_lines(join(folder, "train.txt")))
+ print(count_lines(join(folder, "validation.txt")))
+ print(count_lines(join(folder, "test.txt")))
tmp/.gitignore CHANGED
@@ -1,3 +1,8 @@
+ <<<<<<< HEAD
  version https://git-lfs.github.com/spec/v1
  oid sha256:5c89a8a9c5af258d02d0bfd3582c75963bea53baa1f5f5132662b80020a63452
  size 13
+ =======
+ *
+ !.gitignore
+ >>>>>>> origin/main