The script with which I generated this dataset from the text input file.
In the input file, each piece was separated by a doubled new-line character (a blank line).
- load_ds.py +33 -0
load_ds.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
from datasets import Dataset
|
3 |
+
from collections import defaultdict
|
4 |
+
import textwrap
|
5 |
+
|
6 |
+
def chunk_examples(txt_list, width=1000):
    """Split each text in *txt_list* into chunks of at most *width* characters.

    Parameters
    ----------
    txt_list : iterable of str
        The texts (one corpus piece each) to split.
    width : int, optional
        Maximum chunk length in characters (default 1000, the value the
        original script hard-coded).

    Returns
    -------
    list of str
        A flat list of all chunks from all input texts. ``textwrap.wrap``
        breaks on whitespace, so chunks may be shorter than *width*, and
        texts that are pure whitespace yield no chunks at all.
    """
    chunks = []
    for sentence in txt_list:
        # textwrap.wrap returns a list of lines no longer than `width`.
        chunks += textwrap.wrap(sentence, width=width)
    return chunks
|
11 |
+
|
12 |
+
# Build a Hugging Face dataset from a plain-text corpus in which each piece
# is separated by a blank line (a doubled newline), chunk long pieces, split
# into train/test, and publish the result to the Hub.

# Use a context manager so the file handle is closed (the original bare
# open() leaked it), and pin the encoding instead of relying on the locale.
with open('HebrewStageAndLyrics_clean.txt', encoding='utf-8') as corpus_file:
    txt = corpus_file.read()
parsed = txt.split("\n\n")

# `parsed` is already the list of pieces; the original pandas round-trip
# (pd.DataFrame(parsed) -> df[0].values.tolist()) returned the same list.
parsed_lines = parsed
data = defaultdict(list)

chunked_lines = chunk_examples(parsed_lines)

for line in chunked_lines:
    data['text'].append(line)

# Fixed seed so the shuffle (and therefore the train/test split) is
# reproducible across runs.
dataset = Dataset.from_dict(data).shuffle(seed=42)
dataset = dataset.train_test_split(test_size=0.1)
print(dataset)
print(dataset.column_names)

# Spot-check a couple of rows from each split.
for i in range(20, 22):
    print(dataset['train'][i])
    print(dataset['test'][i])

dataset.push_to_hub("Norod78/HebrewStageAndLyricsWithNewLines")
|