import textwrap
from collections import defaultdict

import pandas as pd
from datasets import Dataset
|
|
def chunk_examples(txt_list):
    """Split each text block into chunks of roughly 1000 characters."""
    chunks = []
    for sentence in txt_list:
        # Keep the embedded newlines (textwrap collapses them by default) so
        # they can be re-encoded as " ; " separators further down.
        chunks += textwrap.wrap(sentence, width=1000,
                                replace_whitespace=False, drop_whitespace=False)
    return chunks
|
|
# The cleaned corpus is assumed to be UTF-8; text blocks are separated by blank lines.
txt = open('HebrewStageAndLyrics_clean.txt', encoding='utf-8').read()
parsed = txt.split("\n\n")
df = pd.DataFrame(parsed)
|
parsed_lines = df[0].values.tolist()
# Column name -> list of values, the layout Dataset.from_dict expects.
data = defaultdict(list)
|
chunked_lines = chunk_examples(parsed_lines)
|
for line in chunked_lines:
    # Turn existing ";" into ",", then encode newlines as " ; " so that each
    # example fits on a single physical line.
    sc_line = line.replace(";", ",").replace("\n", " ; ")
    data['text'].append(sc_line)
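# Illustration: a two-line lyric "first line\nsecond line" is stored as
# "first line ; second line".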
|
# Build the dataset, shuffle it, and hold out 5% as a test split.
dataset = Dataset.from_dict(data).shuffle(seed=42)
dataset = dataset.train_test_split(test_size=0.05)
print(dataset)
print(dataset.column_names)
|
# Spot-check a couple of records from each split.
for i in range(20, 22):
    print(dataset['train'][i])
    print(dataset['test'][i])
|
dataset.push_to_hub("Norod78/HebrewStageAndLyricsWithNewLines")
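
# Optional sanity check (a sketch, assuming the push above succeeded): reload
# the dataset from the Hub and decode the " ; " markers back into newlines.
from datasets import load_dataset

reloaded = load_dataset("Norod78/HebrewStageAndLyricsWithNewLines")
print(reloaded["train"][0]["text"].replace(" ; ", "\n"))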