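"""Build a held-out monolingual test set for a single language.

The active code streams a gzipped web-corpus shard, skips the documents covering
roughly the first n_words_dict[l] words (a count chosen to exceed the data used
for training), and writes the next TEST_SIZE documents to a plain JSONL file.
The commented-out section documents how the earlier RedPajama v2 train/test
splits were produced.
"""
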
import gzip
import json
import sys
from pathlib import Path
from typing import TextIO

import pandas as pd  # used only by the commented-out RedPajama splitting code below

# The language code (e.g. "pt", "zh") is read from the last command-line argument.
l = sys.argv[-1]


def _close_when_exhausted(file):
    """Lazily yield parsed JSON objects from an open JSONL file, closing it once exhausted."""
    with file:
        for line in file:
            yield json.loads(line)


def open_read_cleaned(filename):
    """Open a gzipped JSONL file in text mode and return a lazy iterator over its records."""
    file: TextIO = gzip.open(filename, "rt")  # type: ignore
    return _close_when_exhausted(file)


def write_json_lines_to_gzip(filename: str, data):
    """Write an iterable of JSON-serializable items to a gzipped JSONL file."""
    # The `with` statement already closes the file, even if an exception occurs,
    # so no extra try/finally is needed.
    with gzip.open(filename, "wt") as f:
        for item in data:
            json_line = json.dumps(item)
            f.write(json_line + "\n")


def write_json_lines(filename: str, data):
    """Write an iterable of JSON-serializable items to a plain JSONL file."""
    # As above, the `with` statement handles closing the file.
    with open(filename, "w") as f:
        for item in data:
            json_line = json.dumps(item)
            f.write(json_line + "\n")


TEST_SIZE = 10_000
TRAIN_LEN = 2_000_000  # 2M instances is likely enough, since 3.8M documents yield ~9.6B Italian tokens

# RedPajama v2 (en, de, es, fr, it): the commented-out block below built the earlier
# train/test splits from the heuristically filtered dumps and is kept for reference.

root_dir = "/mnt/data/shared/tower_llm_data/redpajama_v2_heuristic_filtered"
# l_datasets = {
#     "it": {
#         "train": [
#             "filtered_it_2023-06_head_documents.jsonl.gz",
#             "filtered_it_2022-49_head_documents.jsonl.gz",
#             "filtered_it_2022-40_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_it_2023-14_head_documents.jsonl.gz",
#     },
#     "es": {
#         "train": [
#             "filtered_es_2023-06_head_documents.jsonl.gz",
#             "filtered_es_2022-49_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_es_2023-14_head_documents.jsonl.gz",
#     },
#     "de": {
#         "train": [
#             "filtered_de_2023-06_head_documents.jsonl.gz",
#             "filtered_de_2022-49_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_de_2023-14_head_documents.jsonl.gz",
#     },
#     "fr": {
#         "train": [
#             "filtered_fr_2023-06_head_documents.jsonl.gz",
#             "filtered_fr_2022-49_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_fr_2023-14_head_documents.jsonl.gz",
#     },
#     "en": {
#         "train": [
#             "filtered_en_2023-06_head_documents.jsonl.gz",
#         ],
#         "test": "filtered_en_2023-14_head_documents.jsonl.gz",
#     },
# }

obs = []
# train
# append = True
# for d in l_datasets[l]["train"]:
#     if append:
#         for o in open_read_cleaned(f"{root_dir}/{l}/{d}"):
#             obs.append(o)
#             print(f"Selected {len(obs)} instances...")
#             if len(obs) == TRAIN_LEN:
#                 append = False
#                 break

# print("Saving")
# write_json_lines_to_gzip(f"{root_dir}/{l}/train.jsonl.gz", obs)
# test
# obs = []
# for o in open_read_cleaned(f'{root_dir}/{l}/{l_datasets[l]["test"]}'):
#     obs.append(o)
# test = pd.DataFrame(obs)
# test = test.sample(n=TEST_SIZE, random_state=42).reset_index(drop=True)
# test.to_json(
#     f"/mnt/data/jpombal/tower-results/raw_data/monolingual/red_pajama_filtered.{l}/test.jsonl",
#     orient="records",
#     lines=True,
# )

# Per-language word counts that comfortably exceed the amount of data used for
# training; documents are skipped until this many words have been seen, so the
# test set cannot overlap with the training data.
n_words_dict = {
    "nl": 933333330,
    "pt": 933333330,
    "ru": 600000000,
    "zh": 33888888,
    "ko": 350000000,
}

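# Stream the web-corpus shard for the requested language.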
corpus = open_read_cleaned(
    f"/mnt/data/shared/tower_llm_data/webcorpus/{l}/0000.json.gz"
)

n_words = 0
rows = 0
data = []
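# Chinese text is not whitespace-delimited, so characters are counted instead of
# space-separated tokens. Documents are skipped until the cumulative count reaches
# the per-language threshold; the next TEST_SIZE documents are kept as the test set.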
for doc in corpus:
    if l == "zh":
        n_words += len(doc["text"])
    else:
        n_words += len(doc["text"].split(" "))
    if n_words >= n_words_dict[l]:
        data.append({"text": doc["text"]})
        rows += 1
        if rows == TEST_SIZE:
            break

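# Create the output directory if needed and write the selected documents as
# uncompressed JSONL.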
Path(f"/mnt/data/jpombal/tower-results/raw_data/monolingual/webcorpus.{l}").mkdir(
    exist_ok=True, parents=True
)

write_json_lines(
    f"/mnt/data/jpombal/tower-results/raw_data/monolingual/webcorpus.{l}/test.jsonl",
    data,
)

print("done")