from transformers import AutoTokenizer
import jsonlines
import random
import os
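
# Chunk a Japanese-to-English parallel JSONL dataset (src/trg pairs) into pieces
# whose token count, including the SFT prompt, stays under the model's context
# length, then shuffle the chunks and write them to a new JSONL file.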

tokenizer = AutoTokenizer.from_pretrained("NilanE/tinyllama-relora-merge")

max_seq_len = 2048 # max context length

prompt = "Translate this from Japanese to English:\n### JAPANESE:  \n### ENGLISH: </s>" # SFT prompt template; its tokens count toward the context budget

input_file_path = "dataset-parallel-complete.jsonl"

base, ext = os.path.splitext(input_file_path)
output_file_path = base + "-chunked" + ext

promptTokens = len(tokenizer.tokenize(prompt))  # tokens used by the prompt template itself

def load_jsonl(file_path):
    # read parallel src/trg pairs, stripping any stray EOS tokens from the text
    data = []
    with jsonlines.open(file_path) as reader:
        for entry in reader:
            source = entry['src'].replace('</s>', '').strip()
            target = entry['trg'].replace('</s>', '').strip()
            data.append([source, target])
    return data

def save_jsonl(file_path, data):
    # write the chunked records back out as JSONL
    with jsonlines.open(file_path, 'w') as writer:
        writer.write_all(data)

chunks = []

data = load_jsonl(input_file_path)

# leave a small tolerance below the hard context limit
max_seq_len -= 10

skippedDocs = 0  # documents skipped because src/trg line counts do not match
longLines = 0    # line pairs dropped because they alone exceed the context budget

for doc in data:

    src_lines = doc[0].split('\n')
    trg_lines = doc[1].split('\n')

    out_src = []
    out_trg = []
    tokenCount = 0
    lastTokenCount = 0

    try:
        for x in range(len(src_lines)):
            # tentatively add the next line pair and measure the running token count
            out_src.append(src_lines[x])
            out_trg.append(trg_lines[x])
            out_src_string = "\n".join(out_src)
            out_trg_string = "\n".join(out_trg)
            tokenCount = len(tokenizer.tokenize(out_src_string.strip() + out_trg_string.strip())) + promptTokens

            if tokenCount - lastTokenCount < max_seq_len - 1:  # the newly added line fits within the budget on its own
                if tokenCount > max_seq_len - 1:
                    # the chunk overflowed: emit everything before this line,
                    # then start the next chunk with the line that overflowed
                    src_end = out_src.pop()
                    trg_end = out_trg.pop()
                    out_src_string = "\n".join(out_src)
                    out_trg_string = "\n".join(out_trg)
                    chunk = {
                        'src': out_src_string.strip(),
                        'trg': out_trg_string.strip()
                    }
                    chunks.append(chunk)
                    out_src = [src_end]
                    out_trg = [trg_end]
                elif x + 1 == len(src_lines):
                    # last line of the document: flush whatever has accumulated
                    chunk = {
                        'src': out_src_string.strip(),
                        'trg': out_trg_string.strip()
                    }
                    chunks.append(chunk)
            else:
                # this single line pair is longer than max_seq_len: drop it
                out_src.pop()
                out_trg.pop()
                out_src_string = "\n".join(out_src)
                out_trg_string = "\n".join(out_trg)
                tokenCount = len(tokenizer.tokenize(out_src_string.strip() + out_trg_string.strip())) + promptTokens
                longLines += 1

            lastTokenCount = tokenCount
    except IndexError:
        # src and trg have different line counts; skip the rest of this document
        skippedDocs += 1

random.shuffle(chunks)

print(f"LINES LONGER THAN MAX SEQUENCE LENTH: {longLines}")
print(f"SKIPPED DOCS: {skippedDocs}")

# Save the randomized data to a new JSONL file
if os.path.exists(output_file_path):
    os.remove(output_file_path)
save_jsonl(output_file_path, chunks)