import argparse
import copy
import json
import os
import random
import re
from os import listdir
from os.path import isfile, join

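# System prompt for the fine-tuned translator. It is written in Chinese because
# that is the target language; in English it reads roughly: "You are a
# translation API. Translate the input into Chinese. Input lines look like
# '[start time] source text [end time]' and the output must keep exactly the
# same bracketed-timestamp format. When several source lines must be merged for
# fluent word order, emit '[start of first] merged translation [end of last]'."
# The constant ends with a worked example taken from real data.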
INSTRUCT_CHUNKED_PROMPT = """你是一个翻译API。请将以下内容翻译为中文。输入内容的格式如下:

[开始时间1] 原文文本1 [结束时间1]
[开始时间2] 原文文本2 [结束时间2]
[开始时间3] 原文文本3 [结束时间3]
...

请翻译其中内容,并严格按以下格式输出:

[开始时间1] 译文文本1 [结束时间1]
[开始时间2] 译文文本2 [结束时间2]
[开始时间3] 译文文本3 [结束时间3]
...

为保证语序通顺,有时需要合并多行原文。例如,如果原文文本1和2需要合并的话,请按以下格式输出:

[开始时间1] 译文文本1 译文文本2 [结束时间2]
[开始时间2] 译文文本3 [结束时间3]

以下是一个真实的例子:
输入:
[368.18] but if it's not for you, [369.70]
[369.70] TikTok has made the Android app [371.31]
[371.31] available on its website, [372.96]
[372.96] since it's still not allowed back on Google Play. [375.32]

输出:
[368.18] 但如果它不适合你, [369.70]
[369.70] TikTok已经在其网站上提供了安卓应用的下载, [372.96]
[372.96] 因为它仍然未被允许重返Google Play商店。 [375.32]
"""

def break_line(line: str):
    # Parse a line of the form "[start] text [end]" into its components.
    # Note the escaped dots: the original pattern used "\d+.\d+", which lets
    # "." match any character.
    pattern = re.compile(r"^\[(\d+\.\d+)\](.*)\[(\d+\.\d+)\]$")
    match = pattern.match(line)
    if match is None:
        raise ValueError(f"Malformed subtitle line: {line!r}")
    start_time = match.group(1)
    text = match.group(2)
    end_time = match.group(3)
    return start_time, text.strip(), end_time, float(start_time), float(end_time)
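
# Example: break_line("[368.18] but if it's not for you, [369.70]") returns
# ("368.18", "but if it's not for you,", "369.70", 368.18, 369.70).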

def get_total_chars(cn_lines: list[str], en_lines: list[str]):
    # Combined character count of both chunks, used to enforce the MAX_LEN cap.
    return sum(len(line) for line in cn_lines) + sum(len(line) for line in en_lines)

def chunk_messages(cn_lines: list[str], en_lines: list[str], MAX_LEN: int = 2000):
    # Greedily pack timestamp-aligned Chinese/English lines into chunks of
    # roughly MAX_LEN total characters. A whole aligned group is appended
    # before the length check, so the limit can be overshot slightly; this is
    # why --maxlen recommends leaving headroom.
    cn_lines_copy = copy.deepcopy(cn_lines)
    en_lines_copy = copy.deepcopy(en_lines)

    final_chunks: list[tuple[list[str], list[str]]] = []

    while True:
        en_current_chunk = []
        cn_current_chunk = []
        while True:
            curr_total_len = get_total_chars(cn_current_chunk, en_current_chunk)
            if len(cn_lines_copy) == 0 or len(en_lines_copy) == 0:
                final_chunks.append((cn_current_chunk, en_current_chunk))
                return final_chunks
            elif curr_total_len > MAX_LEN:
                final_chunks.append((cn_current_chunk, en_current_chunk))
                break
            else:
                # Try appending a new line group to the current chunk. One
                # Chinese line may cover several English lines (the prompt
                # allows merging), so consume English lines until the end
                # timestamps match.
                latest_cn_line = cn_lines_copy.pop(0)
                cn_start, cn_text, cn_end, cn_start_f, cn_end_f = break_line(latest_cn_line)
                cn_current_chunk.append(latest_cn_line)
                while True:
                    if not en_lines_copy:
                        raise Exception("ERROR: English lines ran out before matching the Chinese end time. Offending line: " + latest_cn_line)
                    latest_en_line = en_lines_copy.pop(0)
                    en_start, en_text, en_end, en_start_f, en_end_f = break_line(latest_en_line)
                    en_current_chunk.append(latest_en_line)
                    if en_end == cn_end:
                        break
                    elif en_start_f > cn_end_f:
                        raise Exception("ERROR: English and Chinese lines are not in sync. Offending line: " + latest_cn_line)
                
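# Example: one Chinese line that spans two English lines stays in one chunk:
#   chunk_messages(["[0.00] 你好，世界 [1.00]"],
#                  ["[0.00] hello, [0.50]", "[0.50] world [1.00]"])
#   -> [(["[0.00] 你好，世界 [1.00]"],
#        ["[0.00] hello, [0.50]", "[0.50] world [1.00]"])]
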
def new_message(eng_in, chs_out, prev_in=None, prev_out=None):
    # Build one training example. When the previous chunk pair is available it
    # is prepended as conversation history, so the model learns to stay
    # consistent across chunk boundaries.
    if prev_in is None or prev_out is None:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out}]
        }
    else:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": prev_in},
            {"role": "assistant", "content": prev_out},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out}]
        }
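
# Each object follows the chat-format fine-tuning JSONL schema, e.g. (content
# abbreviated, taken from the prompt's worked example):
# {"messages": [{"role": "system", "content": "你是一个翻译API。..."},
#               {"role": "user", "content": "[368.18] but if it's not for you, [369.70]"},
#               {"role": "assistant", "content": "[368.18] 但如果它不适合你, [369.70]"}]}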

def write_jsonl(message_groups, filename):
    # Serialize each (english, chinese) chunk pair as one JSON object per line.
    # From the second chunk on, the previous pair is included as conversation
    # history (see new_message).
    json_lines = []
    with open(filename, "w", encoding='utf-8') as fout:
        for i in range(len(message_groups)):
            if(i>0):
                msg_obj = new_message(
                    message_groups[i][0].strip(),
                    message_groups[i][1].strip(),
                    message_groups[i-1][0].strip(),
                    message_groups[i-1][1].strip()
                )
            else:
                msg_obj = new_message(
                    message_groups[i][0].strip(),
                    message_groups[i][1].strip()
                )
            json.dump(msg_obj, fout)
            fout.write("\n")
            json_lines.append(json.dumps(msg_obj))
    return json_lines

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate ChatGPT training data from a directory of subtitle files.')
    parser.add_argument('data_dir', type=str, nargs='?', help='The directory containing the subtitle files.', default="data")
    parser.add_argument('--maxlen', type=int, help='The maximum length of a combined message. \nNote that this limit will be exceeded a little bit, so leave some headroom. \nRecommended value is max context length / 4.', default=2000)
    parser.add_argument('--test-ratio', type=float, help='The ratio of test data to training data.', default=0.2)

    args = parser.parse_args()

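    # Example invocation (the script name is illustrative; pairs like
    # foo.en.txt / foo.cn.txt are expected under the data directory):
    #   python make_dataset.py data --maxlen 2000 --test-ratio 0.2
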
    message_groups = []

    DOCUMENT_ROOT = args.data_dir
    files = listdir(DOCUMENT_ROOT)
    files = list(filter(lambda x: x.endswith(".en.txt"), files))
    files.sort()

    for f in files:
        en_fname = join(DOCUMENT_ROOT, f)
        if en_fname.endswith(".en.txt") and isfile(en_fname):
            cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
            if os.path.exists(cn_fname) and isfile(cn_fname):
                print(f"Found data pair: {en_fname} and {cn_fname}")

                with open(en_fname, "r", encoding='utf-8-sig') as enfin:
                    en_messages = enfin.read()

                with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
                    cn_messages = cnfin.read()
                
                en_messages = [part.strip() for part in en_messages.split("\n") if part.strip() != ""]
                cn_messages = [part.strip() for part in cn_messages.split("\n") if part.strip() != ""]

                try:
                    chunks = chunk_messages(cn_messages, en_messages, MAX_LEN=args.maxlen)
                    en_messages = []
                    cn_messages = []
                    for chunk in chunks:
                        cn_chunk, en_chunk = chunk
                        en_messages.append("\n".join(en_chunk))
                        cn_messages.append("\n".join(cn_chunk))
                        # print("\n".join(en_chunk))
                        # print("---")
                        # print("\n".join(cn_chunk))
                        # print("\n")
                except Exception as e:
                    print(f"Error: {e}")
                    continue
                
                if len(en_messages) != len(cn_messages):
                    print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
                    continue

                messages = zip(en_messages, cn_messages)

                message_groups.extend(messages)

    jsonl_lines = write_jsonl(message_groups, f"combined-{args.maxlen}.jsonl")

    random.shuffle(jsonl_lines)

    TEST_RATIO = args.test_ratio

    split_index = int(len(jsonl_lines) * TEST_RATIO)

    test = jsonl_lines[:split_index]
    train = jsonl_lines[split_index:]

    with open(f"chatgpt-train-{args.maxlen}.jsonl", "w", encoding='utf-8') as fout:
        for line in train:
            fout.write(line + "\n")

    with open(f"chatgpt-test-{args.maxlen}.jsonl", "w", encoding='utf-8') as fout:
        for line in test:
            fout.write(line + "\n")
