# metricsubs-chunktranslate/generate_chatgpt_varlen.py
import json
import os
import copy
import re
import random
import argparse
from os import listdir
from os.path import isfile, join
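# System prompt (in Chinese). It tells the model it is a translation API that
# must translate timestamped subtitle lines into Chinese, preserve the
# "[start] text [end]" line format, and merge consecutive source lines into one
# target line (keeping the first start time and the last end time) when Chinese
# word order requires it. A worked example is included in the prompt itself.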
INSTRUCT_CHUNKED_PROMPT = """你是一个翻译API。请将以下内容翻译为中文。输入内容的格式如下:
[开始时间1] 原文文本1 [结束时间1]
[开始时间2] 原文文本2 [结束时间2]
[开始时间3] 原文文本3 [结束时间3]
...
请翻译其中内容,并严格按以下格式输出:
[开始时间1] 译文文本1 [结束时间1]
[开始时间2] 译文文本2 [结束时间2]
[开始时间3] 译文文本3 [结束时间3]
...
为保证语序通顺,有时需要合并多行原文。例如,如果原文文本1和2需要合并的话,请按以下格式输出:
[开始时间1] 译文文本1 译文文本2 [结束时间2]
[开始时间2] 译文文本3 [结束时间3]
以下是一个真实的例子:
输入:
[368.18] but if it's not for you, [369.70]
[369.70] TikTok has made the Android app [371.31]
[371.31] available on its website, [372.96]
[372.96] since it's still not allowed back on Google Play. [375.32]
输出:
[368.18] 但如果它不适合你, [369.70]
[369.70] TikTok已经在其网站上提供了安卓应用的下载, [372.96]
[372.96] 因为它仍然未被允许重返Google Play商店。 [375.32]
"""
def break_line(line: str):
    """Split a "[start] text [end]" subtitle line into its three parts."""
    # The dots are escaped so they match a literal decimal point only.
    pattern = re.compile(r"^\[(\d+\.\d+)\](.*)\[(\d+\.\d+)\]$")
    match = pattern.match(line)
    if match is None:
        raise ValueError(f"Line is not in [start] text [end] format: {line!r}")
    start_time = match.group(1)
    text = match.group(2)
    end_time = match.group(3)
    return start_time, text.strip(), end_time, float(start_time), float(end_time)
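# Illustrative call, using a line from the example inside the prompt:
#   break_line("[368.18] but if it's not for you, [369.70]")
#   -> ("368.18", "but if it's not for you,", "369.70", 368.18, 369.70)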
def get_total_chars(cn_lines: list[str], en_lines: list[str]):
    """Total character count across both the Chinese and English line lists."""
    return sum(len(line) for line in cn_lines) + sum(len(line) for line in en_lines)
def chunk_messages(cn_lines: list[str], en_lines: list[str], MAX_LEN: int = 2000):
    """Group parallel Chinese/English subtitle lines into chunks of roughly
    MAX_LEN total characters, keeping the two sides aligned by end timestamp."""
    cn_lines_copy = copy.deepcopy(cn_lines)
    en_lines_copy = copy.deepcopy(en_lines)
    final_chunks: list[tuple[list[str], list[str]]] = []
    while True:
        en_current_chunk = []
        cn_current_chunk = []
        while True:
            curr_total_len = get_total_chars(cn_current_chunk, en_current_chunk)
            if len(cn_lines_copy) == 0 or len(en_lines_copy) == 0:
                # Input exhausted: emit the final (possibly short) chunk.
                final_chunks.append((cn_current_chunk, en_current_chunk))
                return final_chunks
            elif curr_total_len > MAX_LEN:
                # The limit is checked after appending, so chunks can overshoot
                # MAX_LEN slightly; the --maxlen help text asks for headroom.
                final_chunks.append((cn_current_chunk, en_current_chunk))
                break
            else:
                # Take the next Chinese line, then consume English lines until
                # the end timestamps match. One Chinese line may cover several
                # English lines, since the prompt allows merging in translation.
                latest_cn_line = cn_lines_copy.pop(0)
                cn_start, cn_text, cn_end, cn_start_f, cn_end_f = break_line(latest_cn_line)
                cn_current_chunk.append(latest_cn_line)
                while True:
                    if len(en_lines_copy) == 0:
                        raise Exception("ERROR: Ran out of English lines while matching: " + latest_cn_line)
                    latest_en_line = en_lines_copy.pop(0)
                    en_start, en_text, en_end, en_start_f, en_end_f = break_line(latest_en_line)
                    en_current_chunk.append(latest_en_line)
                    if en_end == cn_end:
                        break
                    elif en_start_f > cn_end_f:
                        raise Exception("ERROR: English and Chinese lines are not in sync. Offending line: " + latest_cn_line)
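# Illustrative run (hypothetical lines; timestamps chosen so the Chinese side
# merges two English cues, as in the prompt example):
#   en = ["[0.00] hello [1.00]", "[1.00] world [2.00]"]
#   cn = ["[0.00] 你好世界 [2.00]"]
#   chunk_messages(cn, en)
#   -> [(["[0.00] 你好世界 [2.00]"],
#        ["[0.00] hello [1.00]", "[1.00] world [2.00]"])]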
def new_message(eng_in, chs_out, prev_in=None, prev_out=None):
    """Build one chat fine-tuning record; when the previous exchange is given,
    it is included as conversational context before the current pair."""
    if prev_in is None or prev_out is None:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out}]
        }
    else:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": prev_in},
            {"role": "assistant", "content": prev_out},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out}]
        }
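# Shape of one record in the two-turn case (contents abridged):
#   {"messages": [
#       {"role": "system", "content": <INSTRUCT_CHUNKED_PROMPT>},
#       {"role": "user", "content": <previous English chunk>},
#       {"role": "assistant", "content": <previous Chinese chunk>},
#       {"role": "user", "content": <current English chunk>},
#       {"role": "assistant", "content": <current Chinese chunk>}]}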
def write_jsonl(message_groups, filename):
    """Write one JSON line per (english, chinese) chunk pair, pairing each
    chunk with the preceding one as conversation context. Returns the lines."""
    json_lines = []
    with open(filename, "w", encoding='utf-8') as fout:
        for i in range(len(message_groups)):
            if i > 0:
                # Include the previous exchange as context for this one.
                msg_obj = new_message(
                    message_groups[i][0].strip(),
                    message_groups[i][1].strip(),
                    message_groups[i - 1][0].strip(),
                    message_groups[i - 1][1].strip()
                )
            else:
                msg_obj = new_message(
                    message_groups[i][0].strip(),
                    message_groups[i][1].strip()
                )
            json.dump(msg_obj, fout)
            fout.write("\n")
            json_lines.append(json.dumps(msg_obj))
    return json_lines
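# Example with two hypothetical chunk pairs; the second record carries the
# first exchange as context:
#   write_jsonl([("[0.00] hi [1.00]", "[0.00] 你好 [1.00]"),
#                ("[1.00] bye [2.00]", "[1.00] 再见 [2.00]")], "demo.jsonl")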
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate ChatGPT training data from a directory of subtitle files.')
    # nargs='?' lets the positional argument actually fall back to its default.
    parser.add_argument('data_dir', type=str, nargs='?', help='The directory containing the subtitle files.', default="data")
    parser.add_argument('--maxlen', type=int, help='The maximum length of a combined message. \nNote that this limit will be exceeded a little bit, so leave some headroom. \nRecommended value is max context length / 4.', default=2000)
    parser.add_argument('--test-ratio', type=float, help='The ratio of test data to training data.', default=0.2)
    args = parser.parse_args()
message_groups = []
DOCUMENT_ROOT = args.data_dir
files = listdir(DOCUMENT_ROOT)
files = list(filter(lambda x: x.endswith(".en.txt"), files))
files.sort()
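    # Each English transcript NAME.en.txt is expected to sit next to its
    # Chinese counterpart NAME.cn.txt, one "[start] text [end]" line per cue.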
for f in files:
en_fname = join(DOCUMENT_ROOT, f)
if en_fname.endswith(".en.txt") and isfile(en_fname):
cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
if os.path.exists(cn_fname) and isfile(cn_fname):
print(f"Found data pair: {en_fname} and {cn_fname}")
with open(en_fname, "r", encoding='utf-8-sig') as enfin:
en_messages = enfin.read()
with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
cn_messages = cnfin.read()
en_messages = [part.strip() for part in en_messages.split("\n") if part.strip() != ""]
cn_messages = [part.strip() for part in cn_messages.split("\n") if part.strip() != ""]
try:
chunks = chunk_messages(cn_messages, en_messages, MAX_LEN=args.maxlen)
en_messages = []
cn_messages = []
for chunk in chunks:
cn_chunk, en_chunk = chunk
en_messages.append("\n".join(en_chunk))
cn_messages.append("\n".join(cn_chunk))
                except Exception as e:
                    print(f"Error while chunking {en_fname}: {e}")
                    continue
                if len(en_messages) != len(cn_messages):
                    print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
                    continue
                messages = zip(en_messages, cn_messages)
                message_groups.extend(messages)
    jsonl_lines = write_jsonl(message_groups, f"combined-{args.maxlen}.jsonl")

    # Shuffle, then split into test/train partitions.
    random.shuffle(jsonl_lines)
    TEST_RATIO = args.test_ratio
    split_index = int(len(jsonl_lines) * TEST_RATIO)
    test = jsonl_lines[:split_index]
    train = jsonl_lines[split_index:]
    with open(f"chatgpt-train-{args.maxlen}.jsonl", "w", encoding='utf-8') as fout:
        for line in train:
            fout.write(line + "\n")
    with open(f"chatgpt-test-{args.maxlen}.jsonl", "w", encoding='utf-8') as fout:
        for line in test:
            fout.write(line + "\n")