from zzz import *
import csv
import sys
# Global term table: list of (source_spelling, chinese_name) tuples,
# sorted by descending spelling length so longer terms are matched
# before their substrings.  Populated lazily by load_terms().
term_dict = []


def remove_blank(text):
    """Strip whitespace noise from *text*.

    Removes every tab, ASCII space and ideographic space (U+3000), and
    collapses runs of consecutive newlines down to a single newline.

    Fix: the original looped a fixed 6 times, which only collapsed runs
    of up to 2**6 newlines; this version repeats until stable.
    """
    # Single-character removals are complete after one pass each.
    text = text.replace('\t', '').replace(' ', '').replace('\u3000', '')
    # Collapse blank lines until no double newline remains.
    while '\n\n' in text:
        text = text.replace('\n\n', '\n')
    return text


def load_terms():
    """Populate the module-level ``term_dict`` from terms.csv (idempotent).

    Each CSV row maps a Chinese name (``cn``) to up to three source
    spellings (``jp``, ``hira``, ``en``).  The resulting pairs are
    appended longest-spelling-first so longer terms are replaced before
    their substrings.
    """
    global term_dict
    if term_dict:
        return  # already loaded
    mapping = {}
    with open("terms.csv", "r", encoding='utf-8-sig') as fh:
        for row in csv.DictReader(fh):
            # Any non-empty spelling becomes a lookup key for this cn name.
            for spelling in (row['jp'], row['hira'], row['en']):
                if spelling:
                    mapping[spelling] = row['cn']
    term_dict.extend(sorted(mapping.items(), key=lambda kv: -len(kv[0])))


# Populate the term table once at import time.
load_terms()

# Honorific suffixes, one "chinese=japanese" pair per line.
suffix = '''
酱=ちゃん
君=くん
'''
# Invert the table to {japanese_suffix: chinese_suffix}.
suffix_dict = {}
for raw_line in suffix.splitlines():
    if not raw_line:
        continue
    cn_sfx, jp_sfx = raw_line.strip().split('=')
    if cn_sfx and jp_sfx:
        suffix_dict[jp_sfx] = cn_sfx

# Inclusive codepoint bounds used by predict_jp_cn.  Despite the name,
# U+3041..U+30F6 spans hiragana AND most katakana.
jp_range = ['\u3041', '\u30F6']


def predict_jp_cn(text):
    """Heuristically decide whether *text* is Japanese (vs. Chinese).

    Counts characters in the kana span U+3041..U+30F6 (hiragana plus
    most katakana) and returns True when they make up at least 40% of
    the text.

    Fixes: returns an explicit False instead of an implicit None on the
    negative path, and no longer raises ZeroDivisionError on empty input.
    """
    if not text:
        return False
    kana_lo, kana_hi = '\u3041', '\u30F6'
    kana_count = sum(1 for ch in text if kana_lo <= ch <= kana_hi)
    return kana_count / len(text) >= 0.4


def get_terms_explanations(terms: set) -> str:
    """Return a newline-joined "cn:explanation" list for *terms*.

    Reads terms.csv and indexes each jp/hira spelling to its Chinese
    name and info text.  Rows with display == '0' or an empty info
    field are skipped; later rows may deliberately overwrite earlier
    ones.  Terms with no cached explanation are silently dropped.
    """
    explanations = {}

    with open('terms.csv', 'r', encoding='utf-8-sig') as fh:
        for row in csv.DictReader(fh):
            # Hidden rows are excluded from the glossary.
            if row['display'] == '0':
                continue
            info = row['info'].strip()
            if not info:
                continue
            cn_name = row['cn'].strip()
            for spelling in (row['jp'].strip(), row['hira'].strip()):
                if spelling:
                    explanations[spelling] = (cn_name, info)

    lines = [
        f"{explanations[term][0]}:{explanations[term][1]}"
        for term in terms
        if term in explanations
    ]
    return '\n'.join(lines)


def proc_jp(filename):
    """Pre-process a Japanese source file for translation.

    Replaces every known term — compound (k・k2), term+honorific, and
    bare — with its bracketed Chinese equivalent, writes the result to
    <base>.regular<ext>, and regenerates prompt.txt with a glossary of
    the terms actually found.
    """
    data = file.readstr(filename)
    print(f"processing (unknown) with {len(data)} chars.")
    load_terms()
    terms = set()          # every source term encountered
    connect_terms = set()  # term+suffix compounds, reported separately
    # 1. Compound terms joined with the Japanese middle dot.
    for (k, v) in term_dict:
        for (k2, v2) in term_dict:
            pair = f"{k}・{k2}"
            if pair in data:  # bug fix: find(...) > 0 missed a match at index 0
                print("found", pair, '->', f"{v}·{v2}")
                data = data.replace(pair, f"[{v}·{v2}]")
                terms.add(k)
                terms.add(k2)
    # 2. Terms followed by an honorific suffix (ちゃん/くん).
    for (k, v) in term_dict:
        for k2, v2 in suffix_dict.items():
            combo = k + k2
            if combo in data:
                print("found", combo, '->', v + v2)
                data = data.replace(combo, f"[{v}{v2}]")
                terms.add(k)
                terms.add(k2)
                connect_terms.add(combo)
    # 3. Bare terms — AFTER all suffixed forms.  Bug fix: this used to
    # run inside the suffix loop, so replacing a bare term on the first
    # suffix pass destroyed any later term+suffix match and could
    # re-bracket a term whose translation contains the same characters.
    for (k, v) in term_dict:
        if k in data:
            print("found", k, '->', v)
            data = data.replace(k, f"[{v}]")
            terms.add(k)
    base, ext = os.path.splitext(filename)
    filename = base + '.regular' + ext
    # Strip whitespace noise before writing the regularised text.
    data = remove_blank(data)
    file.write(filename, data)
    # Rebuild the LLM prompt: base prompt + glossary + compound terms.
    extra_prompt = file.readstr('prompt.rtf')
    extra_prompt += '## 术语语义\n'
    extra_prompt += get_terms_explanations(terms)
    if len(connect_terms) > 0:
        extra_prompt += "\n## 二级术语\n"
        extra_prompt += "\n".join(list(connect_terms))
    extra_prompt += '\n## 正文\n'
    file.write('prompt.txt', extra_prompt)


def proc_gpt(filename):
    """Normalise translated (Chinese) text and return the cleaned string.

    Reads *filename* via the project `file` helper; the caller is
    responsible for writing the result out.
    NOTE(review): `reg.replace` is a project helper from `zzz`, assumed
    to behave like re.sub(pattern, repl, data) — confirm.
    """
    data = file.readstr(filename)
    # Strip tabs/spaces and collapse blank lines.
    data = remove_blank(data)
    # Convert line-leading double-quoted speech to corner brackets 「」.
    # NOTE(review): the alternation '^|(?<=\n)...' also matches the empty
    # string at the very start of the text, which can inject an empty 「」
    # pair there — the while-loop near the end strips exactly that artifact.
    data = reg.replace(data, '^|(?<=\n)“([^”\n ]+)”', '「\\1」')
    data = reg.replace(data, '^|(?<=\n)"([^"\n ]+)"', '「\\1」')
    # Drop parenthesised asides entirely.
    data = reg.replace(data, '\\([^)]+\\)', '')
    # Japanese middle dot -> Chinese middle dot.
    data = data.replace('・', '·')
    # replace [[*]]->
    # data = reg.replace(data, '\\[\\[[^]]+\\]\\]', '')
    # Remove the term-markup brackets inserted by proc_jp.
    data = data.replace('[', '')
    data = data.replace(']', '')
    # ASCII three-dot ellipsis -> single ellipsis character.
    data = data.replace('...', '…')
    # Normalise title/bracket pairs 《》 and 【】 to 『』.
    data = data.replace('《', '『').replace(
        '》', '』').replace('【', '『').replace('】', '』')
    # Strip empty 「」 pairs accumulated at the start (see regex note above).
    while len(data) > 2 and data[0] == '「' and data[1] == '」':
        data = data[2:]
    # The '.final' name is computed but the write below is disabled;
    # presumably kept for a future re-enable — confirm before removing.
    filename = os.path.splitext(filename)[-2] + '.final' + \
        os.path.splitext(filename)[-1]
    # file.write(filename, data)
    return data


def proc_cc(filename):
    """Convert *filename* from Traditional to Simplified Chinese.

    Writes the converted text to <base>.cc<ext> — the same name the
    __main__ driver reconstructs afterwards via os.path.splitext.
    """
    import opencc  # local import: optional dependency, only needed here
    converter = opencc.OpenCC('t2s.json')
    data = file.readstr(filename)
    print(f"processing (unknown) with {len(data)} chars.")
    data = converter.convert(data)
    # Bug fix: was filename.split('.')[0] + '.cc.' + split('.')[-1],
    # which mangled paths containing extra dots and disagreed with the
    # '.cc' name the caller in __main__ computes with os.path.splitext.
    base, ext = os.path.splitext(filename)
    file.write(base + '.cc' + ext, data)


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: python clean.py <jp.rtf|cn.?> [cc]")
        # Fix: use sys.exit — the exit() builtin is provided by the
        # site module and is not guaranteed in all run modes.
        sys.exit(1)
    name = sys.argv[1]
    params = sys.argv[2:]
    ext = os.path.splitext(name)[-1]
    # Optional traditional->simplified pre-pass; continue on its output.
    if 'cc' in params:
        proc_cc(name)
        name = os.path.splitext(name)[-2] + '.cc' + ext
    # Decide direction: 'jp' anywhere in the name wins, 'regular' or
    # 'cn' force cn->simp, otherwise sniff the file content.
    if 'jp' in name:
        jp_cn = True
    elif 'regular' in name or 'cn' in name:
        jp_cn = False
    else:
        jp_cn = predict_jp_cn(file.readstr(name))
    print('jp->cn' if jp_cn else 'cn->simp')
    if jp_cn:
        proc_jp(name)
    else:
        file.write(f"{name}.proc.txt", proc_gpt(name))
