from paddlenlp.transformers import ProphetNetTokenizer
import sys
import os
import json
from nltk.tokenize.treebank import TreebankWordDetokenizer
import tqdm

# Command-line arguments: input directory holding the raw {train,dev,test}.src/.tgt
# files, and output directory for the converted JSON-lines files.
fin_dir = sys.argv[1]
fout_dir = sys.argv[2]


def write_json_lines(lines, out_path):
    """Write each item of *lines* as one JSON object per line (JSON Lines).

    Args:
        lines: iterable of JSON-serializable objects (here: dicts with
            'src'/'tgt' keys).
        out_path: destination file path; truncated/overwritten if it exists.
    """
    # 'w' truncates exactly like the original 'w+' but does not request the
    # unused read access. writelines batches the small writes into one call.
    with open(out_path, 'w', encoding='utf-8') as f:
        f.writelines(
            json.dumps(item, ensure_ascii=False) + '\n' for item in lines)


# NOTE(review): the ProphetNet tokenizer is loaded under the name
# 'bert-base-uncased' — presumably ProphetNet shares BERT's uncased WordPiece
# vocab in paddlenlp's pretrained mapping; confirm this resolves correctly.
bpe = ProphetNetTokenizer.from_pretrained('bert-base-uncased')
# Rejoins PTB-style space-separated tokens back into natural text before
# WordPiece re-tokenization.
twd = TreebankWordDetokenizer()


def preocess(line, keep_sep=False, max_len=512):
    """Normalize quotes, detokenize each '<S_SEP>' segment, WordPiece-tokenize it.

    Args:
        line: raw PTB-tokenized text, segments separated by '<S_SEP>'.
        keep_sep: when True, join segments with ' [X_SEP] ' instead of a space.
        max_len: output is truncated to max_len - 1 space-separated tokens
            (presumably leaving room for one special token downstream — confirm).

    Returns:
        A single space-joined token string.
    """
    # Map PTB quote/backtick conventions back to plain ASCII quotes.
    normalized = (line.strip()
                  .replace('``', '"')
                  .replace('\'\'', '"')
                  .replace('`', '\''))

    segments = []
    for chunk in normalized.split('<S_SEP>'):
        detok = twd.detokenize(chunk.strip().split(' '), convert_parentheses=True)
        segments.append(" ".join(bpe.tokenize(detok)))

    sep = " [X_SEP] " if keep_sep else " "
    joined = sep.join(segments)
    return " ".join(joined.split(' ')[:max_len - 1])


def convert_cased2uncased(fin_dir, fout_dir):
    """Convert {train,dev,test}.src/.tgt pairs into uncased tokenized JSON lines.

    For each split, reads paired source/target lines from fin_dir, tokenizes
    them via preocess(), and writes '{split}.json' into fout_dir, one
    {"src": ..., "tgt": ...} object per line. Targets keep [X_SEP] separators
    and are truncated to 128 tokens for the train split (512 otherwise).

    Args:
        fin_dir: directory containing '{split}.src' and '{split}.tgt' files.
        fout_dir: output directory; created (with parents) if missing.
    """
    # makedirs(exist_ok=True) also creates missing parents and avoids the
    # exists()/mkdir() race of the original.
    os.makedirs(fout_dir, exist_ok=True)

    for split in ['train', 'dev', 'test']:
        src_file = os.path.join(fin_dir, '{}.src'.format(split))
        tgt_file = os.path.join(fin_dir, '{}.tgt'.format(split))
        lines = []
        # 'with' guarantees both handles are closed; the original leaked them.
        with open(src_file) as f_src, open(tgt_file) as f_tgt:
            # zip stops at the shorter file, silently dropping unpaired lines.
            for src, tgt in tqdm.tqdm(zip(f_src, f_tgt)):
                src = preocess(src, False)
                tgt = preocess(tgt, True,
                               max_len=128 if split == 'train' else 512)
                lines.append({'src': src, 'tgt': tgt})
        write_json_lines(lines, os.path.join(fout_dir,
                                             '{}.json'.format(split)))


# Script entry point: run the conversion over the directories given on the
# command line.
convert_cased2uncased(fin_dir, fout_dir)
