import csv
import re
from PyCmpltrtok.common import sep
import os
import traceback
import numpy as np
from nlp_dataset_emotion_x00100_check_emojis import get_en2zh_zh2en

# Bracketed tokens like "[开心]": any non-empty run of characters that are not
# square brackets, enclosed in square brackets.
regex_words_in_sb = re.compile(r'\[([^\[\]]+)\]')
# Same, but only for 1..7 enclosed characters (short emoji/tag markers).
# NOTE: was '{,7}', i.e. {0,7}, which also matched empty "[]" — contrary to
# the stated 1~7 intent; tightened to {1,7}.
regex_words_in_sb_limited = re.compile(r'\[([^\[\]]{1,7})\]')  # only for characters count 1~7

if '__main__' == __name__:

    # Load the English<->Chinese emotion-label mapping from the project's
    # translation table.
    trans_path = 'trans/tte_options_zh.trans.txt'
    en2zh, zh2en = get_en2zh_zh2en(trans_path)
    print('en2zh:', en2zh)
    print('zh2en:', zh2en)

    # Raw OCEMOTION dump: one record per line, tab-separated "id \t text \t label".
    csv_path = r'D:\_dell7590_root\local\LNP_datasets\emotionX7zh\OCEMOTION.csv'
    print('path:', csv_path)
    washed_path = '_save/washed/emotionX7'
    os.makedirs(os.path.dirname(washed_path), exist_ok=True)

    # Read, wash and collect every record as (id, washed_text, zh_label).
    xdata = []
    with open(csv_path, 'r', encoding="utf8") as fin:
        cnt = 0
        for xline in fin:
            # strip() already removes any trailing \n / \r\n / \r plus
            # surrounding whitespace, so no manual CR/LF handling is needed.
            xline = xline.strip()
            if not xline:
                continue

            cnt += 1
            try:
                xid, xtext, xe = xline.split('\t')
                xid = int(xid)
                # Drop short bracketed markers (e.g. emoji tags like "[开心]").
                xtext_washed = regex_words_in_sb_limited.sub('', xtext)
                xe_zh = en2zh[xe]  # KeyError on unknown label -> record skipped
                print('>', end='')
                xdata.append((xid, xtext_washed, xe_zh,))
            except Exception:
                # Best effort: log the malformed record and keep going.
                print(traceback.format_exc())
    print()

    # Deterministic shuffle, then a 70/30 train/validation split.
    np.random.seed(666)
    xlen = len(xdata)
    rnd_idx = np.random.permutation(xlen)
    xdata = np.array(xdata)  # all fields become str, which suits text output
    xdata = xdata[rnd_idx]
    xlen_train = int(round(xlen * 0.7))

    # Write the splits; separate handles so the input file is already closed
    # (the original reused the name `f` inside the reader's `with` block).
    with open(washed_path + '_train.txt', 'w', encoding='utf8') as fout:
        for xrow in xdata[:xlen_train]:
            fout.write('\t'.join(xrow) + '\n')
    with open(washed_path + '_val.txt', 'w', encoding='utf8') as fout:
        for xrow in xdata[xlen_train:]:
            fout.write('\t'.join(xrow) + '\n')

    sep('All over')
