
from torch.utils.data import IterableDataset, DataLoader

from typing import List
import glob
import torch
import time
import random
import sys
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM
from transformers import pipeline
import numpy as np
from qbconvert import stringQ2B

#from transformers.modeling_electra import ElectraForMaskedLM

import json
import re
from tool_zh_cn import text_norm
from ltp import LTP
import logging
import jieba

from algo import *
logger = logging.getLogger('__log__')

def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False

def is_chinese(word: str):
    """Return True when every character of *word* is a CJK ideograph.

    e.g. '身高' -> True, '180' -> False.  The empty string is vacuously True.
    """
    return all(_is_chinese_char(ord(ch)) for ch in word)

def is_english(word: str):
    """Return True when *word* consists solely of ASCII letters or ',', '.', '#'.

    The empty string is (vacuously) considered English, matching the
    original behavior.
    """
    # The previous version rebuilt set(list(',.#')) on every character of
    # every call; a plain string membership test is equivalent and cheaper.
    return all(('a' <= c <= 'z') or ('A' <= c <= 'Z') or c in ',.#' for c in word)

def is_num(word):
    """Return True when every character is an Arabic digit or a common
    Chinese numeral character.  The empty string is vacuously True."""
    numerals = '0123456789零一二三四五六七八九十百千万'
    return all(ch in numerals for ch in word)

def is_chinese_tokens(tokens: List):
    """Return True when every token in *tokens* is made of CJK characters only."""
    return all(is_chinese(tok) for tok in tokens)

def ignore_tokens(tokens):
    """Return indices of token groups that must never be corrupted.

    A group is skipped when its joined string is pure English, pure
    numeric, or the literal '[UNK]' placeholder.
    """
    positions = []
    for idx, tok in enumerate(tokens):
        joined = ''.join(tok)
        if is_english(joined) or is_num(joined) or joined == '[UNK]':
            positions.append(idx)
    return positions

def get_chinese_word(tokens: List[str], min_len=2):
    """Pair each token with a flag telling whether it is a Chinese word of
    at least *min_len* characters.

    Returns a list of [token, flag] pairs in the original token order.
    """
    tagged = []
    for tok in tokens:
        is_zh_word = len(tok) >= min_len and is_chinese(tok)
        tagged.append([tok, is_zh_word])
    return tagged

def zh_words_2d(zh_words):
    """Expand [word, flag] pairs into a 2-D character-group list wrapped
    with [CLS]/[SEP].

    A flagged (Chinese) word becomes one group of its characters, e.g.
    ['搜狗', True] -> ['搜', '狗']; any other word contributes one
    single-character group per character (a BPE tokenizer could be
    substituted here later).
    """
    groups = [['[CLS]']]
    for word, is_zh in zh_words:
        if is_zh:
            groups.append(list(word))
        else:
            # one group per character for non-Chinese material
            groups.extend([ch] for ch in word)
    groups.append(['[SEP]'])
    return groups

def whole_word_mask(input_tokens, ignore_token_pos, mlm_per=0.15, max_mask_per_sent=6):
    """Randomly pick whole-word positions to mask.

    Args:
        input_tokens: 2-D token list, e.g. [['[CLS]'], ['搜', '狗'], ..., ['[SEP]']].
        ignore_token_pos: positions that must not be masked.  NOTE: this
            list is mutated in place — every selected position is appended
            to it, so the caller sees the selections afterwards.
        mlm_per: target fraction of characters to mask.
        max_mask_per_sent: hard cap on masked characters per sentence.

    Returns:
        (masked_pos, masked_pos_tokens): the chosen positions sorted in
        descending order, and their corresponding token groups.
    """
    tokens_len = [len(t) for t in input_tokens]
    char_num = sum(tokens_len) - 2 # exclude the leading/trailing wrapper tokens ([CLS]/[SEP]) from the count
    max_masked_char_num = min(max(1, int(round(char_num * mlm_per))),  max_mask_per_sent)
    
    indexs = [i for i, v in enumerate(input_tokens) ]
    random.shuffle(indexs)
    
    # Per sentence, the masked character count may not exceed mlm_per of the
    # text nor the max_mask_per_sent cap.
    masked_pos = []
    masked_char_num = 0   # running count of masked characters
    for idx in indexs:
        # NOTE(review): the budget check runs before the ignore check, so an
        # over-budget token that would have been ignored anyway still ends
        # the selection early — confirm this early break is intended.
        if masked_char_num + tokens_len[idx] > max_masked_char_num:
            break
        if idx in ignore_token_pos:
            continue
        masked_pos.append(idx)
        ignore_token_pos.append(idx)
        masked_char_num += len(input_tokens[idx])

    masked_pos = sorted(masked_pos, reverse=True)
    masked_pos_tokens = [input_tokens[i] for i in masked_pos]
    
    return masked_pos, masked_pos_tokens


def del_pos_pick(bpe_tokens, masked_pos, cnt=1, ngram=1, tok_char_max=5):
    # cls seq masked pos skip, 伪造多余插入，然后删除
    picked_masked_pos = [p for p in masked_pos]
    for i, t in enumerate(bpe_tokens):
        if len(t) > tok_char_max:
            picked_masked_pos.append(i)

    indexs = [i for i, v in enumerate(bpe_tokens) if i not in picked_masked_pos]
    random.shuffle(indexs)
    pos_pick = []
    for i, idx in enumerate(indexs):
        if len(pos_pick) >= cnt:
            break
        partlen = 0
        pos_pick.append([])
        for n in range(0, ngram):
            idx_n = idx + n
            if idx_n in picked_masked_pos:
                break
            else:
                partlen += len(bpe_tokens[idx_n])
                if partlen > tok_char_max:
                    break
                else:
                    pos_pick[-1].append(idx_n)
                    picked_masked_pos.append(idx_n)
        if len(pos_pick[-1]) == 0:
            pos_pick.pop(-1)
    pos_pick = sorted(pos_pick, key=lambda x:x[0], reverse=True)

    return pos_pick


class Homo():
    """Homophone lookup used to fabricate same-sound corruption errors.

    Loads a resource file whose lines look like
    'pin_yin word1 word2 ...' and builds two maps:
    pinyin -> candidate words, and word -> pinyin.
    """

    def __init__(self, path_word):
        self.py_words = {}   # pinyin -> list of words sharing that pronunciation
        self.word_py = {}    # word -> its pinyin string

        self.load_map(path_word)

    def load_map(self, path):
        """Populate both lookup maps from the homophone resource file."""
        with open(path, 'r', encoding='utf-8') as fin:
            for line in fin:
                fields = line.strip().split()
                pinyin = fields[0].replace('_', '')
                entries = fields[1:]
                self.py_words[pinyin] = entries
                for entry in entries:
                    self.word_py[entry] = pinyin

    def get_homo_by_word(self, word):
        """Return a random homophone for *word*, the word's pinyin when the
        draw lands on *word* itself, or a per-character fallback when the
        word is unknown."""
        pinyin = self.word_py.get(word, None)

        # Per-character substitutes, computed up front as the fallback.
        char_subs = [self.get_homo_by_char(ch) for ch in word]

        if pinyin is None:
            return ''.join(char_subs)
        candidates = self.py_words.get(pinyin, None)
        if candidates is None:
            return ''.join(char_subs)

        # The pinyin string itself also counts as a candidate replacement.
        candidates = candidates + [pinyin]

        drawn = random.choice(candidates)
        if drawn == word:
            return pinyin
        return drawn

    def get_homo_by_char(self, char):
        """Return a random same-sound character for *char*, its pinyin when
        the draw lands on *char* itself, or '' when *char* is unknown."""
        pinyin = self.word_py.get(char, None)
        if pinyin is None:
            return ''
        same_sound = self.py_words.get(pinyin, None)
        if same_sound is None:
            return ''
        candidates = same_sound + [pinyin]

        drawn = random.choice(candidates)

        if drawn == char:
            return pinyin
        return drawn

def full2half(strs):
    """Convert full-width characters in *strs* to their half-width forms.

    The ideographic space (U+3000) becomes an ASCII space; the full-width
    ASCII-variant range U+FF01..U+FF5E is shifted down by 0xFEE0.  All
    other characters pass through unchanged.
    """
    out = []
    for ch in strs:
        code = ord(ch)
        if code == 0x3000:
            code = 0x20
        elif 0xFF01 <= code <= 0xFF5E:
            code -= 0xFEE0
        out.append(chr(code))
    return ''.join(out)




def compute_replace_tag(bpe_tokens, masked_pos, max_mask=6, mode=1):
    """Build per-character edit tags ('DEL|1' / 'INS|n') for masked positions.

    *masked_pos* is expected in descending order (as produced by
    whole_word_mask).  Each masked group gets 'DEL|1' per character, and
    when a gap between consecutive masked positions is found, an 'INS|n'
    tag carrying the accumulated character length is written on the last
    character of the group just below the previous masked position.

    NOTE(review): with a single masked position no 'INS|n' tag is ever
    emitted, and the trailing `if p != -1` test is always true for valid
    indices — confirm both against the intended tagging scheme.  *mode* is
    currently unused.
    """
    # includes both del and ins tags
    edit_tags = [['INS|0'] * len(t) for t in bpe_tokens]
    for i, p in enumerate(masked_pos):
        if i == 0:
            ins_len = len(bpe_tokens[p])
            edit_tags[p] = ['DEL|1'] * len(bpe_tokens[p])
            continue
        if p != masked_pos[i-1] - 1:
            assert ins_len <= max_mask, 'ins or replace too long'
            ins_pos = masked_pos[i-1] - 1 
            edit_tags[ins_pos][-1] = 'INS|' + str(ins_len)
            ins_len = len(bpe_tokens[p])
        else:
            ins_len += len(bpe_tokens[p])
        
        # handle the current (last processed) position
        if p != -1:
            edit_tags[p] = ['DEL|1'] * len(bpe_tokens[p])
    
    return edit_tags




class MateIterableDataset(IterableDataset):
    """Streaming dataset that synthesizes edit-correction training samples.

    Each input line is corrupted with one of four edit families —
    replace (60%), add-then-delete (20%), delete-then-insert (10%),
    reorder (10%) — using homophones, visually similar glyphs,
    character-structure splits, MLM fills, and random characters.  Every
    yielded item is a dict of input tokens/ids, masked-LM labels, edit-tag
    ids and pointer targets (see parse_line).
    """

    def __init__(self, datadir,  args=None):
        """Collect the input file list and load all corruption resources.

        Args:
            datadir: directory (searched recursively for *.txt) or a single
                text file.
            args: namespace providing at least resource_dir, label_map_file
                and max_seq_length.
        """
        if os.path.isdir(datadir):
            self.file_iter = [f for i, f in enumerate(glob.iglob(datadir + '/**/*.txt', recursive=True))]
            np.random.shuffle(self.file_iter)
        else:
            self.file_iter = [datadir]
        self.args = args
        self.resource_dir  = args.resource_dir
        self.fan_jian_file = os.path.join(self.resource_dir, 'fanjian')
        self.font_sim_file = os.path.join(self.resource_dir, 'feat1_sim.txt')
        self.zh_jiegou_file = os.path.join(self.resource_dir, 'zh_jiegou.json')
        self.fan_dict = {}
        self.char_freq_path = os.path.join(self.resource_dir, 'char_freq.txt')
        self.vocab = os.path.join(self.resource_dir,'chinese-bert-wwm-ext', 'vocab.txt')
        self.homo = Homo(os.path.join(self.resource_dir,'chinese_homophone_word.txt'))  # homophone / near-homophone words
        
        self.edit_tag_ids = self.load_tag_ids(args.label_map_file)

        self.maxseqlen = args.max_seq_length   # e.g. 196
        self.minseqlen = 6

        self.mlm_per = 0.15
        self.max_mask_per_sent = 6

        self.edit_ngram = 3
        # Filler/hesitation tokens used to fabricate spurious insertions
        # (which the model then learns to delete).
        self.edit_ftokens = '哦 噢 呃 嗯 啊 唉 哎 啦 吧 呵 哼 嘿 嗨 咦 呗 哟 嘞 哈 哇 呢 呐 诶 吗 嘛 \
                             这 这个 这种 这样 这么 那 那个 那种 那么 就是 就 那啥 这啥 就是 就是说 应该说 \
                             说 然后 的话 是吧 对 对吧 好啊 好 ok'.split()
        self.edit_ftokens_len = len(self.edit_ftokens)
        
        self.device = 'cuda' if torch.cuda.is_available() else "cpu"  # prefer GPU (cuda) when available
        self.ltp_cws = LTP(pretrained_model_name_or_path=os.path.join(self.resource_dir, "ltp/base"), map_location=self.device)
        
        self.init_mlm_model()
        self.init_zh_chars()
        self.init_fan_jian()
        self.init_font_sim()
        self.init_zh_jiegou()
        
        self.info = self.get_file_info(self.file_iter)
        
        self.cnt = 0
        self.do_cache = False
        self.masked_tokens = {}  # cache: masked word(s) -> MLM top candidate fills, to avoid recomputation
        self.DEL = 'DEL|1'
        
    def load_tag_ids(self, fpath):
        """Load the edit-tag -> id map from the first JSON line of *fpath*."""
        fr = open(fpath, 'r', encoding='utf-8')
        line = fr.readline().strip()
        return json.loads(line)
    
    def init_zh_jiegou(self, ):
        """Load character-structure decompositions from zh_jiegou.json.

        Keeps only characters that are in the BERT vocab and whose
        component characters are all in the BERT vocab; the result is
        stored in self.hz_jg (char -> list of component chars).
        """
        hz_jg = {}
        bert_vocab = set( [line.strip() for line in open(self.vocab, 'r', encoding='utf-8')])
        with open(self.zh_jiegou_file, 'r', encoding='utf-8') as f:
            js = json.load(f)
            oov = {}
            for k, v in js.items():
                #print(k, v)
                comp = v['components']
                vals = []
                for c in comp:
                    if type(c) == dict:
                        for d in c['components']:
                            if type(d) == str:
                                if len(d) > 1:
                                    # keep only the last character of multi-char components
                                    d = d[-1]
                                vals.append(d)
                            else:
                                pass
                    elif type(c) == str:
                        if len(c) > 1:
                            c = c[-1]
                        vals.append(c)
                
                has_unk = False
                
                for _, vv in enumerate(vals):
                    
                    if vv not in bert_vocab:
                        oov[vv] = oov.get(vv, 0) + 1
                        has_unk = True
                        break
                    
                if has_unk == True or len(vals) == 0:
                    pass
                elif k in bert_vocab:
                    hz_jg[k] = vals
                    #print(k +'\t'+ ' '.join(vals))
            # kv = sorted(oov.items(), key=lambda x: x[1], reverse=True)
            # for v, k in kv[:80]:
            #     print(v)
            self.hz_jg = { k:v for k, v in hz_jg.items()}
    
    def init_mlm_model(self):
        """Load the BERT tokenizer and a fill-mask pipeline used to produce
        plausible token fills/insertions."""
        bert_res_local = os.path.join(self.resource_dir, "chinese-bert-wwm-ext") 
        
        self.bert_tokenizer = AutoTokenizer.from_pretrained(bert_res_local)
        #self.mlm = AutoModelForMaskedLM.from_pretrained(bert_res_local).to(self.device)
        self.mlm_pipeline = pipeline("fill-mask", model=bert_res_local, device=self.device)
    
    
    def init_font_sim(self):
        """Load the visually-similar-character table (char -> top-20 lookalikes)."""
        topn = 21 # slice end is exclusive -> top 20
        self.font_sims = {}
        for line in open(self.font_sim_file, 'r', encoding='utf-8'):
            tups = line.strip().split('\t')
            t0 = tups[0]
            t_sims = []
            for t in tups[1:topn]:
                t_sims.append(t.split()[0])
            self.font_sims[t0] = t_sims
            
    def pick_mlm_tokens(self, bpe_tokens, ipos):
        """Mask the token groups at positions *ipos* and return candidate
        fills from the fill-mask pipeline (optionally served from cache)."""
        bpe_tokens_tmp = copy.deepcopy(bpe_tokens)
        _masked_tokens  = []  # the words hidden by the mask (used as cache key)
        mlm_tokens = []
        
        for p in ipos:
            _masked_tokens.append(''.join(bpe_tokens_tmp[p]))
            bpe_tokens_tmp[p] = ['[MASK]'] * len(bpe_tokens_tmp[p])
        
        if self.do_cache and ''.join(_masked_tokens) in self.masked_tokens.keys():
            mlm_tokens = self.masked_tokens[''.join(_masked_tokens)]
            return mlm_tokens
        
        masked_pos = []
        # flatten the 2-D token groups into a single character sequence
        tokens = [t for toks in bpe_tokens_tmp for t in toks]
        sent = ''
        for i, c in enumerate(tokens):
            if c == '[MASK]':
                masked_pos.append(i)
            if i == 0 or i == len(tokens) - 1:
                # skip the [CLS]/[SEP] wrapper positions
                pass
            else:
                # drop '#' chars (presumably BPE continuation marks — verify)
                sent += c.replace('#', '')
        toks = self.mlm_pipeline(sent) # top-k candidates per mask
        shape = np.array(toks).shape
        
        if len(shape) == 1:
            # single [MASK]: one list of candidates
            for i in range(shape[0]):
                mlm_tokens.append([toks[i]['token_str']])
            if self.do_cache:
                self.masked_tokens[''.join(_masked_tokens)] = mlm_tokens
        elif len(shape) == 2:
            # multiple [MASK]s: transpose to one candidate-tuple per rank
            for i in range(shape[1]):
                tok = []
                for j in range(shape[0]):
                    tok.append(toks[j][i]['token_str'])
                mlm_tokens.append(tok)
            if self.do_cache:
                self.masked_tokens[''.join(_masked_tokens)] = mlm_tokens
        
        return mlm_tokens
    
    def chinese_words_seg(self, text):
        """Segment *text* into words with LTP; fall back to jieba when LTP raises."""
        try:
            words = self.ltp_cws.pipeline(text, tasks=['cws'])['cws']
        except Exception as e:
            words = [i for i in jieba.cut(text)]
            return words
        return words
    
    def __len__(self):
        # Total line count over all files (see get_file_info; approximate).
        return self.info['end']

    def __iter__(self):
        """Shard the file list across DataLoader workers and iterate samples.

        NOTE(review): per_worker uses integer division, so up to
        num_workers-1 trailing files are never assigned to any worker —
        confirm this is acceptable.
        """
        worker_info = torch.utils.data.get_worker_info()
        files_size = len(self.file_iter)
        if worker_info is None:
            iter_start = 0
            iter_end = files_size
            worker_id = -1
        else:
            per_worker = int(files_size / worker_info.num_workers)
            worker_id = worker_info.id
            iter_start = worker_id * per_worker
            iter_end = min(iter_start + per_worker, files_size)
        sampler = self.iter_chunk_files(iter_start, iter_end, worker_id)
        return sampler

    def iter_chunk_files(self, start, end, worker_id):
        """Yield parsed samples from files[start:end], logging every 10k lines."""
        for i in range(start, end):
            for kv in self.parse_file(self.file_iter[i], worker_id):
                self.cnt += 1
                if self.cnt % 1e4 == 0:
                    logger.info(f"line:{self.cnt} self.masked_tokens size {len(self.masked_tokens)} {kv}")
                yield kv
            
    
    def init_fan_jian(self):
        """Load the traditional -> simplified character mapping into self.fan_dict."""
        for t in open(self.fan_jian_file, "r", encoding="utf-8"):
            t = t.strip()
            li = t.split("\t")
            self.fan_dict[li[0]] = li[1]

    def fan2jian(self, str_org):
        """Convert traditional characters in *str_org* to simplified ones."""
        new_str = ""
        for ele in str_org:
            if ele in self.fan_dict:
                new_str += self.fan_dict[ele]
            else:
                new_str += ele
        return new_str

    def init_zh_chars(self, ):
        """Load the 1000 most frequent Chinese chars, used for random replacements."""
        self.zh_chars = [line.split()[0] for line in open(self.char_freq_path, 'r', encoding='utf-8')][0:1000]
        self.zh_chars_len = len(self.zh_chars)

    def random_choose_zh_chars(self, num):
        """Return *num* characters sampled uniformly from the frequent-char list."""
        chars = [self.zh_chars[random.randint(0, self.zh_chars_len-1)] for i in range(num)]
        return chars

    def pick_homo_word(self, token, samelen=1):
        """Return a homophone replacement for *token* (a char list).

        When samelen=1 the replacement must have the same length as the
        original; otherwise random frequent chars are substituted.
        """
        _token = ''.join(token)
        tlen = len(token)
        homo_word = self.homo.get_homo_by_word(_token)
        
        if homo_word is None or len(homo_word) == 0:
            homo_word = self.random_choose_zh_chars(tlen)
        elif len(homo_word) != tlen:
            if samelen == 1:
                homo_word = self.random_choose_zh_chars(tlen)
            else:
                homo_word = list(homo_word)
        else:
            homo_word = list(homo_word)
        return homo_word
    
    def pick_font_sim_word(self, tokens):
        """Replace each char with a visually similar one (random frequent
        char when no lookalike is known)."""
        sim_tokens = []
        for t in tokens:
            t_sims = self.font_sims.get(t, None)
            
            if t_sims is not None:
                sim_tokens.append(random.choice(t_sims))
            else:
                sim_tokens.append(self.random_choose_zh_chars(1)[0])
        return sim_tokens

    def pick_hz_jiegou_word(self, tokens):
        """Replace each char with its structural components (random frequent
        char when no decomposition is known).  May change the length."""
        jg_tokens = []
        for t in tokens:
            jg = self.hz_jg.get(t, None)
            
            if jg is not None:
                jg_tokens.extend(jg)
            else:
                jg_tokens.append(self.random_choose_zh_chars(1)[0])
        return jg_tokens
    
    def pad_to_max_length(self, seq, max_seg_length, pad_val):
        """Pad *seq* in place to *max_seg_length* with *pad_val*.

        NOTE(review): the truncation branch rebinds the local name only —
        an over-long seq is NOT truncated for the caller; confirm intended.
        """
        pad_len = max_seg_length - len(seq)
        if pad_len > 0:
            seq.extend([pad_val] * pad_len)
        else:
            seq = seq[0:max_seg_length]
    
    def parse_line(self, sent, lidx):
        """Corrupt one sentence and build the full training sample dict.

        Picks one edit family by probability (replace 60%, add-then-delete
        20%, delete-then-insert 10%, reorder 10%) and returns input
        tokens/ids, masked-LM labels, edit-tag ids, pointer targets and
        bookkeeping fields.
        """
        seg_words = self.chinese_words_seg(sent)    # Chinese word segmentation
        zh_words  = get_chinese_word(seg_words, min_len=2)     # tag the Chinese words
        
        bpe_tokens = zh_words_2d(zh_words) # regroup by word, e.g. [['[CLS]'], ['搜', '狗'], ['输', '入', '法'], ['[SEP]']]
        masked_lm_tokens = [ _ for _ in bpe_tokens]  # masked-LM labels
        #edit op: replace  insert  delete
        bpe_tokens_backup = [_ for _ in bpe_tokens]
        bpe_tokens_cnt = len(bpe_tokens)
        ignore_token_pos = [0, bpe_tokens_cnt - 1] + ignore_tokens(bpe_tokens)
        p1 = random.random()
        
        if p1 <= 0.6:
            # replace/mask op    60%
            ret = self.edit_replace_op(bpe_tokens, ignore_token_pos, self.mlm_per,  self.max_mask_per_sent, bpe_tokens_backup)
        elif p1 <= 0.8:
            # add then delete    20%
            ret = self.edit_delete_op(bpe_tokens, ignore_token_pos, self.mlm_per,  self.max_mask_per_sent, bpe_tokens_backup)
        elif p1 <= 0.9:
            # delete then add ins|n  10% 
            ret = self.edit_insert_op(bpe_tokens, ignore_token_pos, self.mlm_per, self.max_mask_per_sent, bpe_tokens_backup)
        else:
            # swap then reorder  10%
            ret = self.edit_reorder_op(bpe_tokens, ignore_token_pos, self.mlm_per, self.max_mask_per_sent, bpe_tokens_backup)
            
        assert len(ret) == 5, f"{p1}"
        input_tokens, masked_lm_tokens, edit_tags, edit_pointing, ops = ret
        assert len(input_tokens) == len(masked_lm_tokens) == len(edit_tags) == len(edit_pointing), \
            f'edit_delete_op false return. {len(input_tokens)} {len(masked_lm_tokens)} {len(edit_tags)} {len(edit_pointing)} {p1}'
        
        ops['p1'] = p1
        input_ids = self.bert_tokenizer.convert_tokens_to_ids(input_tokens)
        
        masked_lm_ids = self.bert_tokenizer.convert_tokens_to_ids(masked_lm_tokens)
        edit_tag_ids = [self.edit_tag_ids[dtag] for dtag in edit_tags]
        #raw = to_raw(input_tokens, edit_tags)
        raw = None
        kv = {  'input_raw': sent,
                'raw': raw,
                'input_tokens': input_tokens,
                'edit_tags': edit_tags,
                'masked_lm_tokens': masked_lm_tokens,
                'input_ids': input_ids,
                #'input_mask': input_mask,
                #'segment_ids': segment_ids,
                'masked_lm_ids': masked_lm_ids,
                'edit_tag_ids': edit_tag_ids,
                'edit_pointing': edit_pointing,
                "ops":ops,
                }
        return kv
    
    def edit_reorder_op(self, bpe_tokens, ignore_token_pos, mlm_per, max_mask_per_sent, bpe_tokens_backup):
        """Swap two spans (up to edit_ngram words each) to build reorder labels.

        The edit_tags stay all-'INS|0'; the reorder is expressed purely
        through the pointer targets (edit_pointing).
        """
        ngram = random.randint(1, self.edit_ngram)
        pos = del_pos_pick(bpe_tokens, masked_pos=ignore_token_pos, cnt=2, ngram=ngram, tok_char_max=self.max_mask_per_sent)
        pos = sorted(pos, key=lambda x:x[0])
        ops = { "reorder_op":[] }
        edit_tags = [['INS|0'] * len(t) for t in bpe_tokens]
        
        edit_tags_flatten = flatten(edit_tags)
        bpe_tokens_input_flatten = flatten(bpe_tokens)
        bpe_tokens_output_flatten = flatten(bpe_tokens)
        pointing = compute_points_del(edit_tags_flatten, True, self.DEL)
        
        if len(pos) != 2:
            # could not find two swappable spans: emit the sentence unchanged
            return bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing, ops
        
        bpe_pos = []
        last = 0
        for token in bpe_tokens:
            _pos = [last + i for i, _ in enumerate(token)]
            last += len(_pos)
            bpe_pos.append(_pos)
        # build the disordered sequence by swapping span o1 and span o2
        parts = [bpe_tokens[:pos[0][0]],                 # left of o1
                    bpe_tokens[pos[1][0]:pos[1][-1] + 1],   # o2
                    bpe_tokens[pos[0][-1] + 1:pos[1][0]],   # between o1 and o2
                    bpe_tokens[pos[0][0]:pos[0][-1] + 1],   # o1
                    bpe_tokens[pos[1][-1] + 1:]]            # right of o2
        
        o1 = bpe_tokens[pos[0][0]:pos[0][-1] + 1]
        o2 = bpe_tokens[pos[1][0]:pos[1][-1] + 1]
        
        o1 = [t for toks in o1 for t in toks]
        o2 = [t for toks in o2 for t in toks]
        o1s = ''.join(o1)
        o2s = ''.join(o2)
        
        ops['reorder_op'].append(o1s)
        ops['reorder_op'].append(o2s)
        points = [bpe_pos[:pos[0][0]],                     # left of o1
                    bpe_pos[pos[1][0]:pos[1][-1] + 1],     # o2 
                    bpe_pos[pos[0][-1] + 1:pos[1][0]],     # between o1 and o2
                    bpe_pos[pos[0][0]:pos[0][-1] + 1],     # o1
                    bpe_pos[pos[1][-1] + 1:]]              # right of o2

        input_tokens = [r for p in parts for q in p for r in q]
        masked_lm_tokens = [ _ for _ in input_tokens]   # direct copy: tokens themselves are unchanged
        pp = [t for p in points for q in p for t in q]
        pp_map = {v: i for i, v in enumerate(pp)}
        edit_pointing = [pp_map.get(i + 1, 0) for i in pp]
        edit_tags = [t for tags in edit_tags for t in tags]  # flatten
        
        assert len(input_tokens) == len(masked_lm_tokens) == len(edit_tags) == len(edit_pointing), f'edit_delete_op false return. {len(input_tokens)} {len(masked_lm_tokens)} {len(edit_tags)} {len(edit_pointing)}'
        return input_tokens, masked_lm_tokens, edit_tags, edit_pointing, ops
    
    def edit_insert_op(self, bpe_tokens, ignore_token_pos, mlm_per, max_mask_per_sent, bpe_tokens_backup):
        """Delete a word from the sentence and label the gap with 'INS|n'.

        The model must reinsert the deleted word (delete-first,
        insert-label-second construction).
        """
        del_edit_pos = del_pos_pick(bpe_tokens, ignore_token_pos, cnt=1, ngram=1, tok_char_max=self.max_mask_per_sent)
        masked_lm_tokens = copy.deepcopy(bpe_tokens)
        edit_tags = [['INS|0'] * len(t) for t in masked_lm_tokens]
        ops= {"insert_op":[]}
        # update backwards (positions come back in descending order)
        for i, dpos in enumerate(del_edit_pos):
            for p in dpos:
                del_tok = masked_lm_tokens.pop(p)
                ops['insert_op'].append(del_tok)
                
                insert_label = 'INS|' + str(len(del_tok))
                edit_tags[p - 1][-1] = insert_label # update the tag on the position before the deletion
                edit_tags.pop(p)  # drop the tag slot of the deleted position
        masked_lm_tokens_flatten = flatten(masked_lm_tokens)
        edit_tags_flatten = flatten(edit_tags)
        pointing = compute_points_del(edit_tags_flatten, True, self.DEL)
        
        return masked_lm_tokens_flatten, masked_lm_tokens_flatten, edit_tags_flatten, pointing, ops
        
    def parse_file(self, fpath, worker_id):
        """Yield one sample dict per usable line of *fpath*.

        Lines are lowercased, converted to half-width and simplified
        characters, and filtered by length before and after corruption.
        """
        for lidx, line in enumerate(open(fpath, 'r', encoding='utf-8')):
            line = line.lower().strip()
            line = full2half(line)                  # to half-width
            line = self.fan2jian(line)              # traditional -> simplified
            #sents = sent_cutter(line, self.maxseqlen) #sentence split
            line_length =  len(line)
            if line_length > self.maxseqlen - 2 or line_length < self.minseqlen : continue
            #line = text_norm(line)                     #normalization
            
            kv = self.parse_line(line, lidx)
            if len(kv) > 0:
                kv['file'] = fpath
                kv['lidx'] = lidx
                _length = len(kv['input_ids'])
                if _length > self.maxseqlen or _length < self.minseqlen : continue
                yield kv
        
    def edit_replace_op(self, bpe_tokens, ignore_token_pos, mlm_per, max_mask_per_sent, bpe_tokens_bak):
        """Build a replacement-style sample; sub-mode chosen at random.

        Sub-modes: keep-unchanged (10%), [MASK]-fill (20%), replaced tokens
        with addition slots (40%), step-1 replaced tokens (30%).
        """
        #bpe_tokens:[['[CLS]'], ['鸿', '海'], ['将'], ['与'], ['英', '国'], ['arm'], ['设', '立'], ['半', '导', '体'], ['研', '发'], ['基', '地'], ['[SEP]']]
        #ignore_token_pos:[0, 10, 5]
        p1 = random.random()
        bpe_tokens_tmp = [_ for _ in bpe_tokens]
        masked_pos, masked_pos_tokens = whole_word_mask(bpe_tokens_tmp, ignore_token_pos, mlm_per, max_mask_per_sent)
        # [7, 6, 2] [ ['半', '导', '体'], ['设', '立'],  ['将']]
        ops = { "replace_op":[] }
        

        if len(masked_pos) == 0 or p1 <= 0.1:
            #keep same op  10%
            ops['replace_op'] = ["KEEPSAME"]
            edit_tags = [['INS|0'] * len(_) for _ in bpe_tokens_tmp]
            
            edit_tags_flatten = flatten(edit_tags)
            bpe_tokens_flatten = flatten(bpe_tokens_tmp)
            bpe_tokens_input_flatten = copy.deepcopy(bpe_tokens_flatten)
            bpe_tokens_output_flatten = copy.deepcopy(bpe_tokens_flatten)   #golden tags
            pointing = compute_points_del(edit_tags_flatten, True, delete='DEL|1')  #keep the delete point.
            
            return bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing, ops
        elif p1 <= 0.3:
            ## correction-prediction stage 2
            # mask replace op: insert [MASK] and predict its word — cloze /
            # missing-word insertion  20%
            mask_tokens = [["[MASK]"] * len(_) for _ in masked_pos_tokens]  #not carry the replaced tokens
            ops['replace_op'] = [copy.deepcopy(mask_tokens) , copy.deepcopy(masked_pos_tokens) ]
            bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing \
                = build_edit_tags(bpe_tokens,  masked_pos , mask_tokens, self.DEL)
                
        elif p1 <= 0.7:
            # correction-prediction stage 2
            # mask replace op: [MASK] + addition slots — variable-length
            # correction mode (from felix)
            replaced_tokens = [self.build_replace_tokens(token, samelen=0) for token in masked_pos_tokens]
            
            ops['replace_op'] = [copy.deepcopy(replaced_tokens), ["&addtion"], copy.deepcopy(masked_pos_tokens)]
            bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing \
                = build_edit_tags_with_addition(bpe_tokens,  masked_pos , replaced_tokens, self.DEL)
            
        elif p1 <= 1.0:
            #step 1
            replaced_tokens = [self.build_replace_tokens(token, samelen=0) for token in masked_pos_tokens]
            # mask replace op
            ops['replace_op'] = [ copy.deepcopy(masked_pos_tokens), copy.deepcopy(replaced_tokens)]
            t = build_edit_tags_step1(bpe_tokens,  masked_pos , replaced_tokens,  self.DEL)
            bpe_tokens_input_flatten  = t[0]
            bpe_tokens_output_flatten = t[1]
            edit_tags_flatten = t[2]
            pointing = t[3]
            
        # elif p1 <= 1.0:
        #     replaced_tokens = [self.build_replace_tokens(token, samelen=1) for token in masked_pos_tokens]
        #     #mask replace op ，设置replace token 预测masked token from mac  ######### drop!!
        #     ops['replace_op'] = [replaced_tokens, masked_pos_tokens]
        #     bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing \
        #         = build_edit_tags(bpe_tokens,  masked_pos , replaced_tokens, self.DEL)
        else:
            pass
        
        return bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing, ops
    
    def edit_delete_op(self, bpe_tokens, ignore_token_pos, mlm_per,  max_mask_per_sent, bpe_tokens_backup):
        """Insert spurious tokens and label them 'DEL|1' (insert-first,
        delete-label-second construction).

        60% of the time the insertions are fabricated from homophones /
        lookalikes / filler words; otherwise a single MLM-generated ngram
        is inserted.
        """
        input_tokens = copy.deepcopy(bpe_tokens)
        masked_lm_tokens = copy.deepcopy(bpe_tokens)
        edit_tags = [['INS|0'] * len(t) for t in bpe_tokens]
        ops= {"delete_op":[], }
        
        # insert an ngram to construct the delete op
        p2 = random.random()
        if p2 < 0.6:
            ngram = random.randint(1, self.edit_ngram)
            edit_ins_pos = del_pos_pick(bpe_tokens, ignore_token_pos, cnt=1, ngram=ngram, tok_char_max=self.max_mask_per_sent)
            #insert pos token and tag
            for i, ipos in enumerate(edit_ins_pos):
                pos_tokens = [bpe_tokens[p] for p in ipos]
                ins_tokens = [self.build_del_tokens(token) for token in pos_tokens]
                pos_b = ipos[0]
                for j, t in enumerate(ins_tokens[::-1]):
                    ops['delete_op'].append(t)
                    input_tokens.insert(pos_b, t)
                    #masked_lm_tokens.insert(pos_b, t)
                    masked_lm_tokens.insert(pos_b, ['[unused99]'] * len(t))   # inserted tokens predict [unused99] — equivalent to DEL|1
                    edit_tags.insert(pos_b, ['DEL|1'] * len(t))
        elif p2 <= 1:
            #ngram edit
            edit_ins_pos = del_pos_pick(bpe_tokens, ignore_token_pos, cnt=1, ngram=1, tok_char_max=self.max_mask_per_sent)
            if len(edit_ins_pos) > 0:
                ipos = edit_ins_pos[0]
                ins_tokens_top5 = self.pick_mlm_tokens(bpe_tokens, ipos) # MLM-generated insertion; only one ngram span is used
                ins_tokens = random.choice(ins_tokens_top5)
                ops['delete_op'].append(ins_tokens)
                input_tokens.insert(ipos[0], ins_tokens)
                #masked_lm_tokens.insert(ipos[0], ins_tokens)
                masked_lm_tokens.insert(ipos[0], ['[unused99]'] * len(ins_tokens))   # inserted tokens predict [unused99] — equivalent to DEL|1
                edit_tags.insert(ipos[0], ['DEL|1'] * len(ins_tokens))
        
        # not include reorder...
        input_tokens_flatten = flatten(input_tokens)
        masked_lm_tokens_flatten = flatten(masked_lm_tokens)
        edit_tags_flatten = flatten(edit_tags)
        edit_pointing = compute_points_del(edit_tags_flatten, True, self.DEL)
        
        
        return input_tokens_flatten, masked_lm_tokens_flatten, edit_tags_flatten, edit_pointing, ops
    
    def get_file_info(self, paths):
        """Count total lines across *paths* (used by __len__)."""
        info = {'start':0, 'end':0,}
        for path in paths:
            for _ in open(path, 'r', encoding='utf-8'):
                info['end'] += 1   #maybe not correct
        return info

    def build_replace_tokens(self, tokens, samelen=1):
        """Produce a corrupted replacement for *tokens* without [MASK]:
        homophone (30%), visually similar (30%), structural split (30%),
        or random chars (10%, with random length change when samelen=0)."""
        per = random.random()
        if per <= 0.3:
            # homophone word
            rep_tokens = list(self.pick_homo_word(tokens, samelen))
        elif per <= 0.6:
            # visually similar chars
            rep_tokens = self.pick_font_sim_word(tokens)
        elif per <= 0.9:
            # structural character decomposition
            rep_tokens = self.pick_hz_jiegou_word(tokens)
        elif per <= 1.0:
            # random chars
            sz = len(tokens)
            if samelen == 0:
                # random length change
                r_ = random.choice([-1, 0, 1, 2])
                sz = max(sz + r_, 1)
            rep_tokens = self.random_choose_zh_chars(sz)
        return rep_tokens

    def build_del_tokens(self, tokens):
        """Produce tokens to insert (later labeled for deletion), no [MASK]:
        the tokens themselves (25%), homophones (25%), lookalikes (25%),
        a filler word (15%), or random chars (10%)."""
        per = random.random()
        if per <= 0.25:
            rep_tokens = tokens
        elif per <= 0.5:
            rep_tokens = self.pick_homo_word(tokens)
        elif per <= 0.75:
            rep_tokens = self.pick_font_sim_word(tokens)
        elif per <= 0.9:
            idx = random.randint(0, self.edit_ftokens_len-1)
            rep_tokens = [self.edit_ftokens[idx]]
        elif per <= 1:
            rep_tokens = self.random_choose_zh_chars(len(tokens))
        return rep_tokens

def to_raw(toks, tags):
    """Render parallel token/tag lists as a single annotated string.

    A '/TAG ' separator is emitted wherever the tag changes between
    consecutive tokens; afterwards markers are normalized:
    'DEL|1' -> 'E', '[CLS]'/'[SEP]' removed, '/INS|0' removed.

    Assumes toks is non-empty and len(tags) >= len(toks).
    """
    raw_str = toks[0]
    for j in range(1, len(toks)):
        # The original also tested `j < size`, which is always true for
        # j in range(1, size); the only real condition is a tag change.
        if tags[j] != tags[j - 1]:
            raw_str += '/' + tags[j - 1] + ' ' + toks[j]
        else:
            raw_str += toks[j]
    return raw_str.replace('DEL|1', 'E').replace('[CLS]', '').replace('[SEP]', '').replace('/INS|0', '').strip()

def collate_fn(data):
    """Pad a batch of examples to the batch max length and stack tensors.

    Args:
        data: list of dicts, each with list-of-int values under keys
            'input_ids', 'masked_lm_ids', 'edit_tag_ids', 'edit_pointing'.

    Returns:
        Tuple (input_ids, input_mask, segment_ids, masked_lm_ids,
        edit_tag_ids, edit_pointing), each a (batch, maxlen) tensor.
        Padding positions are zero; input_mask is 1.0 over real tokens;
        segment_ids is all zeros (single-segment inputs).

    Cleanups vs. the original: removed the dead `kv` dict (built and
    discarded), the no-op `try/except: raise`, the unnecessary inner
    `merge` wrapper, and the loop variable that shadowed `data`.
    """
    lens = [len(d['input_ids']) for d in data]
    bs, maxlen = len(data), max(lens)

    input_ids_p = torch.zeros(bs, maxlen).long()
    input_mask_p = torch.zeros(bs, maxlen).float()
    segment_ids_p = torch.zeros(bs, maxlen).long()
    masked_lm_ids_p = torch.zeros(bs, maxlen).long()
    edit_tag_ids_p = torch.zeros(bs, maxlen).long()
    edit_pointing_p = torch.zeros(bs, maxlen).long()

    for i, example in enumerate(data):
        end = lens[i]
        input_ids_p[i, :end] = torch.tensor(example['input_ids'], dtype=torch.long)
        input_mask_p[i, :end] = torch.ones(end, dtype=torch.float)
        masked_lm_ids_p[i, :end] = torch.tensor(example['masked_lm_ids'], dtype=torch.long)
        edit_tag_ids_p[i, :end] = torch.tensor(example['edit_tag_ids'], dtype=torch.long)
        edit_pointing_p[i, :end] = torch.tensor(example['edit_pointing'], dtype=torch.long)

    return input_ids_p, input_mask_p, segment_ids_p, masked_lm_ids_p, edit_tag_ids_p, edit_pointing_p

def data_reader():
    """Smoke-test the training pipeline: stream batches from the
    configured corpus and print wall-clock time per `cnt` iterations.
    """
    from felix_flags import config_opts

    args = config_opts()
    dataset = MateIterableDataset(datadir=args.train_file, args=args)
    bs = 8
    dataloader = DataLoader(dataset, collate_fn=collate_fn, num_workers=0,
                            batch_size=bs, drop_last=True)
    cnt = 10
    t0 = time.time()
    for i, data in enumerate(dataloader):
        if i % cnt != 0:
            continue
        t_now = time.time()
        print(f'iter: {i}, {t_now - t0:.3}s / {cnt}')
        t0 = t_now


def sent_cutter(text, maxlen):
    """Split *text* into sentences on Chinese/Western terminators, then
    greedily re-merge adjacent pieces while the combined length stays
    below *maxlen*.

    Returns a list of sentence strings. Whitespace inside a piece is
    used as an additional split point (input may be pre-tokenized);
    the whitespace itself is dropped when pieces are merged.
    """
    # Terminator not followed by a closing quote.
    text = re.sub(r'([。！!？；;?])([^’”])', r'\1\n\2', text)
    # English ellipsis (six dots) not followed by a closing quote.
    text = re.sub(r'(\.{6})([^’”])', r'\1\n\2', text)
    # Chinese ellipsis (two '…') not followed by a closing quote.
    text = re.sub(r'(…{2})([^’”])', r'\1\n\2', text)
    # Terminator + closing quote, followed by something other than another
    # quote. BUGFIX: the original character class '[。！!？；;\?\.{6}\…{2}]'
    # contained '{', '6', '}', '2' and '.' as literal members, so e.g. '6”'
    # wrongly started a new sentence; the alternation below matches only
    # real terminators (including both ellipsis forms).
    text = re.sub(r'((?:[。！!？；;?]|\.{6}|…{2})[’”])([^’”])', r'\1\n\2', text)
    norm_sents = text.strip().split('\n')

    sents = []
    for chunk in norm_sents:
        for piece in chunk.split():  # input may carry word-segmentation spaces
            if sents and len(sents[-1]) + len(piece) < maxlen:
                sents[-1] += piece
            else:
                sents.append(piece)
    return sents


def pointing_tokens(pointing, tokens):
    """Reorder *tokens* by walking the pointer chain in *pointing*.

    Starting from pointing[0], slot k of the output receives the token
    at the current pointer, and the pointer advances to pointing[p].
    The walk stops at a zero pointer (remaining slots stay '[CLS]') or
    after len(pointing) steps.
    """
    result = ['[CLS]'] * len(tokens)
    cursor = pointing[0]
    for slot in range(len(pointing)):
        if cursor == 0:
            # Chain terminated; slot is already '[CLS]' but the original
            # assigned it explicitly, so keep the write for parity.
            result[slot] = '[CLS]'
            break
        result[slot] = tokens[cursor]
        cursor = pointing[cursor]
    return result


'''
1.wwm mac masked, ngram not included; 0.6; syn:0.7, homo:0.1, rand:0.1, stay:0.1
2.ngram add insert 0.6 + 0.6*0.4; syn:0.25, homo:0.25, rand:0.2, repeat:0.3
3.ngram=1 drop 0.4 * 0.2
4.ngram swap 0.4 * 0.4
'''
if __name__ == "__main__":

    # Smoke test: stream batches through the dataloader and print timing.
    data_reader()
    # Example invocations:
    #python data_loader_freeze.py --train_file /kaggle/input/wiki2019zh-corpus/wiki2019zh_corpus
    #python data_loader_freeze.py --train_file D:/workspace/res_input_output/raw_input/wiki2019zh_corpus

