import copy

class Point(object):
    """A pointer target in an edit-tag sequence.

    Attributes:
        point_index: int index of the token this point refers to
            (0 is used as the terminal / "deleted" pointer by
            ``compute_points_del``).
        added_phrase: optional phrase attached to this point.
    """

    def __init__(self, point_index, added_phrase=''):
        self.added_phrase = added_phrase

        try:
            self.point_index = int(point_index)
        except (TypeError, ValueError):
            # int() raises TypeError for None/list inputs and ValueError for
            # non-numeric strings; normalize both to the documented ValueError
            # so callers only have one exception type to handle.
            raise ValueError(
                'point_index should be an Integer, not {}'.format(point_index))

    def __str__(self):
        # Serialized form used by downstream tag files: "<index>|<phrase>".
        return '{}|{}'.format(self.point_index, self.added_phrase)

    def __repr__(self):
        return 'Point({!r}, {!r})'.format(self.point_index, self.added_phrase)
    
def compute_points_del(edit_tags, flattened=True, delete='DEL|1'):
    """Compute, for each tag, the index of the next non-deleted token.

    For every position except the last: a position tagged ``delete`` points
    to 0 (dropped); any other position points to the next position whose tag
    is not ``delete``.  A final 0 is always appended as the terminal pointer.

    Args:
        edit_tags: flat list of tag strings, or a list of lists when
            ``flattened is False`` (it is flattened first).
        flattened: whether ``edit_tags`` is already flat.
        delete: the tag string marking a deleted token.

    Returns:
        List of int pointers, one per tag plus the trailing terminal 0.
    """
    if flattened is False:
        tags = [tag for group in edit_tags for tag in group]
    else:
        tags = list(edit_tags)  # shallow copy, same as the original behavior

    pointing = []
    # Iterate over all positions except the last (the original sliced a
    # throwaway copy with [0:-1] on every loop test; range(n - 1) is
    # equivalent, including the empty-input case).
    for i in range(len(tags) - 1):
        if tags[i] == delete:
            pointing.append(0)
            continue
        # Default target is the immediate successor; advance past any run
        # of delete-tagged positions that follows.  (Renamed from the
        # original `next`, which shadowed the builtin.)
        target = i + 1
        for offset, tag in enumerate(tags[i + 1:]):
            if tag != delete:
                target += offset
                break
        pointing.append(target)

    pointing.append(0)  # terminal pointer
    return pointing

def flatten(two_dim_list):
    """Concatenate the sub-lists of ``two_dim_list`` into one flat list."""
    result = []
    for sub in two_dim_list:
        result.extend(sub)
    return result

def build_edit_tags(bpe_tokens, masked_pos, noise_tokens, delete='DEL|1'):
    """Build flattened input/output token sequences, edit tags and pointers.

    Each masked position's tokens are overwritten (in the input copy) by the
    same-length noise tokens; the output copy keeps the original tokens.  All
    positions receive the 'INS|0' tag, so the pointer sequence is simply
    sequential.

    Args:
        bpe_tokens: list of token groups, e.g. [['[CLS]'], ['a', 'b'], ...].
        masked_pos: group indices to mask, in REVERSE order (descending
            within contiguous runs), e.g. [7, 6, 2].
        noise_tokens: one token list per masked position, each the same
            length as the group it replaces (consumed in masked_pos order).
        delete: tag string forwarded to compute_points_del.

    Returns:
        Tuple of (input tokens, output tokens, edit tags, pointers), all
        flattened to single lists.
    """
    edit_tags = [['INS|0'] * len(group) for group in bpe_tokens]
    bpe_tokens_input = copy.deepcopy(bpe_tokens)
    bpe_tokens_output = copy.deepcopy(bpe_tokens)

    # Merge contiguous masked positions into groups, e.g. [7, 6, 2] ->
    # [[7, 6], [2]].  The -1 sentinels flush the running group at both ends.
    masked_pos_lx = []
    c_pos = []
    masked_pos = [-1] + masked_pos + [-1]
    for i, p in enumerate(masked_pos):
        if p == masked_pos[i - 1] - 1:
            # Contiguous with the previous position: extend the current group.
            c_pos.append(p)
        else:
            if len(c_pos) > 0:
                # Non-contiguous: flush the finished group.
                masked_pos_lx.append(c_pos)
            if p != -1:
                # Start a new group (skip the sentinels).
                c_pos = [p]

    # NOTE: the original also precomputed a merged `noise_tokens_lx` here,
    # but never used it in this function — removed as dead code.

    # Overwrite each masked group's leading tokens with the corresponding
    # noise tokens (assumed same length) and reset its tags to 'INS|0'.
    step = 0
    for idx in masked_pos_lx:
        for jdx in idx:
            sz = len(noise_tokens[step])
            bpe_tokens_input[jdx][0:sz] = copy.deepcopy(noise_tokens[step])
            edit_tags[jdx] = ['INS|0'] * len(bpe_tokens_input[jdx])
            step += 1

    edit_tags_flatten = flatten(edit_tags)
    bpe_tokens_input_flatten = flatten(bpe_tokens_input)
    bpe_tokens_output_flatten = flatten(bpe_tokens_output)
    pointing = compute_points_del(edit_tags_flatten, True, delete)

    return bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing


def build_edit_tags_with_addition(bpe_tokens, masked_pos, noise_tokens, delete='DEL|1'):
    """Build flattened sequences for the stage-two cloze mode.

    Unlike ``build_edit_tags``, ``noise_tokens`` need not be length-aligned
    with the masked groups: the merged noise tokens for each contiguous
    masked run are appended after the run's first group, wrapped in
    '[unused97]' / '[unused98]' markers (<rep> ... </rep>), while the masked
    groups themselves are overwritten with '[MASK]' tokens of the original
    length.  Returns (input tokens, output tokens, edit tags, pointers),
    all flattened.
    """
    # Stage-two cloze-style mode; noise_tokens may be unaligned in length.
    edit_tags =  [['INS|0'] * len(_) for _ in bpe_tokens]
    bpe_tokens_input = copy.deepcopy(bpe_tokens)
    bpe_tokens_output = copy.deepcopy(bpe_tokens)
    masked_pos_lx = []  # contiguous masked_pos merged into groups, e.g. [[7, 6], [2]] (reverse order)
    c_pos = []
    
    # Guard: noise_tokens must hold real tokens here, not '[MASK]' placeholders.
    assert noise_tokens[0][0] != "[MASK]" 
    
    # -1 sentinels at both ends flush the running group below.
    masked_pos = [-1] + masked_pos + [-1]
    
    for i, p in enumerate(masked_pos):
        if p == masked_pos[i-1] - 1:  # contiguous with previous position: merge into current group
            c_pos.append(p)
        else:
            if len(c_pos) > 0:        # non-contiguous new position: flush group and re-initialize
                masked_pos_lx.append(c_pos)
            if p != -1: # start a new group (skip sentinels)
                c_pos = [p]
    
    noise_tokens_lx = [] # noise tokens merged per contiguous run (reversed within the run), e.g. for [[7, 6], [2]]:
                         # [['[ban]', '[dao]', '[ti]'], ['[she]', '[li]'], ['[jiang]']]
                         # -> [['[she]', '[li]', '[ban]', '[dao]', '[ti]'], ['[jiang]'] ]
    step = 0
    for i, idx in enumerate(masked_pos_lx):
        sz = len(idx)
        # Reverse the slice because masked positions are stored in reverse order.
        tokens_i = noise_tokens[step:step+sz][::-1]
        tokens_i = flatten(tokens_i)
        step = step + sz
        noise_tokens_lx.append(tokens_i)
    
    
    for i, idx in enumerate(masked_pos_lx):
        noise_tokens_i = noise_tokens_lx[i]
        # Inject the noise words after the run's first group, wrapped in markers.
        add_tokens = ['[unused97]'] + noise_tokens_i + ['[unused98]']    #<rep> ... </rep>
        pos_add = idx[0]
        bpe_tokens_input[pos_add].extend(add_tokens) 
        bpe_tokens_output[pos_add].extend(add_tokens)
        edit_tags[pos_add].extend(['INS|0'] * len(add_tokens))
        
        for j, jdx in enumerate(idx):
            sz = len(bpe_tokens[jdx])
            bpe_tokens_input[jdx][0:sz] = ["[MASK]"] * sz  #same length mask tag
    
    edit_tags_flatten = flatten(edit_tags)
    bpe_tokens_input_flatten = flatten(bpe_tokens_input)
    bpe_tokens_output_flatten = flatten(bpe_tokens_output)
    pointing = compute_points_del(edit_tags_flatten, True, delete)
    
    return  bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing



def build_edit_tags_step1(bpe_tokens, masked_pos, noise_tokens, delete='DEL|1'):
    """Build flattened sequences for the stage-one edit setup.

    Each masked group is replaced (in the input copy) by its noise tokens
    and tagged 'DEL|1'; the output copy holds '[unused97]' placeholders of
    the same length.  The token just before each contiguous masked run gets
    an 'INS|<n>' tag, where <n> is the total original length of the run.
    Returns (input tokens, output tokens, edit tags, pointers), flattened.
    """
    edit_tags =  [['INS|0'] * len(_) for _ in bpe_tokens]
    bpe_tokens_input = copy.deepcopy(bpe_tokens)
    bpe_tokens_output = copy.deepcopy(bpe_tokens)
    masked_pos_lx = []  # contiguous masked_pos merged into groups, e.g. [[7, 6], [2]] (reverse order)
    c_pos = []
    # Guard: noise_tokens must hold real tokens here, not '[MASK]' placeholders.
    assert noise_tokens[0][0] != "[MASK]",  f"not set noise_tokens [MASK] "
    # -1 sentinels at both ends flush the running group below.
    masked_pos = [-1] + masked_pos + [-1]
    
    for i, p in enumerate(masked_pos):
        if p == masked_pos[i-1] - 1:  # contiguous with previous position: merge into current group
            c_pos.append(p)
        else:
            if len(c_pos) > 0:        # non-contiguous new position: flush group and re-initialize
                masked_pos_lx.append(c_pos)
            if p != -1: # start a new group (skip sentinels)
                c_pos = [p]
    
    step = 0
    for i, idx in enumerate(masked_pos_lx):
        # Insertion anchor: the group just before the run's lowest index.
        # NOTE(review): if idx[-1] == 0 this becomes -1 and Python's negative
        # indexing would write the INS tag onto the LAST group — confirm that
        # masked positions never include group 0.
        ins_pos = idx[-1] - 1
        ins_n = 0
        for j, jdx in enumerate(idx):
            ins_n = ins_n + len( bpe_tokens[jdx])
            bpe_tokens_input[jdx] = copy.deepcopy(noise_tokens[step])  #same length mask tag
            bpe_tokens_output[jdx] = ['[unused97]'] * len(bpe_tokens_input[jdx])  #same length mask tag
            edit_tags[jdx]  = ['DEL|1'] *  len(bpe_tokens_input[jdx])  # mark the whole group as DEL|1
            step =  step + 1
        edit_tags[ins_pos][-1] = f'INS|{ins_n}'  # set the edit-operation insertion tag on the anchor
        
    edit_tags_flatten = flatten(edit_tags)
    bpe_tokens_input_flatten = flatten(bpe_tokens_input)
    bpe_tokens_output_flatten = flatten(bpe_tokens_output)
    pointing = compute_points_del(edit_tags_flatten,  True, delete)
    
    return  bpe_tokens_input_flatten, bpe_tokens_output_flatten, edit_tags_flatten, pointing

if __name__ == '__main__':
    # Small smoke-test: exercise all three builders on one example sentence
    # and show the cloze-mode (with-addition) result.
    bpe_tokens = [['[CLS]'], ['鸿', '海'], ['将'], ['与'], ['英', '国'], ['arm'], ['设', '立'], ['半', '导', '体'], ['研', '发'], ['基', '地'], ['[SEP]']]
    masked_pos = [7, 6, 2]
    noise_masks = [['[MASK]', '[MASK]', '[MASK]'], ['[MASK]', '[MASK]', ], ['[MASK]']]
    noise_tokens = [['ban', 'dao', 'ti'], ['she', ], ['jiang']]

    t1 = build_edit_tags(bpe_tokens, masked_pos, noise_masks, 'DEL|1')
    t2 = build_edit_tags_step1(bpe_tokens, masked_pos, noise_tokens, 'DEL|1')
    t3 = build_edit_tags_with_addition(bpe_tokens, masked_pos, noise_tokens, 'DEL|1')

    for item in t3:
        print(item)