# -*- coding: utf-8 -*-  
'''
bert数据集
    - tfrecord文件格式，每个文件50000条记录
    - 事先读入buffer个上下联，作为随机下联
    - 遍历上下联
    -     随机替换掉buffer中的1条记录
    -     生成tokens
    -        上联首部追加[CLS]，尾部追加[SEP]
    -        一定概率(50%)决定该样本是正样本还是负样本
    -            正样本：追加真实的下联
    -            负样本：随机从buffer中挑1个句子为下联
    -        下联尾部追加[SEP]
    -        tokens = 上联 + 下联
    -     给tokens随机生成[MASK]
    -         按照概率计算tokens应该生成的[MASK]数量
    -         打散tokens的下标，并遍历打散后的下标列表
    -             判断[MASK]是否够数，够数了退出循环
    -             改写改下标的词
    -                 80%的概率改写为[MASK]
    -                 10%的概率保留原词
    -                 10%的概率替换为词库中其他词
    -     写入tfrecord
    
    

@author: luoyi
Created on 2021年4月14日
'''
import tensorflow as tf
import random
import copy
import glob

import utils.conf as conf
import utils.dictionaries_bert as dict_bert
import data.dataset as ds


#    buffer一定数量的上下联，作为随机下联
class BufferSentence(object):
    '''Pool of sentences sampled from the training couplets.

    Used to draw a random "second sentence" when building negative samples
    for BERT next-sentence-prediction pre-training.
        @param buffer_size: number of couplets to preload (the pool holds up
                            to 2 * buffer_size sentences: both halves)
        @param random_seed: seed for the internal random source
    '''
    def __init__(self, buffer_size=1024, random_seed=1024):
        self._buffer_size = buffer_size
        self._buffer = []
        self._initialize(buffer_size)
        
        self._rnd = random.Random(random_seed)
        pass
    
    #    Preload the pool with the first buffer_size couplets of the training set.
    def _initialize(self, buffer_size=1024):
        ds_iter = ds.read_couplet_iterator(in_path=conf.DATASET.get_in_train(), out_path=conf.DATASET.get_label_train(), count=buffer_size)
        for f, s in ds_iter:
            self._buffer.append(f)
            self._buffer.append(s)
            pass
        pass
    
    #    Overwrite two randomly chosen slots with the current couplet.
    def replace(self, f, s):
        '''
            @param f: first sentence (上联)
            @param s: second sentence (下联)
        '''
        #    BUGFIX: index by the actual pool size instead of the nominal
        #    2*buffer_size — the dataset may yield fewer couplets than
        #    requested, and the old randint(0, 2*buffer_size-1) then raised
        #    IndexError.
        if not self._buffer:
            self._buffer.append(f)
            self._buffer.append(s)
            return
        self._buffer[self._rnd.randrange(len(self._buffer))] = f
        self._buffer[self._rnd.randrange(len(self._buffer))] = s
        pass
    
    #    Draw one random sentence from the pool.
    def random(self):
        return self._buffer[self._rnd.randrange(len(self._buffer))]
    pass


#    Probability switch: returns True with probability `prob`.
def prob_switch(prob, rnd=random.Random(1024)):
    '''
        @param prob: probability in [0, 1] of returning True
        @param rnd: random source; NOTE the default is a single shared Random
                    instance created at definition time, so successive default
                    calls advance the same stream
    '''
    draw = rnd.random()
    return draw < prob
#    Build the NSP token sequence for one couplet.
def sample_tokens(f, s, 
                  buffer_sentence=None, 
                  rnd=random.Random(1024), 
                  neg_prob=conf.BERT.get_neg_prob()):
    '''
        @param f: first-sentence token list (上联)
        @param s: real second-sentence token list (下联)
        @param buffer_sentence: BufferSentence pool used for negative sampling
        @param rnd: random source
        @param neg_prob: probability of producing a negative sample
        @return: (tokens, is_positive, sentence_position)
                 tokens = [CLS] + f + [SEP] + second + [SEP]
    '''
    tokens_a = f
    #    decide whether this sample is a negative one
    is_negative = prob_switch(neg_prob, rnd)
    
    if (is_negative):
        #    negative sample: second sentence drawn at random from the pool
        tokens_b = buffer_sentence.random()
        #    BUGFIX: refresh the pool with the REAL couplet (f, s). The old
        #    code wrote the sampled tokens_b back, so the true second sentence
        #    never entered the pool and sampled sentences were duplicated.
        buffer_sentence.replace(f, s)
        pass
    else:
        tokens_b = s
        pass
    
    #    add special markers: [CLS] A [SEP] B [SEP]
    tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']
    tokens_b = tokens_b + ['[SEP]']
    tokens = tokens_a + tokens_b
    
    #    segment (sentence position) encoding: 1 for A, 2 for B
    sentence_position_a = [1] * len(tokens_a)
    sentence_position_b = [2] * len(tokens_b)
    sentence_position = sentence_position_a + sentence_position_b
    
    #    easier to reason about downstream as "is this a positive sample"
    is_positive = not is_negative
    return tokens, is_positive, sentence_position
#    Randomly mask tokens for the MLM task.
def sample_mask(tokens, 
                rnd=random.Random(1024), 
                rewrite_prob=conf.BERT.get_rewrite_prob(), 
                rewrite_max=conf.BERT.get_rewrite_max(),
                rewrite_mask=conf.BERT.get_rewrite_mask(), 
                rewrite_original=conf.BERT.get_rewrite_original(), 
                rewrite_random=conf.BERT.get_rewrite_random()):
    '''
        @param tokens: token list produced by sample_tokens
        @param rnd: random source
        @param rewrite_prob: fraction of tokens to select for masking
        @param rewrite_max: hard cap on the number of masked positions
        @param rewrite_mask: probability a selected token becomes [MASK] (80% per the module header)
        @param rewrite_original: probability a selected token keeps its original word (10%)
        @param rewrite_random: probability a selected token becomes a random vocabulary word (10%)
        @return: (out_tokens, mask_idxs, mask_tokens)
    '''
    #    token indices, visited in random order
    tokens_idxs = list(range(len(tokens)))
    rnd.shuffle(tokens_idxs)
    out_tokens = copy.copy(tokens)
    
    #    number of [MASK]s implied by rewrite_prob, capped by rewrite_max
    num_mask = max(1, int(len(tokens) * rewrite_prob))
    num_mask = min(rewrite_max, num_mask)
    mask_idxs = []
    mask_tokens = []
    for token_idx in tokens_idxs:
        #    stop once enough positions are selected
        if (len(mask_idxs) >= num_mask): break
        
        #    BUGFIX: never mask the special [CLS]/[SEP] markers
        if (tokens[token_idx] in ('[CLS]', '[SEP]')): continue
        
        #    BUGFIX: a single cumulative draw gives the exact
        #    rewrite_mask / rewrite_original / rewrite_random split. The old
        #    three independent prob_switch calls let a later check override an
        #    earlier one, so the realized probabilities were not 80/10/10.
        draw = rnd.random()
        if (draw < rewrite_mask):
            mask_tag = '[MASK]'
        elif (draw < rewrite_mask + rewrite_original):
            mask_tag = tokens[token_idx]
        else:
            mask_tag = dict_bert.random_word(rnd)
        out_tokens[token_idx] = mask_tag
        #    record the replaced position and its true word
        mask_idxs.append(token_idx)
        mask_tokens.append(tokens[token_idx])
        pass
    return out_tokens, mask_idxs, mask_tokens
#    Iterate the dataset and yield pre-training samples.
def sample_generator(f_path,
                     s_path,
                     count,
                     max_sen_len=conf.BERT.get_pre_training_sentence_maxlen(),
                     neg_prob=conf.BERT.get_neg_prob(),
                     rewrite_prob=conf.BERT.get_rewrite_prob(),
                     rewrite_max=conf.BERT.get_rewrite_max(),
                     rewrite_mask=conf.BERT.get_rewrite_mask(),
                     rewrite_original=conf.BERT.get_rewrite_original(),
                     rewrite_random=conf.BERT.get_rewrite_random()):
    '''Yield BERT pre-training samples built from the couplet dataset.
        @param f_path: path of the first-sentence (上联) file
        @param s_path: path of the second-sentence (下联) file
        @param count: number of couplets to read
        @param max_sen_len: fixed length out_tokens / sentence_position are padded or truncated to
        @param neg_prob..rewrite_random: forwarded to sample_tokens / sample_mask
        @yield: (out_tokens, is_positive, sentence_position, mask_idxs, mask_tokens, tokens)
    '''
    #    pool of sentences used as random negatives
    buffer_sentence = BufferSentence()
    #    shared random source (fixed seed -> reproducible sample stream)
    rnd = random.Random(1024)
    
    #    iterate over the dataset
    ds_iter = ds.read_couplet_iterator(in_path=f_path, out_path=s_path, count=count)
    for f, s in ds_iter:
        tokens, is_positive, sentence_position = sample_tokens(f, s, buffer_sentence, rnd, neg_prob)
        out_tokens, mask_idxs, mask_tokens = sample_mask(tokens, rnd, rewrite_prob, rewrite_max, rewrite_mask, rewrite_original, rewrite_random)
        
        #    convert words to ids and force length to max_sen_len (truncate / zero-pad)
        #    NOTE(review): truncation may drop positions that are still listed in
        #    mask_idxs — confirm max_sen_len is large enough for the data
        out_tokens = dict_bert.word2idx_slist(out_tokens)
        if (len(out_tokens) > max_sen_len): 
            out_tokens = out_tokens[:max_sen_len]
            sentence_position = sentence_position[:max_sen_len]
            pass
        if (len(out_tokens) < max_sen_len): 
            out_tokens = out_tokens + [0] * (max_sen_len - len(out_tokens))
            sentence_position = sentence_position + [0] * (max_sen_len - len(sentence_position))
            pass
        #    convert the replaced true words to ids
        mask_tokens = dict_bert.word2idx_slist(mask_tokens)
        #    force mask_idxs / mask_tokens to length rewrite_max (truncate / zero-pad)
        if (len(mask_idxs) > rewrite_max): 
            mask_idxs = mask_idxs[:rewrite_max]
            mask_tokens = mask_tokens[:rewrite_max]
            pass
        if (len(mask_idxs) < rewrite_max): 
            mask_idxs = mask_idxs + [0] * (rewrite_max - len(mask_idxs))
            mask_tokens = mask_tokens + [0] * (rewrite_max - len(mask_tokens))
            pass
        #    expand the NSP flag into a list as long as mask_tokens, because the
        #    dataset later stacks them into one tensor (see parse_tfrecord)
        is_positive = [1] * len(mask_tokens) if is_positive else [0] * len(mask_tokens)
        yield out_tokens, is_positive, sentence_position, mask_idxs, mask_tokens, tokens
        pass
    pass
#    Write the generated samples into rolling tfrecord files.
def save_tfrecord(sample_generator, tfrecord_dir, tfrecord_limit=50000):
    '''
        @param sample_generator: iterable yielding
               (out_tokens, is_positive, sentence_position, mask_idxs, mask_tokens, tokens)
        @param tfrecord_dir: output directory for bert_pre_training_{n}.tfrecord files
        @param tfrecord_limit: max records per tfrecord file before rolling over
    '''
    fcount = 0      #    index of the current output file
    wcount = 0      #    records written to the current file
    total = 0       #    total records written
    #    open the first output file
    fpath = tfrecord_dir + '/bert_pre_training_{}.tfrecord'.format(fcount)
    conf.mkfiledir_ifnot_exises(fpath)
    fw = tf.io.TFRecordWriter(fpath)
    try:
        for out_tokens, is_positive, sentence_position, mask_idxs, mask_tokens, _ in sample_generator:
            feature = {'out_tokens': tf.train.Feature(int64_list=tf.train.Int64List(value=out_tokens)),                     #    sample with [CLS],[SEP],[MASK] added, word-encoded, fixed length
                       'is_positive': tf.train.Feature(int64_list=tf.train.Int64List(value=is_positive)),                   #    NSP flag, 1:positive 0:negative (a list as long as mask_tokens)
                       'sentence_position': tf.train.Feature(int64_list=tf.train.Int64List(value=sentence_position)),       #    segment encoding [1,1,1...,2,2,2...]
                       'mask_idxs': tf.train.Feature(int64_list=tf.train.Int64List(value=mask_idxs)),                       #    indices of the randomly replaced words
                       'mask_tokens': tf.train.Feature(int64_list=tf.train.Int64List(value=mask_tokens))}                   #    word ids of the replaced true words
            record = tf.train.Example(features=tf.train.Features(feature=feature))
            record = record.SerializeToString()
            fw.write(record)
            
            wcount += 1
            total += 1
            
            #    roll over to a new file once the current one is full
            if (wcount >= tfrecord_limit):
                print('写入文件:', fpath, ' wcount:', wcount, 'total:', total)
                fw.close()
                fcount += 1
                wcount = 0
                fpath = tfrecord_dir + '/bert_pre_training_{}.tfrecord'.format(fcount)
                fw = tf.io.TFRecordWriter(fpath)
                pass
            pass
    finally:
        #    BUGFIX: the last (partial) file's writer was never closed, leaking
        #    the handle and risking unflushed records
        fw.close()
    
    print('写入文件:', fpath, ' wcount:', wcount, 'total:', total)
    print('写入文件完成. total:', total)
    pass


#    tf.io feature spec shared by save_tfrecord/parse_tfrecord; every field is
#    a variable-length int64 list
data_fields = {"out_tokens": tf.io.VarLenFeature(tf.int64), 
               "is_positive": tf.io.VarLenFeature(tf.int64),
               'sentence_position': tf.io.VarLenFeature(tf.int64),
               'mask_idxs': tf.io.VarLenFeature(tf.int64),
               'mask_tokens': tf.io.VarLenFeature(tf.int64)}
#    Parse one serialized tf.train.Example into (x, y) tensors.
def parse_tfrecord(serialized_example):
    '''
        @param serialized_example: one serialized record written by save_tfrecord
        @return: x Tensor(2, max_sen_len)   = [out_tokens, sentence_position]
                 y Tensor(3, rewrite_max)   = [is_positive, mask_idxs, mask_tokens]
    '''
    parsed = tf.io.parse_single_example(serialized_example, features=data_fields)
    #    densify every sparse field in one pass
    dense = {name: tf.sparse.to_dense(parsed[name]) for name in data_fields}
    
    #    word ids + segment encoding form x            Tensor(2, max_sen_len)
    x = tf.stack([dense['out_tokens'], dense['sentence_position']], axis=0)
    x = tf.cast(x, dtype=tf.int64)
    #    NSP flag, masked indices, masked word ids form y    Tensor(3, len(mask_idxs))
    y = tf.stack([dense['is_positive'], dense['mask_idxs'], dense['mask_tokens']], axis=0)
    y = tf.cast(y, dtype=tf.int64)
    
    return x, y
#    Build a tf.data pipeline from the tfrecord files in tfrecord_dir.
def tensor_db_from_tfrecord(tfrecord_dir, 
                            batch_size=conf.BERT.get_batch_size(),
                            epochs=conf.BERT.get_epochs(),
                            shuffle_buffer_rate=conf.BERT.get_shuffle_buffer_rate(),
                            tfrecord_buffer_rate=conf.BERT.get_tfrecord_buffer_rate()):
    '''
        @param tfrecord_dir: directory containing *.tfrecord files
        @param batch_size: batch size; <=0 disables batching
        @param epochs: repeat count; <=0 disables repeating
        @param shuffle_buffer_rate: shuffle buffer = rate * batch_size; <=0 disables shuffling
        @param tfrecord_buffer_rate: read buffer = rate * batch_size
        @return: tf.data.Dataset of (x, y) pairs (see parse_tfrecord)
    '''
    #    BUGFIX: glob order is filesystem-dependent; sort for a reproducible
    #    file order across runs and machines
    fpaths = sorted(glob.glob(tfrecord_dir + '/*.tfrecord'))
    db = tf.data.TFRecordDataset(fpaths, buffer_size=tfrecord_buffer_rate * batch_size)
    #    pass parse_tfrecord directly; the lambda wrapper added nothing
    db = db.map(map_func=parse_tfrecord)
    
    if (shuffle_buffer_rate > 0): db = db.shuffle(shuffle_buffer_rate * batch_size)
    if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
    if (epochs > 0): db = db.repeat(epochs)
    return db
