# -*- coding: utf-8 -*-  
'''
搜狐THUCNews数据集

Created on 2021年9月7日
@author: luoyi
'''
import random
import tqdm
import jieba_fast
import tensorflow as tf
import glob
from collections import deque

import utils.conf as conf
from utils.iexicon import Punctuation, WordsWarehouse
from data.sohu_thuc_news.original_dataset import OriginalDataset


#    BERT pre-training dataset (TFRecord writer / reader)
class BertPreTFRecordDataset:
    '''BERT pre-training dataset built from the Sohu THUCNews corpus.
    
    Write side: original articles -> adjacent sentence pairs -> word ids with
    next-sentence labels and MLM masks -> sharded TFRecord files.
    Read side: TFRecord shards -> batched tf.data pipeline of (x, y) tensors.
    '''
    
    #    tfrecord field schema shared by the writer and the parser
    TFRECORD_FIELDS = {
            'wids': tf.io.VarLenFeature(tf.int64),
            'sen_pos': tf.io.VarLenFeature(tf.int64),
            'mask_idx': tf.io.VarLenFeature(tf.int64),
            'mask_wid': tf.io.VarLenFeature(tf.int64),
            'is_positive': tf.io.VarLenFeature(tf.int64)
        }
    
    def __init__(self,
                 dir_path=conf.DATASET_SOHU_THUCNEWS.get_original_dir_path(),
                 count=5000,
                 #    mask-generation probabilities for pre-training
                 neg_prob=conf.BERT.get_neg_prob(),
                 rewrite_prob=conf.BERT.get_rewrite_prob(),
                 rewrite_max=conf.BERT.get_rewrite_max(),
                 rewrite_mask=conf.BERT.get_rewrite_mask(),
                 rewrite_original=conf.BERT.get_rewrite_original(),
                 rewrite_random=conf.BERT.get_rewrite_random(),
                 #    pre-training output directories
                 pre_training_train=conf.DATASET_SOHU_THUCNEWS.get_pre_training_train_path(),
                 pre_training_val=conf.DATASET_SOHU_THUCNEWS.get_pre_training_val_path(),
                 pre_training_train_count=conf.DATASET_SOHU_THUCNEWS.get_pre_training_train_count(),
                 pre_training_val_count=conf.DATASET_SOHU_THUCNEWS.get_pre_training_val_count(),
                 pre_tfrecord_limit=conf.DATASET_SOHU_THUCNEWS.get_pre_tfrecord_limit(),
                 #    max number of words per sample
                 max_sen_len=conf.BERT.get_max_sen_len(),
                 buffer_sen=1024,
                 rdm=None):
        '''BERT pre-training dataset.
            @param dir_path: directory of the original corpus
            @param count: number of articles taken from the original dataset
            @param neg_prob: probability of labeling a pair as a negative sample
            @param rewrite_prob: fraction of tokens to mask per sample
            @param rewrite_max: maximum number of masked tokens per sample
            @param rewrite_original: probability a masked position keeps its original word
            @param rewrite_random: probability a masked position becomes a random vocab word
            @param pre_training_train: pre-training train-set output directory
            @param pre_training_val: pre-training validation-set output directory
            @param pre_training_train_count: number of train samples to write
            @param pre_training_val_count: number of validation samples to write
            @param pre_tfrecord_limit: max number of records per tfrecord shard
            @param max_sen_len: max number of words per sample
            @param buffer_sen: size of the rolling sentence buffer negatives are drawn from
            @param rdm: random generator; defaults to a fresh random.Random(1024).
                        (BUGFIX: a Random object as a default argument was shared
                        across all instances)
        '''
        self._neg_prob = neg_prob
        self._rewrite_prob = rewrite_prob
        self._rewrite_max = rewrite_max
        self._rewrite_mask = rewrite_mask
        self._rewrite_original = rewrite_original
        self._rewrite_random = rewrite_random
        
        conf.mkdir_ifnot_exises(pre_training_train)
        self._pre_training_train = pre_training_train
        self._pre_training_train_count = pre_training_train_count
        
        conf.mkdir_ifnot_exises(pre_training_val)
        self._pre_training_val = pre_training_val
        self._pre_training_val_count = pre_training_val_count
        
        self._max_sen_len = max_sen_len
        self._pre_tfrecord_limit = pre_tfrecord_limit
        
        self._dir_path = dir_path
        self._o_ds = OriginalDataset(dir_path=dir_path, count=count)
        self._buffer_sen = buffer_sen
        self._buffer_sen_deque = deque(maxlen=buffer_sen)
        self._rdm = rdm if (rdm is not None) else random.Random(1024)
    
    
    #############################################################################################################################
    #
    #    TFRecord writing
    #
    #############################################################################################################################
    #    iterate adjacent sentence pairs over every line of every article
    def sen_part_iterator(self, max_empty_line=10):
        '''Iterate raw sentence pairs from the original corpus.
            @param max_empty_line: stop reading a file after this many consecutive empty lines
            @return: generator of (sentence, next_sentence) text pairs
        '''
        #    iterate every file (shuffled order)
        for t, f in self._o_ds.files_iterator_shuffle():
            fpath = self._dir_path + '/' + t + '/' + f
            with open(file=fpath, mode='r', encoding='utf-8') as fr:
                empty_line = 0
                for line in fr:
                    line = line.replace('\t','').replace('\n','').strip()
                    
                    if (len(line) == 0):
                        empty_line += 1
                        if (empty_line > max_empty_line): break
                        continue
                    empty_line = 0
                    
                    #    split on punctuation (punctuation itself is dropped)
                    sens = Punctuation.split_by_sentence(line)
                    #    a pair needs at least two sentences; drop the line otherwise
                    if (len(sens) < 2): continue
                    
                    #    pair up consecutive sentences into disjoint pairs.
                    #    BUGFIX: the original loop never advanced i, yielding
                    #    the same pair forever (infinite loop).
                    #    NOTE(review): disjoint pairs (i += 2) chosen; an
                    #    overlapping sliding window (i += 1) is also plausible.
                    i = 0
                    while i + 1 < len(sens):
                        yield sens[i], sens[i + 1]
                        i += 2
    #    tokenized sentence-pair iterator
    def sen_iterator(self, min_sen_len=10):
        '''Iterate tokenized sentence-pair samples with next-sentence labels.
            @param min_sen_len: drop samples with fewer total tokens than this
            @return: generator of (wid list, sentence-position list, is_positive)
        '''
        for s1, s2 in self.sen_part_iterator():
            #    word segmentation
            s1 = jieba_fast.lcut(s1)
            s2 = jieba_fast.lcut(s2)
            
            #    stash both sentences; negatives are drawn from this buffer
            self._buffer_sen_deque.append(s1)
            self._buffer_sen_deque.append(s2)
            
            is_positive = True
            #    negative sampling only once the buffer is half full
            if (len(self._buffer_sen_deque) >= self._buffer_sen / 2):
                if (self._rdm.random() < self._neg_prob):
                    is_positive = False
                    #    BUGFIX: the original replaced s2 unconditionally, so
                    #    samples labeled positive were mostly random pairs too
                    s2 = self._rdm.choice(self._buffer_sen_deque)
            
            #    join s1/s2 into one token sequence
            #    NOTE(review): downstream comments call the leading token [CLS],
            #    but '[PAD]' is used here -- confirm against the vocabulary
            s1 = ['[PAD]'] + s1 + ['[SEP]']
            s2 = s2 + ['[SEP]']
            
            #    drop samples shorter than min_sen_len; too short to be useful
            if (len(s1) + len(s2) < min_sen_len): continue
            
            #    sentence position ids: 1 for the first sentence, 2 for the second
            pos = [1] * len(s1) + [2] * len(s2)
            #    convert the whole sequence to word ids
            sen = WordsWarehouse.instance().words_to_wids(s1 + s2)
            yield sen, pos, is_positive
    #    full sample iterator (adds MLM masking)
    def sample_iterator(self):
        '''Iterate fully-masked pre-training samples.
            @return: generator of (wids, pos, is_positive, mask_idx, mask_wid)
        '''
        #    hoisted loop invariants: [MASK] wid and vocabulary size
        mask_token_wid = WordsWarehouse.instance().words_to_wids(['[MASK]'])[0]
        vocab_size = WordsWarehouse.instance().words_count()
        for s, pos, is_positive in self.sen_iterator():
            #    number of positions to mask; index 0 (the [CLS] slot) never masked
            mask_num = min(int((len(s) - 1) * self._rewrite_prob), self._rewrite_max)
            
            #    shuffled candidate indices (index 0 excluded)
            s_idx = list(range(1, len(s)))
            self._rdm.shuffle(s_idx)
            
            #    masked positions, and the original wids they held
            mask_idx = []
            mask_wid = []
            for idx in s_idx:
                #    stop once enough positions are masked
                if (len(mask_idx) >= mask_num): break
                
                mask = mask_token_wid
                #    keep the original word with prob rewrite_original,
                #    else use a random vocab word with prob rewrite_random
                if (self._rdm.random() < self._rewrite_original): mask = s[idx]
                elif (self._rdm.random() < self._rewrite_random): mask = self._rdm.randint(a=0, b=vocab_size - 1)
                
                #    apply the mask
                mask_wid.append(s[idx])
                mask_idx.append(idx)
                s[idx] = mask
            
            yield s, pos, is_positive, mask_idx, mask_wid
    
    #    serialize one sample into a tf.train.Example byte string
    @staticmethod
    def _serialize_sample(wids, pos, is_positive, mask_idx, mask_wid):
        '''Build and serialize the tfrecord Example for one sample.'''
        feature = {'wids': tf.train.Feature(int64_list=tf.train.Int64List(value=wids)),
                   'sen_pos': tf.train.Feature(int64_list=tf.train.Int64List(value=pos)),
                   'mask_idx': tf.train.Feature(int64_list=tf.train.Int64List(value=mask_idx)),
                   'mask_wid': tf.train.Feature(int64_list=tf.train.Int64List(value=mask_wid)),
                   'is_positive': tf.train.Feature(int64_list=tf.train.Int64List(value=is_positive))}
        record = tf.train.Example(features=tf.train.Features(feature=feature))
        return record.SerializeToString()
    
    #    write samples as sharded tfrecord files
    def write_tfrecord_sample(self, path, count, desc='写入tfrecord'):
        '''Write up to count samples under path, pre_tfrecord_limit records per shard.
            @param path: output directory
            @param count: total number of samples to write
            @param desc: tqdm progress-bar description
        '''
        tc = 0                          #    total records written
        fc = 0                          #    records written to the current shard
        fidx = 0                        #    current shard index
        fw = tf.io.TFRecordWriter(path + '/bert_pre_training_{}.tfrecord'.format(fidx))
        try:
            for wids, pos, is_positive, mask_idx, mask_wid in tqdm.tqdm(self.sample_iterator(), total=count, desc=desc, leave=True, ncols=100):
                #    is_positive stored as a length-1 int list ([1] or [0])
                is_positive = [1] if (is_positive) else [0]
                
                fw.write(self._serialize_sample(wids, pos, is_positive, mask_idx, mask_wid))
                
                tc += 1
                if (tc >= count): break
                
                #    roll over to a new shard once the per-file limit is reached
                fc += 1
                if (fc >= self._pre_tfrecord_limit):
                    fw.close()
                    fidx += 1
                    fc = 0
                    fw = tf.io.TFRecordWriter(path + '/bert_pre_training_{}.tfrecord'.format(fidx))
        finally:
            #    BUGFIX(robustness): close the last writer even if iteration raises
            fw.close()
    
    #    write train and validation sets
    def write_tfrecord(self):
        '''Write the pre-training train set and validation set.'''
        #    train set
        self.write_tfrecord_sample(path=self._pre_training_train, 
                                   count=self._pre_training_train_count, 
                                   desc='写入tfrecord训练集')
        #    validation set
        self.write_tfrecord_sample(path=self._pre_training_val, 
                                   count=self._pre_training_val_count, 
                                   desc='写入tfrecord验证集')
    
    
    #############################################################################################################################
    #
    #    TFRecord reading
    #
    #############################################################################################################################
    #    build a tf.data pipeline from the tfrecord shards under path
    def tensor_db(self,
                  path,
                  batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                  epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate(),
                  tfrecord_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_tfrecord_buffer_rate()):
        '''Read tfrecord shards into a batched tf.data.Dataset of (x, y).
            @param path: directory containing bert_pre_training_*.tfrecord shards
            @param batch_size: batch size (<= 0 disables batching)
            @param epochs: repeat count (<= 0 disables repeating)
            @param shuffle_buffer_rate: shuffle buffer = rate * batch_size (<= 0 disables)
            @param tfrecord_buffer_rate: parallel reads = rate * batch_size
        '''
        fpaths = glob.glob(path + '/bert_pre_training_*.tfrecord')
        db = tf.data.TFRecordDataset(filenames=fpaths, buffer_size=batch_size, num_parallel_reads=tfrecord_buffer_rate * batch_size)
        db = db.map(map_func=lambda s:self.parse_tfrecord(s))
        
        if (shuffle_buffer_rate > 0): db = db.shuffle(shuffle_buffer_rate * batch_size)
        if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
        if (epochs > 0): db = db.repeat(epochs)
        return db
    
    #    parse a single serialized record
    def parse_tfrecord(self, sample, pad_idx=None):
        '''Parse one serialized Example into (x, y) tensors.
            @param sample: serialized tf.train.Example
            @param pad_idx: wid list used for padding; defaults to the [PAD] wid.
                            (BUGFIX: previously evaluated at class-definition time,
                            forcing WordsWarehouse to load on import)
            @return: x Tensor(2, max_sen_len) -- row 0: wids ([PAD] = padding),
                                                 row 1: sentence ids (0 = padding)
                     y Tensor(3, rewrite_max) -- row 0: is_positive (1 pos / 0 neg),
                                                 row 1: mask_idx, row 2: mask_wid
        '''
        if (pad_idx is None): pad_idx = WordsWarehouse.instance().words_to_wids(['[PAD]'])
        parsed = tf.io.parse_single_example(sample, features=BertPreTFRecordDataset.TFRECORD_FIELDS)
        
        #    x data Tensor(max_sen_len, )
        wids = tf.sparse.to_dense(parsed['wids'])
        sen_pos = tf.sparse.to_dense(parsed['sen_pos'])
        #    y data Tensor(rewrite_max, )
        mask_idx = tf.sparse.to_dense(parsed['mask_idx'])
        mask_wid = tf.sparse.to_dense(parsed['mask_wid'])
        is_positive = tf.sparse.to_dense(parsed['is_positive'])
        
        #    pad wids / sen_pos up to max_sen_len
        wids = tf.pad(tensor=wids[tf.newaxis, :], paddings=[[0, 0], [0, self._max_sen_len - tf.shape(wids)[0]]], constant_values=pad_idx[0])
        sen_pos = tf.pad(tensor=sen_pos[tf.newaxis, :], paddings=[[0, 0], [0, self._max_sen_len - tf.shape(sen_pos)[0]]], constant_values=0)
        #    pad mask_idx / mask_wid up to rewrite_max (-1 marks padding)
        mask_idx = tf.pad(tensor=mask_idx[tf.newaxis, :], paddings=[[0, 0], [0, self._rewrite_max - tf.shape(mask_idx)[0]]], constant_values=-1)
        mask_wid = tf.pad(tensor=mask_wid[tf.newaxis, :], paddings=[[0, 0], [0, self._rewrite_max - tf.shape(mask_wid)[0]]], constant_values=-1)
        #    broadcast the single is_positive flag to rewrite_max so y stacks cleanly
        is_positive = tf.repeat(input=is_positive, repeats=self._rewrite_max, axis=-1)[tf.newaxis, :]
        
        x = tf.concat([wids, sen_pos], axis=0)
        y = tf.concat([is_positive, mask_idx, mask_wid], axis=0)
        return x, y
    #    train-set pipeline
    def tensor_db_train(self,
                        batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                        epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                        shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate(),
                        tfrecord_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_tfrecord_buffer_rate()):
        '''tf.data pipeline over the pre-training train set.'''
        return self.tensor_db(path=self._pre_training_train, 
                              batch_size=batch_size, 
                              epochs=epochs, 
                              shuffle_buffer_rate=shuffle_buffer_rate, 
                              tfrecord_buffer_rate=tfrecord_buffer_rate)
    #    validation-set pipeline
    def tensor_db_val(self,
                      batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                      epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                      shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate(),
                      tfrecord_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_tfrecord_buffer_rate()):
        '''tf.data pipeline over the pre-training validation set.'''
        return self.tensor_db(path=self._pre_training_val, 
                              batch_size=batch_size, 
                              epochs=epochs, 
                              shuffle_buffer_rate=shuffle_buffer_rate, 
                              tfrecord_buffer_rate=tfrecord_buffer_rate)


