# -*- coding: utf-8 -*-
'''
TBert data source.

Created on 2021-09-12
@author: luoyi
'''
import glob
import tqdm
import tensorflow as tf
import numpy as np
import jieba_fast

import utils.conf as conf
from utils.iexicon import StopWords, LiteWordsWarehouse, WordsWarehouse
from data.sohu_thuc_news.original_dataset import OriginalDataset


#    TBert dataset backed by rolling tfrecord files
class TBertTFRecordDataset:
    '''Builds, serializes and reloads the TBert training dataset.

    Pipeline: original news documents -> jieba segmentation -> bert token
    ids / sentence-position ids plus lda & dmm topic predictions ->
    rolling tfrecord files -> tf.data datasets yielding
    (x: Tensor(4, max_sen_len), y: topic id).
    '''
    #    tfrecord file name template; {} is filled with a rolling file index
    TFRECORD_FNAME = 'tbert_training_{}.tfrecord'
    
    
    def __init__(self,
                 dir_path=conf.DATASET_SOHU_THUCNEWS.get_original_dir_path(),
                 count=-1,
                 lda=None,
                 dmm=None,
                 max_sen=conf.TBERT.get_max_sen(),
                 max_sen_len=conf.TBERT.get_max_sen_len(),
                 ):
        '''
        @param dir_path: directory holding the original news corpus
        @param count: number of documents to read (-1 means all)
        @param lda: trained lda model; its divination() predicts topics
        @param dmm: trained dmm model; its divination() predicts topics
        @param max_sen: max number of sentences per document kept for bert
        @param max_sen_len: fixed token length samples are padded/cut to
        '''
        self._dir_path = dir_path
        self._count = count
        
        self._lda = lda
        self._dmm = dmm
        
        self._max_sen = max_sen
        self._max_sen_len = max_sen_len
        
        self._ods = OriginalDataset(dir_path=dir_path, count=count)
    
    
    #    iterate documents, producing bert / lda / dmm samples
    def data_iterator(self):
        '''
            @yield tid, bert_words, bert_sen, topic_lda, topic_dmm
                   topic id, bert token ids (variable length), bert sentence
                   ids (variable length), lda topic prediction, dmm topic
                   prediction
        '''
        #    walk every document (shuffled)
        for topic, sens in self._ods.news_iterator_shuffle():
            bert_words = ['[CLS]']      # tokens used for bert training
            topic_words = []            # tokens used for topic-model prediction
            bert_sen = [1]              # sentence position ids; 1 is for [CLS]
            #    collect tokens of every sentence
            for i, sen in enumerate(sens):
                #    segment once; bert and the topic models only differ in
                #    the vocabulary the tokens are mapped through afterwards
                words = jieba_fast.lcut(sen)
                #    sentences past max_sen are dropped for bert only
                if (i < self._max_sen):
                    bert_words += words + ['[SEP]']
                    #    FIX: the appended [SEP] needs a sentence id too,
                    #    otherwise bert_words and bert_sen drift out of sync
                    #    by one entry per sentence
                    bert_sen += [i + 1] * (len(words) + 1)
                #    topic models do not want stop words
                topic_words += StopWords.instance().filter(words)
            
            #    map bert tokens to word ids
            bert_wids = WordsWarehouse.instance().words_to_wids(bert_words)
            #    map topic tokens to word ids (lite vocabulary)
            topic_wids = LiteWordsWarehouse.instance().words_to_wids(topic_words)
            topic_wids = np.array([topic_wids])
            #    predict lda and dmm topics for the document
            topic_lda, _ = self._lda.divination(topic_wids, max_doc_words=self._max_sen_len)
            topic_dmm = self._dmm.divination(topic_wids)
            
            yield TOPIC_ID_DICT[topic], bert_wids, bert_sen, topic_lda, topic_dmm
    
    #    write the train / validation tfrecord files
    def write_tfrecord(self,
                       tbert_training_train_path=conf.DATASET_SOHU_THUCNEWS.get_tbert_training_train_path(),
                       tbert_training_train_count=conf.DATASET_SOHU_THUCNEWS.get_tbert_training_train_count(),
                       tbert_training_val_path=conf.DATASET_SOHU_THUCNEWS.get_tbert_training_val_path(),
                       tbert_training_val_count=conf.DATASET_SOHU_THUCNEWS.get_tbert_training_val_count(),
                       tbert_tfrecord_limit=conf.DATASET_SOHU_THUCNEWS.get_tbert_tfrecord_limit()):
        '''Write the training then validation sets from one shared iterator,
        so the two sets never contain the same document.
        @param tbert_training_train_path: output directory for the train set
        @param tbert_training_train_count: number of train samples
        @param tbert_training_val_path: output directory for the val set
        @param tbert_training_val_count: number of val samples
        @param tbert_tfrecord_limit: max samples per single tfrecord file
        '''
        _, _, doc_count = self._ods.stat_files()
        assert tbert_training_train_count + tbert_training_val_count <= doc_count, '训练集数量({})+验证集数量({}) 已经超过 总文件数({}) 不够分'.format(tbert_training_train_count, tbert_training_val_count, doc_count)
        
        #    full file paths (file-index placeholder still unfilled)
        train_fpath = tbert_training_train_path + '/' + TBertTFRecordDataset.TFRECORD_FNAME
        val_fpath = tbert_training_val_path + '/' + TBertTFRecordDataset.TFRECORD_FNAME
        conf.mkfiledir_ifnot_exises(train_fpath)
        conf.mkfiledir_ifnot_exises(val_fpath)
        
        #    one iterator shared by both writes: val continues where train stopped
        data_iterator = self.data_iterator()
        
        print('写入训练集...')
        total = self.write_tfrecord_by_iterator(train_fpath, data_iterator, tbert_training_train_count, tbert_tfrecord_limit)
        print('写入训练集完成，total:', total)
        print('写入验证集...')
        total = self.write_tfrecord_by_iterator(val_fpath, data_iterator, tbert_training_val_count, tbert_tfrecord_limit)
        print('写入验证集完成，total:', total)
    
    def write_tfrecord_by_iterator(self, fpath, news, count, limit):
        '''Consume up to `count` samples from `news` and write them into
        rolling tfrecord files.
        @param fpath: file path template containing one {} for the file index
        @param news: iterator of (tid, bert_words, bert_sen, topic_lda, topic_dmm)
        @param count: stop after this many samples (<=0 means unbounded)
        @param limit: max samples per tfrecord file before rolling over
        @return total number of samples written
        '''
        fidx = 0
        fc = 0
        total = 0
        fw = tf.io.TFRecordWriter(fpath.format(fidx))
        try:
            #    serialize each sample as a tf.train.Example
            for tid, bert_words, bert_sen, topic_lda, topic_dmm in tqdm.tqdm(iterable=news, desc='write_tfrecord', total=count, leave=True, ncols=100):
                feature = {'tid': tf.train.Feature(int64_list=tf.train.Int64List(value=[tid])),
                           'bert_words': tf.train.Feature(int64_list=tf.train.Int64List(value=bert_words)),
                           'bert_sen': tf.train.Feature(int64_list=tf.train.Int64List(value=bert_sen)),
                           'topic_lda': tf.train.Feature(int64_list=tf.train.Int64List(value=topic_lda)),
                           'topic_dmm': tf.train.Feature(int64_list=tf.train.Int64List(value=topic_dmm))}
                record = tf.train.Example(features=tf.train.Features(feature=feature))
                fw.write(record.SerializeToString())
                
                fc += 1
                total += 1
                #    current file is full: roll over to the next file index
                if (fc >= limit):
                    fw.close()
                    fc = 0
                    fidx += 1
                    fw = tf.io.TFRecordWriter(fpath.format(fidx))
                if (count > 0 and total >= count): break
        finally:
            #    FIX: the last writer was never closed, which leaked the
            #    handle and could leave buffered records unflushed
            fw.close()
        return total
    
    
    #    feature schema for parsing serialized examples
    TFRECORD_FIELDS = {
            'tid': tf.io.VarLenFeature(tf.int64),
            'bert_words': tf.io.VarLenFeature(tf.int64),
            'bert_sen': tf.io.VarLenFeature(tf.int64),
            'topic_lda': tf.io.VarLenFeature(tf.int64),
            'topic_dmm': tf.io.VarLenFeature(tf.int64)
        }
    #    parse one tfrecord example
    def parse_tfrecord(self, s):
        '''Parse one serialized example into a training pair.
        @param s: serialized tf.train.Example
        @return x: Tensor(4, max_sen_len), rows are bert_words, bert_sen,
                   topic_lda, topic_dmm
                y: int64 scalar topic id
        NOTE(review): the tensor-valued `if` conditions below rely on
        AutoGraph conversion when this runs inside dataset.map — confirm
        they trace as intended.
        '''
        parsed = tf.io.parse_single_example(s, features=TBertTFRecordDataset.TFRECORD_FIELDS)
        
        #    x features, variable length Tensor( ,) each
        bert_words = tf.sparse.to_dense(parsed['bert_words'])
        bert_sen = tf.sparse.to_dense(parsed['bert_sen'])
        topic_lda = tf.sparse.to_dense(parsed['topic_lda'])
        topic_dmm = tf.sparse.to_dense(parsed['topic_dmm'])
        #    y feature (topic id)
        tid = tf.sparse.to_dense(parsed['tid'])
        
        #    cut / zero-pad bert_words to max_sen_len, shape (1, max_sen_len)
        if (tf.shape(bert_words)[0] >= self._max_sen_len): bert_words = bert_words[:self._max_sen_len][tf.newaxis, :]
        elif (tf.shape(bert_words)[0] < self._max_sen_len): bert_words = tf.pad(bert_words[tf.newaxis, :], paddings=[[0, 0], [0, self._max_sen_len - tf.shape(bert_words)[0]]], constant_values=0)
        #    cut / zero-pad bert_sen to max_sen_len, shape (1, max_sen_len)
        if (tf.shape(bert_sen)[0] >= self._max_sen_len): bert_sen = bert_sen[:self._max_sen_len][tf.newaxis, :]
        elif (tf.shape(bert_sen)[0] < self._max_sen_len): bert_sen = tf.pad(bert_sen[tf.newaxis, :], paddings=[[0, 0], [0, self._max_sen_len - tf.shape(bert_sen)[0]]], constant_values=0)
        #    pad topic rows with -1; assumes their stored length never
        #    exceeds max_sen_len (a negative pad width would fail) — TODO confirm
        topic_lda = tf.pad(topic_lda[tf.newaxis, :], paddings=[[0, 0], [0, self._max_sen_len - tf.shape(topic_lda)[0]]], constant_values=-1)
        topic_dmm = tf.pad(topic_dmm[tf.newaxis, :], paddings=[[0, 0], [0, self._max_sen_len - tf.shape(topic_dmm)[0]]], constant_values=-1)
        
        #    stack x rows into Tensor(4, max_sen_len)
        x = tf.concat([bert_words, bert_sen, topic_lda, topic_dmm], axis=0)
        x = tf.cast(x, dtype=tf.int64)
        #    y is the scalar topic id
        y = tf.convert_to_tensor(tid[0], dtype=tf.int64)
        return x, y
    #    build a tf.data pipeline over the tfrecord files under `path`
    def tensor_db(self,
                  path,
                  batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                  epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate(),
                  tfrecord_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_tfrecord_buffer_rate()):
        '''
        @param path: directory containing tbert_training_*.tfrecord files
        @param batch_size: batch size (<=0 disables batching)
        @param epochs: repeat count (<=0 disables repeating)
        @param shuffle_buffer_rate: shuffle buffer = rate * batch_size (<=0 disables)
        @param tfrecord_buffer_rate: parallel reads = rate * batch_size
        @return tf.data.Dataset of (x:(batch, 4, max_sen_len), y:(batch,))
        '''
        fpaths = glob.glob(path + '/' + TBertTFRecordDataset.TFRECORD_FNAME.format('*'))
        db = tf.data.TFRecordDataset(filenames=fpaths, buffer_size=batch_size, num_parallel_reads=tfrecord_buffer_rate * batch_size)
        db = db.map(map_func=lambda s:self.parse_tfrecord(s))
        
        if (shuffle_buffer_rate > 0): db = db.shuffle(shuffle_buffer_rate * batch_size)
        if (batch_size > 0): db = db.batch(batch_size, drop_remainder=True)
        if (epochs > 0): db = db.repeat(epochs)
        return db
    #    training set pipeline
    def tensor_train_db(self,
                        path=conf.DATASET_SOHU_THUCNEWS.get_tbert_training_train_path(),
                        batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                        epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                        shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate(),
                        tfrecord_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_tfrecord_buffer_rate()):
        return self.tensor_db(path, batch_size, epochs, shuffle_buffer_rate, tfrecord_buffer_rate)
    #    validation set pipeline
    def tensor_val_db(self,
                        path=conf.DATASET_SOHU_THUCNEWS.get_tbert_training_val_path(),
                        batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                        epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                        shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate(),
                        tfrecord_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_tfrecord_buffer_rate()):
        return self.tensor_db(path, batch_size, epochs, shuffle_buffer_rate, tfrecord_buffer_rate)


#    topic name <-> id mappings, derived from one ordered tuple so the two
#    dicts can never disagree
_TOPIC_NAMES = ('财经', '彩票', '房产', '股票', '家居', '教育', '科技',
                '社会', '时尚', '时政', '体育', '星座', '游戏', '娱乐')
TOPIC_ID_DICT = {name: idx for idx, name in enumerate(_TOPIC_NAMES)}
ID_TOPIC_DICT = dict(enumerate(_TOPIC_NAMES))

