# -*- coding: utf-8 -*-  
'''
lda和gsdmm用的数据集
    - 都是词id，和一块写了

Created on 2021年9月8日
@author: luoyi
'''
import jieba_fast
import glob
import tensorflow as tf

import utils.conf as conf
from utils.iexicon import StopWords, LiteWordsWarehouse, Encoder
from data.sohu_thuc_news.original_dataset import OriginalDataset


#    LDA and GSDMM dataset
class LdaGSDmmPreDataset:
    """Pre-processing dataset for LDA and GSDMM topic models.

    Tokenizes articles from the Sohu/THUCNews corpus into rotating word
    files (one ``topic w1 w2 ...`` line per document), and exposes an
    iterator over (topic, word_ids) plus a fixed-length tf.data pipeline.
    """

    def __init__(self,
                 dir_path=conf.DATASET_SOHU_THUCNEWS.get_original_dir_path(),
                 count=5000,
                 words_path=conf.DATASET_SOHU_THUCNEWS.get_words_path(),
                 words_count=conf.DATASET_SOHU_THUCNEWS.get_words_count(),
                 max_doc_words=conf.LDA.get_max_doc_words()):
        """
        :param dir_path: directory holding the original news corpus
        :param count: number of articles to read (forwarded to OriginalDataset)
        :param words_path: output path template; must contain a '{}'
            placeholder for the rotating file index
        :param words_count: maximum number of documents per output file
        :param max_doc_words: fixed document length for tensor samples
            (shorter docs are padded with -1, longer ones truncated)
        """
        conf.mkfiledir_ifnot_exises(words_path)
        self._words_path = words_path
        self._words_count = words_count
        self._max_doc_words = max_doc_words

        self._ods = OriginalDataset(dir_path=dir_path, count=count)

    #    Write the tokenized word dataset to disk.
    def write_words_dataset(self):
        """Tokenize every article and write one 'topic w1 w2 ...' line each.

        Output rotates across numbered files: after ``self._words_count``
        documents the current file is closed and the next index is opened.
        """
        _, _, num_d = self._ods.stat_files()

        fc = 0      # documents written into the current output file
        fidx = 0    # index of the current output file
        fw = open(file=self._words_path.format(fidx), mode='w', encoding='utf-8')
        total = 0
        #    Iterate every article (topic label + list of sentences).
        for topic, sens in self._ods.news_iterator_shuffle():
            doc = []
            #    Tokenize each sentence, drop stop words and non-UTF-8 words.
            for sen in sens:
                words = jieba_fast.lcut(sen)
                words = StopWords.instance().filter(words)
                # FIX: the original condition was inverted and kept ONLY the
                # non-UTF-8 words; keep the valid UTF-8 words instead.
                words = [w for w in words if Encoder.is_utf8(w)]
                doc += words
            fw.write(topic + ' ' + ' '.join(doc) + '\n')

            fc += 1
            total += 1
            #    Rotate to the next output file once the quota is reached.
            if fc >= self._words_count:
                fc = 0
                fidx += 1
                fw.close()
                fw = open(file=self._words_path.format(fidx), mode='w', encoding='utf-8')
        fw.close()
        print('词数据集写入完成. total:', total, ' num_d:', num_d)

    #    Iterate the words of every document.
    def words_iterator(self,
                      words_path=conf.DATASET_SOHU_THUCNEWS.get_words_path(),
                      count=-1,
                      max_empty_line=10):
        """Yield ``(topic, word_ids)`` for every document in the word files.

        :param words_path: path template of the word files ('{}' is expanded
            to '*' and globbed)
        :param count: stop after this many documents (<= 0 means no limit)
        :param max_empty_line: skip the rest of a file after this many
            consecutive empty lines
        """
        fpaths = glob.glob(words_path.format('*'))
        total = 0
        for fpath in fpaths:
            with open(file=fpath, mode='r', encoding='utf-8') as fr:
                line = fr.readline()
                empty_line = 0
                while line:
                    line = line.replace('\n', '').replace('\t', '').strip()

                    if len(line) == 0:
                        empty_line += 1
                        # Too many consecutive blanks: give up on this file.
                        if empty_line >= max_empty_line:
                            break
                    else:
                        empty_line = 0

                        #    First token is the topic label, the rest are words.
                        words = line.split(' ')
                        topic = words[0]
                        wids = LiteWordsWarehouse.instance().words_to_wids(words[1:])

                        yield topic, wids

                        total += 1
                        if count > 0 and total >= count:
                            break

                    line = fr.readline()

            if count > 0 and total >= count:
                break

    #    tensor_iterator
    def tensor_iterator(self,
                        words_path=conf.DATASET_SOHU_THUCNEWS.get_words_path(),
                        count=-1):
        """Yield ``(x, y)`` tensor pairs of fixed length self._max_doc_words.

        x is the word-id sequence truncated / padded with -1; y is a dummy
        constant label (training is unsupervised).
        """
        # FIX: words_iterator yields (topic, wids) tuples; the original bound
        # the whole tuple to `wids`, breaking padding and tensor conversion.
        for _, wids in self.words_iterator(words_path, count):
            #    Truncate to max_doc_words, or pad with -1 up to that length.
            if len(wids) > self._max_doc_words:
                wids = wids[: self._max_doc_words]
            elif len(wids) < self._max_doc_words:
                wids = wids + [-1] * (self._max_doc_words - len(wids))

            #    Word ids form the x sample.
            x = tf.convert_to_tensor(wids, dtype=tf.int32)
            #    Unsupervised: constant dummy label.
            y = tf.convert_to_tensor(1, dtype=tf.int8)

            yield x, y

    #    tf.data dataset
    def tensor_db(self,
                  words_path=conf.DATASET_SOHU_THUCNEWS.get_words_path(),
                  count=-1,
                  batch_size=conf.DATASET_SOHU_THUCNEWS.get_batch_size(),
                  epochs=conf.DATASET_SOHU_THUCNEWS.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_SOHU_THUCNEWS.get_shuffle_buffer_rate()):
        """Build a tf.data.Dataset over :meth:`tensor_iterator`.

        Shuffling, batching (``drop_remainder=True``) and repeating are each
        applied only when the corresponding argument is > 0.
        """
        x_shape = tf.TensorShape((self._max_doc_words,))
        y_shape = tf.TensorShape(())

        db = tf.data.Dataset.from_generator(
            generator=lambda: self.tensor_iterator(words_path=words_path, count=count),
            output_types=(tf.int32, tf.int8),
            output_shapes=(x_shape, y_shape))

        if shuffle_buffer_rate > 0:
            db = db.shuffle(shuffle_buffer_rate * batch_size)
        if batch_size > 0:
            db = db.batch(batch_size, drop_remainder=True)
        if epochs > 0:
            db = db.repeat(epochs)
        return db

