# -*- coding: utf-8 -*-  
'''
搜狐新闻数据集

Created on 2021年8月19日
@author: luoyi
'''
import glob
import jieba_fast
import pickle
import tensorflow as tf

import utils.conf as conf
from utils.iexicon import StopWords, Encoder, LiteWordsWarehouse


#    The "reduced" Sohu-news dataset
class DatasetReduced:
    """Iterator over the Sohu-news "reduced" corpus.

    Each ``*.txt`` file holds pseudo-XML records, one tag per line::

        <doc>
        <url>...</url>
        <docno>...</docno>
        <contenttitle>...</contenttitle>
        <content>...</content>
        </doc>
    """

    def __init__(self,
                 reduced_dir=conf.DATASET_SOHU_NEWS.get_reduced_dir_path(),
                 max_empty_line=10
                 ):
        #    Directory containing the reduced *.txt files.
        self._reduced_dir = reduced_dir
        #    Stop scanning a file after this many consecutive empty lines.
        self._max_empty_line = max_empty_line
        pass

    #    Iterate over every news item.
    def news_iterator(self):
        """Yield ``(url, docno, contenttitle, content)`` for each news record.

        Records whose content is missing or empty are skipped.
        """
        files = glob.glob(self._reduced_dir + '/*.txt')

        #    Walk every file.
        for file in files:
            with open(file, mode='r', encoding='gb18030', errors='ignore') as fr:
                num_empty_line = 0
                #    Fields of the record currently being assembled.
                url, docno, contenttitle, content = None, None, None, None
                for raw_line in fr:
                    line = raw_line.replace('\r', '').replace('\n', '').strip()

                    #    BUGFIX: handle empty lines before tag dispatch — the
                    #    original fell into the final else and printed an
                    #    "unknown field" message for every blank line.
                    if (len(line) == 0):
                        num_empty_line += 1
                        if (num_empty_line > self._max_empty_line): break
                        continue
                    num_empty_line = 0

                    #    Record start: reset current fields.
                    if (line.startswith('<doc>')): url, docno, contenttitle, content = None, None, None, None
                    elif (line.startswith('<url>')): url = line[5:-6]
                    elif (line.startswith('<docno>')): docno = line[7:-8]
                    elif (line.startswith('<contenttitle>')): contenttitle = line[14:-15]
                    elif (line.startswith('<content>')): content = line[9:-10]
                    elif (line.startswith('</doc>')):
                        #    BUGFIX: content may still be None when the record
                        #    had no <content> line — the original crashed on
                        #    len(None). Only yield non-empty content.
                        if (content):
                            yield (url, docno, contenttitle, content)
                            pass
                        url, docno, contenttitle, content = None, None, None, None
                    else:
                        print('读到了未知字段:', line)
                        pass
                    pass
                pass
            pass
        pass

    pass


#    Writer for the segmented "docs" dataset
class DatasetDocsWriter:
    """Segments every news article with jieba and writes the space-joined
    words to sharded text files (one article per line, a new shard every
    ``words_count`` articles)."""

    def __init__(self,
                 ds_reduced=None,
                 words_path=conf.DATASET_SOHU_NEWS.get_words_path(),
                 words_count=conf.DATASET_SOHU_NEWS.get_words_count()):
        #    Source iterator (DatasetReduced) yielding (url, docno, title, content).
        self._ds_reduced = ds_reduced
        #    Output path template, formatted with the shard index.
        self._words_path = words_path
        #    Maximum number of documents per shard file.
        self._words_count = words_count

        #    Shared stop-word list.
        self._stop_words = StopWords.instance()
        pass

    #    Write the segmented corpus to shard files.
    def write_words(self):
        """Segment every article and write it, rolling over to a new shard
        file whenever the current one reaches ``words_count`` lines."""
        num_file = 0        # documents written into the current shard
        num_total = 0       # documents written overall
        idx_file = 0        # current shard index
        fpath = self._words_path.format(idx_file)
        fw = open(file=fpath, mode='w', encoding='utf-8')
        #    BUGFIX: the original leaked the open handle when the iterator or
        #    segmentation raised; the finally below guarantees it is closed.
        try:
            #    Walk every news article.
            for (_, _, _, content) in self._ds_reduced.news_iterator():
                #    Normalize full-width characters to half-width.
                content = Encoder.q_to_b(content)

                #    Segment the article and drop stop words.
                words = jieba_fast.lcut(content)
                words = self._stop_words.filter(words)
                #    NOTE(review): the original comment says "drop non-utf8
                #    words", yet this keeps words where Encoder.is_utf8() is
                #    False — confirm the intended polarity against Encoder.
                words = list(filter(lambda w: not Encoder.is_utf8(w), words))

                fw.write(' '.join(words) + '\n')
                num_file += 1
                num_total += 1

                #    Shard full: close it and open the next one.
                if (num_file >= self._words_count):
                    print('写满一个文件. fpath:', fpath, " num_file:", num_file)
                    idx_file += 1
                    num_file = 0

                    fw.close()
                    fpath = self._words_path.format(idx_file)
                    fw = open(file=fpath, mode='w', encoding='utf-8')
                    pass
                pass
        finally:
            fw.close()
        print('写入文件完成. num_total:', num_total)
        pass
    pass


#    Reader for the segmented "docs" dataset
class DatasetDocsReader:
    """Reads the sharded word files produced by DatasetDocsWriter and builds
    the word/id and id/frequency vocabularies."""

    #    word-id mapping file names
    FNAME_WORD_WID = 'word_id.pkl'
    FNAME_WID_WORD = 'id_word.pkl'
    #    word-frequency file names
    FNAME_WORD_FREQUENCY = 'wid_frequency.pkl'
    FNAME_WORD_FREQUENCY_TXT = 'wid_frequency.txt'

    def __init__(self,
                 words_dir=conf.DATASET_SOHU_NEWS.get_train_words_path(),
                 count=-1,
                 max_empty_line=10):
        #    Directory holding the shard files.
        self._words_dir = words_dir
        #    Total number of documents to yield; <= 0 means one full pass.
        self._count = count
        #    Stop scanning a file after this many consecutive empty lines.
        self._max_empty_line = max_empty_line
        pass

    #    Iterate over every document's word list.
    def words_iterator(self):
        """Yield the list of words of each document line.

        When ``count > 0`` and the corpus is smaller than ``count``, reading
        wraps around to the first file until ``count`` documents were yielded.
        """
        fpaths = glob.glob(self._words_dir + '/*')
        num_words = 0
        i = 0
        while (i < len(fpaths)):
            fpath = fpaths[i]
            i += 1
            num_empty_line = 0
            #    Walk every line of the current shard.
            with open(file=fpath, mode='r', encoding='utf-8') as fr:
                for raw_line in fr:
                    tline = raw_line.replace('\r', '').replace('\n', '').strip()

                    if (len(tline) == 0):
                        num_empty_line += 1
                        if (num_empty_line >= self._max_empty_line): break
                        pass
                    else:
                        num_empty_line = 0
                        yield tline.split(' ')

                        #    Stop once the requested number of documents was read.
                        num_words += 1
                        if (self._count > 0 and num_words >= self._count):
                            i = len(fpaths)     # break the outer loop condition
                            break
                        pass
                    pass
                pass

            #    If the last file was finished but the total is still short,
            #    restart from the first file until count is satisfied.
            #    BUGFIX: the original tested i >= len(fpaths) - 1, which
            #    wrapped around one file early and never read the last shard.
            if (self._count > 0
                    and i >= len(fpaths)
                    and num_words < self._count):
                i = 0
                pass
            pass
        pass


    #    Count vocabulary and frequencies over the training set and persist them.
    def statistics(self,
                   word_id_path=conf.DATASET_SOHU_NEWS.get_word_id_path(),
                   word_frequency_path=conf.DATASET_SOHU_NEWS.get_word_frequency_path(),
                   min_word_frequency=10):
        '''Scan the training set and generate the word-id, id-word and
        wid-frequency files.

        Returns the vocabulary size (number of words whose frequency is at
        least ``min_word_frequency``).
        '''
        word_id_dict = {}                   #    word -> id
        word_frequency_dict = {}            #    word -> frequency
        #    First pass: count raw word frequencies.
        for words in self.words_iterator():
            for word in words:
                word_frequency_dict[word] = word_frequency_dict.get(word, 0) + 1
                pass
            pass

        #    Sort by frequency, descending (so more frequent words get smaller ids).
        l = sorted(word_frequency_dict.items(), key=lambda item: item[1], reverse=True)
        #    BUGFIX: the original inserted wid keys back into word_frequency_dict,
        #    so the pickled "wid_frequency" file contained a mix of word keys and
        #    wid keys. Keep wid -> frequency in its own dict.
        wid_frequency_dict = {}             #    wid -> frequency
        with open(file=word_frequency_path + '/' + DatasetDocsReader.FNAME_WORD_FREQUENCY_TXT, mode='w', encoding='utf-8') as fw:
            for item in l:
                word = item[0]              #    word
                frequency = item[1]         #    frequency
                fw.write('{} : {}'.format(word, frequency) + '\n')

                #    Only words at or above the minimum frequency enter the dictionary.
                if (frequency >= min_word_frequency):
                    wid = len(word_id_dict)
                    word_id_dict[word] = wid
                    wid_frequency_dict[wid] = frequency
                    pass
                pass
            pass
        id_word_dict = {v: k for k, v in word_id_dict.items()}

        #    Persist the word-id / id-word mappings.
        with open(file=word_id_path + '/' + DatasetDocsReader.FNAME_WORD_WID, mode='wb') as fw: pickle.dump(word_id_dict, fw)
        with open(file=word_id_path + '/' + DatasetDocsReader.FNAME_WID_WORD, mode='wb') as fw: pickle.dump(id_word_dict, fw)
        #    Persist the wid-frequency mapping.
        with open(file=word_frequency_path + '/' + DatasetDocsReader.FNAME_WORD_FREQUENCY, mode='wb') as fw: pickle.dump(wid_frequency_dict, fw)

        return len(id_word_dict)
    pass


#    Tensor dataset used for training
class TensorWordsDataset:
    """Exposes the sharded word files as ``tf.data`` pipelines of
    (word-id vector, dummy label) pairs for unsupervised training."""

    def __init__(self,
                 train_words_path=conf.DATASET_SOHU_NEWS.get_train_words_path(),
                 val_words_path=conf.DATASET_SOHU_NEWS.get_val_words_path(),
                 max_doc_words=conf.LDA.get_max_doc_words(),
                 ):
        #    Shard directories for the train / validation splits.
        self._train_words_path = train_words_path
        self._val_words_path = val_words_path
        #    Fixed document length: longer docs are truncated, shorter padded.
        self._max_doc_words = max_doc_words
        pass

    #    Iterate over every document as a tensor pair.
    def words_iterator(self, words_path, count):
        """Yield ``(x, y)`` where x is an int32 vector of ``max_doc_words``
        word ids (padded with -1) and y is a constant int8 dummy label."""
        reader = DatasetDocsReader(words_dir=words_path, count=count)
        max_len = self._max_doc_words
        for words in reader.words_iterator():
            #    Map words to ids (low-frequency words are filtered out).
            wids = LiteWordsWarehouse.instance().words_to_wids(words)
            #    Truncate to max_len, then right-pad with -1 up to max_len.
            clipped = wids[:max_len]
            padded = clipped + [-1] * (max_len - len(clipped))

            #    Feature vector from the word ids.
            x = tf.convert_to_tensor(padded, dtype=tf.int32)
            #    Unsupervised: label is a constant placeholder.
            y = tf.convert_to_tensor(1, dtype=tf.int8)
            yield x, y
            pass
        pass

    def tensor_db(self,
                  words_path=None,
                  count=-1,
                  batch_size=conf.DATASET_SOHU_NEWS.get_batch_size(),
                  epochs=conf.DATASET_SOHU_NEWS.get_epochs(),
                  shuffle_buffer_rate=conf.DATASET_SOHU_NEWS.get_shuffle_buffer_rate()):
        """Build a ``tf.data.Dataset`` over the shards at ``words_path``."""
        output_shapes = (tf.TensorShape((self._max_doc_words, )), tf.TensorShape(()))

        db = tf.data.Dataset.from_generator(
            generator=lambda: self.words_iterator(words_path=words_path, count=count),
            output_types=(tf.int32, tf.int8),
            output_shapes=output_shapes)

        #    Optional shuffle / batch / repeat stages, each gated on its knob.
        if (shuffle_buffer_rate > 0):
            db = db.shuffle(shuffle_buffer_rate * batch_size)
        if (batch_size > 0):
            db = db.batch(batch_size, drop_remainder=True)
        if (epochs > 0):
            db = db.repeat(epochs)
        return db

    def tensor_train_db(self,
                        count=-1,
                        batch_size=conf.DATASET_SOHU_NEWS.get_batch_size(),
                        epochs=conf.DATASET_SOHU_NEWS.get_epochs(),
                        shuffle_buffer_rate=conf.DATASET_SOHU_NEWS.get_shuffle_buffer_rate()):
        """Dataset over the training split."""
        return self.tensor_db(self._train_words_path, count, batch_size, epochs, shuffle_buffer_rate)

    def tensor_val_db(self,
                      count=-1,
                      batch_size=conf.DATASET_SOHU_NEWS.get_batch_size(),
                      epochs=conf.DATASET_SOHU_NEWS.get_epochs(),
                      shuffle_buffer_rate=conf.DATASET_SOHU_NEWS.get_shuffle_buffer_rate()):
        """Dataset over the validation split."""
        return self.tensor_db(self._val_words_path, count, batch_size, epochs, shuffle_buffer_rate)

    pass

