# -*- coding: utf-8 -*-  
'''

Created on 2021年8月19日
@author: luoyi
'''
import threading
import pickle
import re

import utils.conf as conf


#    停用词库
#    Stop-word dictionary (lazy, thread-safe singleton).
class StopWords:
    _INSTANCE_LOCK = threading.Lock()
    
    @classmethod
    def instance(cls, *args, **kwargs):
        """Return the shared singleton, creating it on first use.
        
        Double-checked locking so concurrent first callers build the
        word set only once.
        """
        if not hasattr(StopWords, '_instance'):
            with StopWords._INSTANCE_LOCK:
                if not hasattr(StopWords, '_instance'):
                    StopWords._instance = StopWords(*args, **kwargs)
        return StopWords._instance
    
    def __init__(self, fpath=conf.STOP_WORDS.get_file_path(), max_empty_line=10):
        """
        fpath: stop-word file path (utf-8, one word per line)
        max_empty_line: consecutive blank lines treated as end-of-file
        """
        self._fpath = fpath
        self._max_empty_line = max_empty_line
        
        #    set of stop words loaded from fpath
        self._words = set()
        self.init(fpath)
    
    #    Load the stop-word file into self._words.
    def init(self, fpath):
        print('加载停用词, fpath:', self._fpath)
        num_empty_line = 0
        #    `with` guarantees the handle is closed even if parsing raises
        #    (the original used open()/close() and could leak on error).
        with open(file=fpath, mode='r', encoding='utf-8') as fr:
            for line in fr:
                #    strip CR/LF and surrounding whitespace
                line = line.replace('\r', '').replace('\n', '').strip()
                
                #    a long run of blank lines is treated as end-of-file
                if len(line) == 0:
                    num_empty_line += 1
                    if num_empty_line > self._max_empty_line:
                        break
                else:
                    num_empty_line = 0
                    self._words.add(line)
        
        print('停用词加载完成. num:', len(self._words))
    
    #    Return `words` with stop words removed (order preserved).
    def filter(self, words):
        return [w for w in words if w not in self._words]


#    标点符号相关
#    Punctuation helpers.
class Punctuation:
    
    #    Sentence-terminating punctuation marks (Chinese and ASCII).
    @classmethod
    def stop_sentence(cls):
        return ['。', '！', '？', '.', '!', '?']
    
    #    Split a document into sentences on terminal punctuation.
    @classmethod
    def split_by_stop_sentence(cls, docs, pattern='[。！？.!?\ ]'):
        return re.split(pattern=pattern, string=docs)
    
    #    Split a document on terminal and sentence-internal punctuation.
    @classmethod
    def split_by_sentence(cls, docs, pattern='[。！？，；.!?,;\ ]'):
        return re.split(pattern=pattern, string=docs)
    
    #    Strip punctuation from each document.
    #    NOTE(review): currently only the left double quote '“' is removed —
    #    confirm whether more marks were intended.
    @classmethod
    def replace_all(cls, docs):
        return [doc.replace('“', '') for doc in docs]
    pass


#    编码相关工具
#    Encoding-related utilities.
class Encoder:
    def __init__(self):
        pass
    
    #    Convert full-width (quanjiao) characters to half-width (banjiao).
    @classmethod
    def q_to_b(cls, ustring):
        """Return `ustring` with full-width characters mapped to half-width.
        
        Full-width space (U+3000) maps to ASCII space; the full-width forms
        U+FF01..U+FF5E map to ASCII by subtracting 0xFEE0 (65248).
        """
        chars = []
        for uchar in ustring:
            inside_code = ord(uchar)
            if inside_code == 12288:                  # full-width space -> ASCII space
                inside_code = 32
            elif 65281 <= inside_code <= 65374:       # other full-width forms
                inside_code -= 65248
            chars.append(chr(inside_code))
        #    join once — the original's `rstring +=` loop is quadratic
        return ''.join(chars)
    
    #    Check whether a byte sequence (given as a str whose code points are
    #    the byte values) forms well-formed UTF-8.
    @classmethod
    def is_utf8(cls, string):
        """Return True iff the code points of `string`, read as bytes,
        are a complete, valid UTF-8 sequence."""
        remain = 0              # continuation bytes still expected
        for ch in string:       # iterate chars directly; ord() computed once
            b = ord(ch)
            if remain == 0:
                if (b & 0x80) == 0x00:      # 0xxxxxxx: single-byte (ASCII)
                    remain = 0
                elif (b & 0xE0) == 0xC0:    # 110xxxxx: 2-byte lead
                    remain = 1
                elif (b & 0xF0) == 0xE0:    # 1110xxxx: 3-byte lead
                    remain = 2
                elif (b & 0xF8) == 0xF0:    # 11110xxx: 4-byte lead
                    remain = 3
                else:                       # invalid lead byte
                    return False
            else:
                if (b & 0xC0) != 0x80:      # continuation must be 10xxxxxx
                    return False
                remain -= 1
        #    leftover `remain` means the last sequence was truncated
        return remain == 0
    
    pass


#    主题模型用的词库
#    Vocabulary for the topic model.
class WordsWarehouse:
    '''主题模型用的词库，会去掉一些停用词'''
    _INSTANCE_LOCK = threading.Lock()
    
    #    word<->id pickle file names
    FNAME_WORD_WID = 'word_id.pkl'
    FNAME_WID_WORD = 'id_word.pkl'
    #    word-frequency file names
    FNAME_WORD_FREQUENCY = 'wid_frequency.pkl'
    FNAME_WORD_FREQUENCY_TXT = 'wid_frequency.txt'
    
    @classmethod
    def instance(cls, *args, **kwargs):
        """Return the shared singleton (double-checked locking)."""
        if not hasattr(WordsWarehouse, '_instance'):
            with WordsWarehouse._INSTANCE_LOCK:
                if not hasattr(WordsWarehouse, '_instance'):
                    WordsWarehouse._instance = WordsWarehouse(*args, **kwargs)
        return WordsWarehouse._instance
    
    def __init__(self, 
                 word_id_path=conf.DATASET_SOHU_THUCNEWS.get_word_id_path(),
                 word_frequency_path=conf.DATASET_SOHU_THUCNEWS.get_word_frequency_path()):
        """
        word_id_path: directory containing the word<->id pickle files
        word_frequency_path: directory containing the frequency files
        """
        #    word -> id and id -> word mappings
        self._word_wid = {}
        self._wid_word = {}
        
        #    id -> frequency; only the id-0 slot until frequencies are loaded
        self._wid_frequency = {0: 0}
        
        self.load_pkl(word_id_path, word_frequency_path)
    
    @classmethod
    def default_word_wid(cls): return {'[PAD]':0, '[UNK]':1, '[CLS]':2, '[SEP]':3, '[MASK]':4}
    @classmethod
    def default_wid_word(cls): return {0:'[PAD]', 1:'[UNK]', 2:'[CLS]', 3:'[SEP]', 4:'[MASK]'}
    
    #    Load the word<->id pickle files and record the vocabulary size.
    #    NOTE(review): pickle.load must only ever see trusted local files —
    #    unpickling untrusted data can execute arbitrary code.
    def load_pkl(self,
                 word_id_path=conf.DATASET_SOHU_THUCNEWS.get_word_id_path(), 
                 word_frequency_path=conf.DATASET_SOHU_THUCNEWS.get_word_frequency_path()):
        with open(file=word_id_path + '/' + WordsWarehouse.FNAME_WORD_WID, mode='rb') as fr:
            self._word_wid = pickle.load(fr)
        with open(file=word_id_path + '/' + WordsWarehouse.FNAME_WID_WORD, mode='rb') as fr:
            self._wid_word = pickle.load(fr)
        
        #    cached vocabulary size (kept in sync by append_word)
        self._words_count = len(self._wid_word)
        
        #    TODO: word frequencies are not loaded yet (word_frequency_path unused)
    
    #    Add a word to the dictionary; no-op if it is already present.
    def append_word(self, word):
        if self._word_wid.get(word) is not None:
            return
        
        wid = len(self._word_wid)
        self._word_wid[word] = wid
        self._wid_word[wid] = word
        #    fix: keep the cached total in sync — the original left
        #    _words_count stale after an append, so words_count() lied
        self._words_count = len(self._wid_word)
    
    #    Map words to ids; words missing from the vocabulary are dropped.
    def words_to_wids(self, words):
        res = []
        for w in words:
            wid = self._word_wid.get(w)
            if wid is None:
                continue
            res.append(wid)
        return res
    
    #    Total number of words in the vocabulary.
    def words_count(self):
        return self._words_count
    
    pass


#    bert词库相关
#    Lite (BERT-related) vocabulary.
class LiteWordsWarehouse:
    '''主题模型用的词库，会去掉一些停用词'''
    _INSTANCE_LOCK = threading.Lock()
    
    #    word<->id pickle file names
    FNAME_WORD_WID_LITE = 'word_id_lite.pkl'
    FNAME_WID_WORD_LITE = 'id_word_lite.pkl'
    
    #    word-frequency file names
    FNAME_WORD_FREQUENCY_LITE = 'wid_frequency_lite.pkl'
    FNAME_WORD_FREQUENCY_LITE_TXT = 'wid_frequency_lite.txt'
    
    @classmethod
    def instance(cls, *args, **kwargs):
        """Return the process-wide singleton, building it lazily under a lock."""
        if hasattr(LiteWordsWarehouse, '_instance'):
            return LiteWordsWarehouse._instance
        with LiteWordsWarehouse._INSTANCE_LOCK:
            if not hasattr(LiteWordsWarehouse, '_instance'):
                LiteWordsWarehouse._instance = LiteWordsWarehouse(*args, **kwargs)
        return LiteWordsWarehouse._instance
    
    def __init__(self, 
                 word_id_path=conf.DATASET_SOHU_THUCNEWS.get_word_id_path(),
                 word_frequency_path=conf.DATASET_SOHU_THUCNEWS.get_word_frequency_path()):
        #    lite word -> id and id -> word mappings
        self._word_wid_lite = {}
        self._wid_word_lite = {}
        
        #    id -> frequency; only the id-0 slot is pre-filled
        self._wid_frequency = {0: 0}
        
        self.load_pkl(word_id_path, word_frequency_path)
    
    #    Load the lite word<->id pickle files and record the vocabulary size.
    def load_pkl(self,
                 word_id_path=conf.DATASET_SOHU_THUCNEWS.get_word_id_path(), 
                 word_frequency_path=conf.DATASET_SOHU_THUCNEWS.get_word_frequency_path()):
        word_wid_file = word_id_path + '/' + LiteWordsWarehouse.FNAME_WORD_WID_LITE
        wid_word_file = word_id_path + '/' + LiteWordsWarehouse.FNAME_WID_WORD_LITE
        with open(file=word_wid_file, mode='rb') as fr:
            self._word_wid_lite = pickle.load(fr)
        with open(file=wid_word_file, mode='rb') as fr:
            self._wid_word_lite = pickle.load(fr)
        #    total number of words in the lite vocabulary
        self._words_count_lite = len(self._wid_word_lite)
        
        #    word frequencies are intentionally not loaded here
        #    (word_frequency_path currently unused)
    
    #    Map words to ids; words absent from the vocabulary are dropped.
    def words_to_wids(self, words):
        lookup = self._word_wid_lite.get
        return [wid for wid in map(lookup, words) if wid is not None]
    
    #    Total number of words in the lite vocabulary.
    def words_count(self):
        return self._words_count_lite
    
    pass
