import os
import re
from pandas import DataFrame
from tqdm import tqdm
from jieba import posseg
from functools import reduce
from typing import Tuple, Dict, List, Union

# Load terms from file
import jieba

my_stop_words = set()


# jieba.enable_paddle()
def __init() -> None:
    """Initialize module-level segmentation state.

    Loads every ``*.txt`` user dictionary under ``./user_dict`` into jieba,
    then (if present) reads ``./stop_words.txt`` into the module-level
    ``my_stop_words`` set, one stop word per line.
    """
    for dirpath, _dirnames, filenames in os.walk('./user_dict'):
        for filename in filenames:
            # Only plain-text dictionary files are loaded.
            if not filename.endswith('.txt'):
                continue
            jieba.load_userdict(os.path.join(dirpath, filename))

    if os.path.exists('./stop_words.txt'):
        global my_stop_words
        # One stop word per line; strip surrounding spaces and the newline.
        with open("stop_words.txt", 'r', encoding='utf-8') as f:
            my_stop_words = {line.strip(' \n') for line in f}


class TextCleaner:
    """Helpers for cleaning Chinese corpora: stop-word management, word
    filtering, jieba segmentation/cleaning, and alias replacement."""

    def __init__(self) -> None:
        pass

    def add_stop_words(self, words: List[str]) -> None:
        """
        @param
            words: stop words to add in
        @function
            update the module-level stop-word set
        @return
            None
        """
        global my_stop_words
        # Bug fix: the original `my_stop_words += list(set(words))` raised
        # TypeError — a set does not support `+=` with a list. Use in-place union.
        my_stop_words |= set(words)

    def FilterText(self, keep_func, corpora: Union[List[str], str], word_separator: str = ' ') -> Union[List[str], str]:
        """
        @param
            keep_func: predicate; a word is kept iff it returns a truthy value
            corpora: a single text, or a list of texts
            word_separator: the separator between words within each text
        @function
            filter target words out of the text(s)
        @return
            text(s) without the dropped words (same shape as `corpora`)
        """
        if isinstance(corpora, str):
            words = corpora.split(word_separator)
            return word_separator.join(filter(keep_func, words)).strip()
        words_lists = [text.split(word_separator) for text in corpora]
        return [word_separator.join(filter(keep_func, words)).strip() for words in words_lists]

    def CleanedCorpora(self, corpora: List[str], stop_words=None, filter_pattern: str = r'[^\u4e00-\u9fa5]', show_progress: bool = True) -> Tuple[List, Dict]:
        """
        @param
            corpora: list of raw texts
            stop_words: words to remove; defaults to the module-level stop-word set
            filter_pattern: regex of characters to strip (default keeps only Chinese)
            show_progress: whether to show real-time tqdm progress bars
        @function
            strip unwanted characters, segment each text into words with jieba
            posseg, then remove all stop words
        @return
            ([cleaned texts], {word: POS-tag dict})
        """
        if stop_words is None:
            stop_words = my_stop_words
        if not corpora:
            # Robustness fix: reduce() with no initializer raises TypeError
            # on an empty sequence; an empty corpus cleans to nothing.
            return [], {}

        pattern_zh = re.compile(filter_pattern)
        pattern_spaces = re.compile(r' {2,}')
        corpora = [pattern_zh.sub(' ', text) for text in corpora]
        corpora = [pattern_spaces.sub(' ', text).strip() for text in corpora]

        bar1 = tqdm(corpora, desc="Cutting words...") if show_progress else corpora
        cut_res = [list(posseg.cut(text)) for text in bar1]
        # Union of all (word, flag) pairs across the whole corpus;
        # the initializer makes this safe even for a single empty document.
        word_pairs = reduce(lambda acc, cur: acc | set(cur), cut_res, set())

        if show_progress:
            bar2 = tqdm(word_pairs, desc="Forming dict...")
            bar3 = tqdm(cut_res, desc="Removing stop words...")
        else:
            bar2 = word_pairs
            bar3 = cut_res
        word_dict = {pair.word: pair.flag for pair in bar2 if pair.word not in stop_words}
        cleaned_text = [" ".join(pair.word for pair in lst if pair.word not in stop_words) for lst in bar3]
        cleaned_text = [pattern_spaces.sub(' ', text).strip() for text in cleaned_text]

        return cleaned_text, word_dict

    def alias_replace(self, cleaned_corpora: Union[List[str], str], alias_dict: Dict, word_separator: str = ' ') -> Union[List[str], str]:
        """
        @param
            cleaned_corpora: a single cleaned text, or a list of them
            alias_dict: mapping from a word to its canonical alias
            word_separator: the separator between words within each text
        @function
            replace every word that appears in `alias_dict` by its alias
        @return
            text(s) with aliases substituted (same shape as input)
        """
        keys = set(alias_dict.keys())
        if isinstance(cleaned_corpora, str):
            words = cleaned_corpora.split(word_separator)
            return word_separator.join(alias_dict[wd] if wd in keys else wd for wd in words).strip()
        words_lists = [text.split(word_separator) for text in cleaned_corpora]
        return [word_separator.join(alias_dict[wd] if wd in keys else wd for wd in lst).strip() for lst in words_lists]

# Populate jieba user dictionaries and the stop-word set once, at import time.
__init()