from os import name
from re import match
from dictionary import Dictionary, Morpheme, Name
import copy
import re
import math

# Corpora used to build the word dictionary: each file path maps to the regex
# used to extract dictionary entries from that file's lines.
# NOTE(review): in `(?<=[\s^])` / `(?=[\s$])` the '^'/'$' are literal characters
# inside the class, not anchors -- presumably whitespace-delimited tokens were
# intended; confirm against the corpus format before tightening.
dictionary_datasets = {
    "./asset/dev.txt": r"[\u4e00-\u9fa5]+",
    "./asset/train.txt": r"[\u4e00-\u9fa5]+",
    "./asset/words.txt": r"(?<=[\s^])[\u4e00-\u9fa5]{1,3}(?=[\s$])",
    # This key appeared twice; a dict literal keeps only the last value, so the
    # earlier plain r"[\u4e00-\u9fa5]+" entry was dead and has been removed.
    # '|' inside [...] is literal, so the stray pipes in the lookahead were
    # also dropped.
    "./asset/places.txt": r"[\u4e00-\u9fa5]+(?=[区市县])",
    "./asset/names.txt": r"[\u4e00-\u9fa5]+",
    "./asset/famous.txt": r"[\u4e00-\u9fa5]+",
    "./asset/tongyi.txt": r"(?<=[\s^])[\u4e00-\u9fa5]{1,3}(?=[\s$])",
    "./asset/idioms.txt": r"[\u4e00-\u9fa5]+",
    "./asset/idioms_2.txt": r"[\u4e00-\u9fa5]",
    "./asset/global_locations.txt": r"[\u4e00-\u9fa5]+",
    "./asset/cn_names.txt": r"[王李张刘陈杨黄吴赵周徐孙马朱胡林郭何高罗郑梁谢宋唐许邓冯韩曹曾彭肖蔡潘田董袁余蒋叶杜苏魏程吕丁沈任姚卢钟姜崔谭廖范汪陆金石戴贾韦夏邱方侯邹熊孟秦][\u4e00-\u9fa5]+",
    "./asset/road_bank.txt": r"[\u4e00-\u9fa5]+",
    "./asset/dieci_bank.txt": r"[\u4e00-\u9fa5]+",
    "./asset/THUCNEWS.txt": r"(?<=[\s^])[\u4e00-\u9fa5]{1,3}(?=[\s$])",
    "./asset/THUOCL_law.txt": r"(?<=[\s^])[\u4e00-\u9fa5]{1,3}(?=[\s$])",
    "./asset/THUOCL_poem.txt": r"[\u4e00-\u9fa5]+",
    "./asset/citys.txt": r"[\u4e00-\u9fa5]+",
}

# Corpora used to estimate single-character morpheme scores. '|' inside a
# character class is a literal pipe, not alternation, so the original
# r"[\d|\u4e00-\u9fa5]+" also (wrongly) matched '|'; the pipes were removed.
morpheme_datasets = {
    "./asset/dev.txt": r"[\d\u4e00-\u9fa5]+",
    "./asset/train.txt": r"[\d\u4e00-\u9fa5]+",
    "./asset/THUCNEWS.txt": r"[\d\u4e00-\u9fa5]+",
    "./asset/sentences.txt": r"[\d\u4e00-\u9fa5]+",
}

# Corpus used to build the name dictionary consumed by MMSEG.restore_layer;
# the regex extracts the Chinese-character tokens from each line.
name_datasets = {
    "./asset/names.txt": r"[\u4e00-\u9fa5]+", 
}

# Regexes applied (in dict order) by MMSEG.re_layer to mark atomic spans --
# punctuation runs, emails, URLs, numbers, dates -- so they are never cut.
# NOTE: '|' has no alternation meaning inside [...]; the stray pipes that made
# several classes also match a literal '|' have been removed. The website
# pattern's protocol/www groups were changed from '*' (which accepted repeats
# like "http://https://") to an optional-once '?'.
re_patterns = {
    "chinese_punc1": r"——",
    "chinese_punc2": r"\.{2,}",
    "chinese_punc3": r"·{2,}",
    "chinese_punc4": r"-{2,}",
    "chinese_punc5": r"~{3,}",
    "email": r"[a-zA-Z0-9][\w.\-]*@[a-zA-Z0-9]+\.[a-zA-Z]{2,4}",
    "website": r"(?:https?://)?(?:www\.)?[a-zA-Z0-9\-]+(?:[/.][\w\-]+)*(?:[/.](?!\d+)[a-zA-Z]+)",
    "percentage": r"\d+(?:\.\d+)?%",
    "number_char": r"\d+(?:\.\d+)?[十百千万亿年月日时][万亿余多]?",
    "time": r"\d+(?:[\.:]\d+)?(?:点)半?多?",
    "float": r"\d+\.\d+",
    "digit": r"[0-9A-Za-z]+",
    "punctuation": r"\W",
    "date": r"(?<=[一两三四五六七八九十])[天年日个道]",
    "correction1": r"不(?=[要能是会可])",
    "correction2": r"这(?=[是个家中片种])"
}

def var(c):
    """Return the population variance of the numbers in `c`.

    The previous version returned the raw sum of squared deviations (it never
    divided by len(c)). The division does not change the ranking performed by
    MMSEG._get_min_var, because by the time that rule runs all candidate
    chunks have the same number of elements.
    """
    mean = sum(c) / len(c)
    return sum((x - mean) ** 2 for x in c) / len(c)

class MMSEG(object):
    """MMSEG Chinese word segmentation.

    See http://technology.chtsai.org/mmseg/ or README.md in this repo for
    details. Pipeline: re_layer (protect atomic spans) -> mmseg_layer
    (dictionary-based maximum matching with 4 disambiguation rules) ->
    restore_layer (re-join over-segmented transliterated names).
    """

    def __init__(self, **kargs):
        # Project-declared containers; their contents are loaded by train().
        self.dictionary = Dictionary()
        self.morpheme = Morpheme()
        self.name = Name()
        # Maximum word length (in characters) kept in the dictionary.
        self.max_len = kargs["max_len"]

    def train(self, dictionary_datasets, morpheme_datasets, name_datasets):
        """Use `dictionary_datasets`, `morpheme_datasets` and `name_datasets`
        to train a dictionary, a morpheme dictionary, and a name dictionary.

        Each argument maps a corpus path to the regex used to extract entries.
        """
        self.dictionary.load(dictionary_datasets, self.max_len)
        self.morpheme.load(morpheme_datasets)
        self.name.load(name_datasets)

    def re_layer(self, segs):
        """Preprocessing layer run before the mmseg layer.

        Identifies pre-set patterns (such as numbers and email addresses) and
        marks them atomic so they are never cut. The patterns are defined at
        the top of this file (`re_patterns`).

        `segs` is a list of (text, is_atomic) tuples; the same list is mutated
        in place and returned, with matched spans split out and flagged True.
        """
        for pattern in re_patterns.values():
            devia = 0  # index shift caused by earlier replacements in this pass
            # Iterate over a snapshot because `segs` is resized inside the
            # loop. Entries are immutable tuples, so a shallow copy suffices
            # (the original used an unnecessary deepcopy).
            for i, entry in enumerate(list(segs)):
                # True means the span was produced by a previous pattern and
                # is atomic -- do not split it again.
                if entry[1]:
                    continue
                tmp = []
                index = 0
                for obj in re.finditer(pattern, entry[0]):
                    span = obj.span(0)
                    if len(entry[0][index:span[0]]) > 0:
                        tmp.append((entry[0][index:span[0]], False))
                    tmp.append((entry[0][span[0]:span[1]], True))
                    index = span[1]
                if len(entry[0][index:]) > 0:
                    tmp.append((entry[0][index:], False))
                segs[i + devia:i + devia + 1] = tmp
                devia += len(tmp) - 1
        return segs

    def mmseg_layer(self, segs):
        """The implementation of the MMSEG algorithm.

        Non-atomic spans are cut word by word: all segmentations of the next
        (up to) four words are enumerated, then the candidate set is narrowed
        by the four MMSEG rules in order until one chunk remains; its first
        word length decides the cut.
        """
        ret = []
        for seg in segs:
            if seg[1]:
                # Spans matched by the regex layer are atomic -- emit as-is.
                ret.append(seg[0])
                continue
            sentence = seg[0]
            while len(sentence) > 0:
                chunks = []
                self.match(sentence, record=[], depth=0, chunks=chunks)
                # Rule 1: maximum total chunk length.
                chunks = self._get_max_chunk_length(chunks)
                # Rule 2: maximum average word length.
                if len(chunks) > 1:
                    chunks = self._get_max_avg_length(chunks)
                # Rule 3: minimum variance of word lengths.
                if len(chunks) > 1:
                    chunks = self._get_min_var(chunks)
                # Rule 4: maximum sum of single-character morpheme freedom.
                if len(chunks) > 1:
                    chunks = self._get_max_sum_morpheme(chunks, sentence)
                cut = chunks[0][0]
                ret.append(sentence[:cut])
                sentence = sentence[cut:]
        return ret

    def match(self, sentence, record, depth, chunks):
        """Recursively enumerate segmentations of the next (up to) 4 words.

        Uses self.dictionary to look up prefixes of `sentence`. `record` is
        the list of word lengths chosen so far; completed records are appended
        to `chunks`. `depth` bounds the recursion.
        """
        # A chunk is complete after 4 words or when the sentence is exhausted.
        if depth == 4 or len(sentence) == 0:
            chunks.append(record)
            return
        # self.dictionary.match maps candidate word length -> word; only the
        # lengths are needed here (the unused word binding was dropped).
        for length in self.dictionary.match(sentence):
            # record + [length] builds a fresh list per branch; the ints need
            # no deepcopy.
            self.match(sentence[length:], record + [length], depth + 1, chunks)

    def _get_max_chunk_length(self, chunks):
        """Return the chunk(s) with the maximal total length, in input order."""
        max_length = max(sum(c) for c in chunks)
        return [c for c in chunks if sum(c) == max_length]

    def _get_max_avg_length(self, chunks):
        """Return the chunk(s) with the maximal average word length."""
        max_avg = max(sum(c) / len(c) for c in chunks)
        return [c for c in chunks if sum(c) / len(c) == max_avg]

    def _get_min_var(self, chunks):
        """Return the chunk(s) with the minimal variance of word lengths."""
        min_var = min(var(c) for c in chunks)
        return [c for c in chunks if var(c) == min_var]

    def _get_max_sum_morpheme(self, chunks, sentence):
        """Return (as a one-element list) the chunk whose single-character
        words have the largest summed morpheme score; ties keep the first.
        """
        best = max(chunks, key=lambda c: self._calculate_morpheme(c, sentence))
        return [best]

    def _calculate_morpheme(self, chunk, sentence):
        """Sum the morpheme scores of the single-character words in `chunk`.

        `chunk` is a list of word lengths covering a prefix of `sentence`.
        """
        ret = 0
        index = 0
        for length in chunk:
            if length == 1:
                ret += self.morpheme.get(sentence[index])
            index += length
        return ret

    def restore_layer(self, segs):
        """Post-process mmseg output: re-join over-segmented transliterated
        names around the separators '·' and '-' when a neighbouring token is
        in the name dictionary.
        """
        offset = 0  # how much shorter `segs` has become due to merges
        length = len(segs)
        # Snapshot of immutable strings; `segs` is spliced in place below.
        snapshot = list(segs)
        for i in range(len(snapshot)):
            if (snapshot[i] == "·" or snapshot[i] == "-") and i != 0 and i != (length - 1):
                if snapshot[i - 1] in self.name.data or snapshot[i + 1] in self.name.data:
                    segs[i - 1 - offset:i + 2 - offset] = [
                        snapshot[i - 1] + snapshot[i] + snapshot[i + 1]
                    ]
                    offset += 2
        return segs

    def predict(self, sentence):
        """Cut `sentence` into words; returns a list of strings."""
        segs = [(sentence, False)]
        segs = self.re_layer(segs)
        segs = self.mmseg_layer(segs)
        segs = self.restore_layer(segs)
        return segs

    def test(self, in_path, out_path):
        """Segment every line of `in_path` and write the space-joined results,
        one sentence per line, to `out_path`.
        """
        print("Generating test results...")
        results = []
        # Explicit UTF-8: the corpora are Chinese text and must not depend on
        # the platform default encoding.
        with open(in_path, encoding="utf-8") as F:
            s = F.readlines()
        for i, sentence in enumerate(s):
            sentence = sentence.strip()
            results.append(self.predict(sentence))
            if i % 10 == 0:
                print("{} sentences finished.".format(i))

        results = [" ".join(l) + "\n" for l in results]
        with open(out_path, "w", encoding="utf-8") as F:
            F.writelines(results)
    

if __name__ == "__main__":
    # Build the segmenter, train it on the corpora declared above, then
    # segment the held-out test file.
    segmenter = MMSEG(max_len=8)
    segmenter.train(dictionary_datasets, morpheme_datasets, name_datasets)
    segmenter.test(in_path="./asset/test.txt", out_path="./asset/181220014.txt")
