import jieba
import pypinyin
import os
import json
import string
import re
import heapq
import pickle
import gc

'''
This implementation uses a single dict and a single set.
The dict's first-level index is the pinyin: the same character may have a
different frequency under each of its readings.
The set contains every Chinese character that needs to be handled.
'''

# Full-width / CJK punctuation, extended below with the ASCII punctuation set.
# NOTE(review): name has a typo ("PUNCTUATION") — kept as-is since other
# modules may import it; it appears unused within this file.
PUNCUATION = r"！？。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏."
PUNCUATION += string.punctuation
# Number of top candidates find_match returns.
RETURN_NUM = 5


def is_chinese(ch):
    """Return True if *ch* lies in the CJK Unified Ideographs range U+4E00..U+9FA5."""
    return '\u4e00' <= ch <= '\u9fa5'


def get_num(stat: dict, words) -> int:
    """Count occurrences of the character sequence *words* in a nested stat dict.

    stat: the nested counter dict with the outermost pinyin level already
        stripped; keys are Unicode code points (ints), leaves are int counts.
    words: a sequence of characters (str or list) forming the lookup path; an
        empty sequence sums the whole subtree.

    Returns 0 when the path is absent (or runs past a leaf).
    """
    def _subtree_total(node):
        # A leaf stores its count directly.
        if isinstance(node, int):
            return node
        total = 0
        for value in node.values():
            if isinstance(value, dict):
                total += _subtree_total(value)
            elif isinstance(value, int):
                total += value
        return total

    node = stat
    for ch in words:
        # A path that descends past an int leaf, or hits a missing/zero
        # entry, was never observed.  (The original crashed with
        # AttributeError when `node` was already an int leaf here.)
        if not isinstance(node, dict) or not node.get(ord(ch)):
            return 0
        node = node[ord(ch)]
    return _subtree_total(node)


class PinyinParser:
    """N-gram pinyin-to-hanzi converter.

    word_stat is a list with one nested counter dict per pinyin (looked up via
    pinyin_index); dict keys are Unicode code points and leaves are n-gram
    counts.  hanzi_table is the set of characters the model is willing to emit.
    """

    def __init__(self, N=3):
        # One nested counter dict per pinyin; populated by init()/hanzi_stat().
        self.word_stat = []
        # pinyin string -> index into word_stat.
        self.pinyin_index = {}
        # n-gram order (3 => trigram model).
        self.N = N

    def get_index(self, pinyin):
        """Return the word_stat index for *pinyin* (KeyError if unknown)."""
        return self.pinyin_index[pinyin]

    def init(self, path):
        """Load the pinyin list and the hanzi table.

        path: directory containing 'pinyin_list.txt' (one pinyin plus its
        characters per line, GBK encoded) and 'hanzi_table.txt' (all handled
        characters, GBK encoded).
        """
        pinyin_list_path = os.path.join(path, 'pinyin_list.txt')
        hanzi_table_path = os.path.join(path, 'hanzi_table.txt')
        # Build the pinyin -> index mapping, one (initially empty) counter
        # dict per pinyin.  Reading stops at the first empty line, matching
        # the expected file format (no blank lines before EOF).
        index = 0
        with open(pinyin_list_path, 'r', encoding='gbk') as pinyin_list:
            line = pinyin_list.readline().strip('\n')
            while line:
                line = line.split(' ')
                self.word_stat.append({})   # one counter dict per pinyin
                self.pinyin_index[line[0]] = index
                index += 1
                line = pinyin_list.readline().strip('\n')
        # Every character appearing in hanzi_table.txt is accepted.
        with open(hanzi_table_path, 'r', encoding='gbk') as hanzi_table:
            self.hanzi_table = set(hanzi_table.read())

    def handle_frequency(self, content):
        """Accumulate n-gram counts from *content* into word_stat.

        Non-Chinese characters are stripped first; a character outside
        hanzi_table (or with a pinyin not in the table) resets the n-gram
        context so counts never span such a gap.
        """
        pred_hanzi = []   # sliding window: previous N-1 (codepoint, pinyin) pairs
        content = ''.join([word for word in content if is_chinese(word)])
        content_pinyin = pypinyin.lazy_pinyin(content)
        assert(len(content) == len(content_pinyin))
        for word, word_pinyin in zip(content, content_pinyin):
            word_codec = ord(word)    # store the Unicode code point, not the string
            # Normalize pypinyin output to the pinyin table's spelling.
            if word_pinyin == 'n':
                word_pinyin = 'en'
            word_pinyin = word_pinyin.replace('ve', 'ue')
            if word not in self.hanzi_table or word_pinyin not in self.pinyin_index:
                # Unknown character: break the n-gram chain here.
                pred_hanzi.clear()
                continue
            pinyin_index = self.pinyin_index[word_pinyin]
            if word_codec not in self.word_stat[pinyin_index]:
                self.word_stat[pinyin_index][word_codec] = {}
            if len(pred_hanzi) < self.N - 1:
                # Not enough context yet for a full n-gram.
                pred_hanzi.append((word_codec, word_pinyin))
                continue
            # Walk/create the nested dict rooted at the pinyin of the oldest
            # context character.
            temp_dict = self.word_stat[self.get_index(pred_hanzi[0][1])]
            for i in range(self.N - 1):
                if not temp_dict.get(pred_hanzi[i][0]):
                    temp_dict[pred_hanzi[i][0]] = {}
                temp_dict = temp_dict[pred_hanzi[i][0]]
            # BUG FIX: the original `if temp_dict.setdefault(word_codec, 1):
            # temp_dict[word_codec] += 1` double-counted every first
            # occurrence (setdefault returns the freshly-inserted, truthy 1,
            # so the branch immediately incremented it to 2).  Count each
            # occurrence exactly once.
            temp_dict[word_codec] = temp_dict.get(word_codec, 0) + 1
            # Slide the context window forward by one character.
            pred_hanzi = pred_hanzi[1:]
            pred_hanzi.append((word_codec, word_pinyin))

    def hanzi_stat(self, path):
        """Train on one news file.

        path: GBK-encoded file with one JSON object per line, carrying the
        article body under 'html' and the headline under 'title'.
        """
        CONTENT_KEY = 'html'
        TITLE_KEY = 'title'
        trained_news = 0
        print('Start train {}'.format(path))
        with open(path, 'r', encoding='gbk') as train_file:
            line = train_file.readline().strip('\n')
            while line:
                print('train {} news\r'.format(trained_news), end='')
                news = json.loads(line)
                # Body first, then the title.
                self.handle_frequency(news[CONTENT_KEY])
                self.handle_frequency(news[TITLE_KEY])
                line = train_file.readline().strip('\n')
                trained_news += 1
        print('finish train {}'.format(path))

    def find_match(self, pinyin_str):
        """Viterbi decode of a space-separated pinyin string.

        Returns the RETURN_NUM best candidates, best first, as
        (hanzi_string, probability, count_set) tuples.
        """
        # BUG FIX: clean every syllable up front so the back-references
        # pinyin_list[i-k] below see the cleaned form too — the original
        # stripped non-letters only from the syllable currently being
        # scanned, so a noisy history syllable raised KeyError in get_index.
        pinyin_list = [re.sub(r'[^a-z]', '', p) for p in pinyin_str.split(' ')]
        pinyin_list = [p for p in pinyin_list if p]
        winner = []
        stat = self.word_stat[self.get_index(pinyin_list[0])]
        # Total observations for the first pinyin, used as the unigram
        # denominator throughout.
        # NOTE(review): single_num is never recomputed for later positions —
        # looks like it should be per-syllable; verify against the model.
        single_num = get_num(stat, [])
        # Unigram probabilities for the first character.
        for word in stat:
            word = chr(word)
            count_set = [get_num(stat, word)]
            prob = count_set[0] / single_num
            winner.append((word, prob, count_set))
        for i, pinyin in enumerate(pinyin_list[1:], start=1):
            stat = self.word_stat[self.get_index(pinyin)]   # candidates for this syllable
            new_winner = []
            for word in stat:
                word = chr(word)
                # The unigram count of this character does not depend on the
                # candidate history; compute it once, not per candidate.
                base_count = get_num(stat, word)
                max_val = -1
                max_candidate = None
                max_count = None
                for candidate in winner:
                    count_set = [base_count]
                    for k in range(1, self.N):
                        if i - k >= 0:
                            # Occurrences of the (k+1)-gram ending in `word`.
                            index = self.get_index(pinyin_list[i - k])
                            count_set.append(get_num(self.word_stat[index], candidate[0][-k:] + word))
                    # Interpolated probability: each order is weighted by a
                    # power of 10, longer n-grams dominating.
                    prob = 10 ** (1 - self.N) * count_set[0] / single_num
                    for k in range(1, self.N):
                        if i - k >= 0:
                            # Count of the k-character history, cached in the
                            # candidate's count_set from the previous step.
                            pred_count = candidate[2][k - 1]
                            if pred_count == 0:
                                assert(count_set[k] == 0)
                                pred_count = 1
                            prob += 10 ** (k + 1 - self.N) * count_set[k] / pred_count
                    prob *= candidate[1]
                    if prob > max_val:
                        max_val = prob
                        max_candidate = candidate[0] + word
                        max_count = count_set
                new_winner.append((max_candidate, max_val, max_count))
            winner = new_winner
        return heapq.nlargest(RETURN_NUM, winner, key=lambda x: x[1])

    def save(self, path):
        """Pickle the model (word_stat, hanzi_table, pinyin_index) under *path*."""
        save_path = os.path.join(path, '{}_gram.cache'.format(self.N))
        print('Save to {}\r'.format(save_path), end='')
        with open(save_path, 'wb') as f:
            pickle.dump(self.word_stat, f)
            pickle.dump(self.hanzi_table, f)
            pickle.dump(self.pinyin_index, f)
        print('Save successful!')

    def load(self, path):
        """Restore a model previously written by save().

        NOTE: pickle.load can execute arbitrary code from the file — only
        load caches you created yourself.
        """
        load_path = os.path.join(path, '{}_gram.cache'.format(self.N))
        print("Load {}\r".format(load_path), end='')
        with open(load_path, 'rb') as f:
            self.word_stat = pickle.load(f)
            self.hanzi_table = pickle.load(f)
            self.pinyin_index = pickle.load(f)
        print('Load successful!')
