import jieba
import os, sys
import json
import re
import argparse
import heapq
import pickle
import pypinyin

# Regex character class matching punctuation (CJK and ASCII), digits and Latin
# letters — i.e. characters to strip before modeling. NOTE(review): not
# referenced anywhere in this file; presumably used by external callers.
useless_character = r"[-；“”：。—+=！、，【】「」？@#¥%^&*（）()$!?{,.}1234567890;:'<>《》a-zA-Z]"
RETURN_NUM = 1  # how many top-scoring candidates find_match keeps from the search


def is_chinese(ch):
    """Return True if *ch* falls in the CJK Unified Ideographs range U+4E00..U+9FA5."""
    return u'\u4e00' <= ch <= u'\u9fa5'


class WordParser:
    '''
    Character-based trigram language model.

    Builds unigram/bigram/trigram character frequencies from a GBK-encoded
    news corpus and converts space-separated pinyin to Chinese characters
    with a Viterbi-style dynamic-programming search (find_match).
    '''
    def __init__(self):
        self.pinyin_dict = {}  # pinyin syllable -> set of candidate Chinese characters
        self.alpha_freq = {}  # unigram counts: character -> frequency
        self.word_freq = {}  # bigram counts: ch1 -> {ch2 -> frequency}
        self.word_freq_three = {}   # trigram counts: ch1 -> {ch2 -> {ch3 -> frequency}}
        self.word_num = 0  # incremented once per counted character (in lockstep with alpha_num)
        self.alpha_num = 0  # total characters counted; denominator for unigram probabilities
        self.word_three_num = 0  # total trigram observations

    def read_pinyin_list(self, path):
        '''
        Build a dict whose key is a pinyin syllable and whose value is the
        set of Chinese characters it may produce.

        Expects a GBK-encoded file with one syllable per line:
        "<pinyin> <char1> <char2> ...".
        '''
        with open(path, 'r', encoding='gbk') as file:
            line = file.readline().strip('\n')
            while line:
                pinyin_list = line.split(' ')
                self.pinyin_dict[pinyin_list[0]] = set(pinyin_list[1:])
                line = file.readline().strip('\n')
        print('Read pinyin list success!')

    def init_freq(self, path):
        '''Read the level-1/level-2 Chinese character table (GBK-encoded) and
        initialise every frequency dict entry with a zero count / empty dict.
        '''
        with open(path, 'r', encoding='gbk') as file:
            alpha_list = file.read()
            self.word_freq['#'] = {}  # '#' denotes "no preceding character" (context start/reset)
            self.alpha_freq['#'] = 0
            self.word_freq_three['#'] = {}
            for alpha in alpha_list:
                # Map each table character under its (first) lazy pinyin reading.
                alpha_pinyin = pypinyin.lazy_pinyin(alpha)[0]
                if not self.pinyin_dict.get(alpha_pinyin):
                    self.pinyin_dict[alpha_pinyin] = set()
                self.pinyin_dict[alpha_pinyin].add(alpha)
                self.alpha_freq[alpha] = 0
                self.word_freq[alpha] = {}
                self.word_freq_three[alpha] = {}
        print('Init frequency dict success!')

    def statitical_frequency(self, path):
        '''Read the corpus (GBK-encoded, one JSON object per line with
        'html' and 'title' fields) and accumulate unigram, bigram and
        trigram character statistics.
        '''
        context_key = 'html'
        title_key = 'title'
        ###
        def train():
            # Stream the corpus line by line; each line is one news item.
            with open(path, 'r', encoding='gbk') as file:
                line = file.readline().strip('\n')
                news_num = 1
                while line:
                    print('Train {} news\r'.format(news_num), end='')
                    json_file = json.loads(line)
                    context = json_file[context_key]
                    title = json_file[title_key]
                    count_words(context)
                    count_words(title)
                    line = file.readline().strip('\n')
                    news_num += 1
        
        def count_words(context :str):
            # prev_ch holds up to the 2 preceding characters; ['#'] alone
            # means a fresh context (start of text or after a reset).
            prev_ch = ['#']
            for ch in context:
                if is_chinese(ch):
                    if ch in self.alpha_freq:
                        # Unigram count
                        self.alpha_freq[ch] += 1
                        self.alpha_num += 1
                        self.word_num += 1
                        # Bigram count ('#' acts as the start-of-context symbol)
                        if prev_ch[-1] == '#':
                            self.alpha_freq['#'] += 1
                        if not self.word_freq[prev_ch[-1]].get(ch):
                            # first occurrence of this bigram
                            self.word_freq[prev_ch[-1]][ch] = 1
                        else:
                            # bigram seen before
                            self.word_freq[prev_ch[-1]][ch] += 1

                        # Trigram count (only once two context characters exist)
                        if len(prev_ch) > 1:
                            self.word_three_num += 1
                            if not (self.word_freq_three[prev_ch[0]].get(prev_ch[1])):
                                self.word_freq_three[prev_ch[0]][prev_ch[1]] = {}
                                self.word_freq_three[prev_ch[0]][prev_ch[1]][ch] = 1
                            elif not (self.word_freq_three[prev_ch[0]][prev_ch[1]].get(ch)):
                                self.word_freq_three[prev_ch[0]][prev_ch[1]][ch] = 1
                            else:
                                self.word_freq_three[prev_ch[0]][prev_ch[1]][ch] += 1
                        
                            # slide the 2-character context window forward
                            prev_ch[0] = prev_ch[1]
                            prev_ch[1] = ch
                        else:
                            assert(len(prev_ch) == 1)
                            prev_ch.append(ch)
                    else:
                        # character not in the table: reset the context
                        prev_ch = ['#']
                else:
                    # non-Chinese character: reset the context
                    prev_ch = ['#']
        ###
        train()

    def find_match(self, pinyin,gamma=0.1,theta=0.01) -> str:
        '''Convert a space-separated pinyin string into Chinese characters,
        using Viterbi dynamic programming over the trigram model.
        '''
        ###
        def viterbi_find_three(pinyin_list: list):
            '''
            Parameters:

            gamma: weight of the bigram probability in the smoothed mixture
            theta: weight applied to the unigram probability p(w3)
                   (NOTE(review): the original comment called theta the
                   "trigram ratio", but the code below multiplies theta
                   by the unigram term p(w3) — confirm intended weights)
            '''
            index = 0
            candidates = [('#', 1)]  # each element: (candidate string, its accumulated probability)
            while index < len(pinyin_list):
                new_cadidates = []
                pinyin = pinyin_list[index]  # the index-th pinyin syllable
                pinyin = re.sub(r'[^a-z]','',pinyin)  # keep lowercase letters only
                character_set = self.pinyin_dict[pinyin]  # candidate characters for this syllable
                # For each candidate character, keep only the best-scoring extension path.
                for ch in character_set:
                    possible_path = []
                    if self.alpha_freq[ch] == 0:  # never seen in the corpus: skip it
                        continue
                    for candidate in candidates:
                        if len(candidate[0]) == 1:
                            # Only the '#' start symbol so far: score with the bigram model.
                            prev_ch = candidate[0][-1]  # last character of the candidate
                            cnt_w1_w2 = (self.word_freq[prev_ch].get(ch) if self.word_freq[prev_ch].get(ch) else 0)# cnt(w1,w2)
                            cnt_w1 = (self.alpha_freq[prev_ch]) # cnt(w1)
                            prob_w2 = (self.alpha_freq[ch]) / self.alpha_num  # p(w2)
                            prob_w1_w2 = cnt_w1_w2 / cnt_w1  # p(w2|w1)
                            approximate_prob = (1 - gamma) * prob_w1_w2 + gamma * prob_w2
                            possible_path.append((candidate[0] + ch, approximate_prob * candidate[1]))
                        else:
                            prev_ch = candidate[0][-2:]  # last two characters of the candidate
                            if not(self.word_freq_three[prev_ch[0]].get(prev_ch[1])) or not(self.word_freq_three[prev_ch[0]][prev_ch[1]].get(ch)):
                                prob_3 = 0
                            else:
                                cnt_w1_w2_w3 = self.word_freq_three[prev_ch[0]][prev_ch[1]][ch] # cnt(w1,w2,w3)
                                prob_3= cnt_w1_w2_w3 / self.word_freq[prev_ch[0]][prev_ch[1]] # p(w3|w1,w2)
                            cnt_w2_w3 = self.word_freq[prev_ch[-1]].get(ch) if self.word_freq[prev_ch[-1]].get(ch) else 0 # cnt(w2,w3)
                            prob_1 = (self.alpha_freq[ch]) / self.alpha_num  # p(w3)
                            cnt_w2 = (self.alpha_freq[prev_ch[-1]]) # cnt(w2)
                            prob_2 = cnt_w2_w3 / cnt_w2  # p(w3|w2)
                            # Linear interpolation of trigram, bigram and unigram terms.
                            approximate_prob = gamma * prob_2 + (1-gamma-theta) * prob_3 + (theta) * prob_1
                            possible_path.append((candidate[0] + ch, approximate_prob * candidate[1]))
                    winner = max(possible_path, key=lambda x: x[1])
                    new_cadidates.append(winner)
                candidates.clear()

                if index > 0 and index % 7 == 0:
                    # Periodically rescale probabilities to avoid float underflow.
                    candidates = [(x[0], x[1] * 1e10) for x in new_cadidates]
                else:
                    candidates = new_cadidates
                index += 1
            # Return the RETURN_NUM most probable candidates.
            if index == len(pinyin_list):
                return heapq.nlargest(RETURN_NUM, candidates, key=lambda x: x[1])
        ###
        pinyin_list = [pin for pin in pinyin.split(' ') if pin]
        res = viterbi_find_three(pinyin_list)[0]
        return res[0].strip('#')  # drop the start symbol from the winning string

    def save(self, path):
        '''Pickle the trained model tables to *path*.
        The dump order here must match the read order in load().
        '''
        with open(path, 'wb') as f:
            pickle.dump(self.pinyin_dict, f)
            pickle.dump(self.alpha_freq, f)
            pickle.dump(self.word_freq, f)
            pickle.dump(self.word_freq_three, f)
            pickle.dump(self.alpha_num, f)
            pickle.dump(self.word_num, f)
            pickle.dump(self.word_three_num, f)

    def load(self, path):
        '''Load model tables previously written by save(), in the same order.
        '''
        with open(path, 'rb') as f:
            self.pinyin_dict = pickle.load(f)
            self.alpha_freq = pickle.load(f)
            self.word_freq = pickle.load(f)
            self.word_freq_three = pickle.load(f)
            self.alpha_num = pickle.load(f)
            self.word_num = pickle.load(f)
            self.word_three_num = pickle.load(f)
    
    def summary(self):
        '''Print basic statistics of the trained model.'''
        print('Word num : {}'.format(self.word_num))
        print('Three Word num: {}'.format(self.word_three_num))