import itertools
from collections import Counter
from pathlib import Path
from typing import Dict

import fjson

from common.chinese_char_dataset import ChineseCharDataset
from news_dataset import CorpusDataset

# Project root: resolves to the parent of this file's directory
# (Path(__file__) / '../../' == file -> its dir -> one level up).
ROOT = (Path(__file__) / '../../').resolve()
# Directory holding the character table and the generated matrix JSON.
DATASET = (ROOT / 'dataset').resolve()


def to_prob(counter: Dict[str, int]) -> Dict[str, float]:
    """Normalize raw occurrence counts into a probability distribution.

    Args:
        counter: Mapping from key to its raw occurrence count.

    Returns:
        Mapping from each key to ``count / total``. Returns an empty dict
        when ``counter`` is empty (instead of raising ZeroDivisionError).
    """
    total = sum(counter.values())
    if total == 0:
        # Empty (or all-zero) input: no distribution to compute.
        return {}
    return {key: count / total for key, count in counter.items()}


def generate_markov_matrix(*, char_table: str, in_path: Path, out_path: Path):
    """Build a character-level 2-gram Markov model from a corpus and save it as JSON.

    Counts, over every contiguous Chinese-character segment of the corpus:
    initial-character frequencies, final-character frequencies, and adjacent
    character-pair (2-gram) frequencies. Pair counts are normalized per
    leading character, i.e. ``transition_matrix['xy'] = P(y | x)``.

    Args:
        char_table: Contents of the Chinese character table file.
        in_path: Path to the corpus text file.
        out_path: Destination path for the JSON output.
    """
    chn_data = ChineseCharDataset(
        char_table=char_table
    )
    corpus = CorpusDataset(
        chn_char_data=chn_data,
        in_path=in_path,
    )

    initial_counter, end_counter, gram2_counter = Counter(), Counter(), Counter()
    # Iterate over runs of consecutive Chinese characters in the corpus.
    # Assumes segment_generator() yields non-empty strings — TODO confirm.
    for segment in corpus.segment_generator():
        # Count every adjacent character pair within the segment.
        for prev_ch, next_ch in zip(segment, segment[1:]):
            gram2_counter[f'{prev_ch}{next_ch}'] += 1

        # Count segment-initial and segment-final characters.
        initial_counter[segment[0]] += 1
        end_counter[segment[-1]] += 1

    # Normalize 2-gram counts per leading character. groupby requires the
    # keys sorted by the same key function (first character) beforehand.
    # Reuses to_prob() instead of duplicating the normalization math.
    transition_matrix: Dict[str, float] = {}
    sorted_keys = sorted(gram2_counter.keys())
    for _first_char, key_group in itertools.groupby(sorted_keys, lambda k: k[0]):
        group_counts = {key: gram2_counter[key] for key in key_group}
        transition_matrix.update(to_prob(group_counts))

    json_text = fjson.dumps({
        'initial_matrix': to_prob(initial_counter),
        'transition_matrix': transition_matrix,
        'end_matrix': to_prob(end_counter),
    }, float_format='.3e', separators=(',', ':'))
    # NOTE(review): utf-8-sig prepends a BOM, which strict JSON parsers may
    # reject — confirm downstream readers expect it before changing.
    out_path.write_text(json_text, encoding='utf-8-sig')


if __name__ == '__main__':
    # Script entry point: build the Markov matrices from the bundled
    # character table and a local corpus file, writing JSON into dataset/.
    generate_markov_matrix(
        char_table=(DATASET / 'chinese-char-table.txt').read_text(encoding='utf-8-sig'),
        # NOTE(review): hard-coded absolute Windows path to the corpus —
        # only runs on the author's machine; consider a CLI argument.
        in_path=Path(r'C:\Users\ipid\Documents\Corpus\微信公众号语料库.txt'),
        out_path=(DATASET / 'markov-matrix.json'),
    )
