# from wenet_gxl.dataset.processor import __tokenize_by_bpe_model
from typing import Dict, List

import torch

from wenet_gxl.AiConstant import gxl_logger

# Alias the project-wide GXL logger under the name ``logging`` so the rest of
# this module can call logging.info(...).  NOTE(review): this shadows the
# stdlib ``logging`` module name within this file.
logging = gxl_logger


def tokenize(context_list_path, symbol_table, bpe_model=None):
    """ Read a biasing (hotword) list from ``context_list_path``, tokenize
        each line and convert it into a list of token ids.

        Args:
            context_list_path (str): path of the biasing list file, one
                context phrase per line
            symbol_table (Dict[str, int]): token -> id mapping
            bpe_model (str): optional sentencepiece bpe model path; currently
                the bpe branch only loads the model and logs a notice instead
                of tokenizing (see the commented-out call below)
        Returns:
            List[List[int]]: one token-id list per input line; tokens missing
            from ``symbol_table`` map to ``<unk>`` when that entry exists and
            are silently dropped otherwise
    """
    if bpe_model is not None:
        import sentencepiece as spm
        sp = spm.SentencePieceProcessor()
        sp.load(bpe_model)
    else:
        sp = None
    # Biasing lists are typically UTF-8 (e.g. Chinese hotwords); be explicit
    # instead of relying on the platform-dependent default encoding.
    with open(context_list_path, "r", encoding="utf-8") as fin:
        context_txts = fin.readlines()
    context_list = []
    for context_txt in context_txts:
        context_txt = context_txt.strip()

        tokens = []
        if bpe_model is not None:
            # tokens = __tokenize_by_bpe_model(sp, context_txt)
            # bpe tokenization is intentionally disabled for now; the log
            # message below says "bpe model not considered for the moment".
            logging.info('暂时不考虑bpe模型')
        else:
            # Character-level tokenization; spaces become the sentencepiece
            # meta symbol so they survive as real tokens.
            for ch in context_txt:
                if ch == ' ':
                    ch = "▁"
                tokens.append(ch)
        labels = []
        for ch in tokens:
            if ch in symbol_table:
                labels.append(symbol_table[ch])
            elif '<unk>' in symbol_table:
                labels.append(symbol_table['<unk>'])
        context_list.append(labels)
    return context_list


class ContextGraph:
    """ Context decoding graph, constructing graph using dict instead of WFST
        Args:
            context_list_path(str): context list path
            symbol_table(Dict[str, int]): token2id mapping used to tokenize
                the context list (currently unused: tokenization is disabled
                in ``__init__``)
            bpe_model(str): model for english bpe part
            context_score(float): context score for each token
        Example:
            >>>inputs = torch.arange(1, 11)
            >>>inputs = inputs.repeat(800, 1).tolist()
            >>>graph = ContextGraph('', {}, None, 6)
            >>>(graph.build_graph(inputs))
            >>>print(graph.graph_size)
            >>>print(graph.graph)
            >>>print(graph.state2token)
            >>>print(graph.back_score)
    """

    def __init__(self,
                 context_list_path: str,
                 symbol_table: Dict[str, int],
                 bpe_model: str = None,
                 context_score: float = 6):
        # Score added for each matched context token during decoding.
        self.context_score = context_score
        # NOTE(review): reading/tokenizing the context list is currently
        # disabled; callers must invoke build_graph() themselves.
        # self.context_list = tokenize(context_list_path, symbol_table,
        #                              bpe_model)
        # state_id -> {token_id: next_state_id}; state 0 is the start state.
        self.graph = {0: {}}
        # Highest state id allocated so far (state 0 always exists).
        self.graph_size = 0
        # state_id -> token id carried on the arc entering that state.
        self.state2token = {}
        # state_id -> score for falling back to the start state; 0.0 marks
        # the start state and terminal (end-of-phrase) states.
        self.back_score = {0: 0.0}
        # self.build_graph(self.context_list)

    def build_graph(self, context_list: List[List[int]]):
        """ Constructing the context decoding graph,
            add arcs with negative scores returning to the starting state for
            each non-terminal tokens of hotwords,
            and add arcs with scores of 0 returning to the starting state for
            terminal tokens.
            The graph is a trie: each phrase is one path that starts at
            state 0; arcs are directed and every state carries a token id.
            Two phrases share states only for their common prefix counted
            from the root (a contiguous shared segment starting at state 0).
        """
        # graph: state_id -> all successors of that state (directed graph),
        # stored as {token_id: next_state_id}.  Token ids are bounded by the
        # vocabulary, but the number of states grows roughly with the number
        # of (non-prefix-sharing) tokens in the context list.
        self.graph = {0: {}}
        self.graph_size = 0  # only state 0 exists so far
        self.state2token = {}
        self.back_score = {0: 0.0}  # every path starts at state 0; falling back from it costs nothing
        for context_token in context_list:  # context_token: List[int], e.g. [1, 2, 3, 4]
            now_state = 0  # walk each phrase starting from state 0
            for i in range(len(context_token)):
                """对于该行的每个token_id"""
                # If this token already labels an arc out of the current
                # state, a state for it must exist — just follow the arc.
                if context_token[i] in self.graph[now_state]:  # shared prefix: reuse the existing state
                    """已存在从当前节点到目标点的边了"""
                    now_state = self.graph[now_state][context_token[i]]
                    if i == len(context_token) - 1:
                        # End of phrase reached on an existing state: mark it
                        # terminal (zero fallback penalty).
                        self.back_score[now_state] = 0
                else:  # no existing arc for this token: allocate a fresh state
                    self.graph_size += 1  # new counter value doubles as the new state id
                    # Insert the new state with no successors yet.
                    self.graph[self.graph_size] = {}
                    # Link it from the current state: {token_id: state_id}.
                    self.graph[now_state][context_token[i]] = self.graph_size
                    # Advance to the newly created state.
                    now_state = self.graph_size
                    # Fallback score = -(path length so far) * context_score.
                    if i != len(context_token) - 1:
                        self.back_score[now_state] = -(i +
                                                       1) * self.context_score
                    else:
                        # Terminal state: fallback score 0 marks phrase end.
                        self.back_score[now_state] = 0
                    # Remember which token this state corresponds to.
                    self.state2token[now_state] = context_token[i]

    def find_next_state(self, now_state: int, token: int):
        """ Search for an arc labelled ``token`` leaving ``now_state``,
            returning the `score on the arc` and `the state it points to`. If
            there is no match, fall back to the starting state and perform an
            additional search from there, so a mismatch does not consume the
            token.
            A state's fallback score is the (negative) cost of walking back
            from that state to the start state; a forward arc's score is
            positive.
            Return:
                next_state (int): the next state
                score (float): the score gained by this transition — a direct
                    match earns +context_score; a fallback pays now_state's
                    back_score, plus +context_score if the token then matches
                    an arc out of the start state
        """
        if token in self.graph[now_state]:
            return self.graph[now_state][token], self.context_score
        # No arc for this token from now_state: retreat to the start state
        # (paying now_state's fallback score) and retry the match from there.
        back_score = self.back_score[now_state]  # fallback cost of now_state
        now_state = 0
        if token in self.graph[now_state]:
            return self.graph[now_state][
                       token], back_score + self.context_score
        return 0, back_score


if __name__ == '__main__':
    # Smoke test: 800 identical phrases of tokens 1..10 collapse into a
    # single 10-state path, because they share the full prefix from the root.
    token_rows = torch.arange(1, 11).repeat(800, 1).tolist()
    demo_graph = ContextGraph('', {}, None, 6)
    demo_graph.build_graph(token_rows)
    for attr in (demo_graph.graph_size, demo_graph.graph,
                 demo_graph.state2token, demo_graph.back_score):
        print(attr)
