import torch
import torch.nn as nn
from application.config.config import Config
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
class CRF(nn.Module):
    """Linear-chain CRF layer for sequence labeling.

    Learns a transition-score matrix over ``label_num`` real tags plus two
    virtual tags (START, END).  ``forward`` returns the batch negative
    log-likelihood loss; ``predict`` runs Viterbi decoding for one sentence.
    """

    def __init__(self, label_num):
        super(CRF, self).__init__()
        # Number of real tags (e.g. B-dis / I-dis / B-sym / I-sym / O).
        self.label_num = label_num
        # Transition scores T[from, to] between tags (+2 for the virtual
        # START/END tags); learned during training.
        self.transition_scores = nn.Parameter(
            torch.randn(self.label_num + 2, self.label_num + 2))
        # Indices of the virtual start and end tags.
        self.START_TAG, self.END_TAG = self.label_num, self.label_num + 1
        # Forbid transitions *into* START (its column) and *out of* END
        # (its row) with a very negative score so they never win.
        self.transition_scores.data[:, self.START_TAG] = -1000
        self.transition_scores.data[self.END_TAG, :] = -1000
        # Small value used to pad the emission matrix for the virtual tags.
        self.fill_value = -1000.0

    def _get_real_path_score(self, emission_score, sequence_label):
        """Score of the gold (real) path.

        :param emission_score: (seq_len, label_num + 2) emission scores
        :param sequence_label: 1-D long tensor of gold tag ids
        :return: scalar tensor with the gold path score
        """
        seq_len = len(sequence_label)
        # Emission part: pick each token's score for its gold tag.
        real_emission_score = torch.sum(
            emission_score[list(range(seq_len)), sequence_label])
        # Transition part: wrap the sequence as START ... END.
        # dtype must be long so torch.cat / indexing with the labels works.
        b_id = torch.tensor([self.START_TAG], dtype=torch.long, device=Config.DEVICE)
        e_id = torch.tensor([self.END_TAG], dtype=torch.long, device=Config.DEVICE)
        sequence_label_expand = torch.cat([b_id, sequence_label, e_id])
        # Consecutive (from, to) tag pairs along the gold path.
        pre_tag = sequence_label_expand[:-1]
        now_tag = sequence_label_expand[1:]
        real_transition_score = torch.sum(
            self.transition_scores[pre_tag, now_tag])
        # Gold path score = emission part + transition part.
        return real_emission_score + real_transition_score

    def _log_sum_exp(self, score):
        """Numerically stable log-sum-exp reduced over dim 0.

        Subtract the per-column max before exponentiating, then add it back.

        :param score: 1-D or 2-D score tensor
        :return: log(sum(exp(score), dim=0)), same shape minus dim 0
        """
        max_score, _ = torch.max(score, dim=0)
        max_score_expand = max_score.expand(score.shape)
        return max_score + torch.log(
            torch.sum(torch.exp(score - max_score_expand), dim=0))

    def _expand_emission_matrix(self, emission_score):
        """Pad the emission matrix for the two virtual tags.

        Adds two columns (START/END scores for every token, filled with
        ``fill_value``) and two virtual time steps that can only be
        START resp. END.

        :param emission_score: (seq_len, label_num) emission scores
        :return: (seq_len + 2, label_num + 2) expanded emission scores
        """
        seq_length = emission_score.shape[0]
        # Extra START/END columns for the real tokens.
        expand_matrix = torch.full([seq_length, 2], self.fill_value,
                                   dtype=torch.float32, device=Config.DEVICE)
        emission_score_expand = torch.cat([emission_score, expand_matrix], dim=1)
        # Virtual first / last time steps: only START resp. END is viable.
        start = torch.tensor([[self.fill_value] * self.label_num + [0, self.fill_value]], device=Config.DEVICE)
        end = torch.tensor([[self.fill_value] * self.label_num + [self.fill_value, 0]], device=Config.DEVICE)
        emission_score_expand = torch.cat([start, emission_score_expand, end], dim=0)
        return emission_score_expand

    def _get_total_path_score(self, emission_score):
        """Log-sum-exp over all paths (the partition function log Z).

        :param emission_score: (seq_len, label_num) emission scores
        :return: scalar tensor log Z
        """
        emission_score_expand = self._expand_emission_matrix(emission_score)
        n = self.label_num + 2
        # pre[j]: log-sum of scores of all paths ending in tag j at the
        # previous time step.
        pre = emission_score_expand[0]
        for emission in emission_score_expand[1:]:
            # pre varies along dim 0 (previous tag), emission along dim 1
            # (current tag):  score[i, j] = pre[i] + T[i -> j] + emit[j].
            pre_expand = pre.reshape(-1, 1).expand([n, n])
            emission_expand = emission.expand([n, n])
            score = emission_expand + pre_expand + self.transition_scores
            # Reduce over the previous tag (dim 0): one total per current tag.
            pre = self._log_sum_exp(score)
        # Fold the final per-tag totals into one scalar.
        return self._log_sum_exp(pre)

    def forward(self, emission_scores, sequence_labels):
        """Batch negative log-likelihood loss.

        :param emission_scores: iterable of per-sentence emission matrices
        :param sequence_labels: iterable of per-sentence gold tag ids
        :return: summed loss over the batch
        """
        total = 0.0
        for emission_score, sequence_label in zip(emission_scores, sequence_labels):
            # Score of the gold path for this sentence.
            real_path_score = self._get_real_path_score(emission_score, sequence_label)
            # Log-sum over all paths (partition function).
            total_path_score = self._get_total_path_score(emission_score)
            # -log p(gold) = log Z - score(gold)
            total += total_path_score - real_path_score
        return total

    def predict(self, emission_score):
        """Viterbi decoding: best tag sequence for one sentence.

        :param emission_score: (seq_len, label_num) emission scores from
            the BiLSTM for a single sentence
        :return: list of predicted tag ids (virtual START/END stripped)
        """
        emission_score_expand = self._expand_emission_matrix(emission_score)
        n = self.label_num + 2
        # Backpointers and best scores per (expanded) time step; row 0 is a
        # placeholder so row t aligns with expanded time step t.
        ids = torch.zeros(1, n, dtype=torch.long, device=Config.DEVICE)
        val = torch.zeros(1, n, device=Config.DEVICE)
        pre = emission_score_expand[0]
        for obs in emission_score_expand[1:]:
            # Same layout as training:
            #   score[i, j] = pre[i] + T[i -> j] + obs[j]
            pre_extend = pre.reshape(-1, 1).expand([n, n])
            obs_extend = obs.expand([n, n])
            score = obs_extend + pre_extend + self.transition_scores
            # Best previous tag i for every current tag j (reduce dim 0).
            value, index = score.max(dim=0)
            ids = torch.cat([ids, index.unsqueeze(0)], dim=0)
            val = torch.cat([val, value.unsqueeze(0)], dim=0)
            pre = value
        # Best final tag, then follow the backpointers backwards.
        index = torch.argmax(val[-1])
        best_path = [index.item()]
        for backpointers in reversed(ids[1:]):
            # backpointers[j] = best previous tag when currently in tag j.
            index = backpointers[index].item()
            best_path.append(index)
        # Reverse to forward order and strip the virtual START/END steps.
        best_path = best_path[::-1][1:-1]
        return best_path

class BiLSTM(nn.Module):
    """BiLSTM encoder producing per-token emission scores for the CRF."""

    def __init__(self, vocab_size, label_num):
        super(BiLSTM, self).__init__()
        # Token-id -> 256-dim embedding lookup.
        self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=256)
        # Single-layer bidirectional LSTM (torch has no BiLSTM class;
        # bidirectional=True doubles the feature size to 1024).
        self.bilstm = nn.LSTM(
            input_size=256,
            hidden_size=512,
            bidirectional=True,
            num_layers=1
        )
        # Project LSTM features to one score per label: the emission matrix.
        self.linear = nn.Linear(in_features=1024, out_features=label_num)

    def forward(self, inputs, length):
        """Return a list of per-sentence emission matrices (true length only).

        :param inputs: padded token-id batch, time-major
        :param length: real length of each sentence
        :return: list of (sent_len, label_num) emission matrices
        """
        embedded = self.embed(inputs)
        # Pack so the LSTM skips padded positions.
        packed = pack_padded_sequence(embedded, length)
        lstm_out, (_, _) = self.bilstm(packed)
        # Unpack back to a padded time-major tensor.
        padded, true_lengths = pad_packed_sequence(lstm_out)
        # Batch-first so we can slice per sentence; then project to labels.
        logits = self.linear(padded.transpose(0, 1))
        # Keep only each sentence's real tokens for the CRF.
        return [sent_logits[:sent_len]
                for sent_logits, sent_len in zip(logits, true_lengths)]

    def predict(self, inputs):
        """Emission scores for one un-batched sentence.

        :param inputs: 1-D tensor of token ids
        :return: (seq_len, label_num) emission matrix
        """
        embedded = self.embed(inputs)
        # The LSTM expects (seq_len, batch, features): add a batch dim of 1 ...
        lstm_out, (_, _) = self.bilstm(embedded.unsqueeze(1))
        # ... and drop it again before the label projection.
        return self.linear(lstm_out.squeeze(1))

class NER(nn.Module):
    """End-to-end NER model: BiLSTM emission scores + CRF loss/decoding."""

    def __init__(self, vocab_size, label_num):
        super(NER, self).__init__()
        # Kept so the model can be re-created from a saved checkpoint.
        self.vocab_size = vocab_size
        self.label_num = label_num
        # Emission-score encoder and CRF layer.
        self.bilstm = BiLSTM(vocab_size=self.vocab_size, label_num=self.label_num)
        self.crf = CRF(label_num=self.label_num)

    def forward(self, inputs, labels, length):
        """Batch loss.

        :param inputs: padded token-id batch
        :param labels: gold tag sequences
        :param length: real length of each sentence
        :return: summed CRF loss for the batch
        """
        # BiLSTM produces the emission matrices; the CRF turns them into a loss.
        emissions = self.bilstm(inputs, length)
        return self.crf(emissions, labels)

    def predict(self, inputs):
        """Decode the best tag sequence for one sentence.

        :param inputs: 1-D tensor of token ids
        :return: list of predicted tag ids
        """
        emissions = self.bilstm.predict(inputs)
        return self.crf.predict(emissions)

    def save_model(self, save_path):
        """Persist constructor arguments and weights to *save_path*.

        :param save_path: destination file path
        :return: None
        """
        save_info = {
            'init': {'vocab_size': self.vocab_size, 'label_num': self.label_num},
            'state': self.state_dict()
        }
        torch.save(save_info, save_path)





