import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence

class BiLSTM(nn.Module):
    """BiLSTM sequence labeller: embedding -> bidirectional LSTM -> per-token label logits."""

    def __init__(self, vocab_size, label_num):
        super(BiLSTM, self).__init__()
        # Token embedding table: each character id maps to a 256-d vector.
        self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=256)

        # PyTorch has no dedicated "BiLSTM" module; an LSTM with
        # bidirectional=True concatenates the forward and backward outputs.
        self.bilstm = nn.LSTM(
            input_size=256,
            hidden_size=512,
            bidirectional=True,
            num_layers=1
        )

        # Projects each token's 1024-d BiLSTM output (512 per direction)
        # onto the label space.
        self.linear = nn.Linear(in_features=1024, out_features=label_num)

    def forward(self, inputs, length):
        """Score a padded batch of sequences.

        :param inputs: (seq_len, batch) LongTensor of token ids, padded with 0
                       beyond each sequence's true length (seq-first layout,
                       since the LSTM uses batch_first=False).
        :param length: per-sequence true lengths (1-D tensor or list, on CPU).
        :return: list of (length_i, label_num) tensors, one per sequence,
                 with the padding positions stripped.
        """
        output_embed = self.embed(inputs)

        # Pack so the LSTM skips the padded tail of each sequence.
        # enforce_sorted=False also accepts lengths that are not sorted in
        # descending order; for already-sorted input the result is identical.
        output_packed = pack_padded_sequence(output_embed, length, enforce_sorted=False)

        output_bilstm, (_, _) = self.bilstm(output_packed)

        # Back to a padded (seq_len, batch, 1024) tensor plus true lengths.
        outputs_paded, output_lengths = pad_packed_sequence(output_bilstm)

        # (seq_len, batch, 1024) -> (batch, seq_len, 1024) so we can iterate
        # per sequence below.
        output = outputs_paded.transpose(0, 1)

        output_logits = self.linear(output)

        # Drop the padded positions: keep only each sequence's real tokens.
        return [output_logit[:output_length]
                for output_logit, output_length in zip(output_logits, output_lengths)]

    def predict(self, input):
        """Score a single unpadded sequence.

        :param input: (seq_len,) LongTensor of token ids.
        :return: (seq_len, label_num) tensor of label logits.
        """
        output_embed = self.embed(input)

        # Add a batch dimension of size 1 at dim 1 (seq-first layout).
        output_embed = output_embed.unsqueeze(1)

        output_bilstm, (_, _) = self.bilstm(output_embed)

        # Remove the batch dimension again.
        output_bilstm = output_bilstm.squeeze(1)

        output_linear = self.linear(output_bilstm)

        return output_linear

class CRF(nn.Module):
    """Linear-chain CRF scoring layer over label_num real tags plus virtual START/END tags."""

    def __init__(self, label_num):
        super(CRF, self).__init__()

        self.label_num = label_num
        # Large negative score used to forbid transitions and pad emissions.
        self.fill_num = -1000

        # Learned transition matrix over label_num + 2 tags (real tags
        # followed by START and END).
        params = torch.randn(self.label_num + 2, self.label_num + 2)
        self.transition_scores = nn.Parameter(params)

        self.START_TAG, self.END_TAG = self.label_num, self.label_num + 1
        # No state may transition *into* START, and nothing may leave END.
        self.transition_scores.data[:, self.START_TAG] = self.fill_num
        self.transition_scores.data[self.END_TAG, :] = self.fill_num

    def _get_real_path_score(self, emission_score, sequence_label):
        """Score of the gold path: emission part plus transition part.

        :param emission_score: (seq_len, label_num) per-token label scores.
        :param sequence_label: (seq_len,) gold label ids.
        :return: scalar tensor = sum of gold emissions + sum of transitions
                 along the START -> labels -> END chain.
        """
        seq_length = len(sequence_label)
        # Emission part: pick each token's score for its gold label.
        real_emission_score = torch.sum(emission_score[list(range(seq_length)), sequence_label])

        # Bracket the gold labels with START/END so the transition sum also
        # covers START->first and last->END.  Use torch.long so the cat and
        # the advanced indexing below are dtype-safe (the original int32 ids
        # could clash with int64 labels).
        b_id = torch.tensor([self.START_TAG], dtype=torch.long)
        e_id = torch.tensor([self.END_TAG], dtype=torch.long)
        sequence_label_expand = torch.cat([b_id, sequence_label.to(torch.long), e_id])
        pre_tag = sequence_label_expand[:-1]
        now_tag = sequence_label_expand[1:]
        # Transition part: sum the scores of every consecutive tag pair.
        real_transition_score = torch.sum(self.transition_scores[pre_tag, now_tag])

        return real_emission_score + real_transition_score

    def _log_sum_exp(self, score):
        """Numerically stable log-sum-exp over dim 0 (subtract the max first)."""
        max_score, _ = torch.max(score, dim=0)
        max_score_expand = max_score.expand(score.shape)
        return max_score + torch.log(torch.sum(torch.exp(score - max_score_expand), dim=0))

    def _expand_emission_score(self, emission_score):
        """Pad emissions to the (label_num + 2)-tag space and add START/END rows.

        Real tags keep their scores; the two virtual-tag columns are filled
        with fill_num (effectively impossible), and a START row / END row
        bracket the sequence, e.g. for 4 tokens and 5 labels:

            [-1000, ..., -1000,     0, -1000]   <- START row
            [e(1,1), ..., e(1,5), -1000, -1000]
            ...
            [e(4,1), ..., e(4,5), -1000, -1000]
            [-1000, ..., -1000, -1000,     0]   <- END row

        :param emission_score: (seq_len, label_num) tensor.
        :return: (seq_len + 2, label_num + 2) tensor.
        """
        seq_length = emission_score.shape[0]
        # Build the START/END rows as 2-D (1 x label_num+2) and in the same
        # dtype as the emissions: torch.cat requires matching ndim and dtype,
        # so the original 1-D int rows raised a RuntimeError here.
        b_s = torch.tensor([[self.fill_num] * self.label_num + [0, self.fill_num]],
                           dtype=emission_score.dtype)
        e_s = torch.tensor([[self.fill_num] * self.label_num + [self.fill_num, 0]],
                           dtype=emission_score.dtype)
        pad_cols = torch.ones([seq_length, 2], dtype=emission_score.dtype) * self.fill_num
        expand_matrix = torch.cat([emission_score, pad_cols], dim=1)
        expand_matrix = torch.cat([b_s, expand_matrix, e_s], dim=0)

        return expand_matrix

    def _get_total_path_score(self, emission_score):
        """Log partition function: log-sum-exp over every possible tag path.

        Uses the forward algorithm on the expanded emission matrix together
        with the model's transition_scores.

        :param emission_score: (seq_len, label_num) tensor.
        :return: scalar tensor, log of the summed exp-score of all paths.
        """
        emission_score_expand = self._expand_emission_score(emission_score)

        # `pre` accumulates, per tag, the log-sum of all path scores ending
        # in that tag; it starts from the START row.
        pre = emission_score_expand[0]
        for obs in emission_score_expand[1:]:
            # score[i][j] = best-so-far(i) + transition(i -> j) + emission(j)
            pre_expand = pre.reshape(-1, 1).expand([self.label_num + 2, self.label_num + 2])
            obs_expand = obs.expand([self.label_num + 2, self.label_num + 2])
            score = pre_expand + self.transition_scores + obs_expand

            pre = self._log_sum_exp(score)

        return self._log_sum_exp(pre)





if __name__ == '__main__':
    # Parameter 1: character-to-id vocabulary.
    vocab_chars = "双肺见多发斑片状稍高密度影。"
    char_to_id = {ch: idx for idx, ch in enumerate(vocab_chars)}

    # Parameter 2: label-to-id mapping.
    tag_names = ["O", "B-dis", "I-dis", "B-sym", "I-sym"]
    tag_to_id = {tag: idx for idx, tag in enumerate(tag_names)}

    # Embedding dim (256), hidden dim (512) and the single LSTM layer are
    # hard-coded inside BiLSTM itself.

    bilstm = BiLSTM(vocab_size=len(char_to_id),
                    label_num=len(tag_to_id))
    print(bilstm)