import math
import torch
import pandas as pd
import torch.nn as nn
from dataproc_npy_sanwei import TraceSet, load_yaml
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence

def torch_cov(input_vec):
    """Batched covariance of 2-D point sequences.

    Args:
        input_vec: tensor of shape (bn, seq, 2).

    Returns:
        Tensor of shape (bn, 4): each row is the flattened 2x2 unbiased
        covariance matrix of the corresponding sequence.
    """
    centered = input_vec - input_vec.mean(dim=1, keepdim=True)  # subtract per-sequence mean
    # Unbiased estimator: (X - mean)^T (X - mean) / (n - 1)
    cov = torch.matmul(centered.transpose(1, 2), centered) / (centered.shape[1] - 1)
    return cov.reshape(input_vec.shape[0], -1)

def torch_cov_single(input_vec):
    """Covariance matrix of a single point sequence.

    Args:
        input_vec: tensor of shape (seq, 2).

    Returns:
        The 2x2 unbiased covariance matrix.
    """
    centered = input_vec - input_vec.mean(dim=0, keepdim=True)  # subtract the column means
    # Unbiased estimator: divide by (n - 1)
    return torch.matmul(centered.T, centered) / (centered.shape[0] - 1)


def masked_position(positions):
    """Boolean mask of masked (non-positive) positions.

    Args:
        positions: tensor whose dim 1 is squeezed away; entries <= 0 mark
            masked positions. (Assumes it squeezes to (bn, seq) — TODO confirm.)

    Returns:
        Bool tensor of shape (bn, seq, 3): the per-position flag replicated
        over the 3 coordinate channels.
    """
    flags = positions.squeeze(1) <= 0
    # Broadcast the per-position flag across the 3 output channels.
    flags = flags.unsqueeze(-1)
    return flags.expand(-1, -1, 3).contiguous()


class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional encodings.

    The table is precomputed once and stored as a non-trainable buffer;
    ``forward`` slices it to the input's sequence length.

    Args:
        d_model: embedding dimension (even, so sin/cos halves interleave).
        max_len: maximum supported sequence length.
    """

    def __init__(self, d_model, max_len=512):
        super().__init__()

        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model).float()
        # Fix: the original wrote `pe.require_grad = False` — a typo that only
        # created an unused attribute. Buffers don't receive gradients anyway,
        # but set the real flag correctly.
        pe.requires_grad = False

        position = torch.arange(0, max_len).float().unsqueeze(1)
        # NOTE(review): the canonical transformer base is 10000.0; this model
        # uses 1000.0. Kept as-is to preserve trained behavior.
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(1000.0) / d_model)).exp()

        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cosine

        pe = pe.unsqueeze(0)  # (1, max_len, d_model) for broadcasting over batch
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Return encodings for the first x.size(1) positions, shape (1, seq, d_model)."""
        return self.pe[:, :x.size(1)]


class AttnHead(nn.Module):
    """Single-head scaled dot-product attention.

    Projects the inputs to queries/keys/values and returns softmax(QK^T/sqrt(d)) V.
    """

    def __init__(self, dim_in, dim_out, mask_value=-9999, **kwargs):
        super(AttnHead, self).__init__()
        self.Q = nn.Linear(dim_in, dim_out)
        self.K = nn.Linear(dim_in, dim_out)
        self.V = nn.Linear(dim_in, dim_out)
        self.msk_value = mask_value

    def forward(self, seq_inputs, query=None, mask=None):
        """Attend over seq_inputs; an explicit `query` enables cross-attention.

        Args:
            seq_inputs: (batch, seq_len, dim_in) source of keys and values.
            query: optional query source; defaults to seq_inputs (self-attention).
            mask: optional tensor; positions where mask == 0 are excluded.
        """
        q = self.Q(seq_inputs if query is None else query)
        k = self.K(seq_inputs)  # (batch, seq_len, dim_out)
        v = self.V(seq_inputs)  # (batch, seq_len, dim_out)

        # (batch, seq_len, seq_len) attention logits, scaled by sqrt(d)
        scores = torch.matmul(q, k.transpose(1, 2))
        scores /= (self.Q.out_features ** 0.5)

        if mask is not None:
            # Push masked positions to -1e9 so softmax assigns them ~0 weight.
            scores = scores.masked_fill(mask == 0, -1e9)

        weights = torch.softmax(scores, dim=-1)
        return torch.matmul(weights, v)

class MultiAttnHead(nn.Module):
    """Multi-head attention: run n_heads AttnHeads in parallel, concatenate,
    project back to dim_in, then apply a residual connection + LayerNorm."""

    def __init__(self, dim_in, dim_out, n_heads=4, mask_value=-9999):
        super(MultiAttnHead, self).__init__()
        heads = [AttnHead(dim_in, dim_out, mask_value) for _ in range(n_heads)]
        self.attn = nn.ModuleList(heads)
        self.linear = nn.Linear(dim_out * n_heads, dim_in)
        self.norm = nn.LayerNorm(dim_in)

    def forward(self, seq_inputs, query=None, mask=None):
        per_head = []
        for head in self.attn:
            per_head.append(head(seq_inputs, query, mask))
        projected = self.linear(torch.cat(per_head, dim=-1))
        # Residual connection followed by layer normalization.
        return self.norm(projected + seq_inputs)

class FeedForward(nn.Module):
    """Position-wise feed-forward sublayer with residual + LayerNorm.

    Two hidden layers of width dim_out (each Linear -> LayerNorm -> ReLU ->
    Dropout) followed by a projection back to dim_in.
    """

    def __init__(self, dim_in, dim_out, dropout=0.1):
        super(FeedForward, self).__init__()
        layers = [
            nn.Linear(dim_in, dim_out),
            nn.LayerNorm(dim_out),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(dim_out, dim_out),
            nn.LayerNorm(dim_out),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(dim_out, dim_in),
        ]
        self.feed_forward = nn.Sequential(*layers)
        self.norm = nn.LayerNorm(dim_in)

    def forward(self, x):
        """Apply the MLP, add the residual, and normalize."""
        return self.norm(self.feed_forward(x) + x)

class Encoder(nn.Module):
    """One encoder layer: multi-head attention, feed-forward, final LayerNorm."""

    def __init__(self, dim_in, dim_out, n_heads=4, dropout=0.1, scale=1, mask_value=-9999):
        super(Encoder, self).__init__()
        self.attn = MultiAttnHead(dim_in, dim_out, n_heads, mask_value)
        # `scale` divides the feed-forward hidden width.
        self.feed_forward = FeedForward(dim_in, dim_out // scale, dropout)
        self.norm = nn.LayerNorm(dim_in)

    def forward(self, x, query=None, mask=None):
        hidden = self.attn(x, query, mask)
        hidden = self.feed_forward(hidden)
        return self.norm(hidden)


class BERTEncoder(nn.Module):
    """Stack of n_layers Encoder blocks applied sequentially."""

    def __init__(self, dim_in, dim_out, n_heads=4, n_layers=2, dropout=0.1, scale=1, mask_value=-9999):
        super(BERTEncoder, self).__init__()
        self.encoders = nn.ModuleList(
            Encoder(dim_in, dim_out, n_heads, dropout, scale, mask_value)
            for _ in range(n_layers)
        )

    def forward(self, x, query=None, mask=None) -> torch.Tensor:
        out = x
        for encoder in self.encoders:
            out = encoder(out, query, mask)
        return out


class MTM(nn.Module):
    """Masked-trajectory head (the analogue of BERT's MLM): three parallel
    MLPs each predict one coordinate channel, and the output is zeroed
    everywhere except at masked positions.

    Note(review): `loc_size` is accepted for interface compatibility but is
    not used inside this class.
    """

    def __init__(self, loc_size, encode_dim, hidden_dim):
        super(MTM, self).__init__()
        # Created in order mlp1, mlp2, mlp3 (keeps state-dict layout stable).
        self.mlp1 = self._make_head(encode_dim, hidden_dim)
        self.mlp2 = self._make_head(encode_dim, hidden_dim)
        self.mlp3 = self._make_head(encode_dim, hidden_dim)

    @staticmethod
    def _make_head(encode_dim, hidden_dim):
        """Two hidden layers (Linear + LayerNorm + ReLU) and a scalar output."""
        return nn.Sequential(
            nn.Linear(encode_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x, positions):
        mask = masked_position(positions)
        # Concatenate the three per-channel scalar predictions.
        preds = torch.cat([self.mlp1(x), self.mlp2(x), self.mlp3(x)], dim=-1)
        # Keep predictions only at masked positions.
        return preds * mask


class MLPWithLayerNorm(nn.Module):
    def __init__(self, input_size, hidden_dim):
        super(MLPWithLayerNorm, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_dim)
        self.non_lin1 = nn.ReLU()
        self.layer_norm1 = nn.LayerNorm(hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.non_lin2 = nn.ReLU()
        self.layer_norm2 = nn.LayerNorm(hidden_dim)

    def forward(self, hidden):
        return self.layer_norm2(self.non_lin2(self.linear2(self.layer_norm1(self.non_lin1(self.linear1(hidden))))))


class BertPairTargetPredictionHead(nn.Module):
    """Predicts the 3 output channels for targets framed by context pairs
    (a pair-based analogue of BERT's span prediction head).

    Each candidate is described by four boundary indices (left1, left2,
    right1, right2). Their hidden states, plus a learned per-slot position
    embedding, feed a shared MLP and three scalar decoders (one per channel).

    Note(review): `loc_size` is accepted but not used anywhere in this class.
    """
    def __init__(self, hidden_dim, loc_size, max_targets, position_embedding_size=200):
        super(BertPairTargetPredictionHead, self).__init__()
        self.position_embedding_size = position_embedding_size
        # One learned embedding per target slot (0 .. max_targets - 1).
        self.position_embeddings = nn.Embedding(max_targets, self.position_embedding_size)
        # Input width = 4 boundary hidden states + one position embedding.
        self.mlp_layer_norm = MLPWithLayerNorm(hidden_dim * 4 + self.position_embedding_size, hidden_dim)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        # Three independent decoders, each emitting one scalar per target slot.
        self.decoder1 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )
        self.decoder2 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )
        self.decoder3 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

        # Maximum number of target slots per pair.
        self.max_targets = max_targets

    def forward(self, hidden_states, pairs, length_indicator, binary_mask, pairs_mask):
        """Score target slots for every boundary pair.

        Args:
            hidden_states: (bs, seq_len, dim) encoder outputs.
            pairs: (bs, num_pairs, 4) boundary indices per pair.
            length_indicator: per-pair valid-slot counts; zero entries are dropped.
            binary_mask: multiplicative mask applied to the concatenated features.
            pairs_mask: (bs, num_pairs) validity mask; 0 marks padded pairs.

        Returns:
            (total_valid_slots, 3) packed scores — note the output is the
            `.data` of a PackedSequence, not a padded tensor (the
            pad_packed_sequence call below is commented out).
        """
        bs, num_pairs, _ = pairs.size()
        bs, seq_len, dim = hidden_states.size()
        # pair indices: (bs, num_pairs)
        left1, left2,  right1, right2 = pairs[:, :, 0], pairs[:, :, 1], pairs[:, :, 2], pairs[:, :, 3]
        # (bs, num_pairs, dim): gather the hidden state at each left1 index.
        left1_hidden = torch.gather(hidden_states, 1, left1.unsqueeze(2).repeat(1, 1, dim).long())
        # (bs, num_pairs, dim)
        pairs_mask_label = pairs_mask.unsqueeze(2).expand_as(left1_hidden)
        '''将pairs填充置0'''
        # Zero out hidden states belonging to padded pairs.
        left1_hidden = left1_hidden * pairs_mask_label.float()
        # pair states: bs * num_pairs, max_targets, dim — the same boundary
        # state is repeated once per target slot.
        left1_hidden = left1_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1).repeat(1, self.max_targets, 1)
        left2_hidden = torch.gather(hidden_states, 1, left2.unsqueeze(2).repeat(1, 1, dim).long())
        left2_hidden = left2_hidden * pairs_mask_label.float()
        left2_hidden = left2_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1).repeat(1, self.max_targets, 1)

        right1_hidden = torch.gather(hidden_states, 1, right1.unsqueeze(2).repeat(1, 1, dim).long())
        right1_hidden = right1_hidden * pairs_mask_label.float()
        right1_hidden = right1_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1).repeat(1, self.max_targets, 1)

        right2_hidden = torch.gather(hidden_states, 1, right2.unsqueeze(2).repeat(1, 1, dim).long())
        right2_hidden = right2_hidden * pairs_mask_label.float()
        right2_hidden = right2_hidden.contiguous().view(bs * num_pairs, dim).unsqueeze(1).repeat(1, self.max_targets, 1)
        # (max_targets, dim)
        position_embeddings = self.position_embeddings.weight
        # (bs * num_pairs, max_targets, dim * 4 + self.position_embedding_size)
        hidden_states_input = torch.cat((left1_hidden, left2_hidden, right1_hidden, right2_hidden,
                                         position_embeddings.unsqueeze(0).repeat(bs * num_pairs, 1, 1)), -1) * binary_mask.float()
        """删除全是0填充的位置"""
        # Drop rows whose length indicator is 0 (all-padding pairs).
        mask = length_indicator != 0
        hidden_states_input = hidden_states_input[mask]
        length_indicator = length_indicator[mask].cpu()  # pack_padded_sequence requires CPU lengths

        hidden_states_input = pack_padded_sequence(input=hidden_states_input, lengths=length_indicator,
                                                   batch_first=True, enforce_sorted=False)
        # Operate directly on the packed data so padded slots are skipped.
        hidden_states_input_data = hidden_states_input.data
        hidden_states = self.mlp_layer_norm(hidden_states_input_data)
        # target scores : bs * num_pairs, max_targets, vocab_size
        target_scores1 = self.decoder1(hidden_states)
        target_scores2 = self.decoder2(hidden_states)
        target_scores3 = self.decoder3(hidden_states)
        target_scores = torch.cat([target_scores1, target_scores2, target_scores3],
                                  dim=-1)
        # target_scores, seq_len = pad_packed_sequence(target_scores, batch_first=True, total_length=self.max_targets)
        return target_scores


class BERT(nn.Module):
    """Trajectory BERT: embeds continuous point sequences, encodes them, and
    feeds the encoding to the MTM head and the pair-target prediction head."""

    def __init__(self, loc_size, dim_in, dim_out, max_targets,
                 n_heads, n_layers, position_embedding_size, dropout=0.1,
                 max_len=100, scale=1, mask_value=-9999):
        super(BERT, self).__init__()
        # A Linear layer stands in for a token-embedding lookup because the
        # inputs are continuous coordinates rather than discrete token ids.
        self.token_embed = nn.Linear(loc_size, dim_in)
        self.token_norm = nn.LayerNorm(dim_in)
        self.pos_embed = PositionalEmbedding(dim_in, max_len)
        self.encoder = BERTEncoder(dim_in, dim_out, n_heads, n_layers, dropout, scale)
        self.mtm = MTM(loc_size, dim_in, dim_out)
        self.ptp = BertPairTargetPredictionHead(dim_in, loc_size, max_targets, position_embedding_size)

    def forward(self, tokens, pairs, length_indicator, binary_mask, mask=None, pairs_mask=None):
        """Run the encoder and both heads.

        Returns:
            (mtm, pairs_predictions) — each is None when its mask is None.
        """
        # Embedding: projected tokens plus positional encodings.
        hidden = self.token_norm(self.token_embed(tokens))
        hidden = hidden + self.pos_embed(tokens)
        encoded = self.encoder(hidden, mask=mask)

        mtm_out = self.mtm(encoded, mask) if mask is not None else None
        pair_out = (self.ptp(encoded, pairs, length_indicator, binary_mask, pairs_mask)
                    if pairs_mask is not None else None)

        return mtm_out, pair_out


if __name__ == "__main__":
    # Smoke test: build the model, fetch one batch from the training loader,
    # and run a single forward pass.
    model = BERT(3, 256, 256, 15, 4, 12, 100, 0, 200, 1)
    cfgs = load_yaml()
    cfgs['Data']['data_root'] = 'E:/pythonProject/粒子滤波/ADS-B/Test'
    # Build the TraceSet dataset.
    dataset = TraceSet(cfgs, phase='train')
    train_loader = DataLoader(dataset, shuffle=True, batch_size=cfgs['Train']['batch_size'],
                              num_workers=cfgs['Train']['n_worker'], pin_memory=True, collate_fn=dataset.collate_fn)
    n = 6  # index of the batch to inspect (zero-based, i.e. the 7th batch)
    # NOTE(review): if the loader yields n or fewer batches, the names below
    # are never bound and the model call raises NameError — confirm data size.
    for i, batch in enumerate(train_loader):
        if i == n:
            masked_sentence, target, pairs, length_indicator, binary_mask, pairs_mask, masked_label, pairs_target = batch
            break  # stop iterating once the n-th batch has been found
    mtm, pairs_predictions = model(masked_sentence, pairs, length_indicator,
                                         binary_mask, masked_label, pairs_mask)
    print('pairs', pairs.size())
    print(pairs[0])
    # print('pairs_mask:', pairs_mask.shape)
    # print(pairs_mask)
    # print('traj:', traj.shape)
    # print(traj[0])
    # print('mtm:', mtm.shape)
    # print(mtm[0])
    # print('pairs_target:', pairs_target.shape)
    # print(pairs_target[0, 0])
    # print('pairs_predictions:', pairs_predictions.shape)
    # print(pairs_predictions[0])

    """
    cov_traj  = torch_cov(traj).reshape(16, -1)
    cov_traj0 = torch_cov_single(traj[0])
    
    print(cov_traj[0])
    print(cov_traj0)
    """