import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import copy
from utils import *
import numpy as np
from utils import SQE

class PE_layer(nn.Module):
  """Sinusoidal positional encoding followed by a learnable MLP re-embedding.

  Args:
    d_model: embedding dimension d.
    dropout: dropout probability applied to the final output.
    max_len: maximum sequence length the sinusoid table is precomputed for.
    batch_size: kept for backward compatibility; not used by forward().
  """
  def __init__(self, d_model, dropout=0.1, max_len=5000, batch_size=1):
    super(PE_layer, self).__init__()
    self.dropout = nn.Dropout(p=dropout)
    self.batch_size = batch_size
    # Precompute the classic sin/cos table once; registered as a buffer so it
    # follows .to(device) / state_dict without being a trainable parameter.
    pe = torch.zeros(max_len, d_model)
    position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
    # div_term = [10000^(2i/d_model)]^(-1)
    div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
    pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sin
    pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cos
    pe = pe.unsqueeze(0)  # (1, max_len, d_model)
    self.register_buffer('pe', pe)
    self.pos_MLP_embedding = nn.Sequential(
    nn.Linear(d_model, 2 * d_model),
    nn.ReLU(inplace=False),
    nn.Linear(2 * d_model, d_model))


  def forward(self, x):
    """Add positional encoding to x and re-embed through the MLP.

    Accepts (N, seq_len, d_model) or (seq_len, d_model); output shape matches
    the input shape.
    """
    if x.dim() == 3:
      batch_size, seq_len, _ = x.shape
      pe = self.pe[:, :seq_len].expand(batch_size, -1, -1)
    elif x.dim() == 2:
      # BUGFIX: self.pe[:, :seq_len] kept the leading batch axis, so a 2-D
      # input silently broadcast to a 3-D (1, seq_len, d_model) output.
      # Index the batch axis away so output rank matches input rank.
      seq_len, _ = x.shape
      pe = self.pe[0, :seq_len]
    else:
      # Previously fell through with `pe` unbound -> UnboundLocalError.
      raise ValueError('PE_layer expects a 2-D or 3-D input, got %d-D' % x.dim())
    # NOTE(review): detaching here stops gradients from reaching anything
    # upstream of this layer (e.g. an input embedding) -- confirm intentional.
    # (The redundant `x.requires_grad = False` was removed: detach() already
    # returns a tensor with requires_grad=False.)
    x = (x + pe).detach()
    x = self.pos_MLP_embedding(x)
    return self.dropout(x)

class TransDOA(nn.Module):
    """DETR-style transformer for direction-of-arrival (DOA) estimation.

    Input signals are projected to the embedding dimension, positionally
    encoded, passed through a transformer encoder/decoder with learned
    queries, and decoded by a TaskHead into per-query predictions.
    """
    def __init__(self, dim_embedding=256, encoder_head=4, 
                 num_enLayers=2, decoder_head=4, num_deLayers=2,
                 num_query=10, model_cfg=None, d_src=None, device=None):
      super(TransDOA, self).__init__()
      # Positional-encoding layers (memory and queries use separate encoders)
      self.PE_KV = PE_layer(d_model=dim_embedding)
      self.PE_Q = PE_layer(d_model=dim_embedding)
      # Encoder / decoder stacks; note these expect (seq, batch, embed) inputs
      encoder_layer = nn.TransformerEncoderLayer(d_model=dim_embedding, nhead=encoder_head)
      decoder_layer = nn.TransformerDecoderLayer(d_model=dim_embedding, nhead=decoder_head)
      self.Encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_enLayers)
      self.Decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_deLayers)
      # Configuration
      self.num_query = num_query
      self.dim_embedding = dim_embedding
      # Head & query embedding
      self.head = TaskHead(in_channels=dim_embedding, targets_per_anchor=3, device=device)
      self.QueryEmbed = SQE(num_query, dim_embedding,device=device)
      # Input projection: d_src -> dim_embedding
      self.input_embedding = nn.Sequential(
        nn.Linear(d_src, 4 * d_src),
        nn.ReLU(inplace=True),
        nn.Linear(4 * d_src, dim_embedding))

    def forward(self, x):
      """x: (N, len_signal, d_src) -> per-query predictions (N, num_query, 6)."""
      batch_size, len_signal, dim = x.shape
      x = self.input_embedding(x)  # (N, L, E)
      x = self.PE_KV(x)
      # BUGFIX: nn.TransformerEncoder expects (seq, batch, embed) by default,
      # but x was previously fed batch-first, so encoder self-attention mixed
      # the batch axis with the sequence axis. Permute before encoding; the
      # resulting memory already has the (L, N, E) layout the decoder needs,
      # so the old post-hoc `memory.permute(1,0,2)` is dropped.
      memory = self.Encoder(x.permute(1, 0, 2))  # KV, (L, N, E)
      query = self.QueryEmbed.gen_queryEmbed()  # anchor-based query embedding
      query = query.repeat(batch_size, 1, 1)    # (N, n_q, E)
      query = self.PE_Q(query)  # queries use their own positional encoding
      query = query.permute(1, 0, 2)            # (n_q, N, E)
      output = self.Decoder(query, memory)      # (n_q, N, E)
      result = self.head(output.transpose(0,1))  # (N, n_q, 6)
      return result

class TaskHead(nn.Module):
  """Prediction head: maps per-query decoder features to (confidence, angle)
  outputs.

  Even output channels carry raw sigmoid confidences in (0, 1); odd channels
  carry angles of the form anchor + pi * sigmoid(.), where the anchors are
  evenly spaced on [0, pi], one per query.
  """
  def __init__(self,
                in_channels,
                targets_per_anchor,
                device,
                heads=(6, 2),
                groups=1,
                head_conv=64,
                final_kernel=1,
                thres_prob=0.5,
                ):
    super(TaskHead, self).__init__()
    self.heads = heads
    self.device = device
    self.in_channels = in_channels
    self.groups = groups
    self.head_conv = head_conv
    self.final_kernel = final_kernel
    self.thres_prob = thres_prob
    self.targets_per_anchor = targets_per_anchor
    # Simple two-layer FFN regressor; the conv-based head below is unused.
    self.FFN = nn.Sequential(
            nn.Linear(in_channels, in_channels * 4),
            nn.ReLU(inplace=True),
            nn.Linear(in_channels * 4, heads[0])
        )
    self.output_dim = heads[0]

  def forward(self, x):
    """Decode features into per-query confidences and angles.

    x: (N, num_query, in_channels) -> (N, num_query, heads[0]).
    """
    N, num_query, _ = x.shape
    # One angular anchor per query, evenly spaced on [0, pi]; only the odd
    # (angle) channels receive an anchor offset.
    anchors = np.linspace(0, np.pi, num_query)[None, :, None]
    offset = np.zeros((N, num_query, self.output_dim), dtype=np.float32)
    offset[:, :, 1::2] = anchors
    # Odd channels are scaled from (0, 1) to (0, pi); even (confidence)
    # channels keep the raw sigmoid (scale 1, offset 0).
    scale = np.ones((N, num_query, self.output_dim), dtype=np.float32)
    scale[:, :, 1::2] = np.pi
    offset_t = torch.from_numpy(offset).to(self.device).detach()
    scale_t = torch.from_numpy(scale).to(self.device).detach()
    out = torch.sigmoid(self.FFN(x))
    return out * scale_t + offset_t  # (N, n_q, heads[0])

  def build_conv_layers(self):
    """(Unused) grouped Conv1d head: (num_conv - 1) conv+GN+ReLU blocks
    followed by a final biased conv producing len_predict channels per group.
    """
    len_predict, num_conv = self.heads
    groups = self.groups
    layers = []
    c_in = self.in_channels  # per-group input width
    for _ in range(num_conv - 1):
      layers.append(nn.Conv1d(
          c_in * groups,
          self.head_conv * groups,
          kernel_size=self.final_kernel,
          stride=1,
          padding=self.final_kernel // 2,
          groups=groups,
          bias=False))
      layers.append(GroupLayerNorm1d(self.head_conv * groups, groups=groups))
      layers.append(nn.ReLU(inplace=False))
      c_in = self.head_conv
    layers.append(nn.Conv1d(
        self.head_conv * groups,
        len_predict * groups,
        kernel_size=self.final_kernel,
        stride=1,
        padding=self.final_kernel // 2,
        groups=groups,
        bias=True))
    return nn.Sequential(*layers)




class TransformerEncoderLayer(nn.Module):
  """Post-norm transformer encoder layer: self-attention then a position-wise
  feed-forward network, each wrapped in dropout + residual + LayerNorm
  (same scheme as torch's nn.TransformerEncoderLayer).
  """
  def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
    super(TransformerEncoderLayer, self).__init__()
    # NOTE(review): pos_encoder is registered here but never used in
    # forward(); kept so existing state_dicts remain loadable.
    self.pos_encoder = PE_layer(d_model)
    self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
    # Position-wise feed-forward network
    self.linear1 = nn.Linear(d_model, dim_feedforward)
    self.dropout = nn.Dropout(dropout)
    self.linear2 = nn.Linear(dim_feedforward, d_model)

    self.norm1 = nn.LayerNorm(d_model)
    self.norm2 = nn.LayerNorm(d_model)
    self.dropout1 = nn.Dropout(dropout)
    self.dropout2 = nn.Dropout(dropout)

  def forward(self, src, src_mask=None, src_key_padding_mask=None):
    """src: (seq, batch, d_model) -> same shape."""
    attn_out, _ = self.self_attn(src, src, src, attn_mask=src_mask,
                                 key_padding_mask=src_key_padding_mask)
    # Residual connection around self-attention, then normalize.
    src = self.norm1(src + self.dropout1(attn_out))
    ff_out = self.linear2(self.dropout(F.relu(self.linear1(src))))
    # Residual connection around the feed-forward block, then normalize.
    src = self.norm2(src + self.dropout2(ff_out))
    return src

class TransformerEncoder(nn.Module):
  """Stack of num_layers deep-copied encoder layers with an optional final
  normalization module."""
  def __init__(self, encoder_layer, num_layers, norm=None):
    super(TransformerEncoder, self).__init__()
    self.layers = _get_clones(encoder_layer, num_layers)
    self.num_layers = num_layers
    self.norm = norm

  def forward(self, src, src_mask=None, src_key_padding_mask=None):
    """Run src through every layer in order; apply final norm if configured."""
    out = src
    for layer in self.layers:
      out = layer(out, src_mask=src_mask,
                  src_key_padding_mask=src_key_padding_mask)
    return out if self.norm is None else self.norm(out)

def _get_clones(module, N):
  return nn.ModuleList([copy.deepcopy(module) for i in range(N)])

