import sys
path_to_add = "../"
sys.path.append(path_to_add)
import torch
import torch.nn as nn
import numpy as np
from DOA import DOA


class LayerNormFunction(torch.autograd.Function):
    """Group-wise layer normalization over (N, C, L) inputs with a
    hand-written backward pass.

    Channels are split into `groups` groups; mean and (biased) variance
    are computed over the channels within each group, per sample and per
    time step view (dim 2 of the (N, groups, C//groups, L) reshape).
    """

    @staticmethod
    def forward(ctx, x, weight, bias, groups, eps):
        """Normalize x group-wise and apply a per-channel affine map.

        Args:
            x: input tensor of shape (N, C, L).
            weight: per-channel scale, shape (C,).
            bias: per-channel shift, shape (C,).
            groups: number of channel groups; C must be divisible by it.
            eps: numerical-stability constant added to the variance.

        Returns:
            Tensor of shape (N, C, L).
        """
        ctx.groups = groups
        ctx.eps = eps
        N, C, L = x.size()
        x = x.view(N, groups, C // groups, L)
        mu = x.mean(2, keepdim=True)
        # Biased variance (mean of squared deviations) — the backward
        # formula below assumes exactly these statistics.
        var = (x - mu).pow(2).mean(2, keepdim=True)
        y = (x - mu) / (var + eps).sqrt()
        ctx.save_for_backward(y, var, weight)
        y = weight.view(1, C, 1) * y.view(N, C, L) + bias.view(1, C, 1)
        return y

    @staticmethod
    def backward(ctx, grad_output):
        """Analytic gradients for (x, weight, bias); None for (groups, eps)."""
        groups = ctx.groups
        eps = ctx.eps

        N, C, L = grad_output.size()
        # FIX: ctx.saved_variables is deprecated (and removed in recent
        # PyTorch); the supported accessor is ctx.saved_tensors.
        y, var, weight = ctx.saved_tensors
        g = grad_output * weight.view(1, C, 1)
        g = g.view(N, groups, C // groups, L)
        mean_g = g.mean(dim=2, keepdim=True)
        mean_gy = (g * y).mean(dim=2, keepdim=True)
        # Exact gradient of (x - mu)/std expressed through the saved
        # normalized output y: gx = (g - mean(g) - y*mean(g*y)) / std.
        gx = 1. / torch.sqrt(var + eps) * (g - y * mean_gy - mean_g)
        return (gx.view(N, C, L),
                # d/dweight: sum over time (dim 2) then batch (dim 0).
                (grad_output * y.view(N, C, L)).sum(dim=2).sum(dim=0),
                grad_output.sum(dim=2).sum(dim=0),
                None, None)

class GroupLayerNorm1d(nn.Module):
    """Grouped 1-D layer norm with learnable per-channel affine parameters.

    Thin nn.Module wrapper that delegates the actual computation to
    LayerNormFunction.
    """

    def __init__(self, channels, groups=1, eps=1e-6):
        super(GroupLayerNorm1d, self).__init__()
        # Assigning nn.Parameter attributes registers them automatically,
        # equivalent to explicit register_parameter calls.
        self.weight = nn.Parameter(torch.ones(channels))
        self.bias = nn.Parameter(torch.zeros(channels))
        self.groups = groups
        self.eps = eps

    def forward(self, x):
        # x: (N, C, L) -> (N, C, L)
        return LayerNormFunction.apply(x, self.weight, self.bias,
                                       self.groups, self.eps)

# Signal Query Embedding
class SQE(DOA):
    """Signal Query Embedding.

    The absolute initial phase of the signal is considered irrelevant:
    the inter-sensor phase differences of the array response are fed to
    the network as the query. (Translated from the original Chinese note.)
    """

    def __init__(self, num_query, hidden_dim, device="cuda"):
        super(SQE, self).__init__()
        self.f_sample = 8000   # sampling frequency (Hz)
        self.f_signal = 1000   # signal frequency (Hz)
        self.len_signal = 500  # snapshot length (currently unused here)
        self.num_query = num_query
        self.num_sensors = 4
        self.device = device
        self.hidden_dim = hidden_dim
        # MLP that maps the concatenated real/imag array response
        # (2*num_sensors values) to a hidden_dim-sized embedding.
        self.dim_embed = nn.Sequential(
            nn.Linear(2 * self.num_sensors, self.hidden_dim * 4),
            nn.ReLU(inplace=False),
            nn.Linear(self.hidden_dim * 4, self.hidden_dim)
        ).to(device)

    def gen_signalFromTheta(self, data_theta):
        """Simulate the noise-free array steering vector for each angle.

        Args:
            data_theta: iterable of n_theta ground-truth angles (radians);
                elements may be Python floats or 0-dim tensors.

        Returns:
            float32 tensor of shape (num_theta, 2*num_sensors): the real
            parts of the steering vector followed by its imaginary parts.
            (The original docstring claimed (num_theta, num_sensors).)
        """
        num_theta = len(data_theta)
        # Real-valued output buffer; the complex steering vector is split
        # into real/imag halves before being stored.
        array_signal = np.zeros((num_theta, 2 * self.num_sensors), dtype=np.float32)
        # sensor_distance / wavelength are inherited from the DOA base class.
        d_over_lambda = self.sensor_distance / self.wavelength
        sensor_idx = np.arange(self.num_sensors)  # loop-invariant, hoisted
        for i_theta, theta in enumerate(data_theta):
            # FIX: pure-NumPy computation. The original used torch.arange
            # inside NumPy expressions, relying on fragile implicit
            # tensor<->ndarray conversion. float(theta) accepts both
            # Python floats and 0-dim torch tensors.
            phase = 2 * np.pi * sensor_idx * np.cos(float(theta)) * d_over_lambda
            response = np.exp(1j * phase)
            array_signal[i_theta, :] = np.concatenate(
                (np.real(response), np.imag(response)), axis=0)
        return torch.from_numpy(array_signal)

    def gen_queryEmbed(self):
        """Build num_query query embeddings from anchor angles in [0, pi].

        Returns:
            Tensor of shape (num_query, hidden_dim) on self.device.
        """
        anchor = torch.linspace(0, np.pi, self.num_query)
        # array_signal: (num_query, 2*num_sensors)
        array_signal = self.gen_signalFromTheta(anchor).to(self.device)
        query_embed = self.dim_embed(array_signal)
        return query_embed
    
class DOA_Dataset(torch.utils.data.Dataset):
    """Minimal map-style dataset pairing signals with their DOA labels."""

    def __init__(self, signals, labels):
        # Containers are stored as-is; indexing defers to them directly.
        self.signals = signals
        self.labels = labels

    def __len__(self):
        # One sample per signal.
        return len(self.signals)

    def __getitem__(self, idx):
        sample = self.signals[idx]
        target = self.labels[idx]
        return sample, target
