# 3-layer, 2nd-order polynomial fit for 8 linear antennas: 12 antennas, sslDb = -30 dB, scan angle 0 degrees
import numpy as np
from scipy.signal import windows as SSW # import chebwin
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from apps.wfs.nlbf_config import NlbfConfig as NG

class ComplexReLU(torch.nn.Module):
    """Element-wise ReLU for complex tensors.

    The rectification is applied independently to the real and imaginary
    components, which are then recombined into a complex tensor.
    """

    def __init__(self):
        super().__init__()
        self.relu = torch.nn.ReLU()

    def forward(self, z):
        """Rectify ``z.real`` and ``z.imag`` separately and recombine them."""
        rectified_parts = (self.relu(part) for part in (z.real, z.imag))
        return torch.complex(*rectified_parts)

class NlbfModel(nn.Module):
    """Complex-valued MLP beamformer.

    Maps a batch of complex steering vectors of shape (batch, 8) to a
    normalized magnitude response of shape (batch, 1). The architecture is
    fixed by ``self.L`` = [8, 16, 32, 16, 8, 1]: each layer is a complex
    linear transform (complex128 weights + bias) followed by ComplexReLU
    applied to the real and imaginary parts.

    NOTE(review): ``N_base``, ``rank``, ``avec_dim`` and ``layers`` are stored
    but never used by the fixed architecture below — presumably kept for
    config/interface compatibility with an earlier polynomial model; confirm
    before removing.
    """

    def __init__(self, N_base: int = 8, rank: int = 3, init_coefs: bool = False,
                 avec_dim: int = 1, layers: int = 1):
        """
        Args:
            N_base: nominal number of antenna elements (unused by the fixed net).
            rank: polynomial rank from the legacy model (unused here).
            init_coefs: when True, load previously saved weights from disk
                after the parameters have been registered.
            avec_dim: unused placeholder (see class note).
            layers: unused placeholder (see class note).
        """
        super().__init__()
        self.rank = rank
        self.N_base = N_base
        self.layers = layers
        # Fixed layer widths: 8 -> 16 -> 32 -> 16 -> 8 -> 1.
        self.L = [8, 16, 32, 16, 8, 1]

        # Register one complex weight matrix + bias (and a ComplexReLU) per
        # layer. Parameters must exist BEFORE load_state_dict can populate
        # them, so they are always built here: the original code created them
        # only when init_coefs was False, which meant init_coefs=True called
        # load() on a parameterless model and never created the relu_* modules
        # that forward() needs.
        total = sum(fan_in * fan_out + fan_out
                    for fan_in, fan_out in zip(self.L, self.L[1:]))
        flat = torch.randn((total,), dtype=torch.complex128).to(NG.device)
        offset = 0
        for idx, (fan_in, fan_out) in enumerate(zip(self.L, self.L[1:])):
            w = flat[offset:offset + fan_in * fan_out].reshape((fan_in, fan_out))
            offset += fan_in * fan_out
            b = flat[offset:offset + fan_out]
            offset += fan_out
            # Keep the original parameter names (w_0_1, b_1, w_1_2, b_2, ...)
            # so checkpoints saved by earlier versions remain loadable.
            self.register_parameter(f'w_{idx}_{idx + 1}', nn.Parameter(w))
            self.register_parameter(f'b_{idx + 1}', nn.Parameter(b))
            setattr(self, f'relu_{idx + 1}', ComplexReLU())

        if init_coefs:
            # Overwrite the random initialization with previously saved weights.
            self.load()

    def forward(self, a, x=None, mode: int = 1):
        """Run the network and return the peak-normalized magnitude response.

        Args:
            a: complex input batch, shape (batch, 8) — e.g. ((1800, 8), complex128).
            x: unused; kept for interface compatibility.
            mode: unused; kept for interface compatibility.

        Returns:
            float64 tensor of shape (batch, 1), scaled so its maximum is 1.
        """
        h = a
        for idx in range(len(self.L) - 1):
            w = getattr(self, f'w_{idx}_{idx + 1}')
            b = getattr(self, f'b_{idx + 1}')
            relu = getattr(self, f'relu_{idx + 1}')
            h = relu(torch.matmul(h, w) + b)
        magnitude = torch.abs(h)  # compute |.| once instead of twice
        return magnitude / torch.max(magnitude)

    def save(self, pt_fn: str = './work/wfs/nlbf.pt') -> None:
        """Persist all parameters to ``pt_fn``."""
        torch.save(self.state_dict(), pt_fn)

    def load(self, pt_fn: str = './work/wfs/nlbf.pt') -> None:
        """Restore parameters previously written by :meth:`save`."""
        self.load_state_dict(torch.load(pt_fn))

    def get_weights(self) -> torch.Tensor:
        """Return the first-layer weight matrix (shape (8, 16), complex128).

        NOTE(review): the original returned ``self.coefficients[0]``, an
        attribute that is never created anywhere in this class, so the call
        always raised AttributeError. Returning the input-layer weights is
        the closest working equivalent — confirm the intended semantics.
        """
        return getattr(self, 'w_0_1')