# CNN for multi-parameter estimation
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from apps.fmcw.conf.app_config import AppConfig as AF

class MpeCnnModelV3(nn.Module):
    """CNN for multi-parameter estimation (detection probability + range/velocity/angle).

    2-D path (used by ``forward``), shapes written channels*H*W:
        256*256*8 -> 256*8*256   (layout change done by the dataset, not here)
        256*8*256 -> 128*8*256   conv kernel (33, 3), 'same' padding
        128*8*256 ->  64*8*256   conv kernel (33, 3), 'same' padding
         64*8*256 ->  32*8*256   conv kernel (33, 3), 'same' padding
         32*8*256 ->   1*8*256   per-cell target probability (sigmoid head)
                  ->   3*8*256   range, velocity, azimuth angle (regression head)

    3-D path (alternative, built lazily by ``conv3d_init`` and run by
    ``conv3d_forward``): 256*256*8*(1) -> ... -> a scalar CFAR-style
    threshold, a per-anchor probability volume, and a 3-channel
    (range/velocity/azimuth) volume.

    Post-processing (outside this module): keep anchors whose sigmoid
    probability exceeds a threshold; the matching entries of the 3-channel
    head are the normalized range / velocity / azimuth predictions.
    """

    def __init__(self):
        super().__init__()
        # Layer 1: 256 -> 128 channels, kernel (33, 3), stride 1, 'same' padding.
        self.l1_conv2d = nn.Conv2d(256, 128, (33, 3), (1, 1), 'same')
        self.l1_bn = nn.BatchNorm2d(128)
        self.l1_relu = nn.ReLU()
        # Layer 2: 128 -> 64 channels.
        self.l2_conv2d = nn.Conv2d(128, 64, (33, 3), (1, 1), 'same')
        self.l2_bn = nn.BatchNorm2d(64)
        self.l2_relu = nn.ReLU()
        # Layer 3: 64 -> 32 channels.
        self.l3_conv2d = nn.Conv2d(64, 32, (33, 3), (1, 1), 'same')
        self.l3_bn = nn.BatchNorm2d(32)
        self.l3_relu = nn.ReLU()
        # Head 4.1: 32 -> 1 channel, per-cell target probability.
        # NOTE(review): l41_bn is registered but never called in forward();
        # it is kept so state_dict keys stay checkpoint-compatible.
        self.l41_conv2d = nn.Conv2d(32, 1, (33, 3), (1, 1), 'same')
        self.l41_bn = nn.BatchNorm2d(1)
        self.l41_sigmoid = nn.Sigmoid()
        # Head 4.2: 32 -> 3 channels (range, velocity, azimuth angle).
        self.l42_conv2d = nn.Conv2d(32, 3, (33, 3), (1, 1), 'same')

    def forward(self, x):
        """Run the 2-D path on a complex input tensor.

        The real and imaginary parts go through the same layer-1/2
        convolutions and BatchNorm modules (shared weights), and the two
        branches are merged by summation after layer 2.

        Returns ``(prob, rva)``: a 1-channel sigmoid probability map and a
        3-channel raw regression map (range, velocity, azimuth).
        """
        branch_r = torch.real(x).float()
        branch_i = torch.imag(x).float()
        # Layer 1 — weights and BN shared between the two branches.
        branch_r = self.l1_relu(self.l1_bn(self.l1_conv2d(branch_r)))
        branch_i = self.l1_relu(self.l1_bn(self.l1_conv2d(branch_i)))
        # Layer 2, then merge the branches by summation.
        branch_r = self.l2_relu(self.l2_bn(self.l2_conv2d(branch_r)))
        branch_i = self.l2_relu(self.l2_bn(self.l2_conv2d(branch_i)))
        merged = branch_r + branch_i
        # Layer 3.
        feat = self.l3_relu(self.l3_bn(self.l3_conv2d(merged)))
        # Head 4.1: probability (no BatchNorm before the sigmoid).
        prob = self.l41_sigmoid(self.l41_conv2d(feat))
        # Head 4.2: range / velocity / azimuth regression.
        rva = self.l42_conv2d(feat)
        return prob, rva

    def conv3d_init(self) -> None:
        """Build the 3-D path layers used by ``conv3d_forward``.

        NOTE(review): this method reuses the attribute names l1_bn/l1_relu,
        l2_bn/l2_relu and l3_bn/l3_relu, so calling it replaces the 2-D
        BatchNorm/ReLU modules created in __init__ — the 2-D ``forward``
        should not be used after this runs. Worth confirming with the
        training code before changing.
        """
        # Layer 1: 1 -> 64 channels, kernel (31, 31, 3), padding keeps size.
        self.l1_conv3d = nn.Conv3d(1, 64, (31, 31, 3), (1, 1, 1), (15, 15, 1))
        self.l1_bn = nn.BatchNorm3d(64)
        self.l1_relu = nn.ReLU(inplace=True)
        # Layer 2: 64 -> 32 channels.
        self.l2_conv3d = nn.Conv3d(64, 32, (31, 31, 3), (1, 1, 1), (15, 15, 1))
        self.l2_bn = nn.BatchNorm3d(32)
        self.l2_relu = nn.ReLU(inplace=True)
        # Layer 3: 32 -> 16 channels.
        self.l3_conv3d = nn.Conv3d(32, 16, (31, 31, 3), (1, 1, 1), (15, 15, 1))
        self.l3_bn = nn.BatchNorm3d(16)
        self.l3_relu = nn.ReLU(inplace=True)
        # Head 4: flatten the full 16-channel volume -> one sigmoid scalar
        # (a learned CFAR-style detection threshold).
        self.l4_flatten = nn.Flatten()
        self.l4_linear = nn.Linear(AF.numADC * AF.numChirps * (AF.numTX * AF.numRX) * 16, 1)
        self.l4_sigmoid = nn.Sigmoid()
        # Head 5: 16 -> 1 channel, per-anchor target probability.
        self.l5_conv3d = nn.Conv3d(16, 1, (31, 31, 3), (1, 1, 1), (15, 15, 1))
        self.l5_sigmoid = nn.Sigmoid()
        # NOTE(review): l5_relu is registered but unused in conv3d_forward
        # (it belonged to a threshold-subtraction variant); kept for
        # state_dict compatibility.
        self.l5_relu = nn.ReLU(inplace=True)
        # Head 6: 16 -> 3 channels (range, velocity, azimuth angle).
        self.l6_conv3d = nn.Conv3d(16, 3, (31, 31, 3), (1, 1, 1), (15, 15, 1))

    def conv3d_forward(self, x):
        """Run the 3-D path; real and imaginary branches share every layer.

        Returns ``(threshold, prob, rva)``: the sum of the two branch
        threshold scalars, the mean of the two branch probability volumes,
        and the sum of the two branch regression volumes.
        """
        re_h = self.l1_relu(self.l1_bn(self.l1_conv3d(torch.real(x).float())))
        im_h = self.l1_relu(self.l1_bn(self.l1_conv3d(torch.imag(x).float())))
        re_h = self.l2_relu(self.l2_bn(self.l2_conv3d(re_h)))
        im_h = self.l2_relu(self.l2_bn(self.l2_conv3d(im_h)))
        re_h = self.l3_relu(self.l3_bn(self.l3_conv3d(re_h)))
        im_h = self.l3_relu(self.l3_bn(self.l3_conv3d(im_h)))
        # Head 4: one scalar threshold per branch.
        thr_r = self.l4_sigmoid(self.l4_linear(self.l4_flatten(re_h)))
        thr_i = self.l4_sigmoid(self.l4_linear(self.l4_flatten(im_h)))
        # Head 5: per-anchor probability.
        prob_r = self.l5_sigmoid(self.l5_conv3d(re_h))
        prob_i = self.l5_sigmoid(self.l5_conv3d(im_h))
        # Head 6: range / velocity / azimuth regression.
        rva_r = self.l6_conv3d(re_h)
        rva_i = self.l6_conv3d(im_h)
        # NOTE(review): thresholds are summed while probabilities are
        # averaged — looks inconsistent but may be intentional; confirm.
        return thr_r + thr_i, (prob_r + prob_i) / 2, rva_r + rva_i