import torch.nn as nn
import torch
import torch.nn.functional as F
import math
from global_config import * 
class Conv2dSame(torch.nn.Conv2d):
    """Conv2d with TensorFlow-style 'SAME' padding.

    Pads the input asymmetrically (the extra pixel goes to the bottom/right
    when the total padding is odd) so the output spatial size equals
    ceil(input_size / stride) along each axis.
    """

    def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int:
        """Total padding needed along one axis for a 'SAME'-sized output.

        Args:
            i: input size along the axis
            k: kernel size
            s: stride
            d: dilation
        """
        needed = (math.ceil(i / s) - 1) * s + (k - 1) * d + 1 - i
        return needed if needed > 0 else 0

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        in_h, in_w = x.size()[-2:]

        total_h = self.calc_same_pad(i=in_h, k=self.kernel_size[0], s=self.stride[0], d=self.dilation[0])
        total_w = self.calc_same_pad(i=in_w, k=self.kernel_size[1], s=self.stride[1], d=self.dilation[1])

        if total_h or total_w:
            # F.pad takes pads for the last dim first: (left, right, top, bottom).
            left = total_w // 2
            top = total_h // 2
            x = F.pad(x, [left, total_w - left, top, total_h - top])

        return F.conv2d(
            x,
            self.weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
        
def conv2d_layer(in_channels, out_channels=256, kernel_size=None, stride=None, padding=None, dilation=None, device=None):
    """Build a Conv2d + LeakyReLU block.

    Args:
        in_channels: int, input channels of the conv layer
        out_channels: int, output channels of the conv layer
        kernel_size: list/tuple, kernel size (defaults to [1, 1])
        stride: list/tuple, stride (defaults to [1, 1])
        padding: int/list, zero-padding (defaults to 0)
        dilation: int/list, dilation (defaults to 1)
        device: torch device for the conv weights; defaults to CUDA when
            available (the original hard-coded behavior), otherwise CPU so the
            layer can also be built on CPU-only machines.
    Returns:
        nn.Sequential containing the Conv2d followed by LeakyReLU.
    """
    if kernel_size is None:
        kernel_size = [1, 1]
    if stride is None:
        # BUG FIX: previously assigned to a typo'd name `strides`, leaving
        # stride=None and producing an invalid nn.Conv2d configuration.
        stride = [1, 1]
    if padding is None:
        padding = 0  # None is not a valid nn.Conv2d padding
    if dilation is None:
        dilation = 1  # None is not a valid nn.Conv2d dilation
    if device is None:
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation).to(device),
        nn.LeakyReLU(),
    )

class FormantLayer(nn.Module):
    """Formant analysis stack: five Conv2d+LeakyReLU blocks that progressively
    reduce the frequency axis, with a Dropout2d inserted after the second block.

    Args:
        kernels_size: list, kernel size of each conv2d layer
        outputs: list, output channels of each conv2d layer
        mfcc: bool, use the MFCC stride schedule (wider stride on the last layer)
        keep_pro: float, passed to nn.Dropout2d as its `p` argument.
            NOTE(review): nn.Dropout2d's `p` is the DROP probability, but the
            name suggests a keep probability — confirm the intended value.
    """
    def __init__(self, kernels_size=None, outputs=None, mfcc=False, keep_pro=0.5):
        super(FormantLayer, self).__init__()

        if kernels_size is None:
            kernels_size = [[1, 3], [1, 3], [1, 3], [1, 3], [1, 2]]
        if outputs is None:
            outputs = [72, 108, 162, 243, 256]

        # Only the last layer's stride depends on the input feature type.
        if mfcc:
            strides = [[1, 2], [1, 2], [1, 2], [1, 2], [1, 3]]
        else:
            strides = [[1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]

        padding = [[0, 1], [0, 1], [0, 1], [0, 1], 0]
        dilation = [1, 1, 1, 1, 1]

        self.kernels_size = kernels_size
        self.outputs = outputs

        self.formant_layers = nn.Sequential()
        last_out_channels = 1  # the input is a single-channel spectrogram
        for i in range(len(self.kernels_size)):
            self.formant_layers.add_module(
                f'conv2d_{i}',
                conv2d_layer(in_channels=last_out_channels,
                             out_channels=self.outputs[i],
                             kernel_size=self.kernels_size[i],
                             stride=strides[i],
                             padding=padding[i],
                             dilation=dilation[i]))
            last_out_channels = self.outputs[i]

            if i == 1:
                self.formant_layers.add_module(f'drop_out{i}', nn.Dropout2d(keep_pro))

    def forward(self, x):
        # BUG FIX: the old loop iterated len(kernels_size) (5) indices over a
        # Sequential holding 6 modules (5 conv blocks + 1 dropout), so the
        # final conv block was never applied. Running the whole Sequential
        # applies every registered module in order.
        return self.formant_layers(x)

class ArticulationLayer(nn.Module):
    """Articulation stack: five Conv2d+LeakyReLU blocks that collapse the time
    axis, with a Dropout2d inserted after the second block.

    Args:
        kernels_size: list, kernel size of each conv2d layer
        E: int, channel count of the emotion branch; when E > 0, layers after
            the first expect 256 + E input channels (the branch that would
            supply those channels is currently disabled)
        conv2d_strides: list, stride of each conv2d layer
        emotion_strides: list, stride of each emotion-branch layer (stored for
            interface compatibility; the branch itself is not built)
        keep_pro: float, passed to nn.Dropout2d as its `p` argument.
            NOTE(review): `p` is the DROP probability despite the name — confirm.
    """
    def __init__(self, kernels_size=None, E=0, conv2d_strides=None, emotion_strides=None, keep_pro=0.5):
        super(ArticulationLayer, self).__init__()

        self.E = E
        if kernels_size is None:
            kernels_size = [[3, 1], [3, 1], [3, 1], [3, 1], [4, 1]]
        if emotion_strides is None:
            emotion_strides = [[2, 1], [4, 1], [8, 1], [16, 1], [64, 1]]
        if conv2d_strides is None:
            conv2d_strides = [[2, 1], [2, 1], [2, 1], [2, 1], [4, 1]]

        padding = [[1, 0], [1, 0], [1, 0], [1, 0], [1, 0]]
        dilation = [1, 1, 1, 1, 1]

        self.kernels_size = kernels_size
        self.emotion_strides = emotion_strides
        self.conv2d_strides = conv2d_strides

        # Emotion state tensor (not consumed by forward at present).
        # NOTE(review): a plain tensor attribute — it does not follow
        # .to(device) and is not saved in state_dict; register_buffer would
        # fix both but changes checkpoint keys, so it is left as-is.
        self.emotion = torch.normal(mean=0.0, std=1.0, size=(1, self.E, 64, 1))

        self.articulation_layer = nn.Sequential()
        for i in range(len(self.kernels_size)):
            # First layer consumes the 256-channel formant features; later
            # layers would also take E emotion channels if the branch existed.
            in_channels = 256 if i == 0 else 256 + self.E
            self.articulation_layer.add_module(
                f'conv2d_{i}',
                nn.Conv2d(in_channels, 256, self.kernels_size[i],
                          self.conv2d_strides[i], padding[i], dilation[i]))
            self.articulation_layer.add_module(f'relu_{i}', nn.LeakyReLU())

            if i == 1:
                self.articulation_layer.add_module(f'drop_out{i}', nn.Dropout2d(keep_pro))

    def forward(self, x):
        """Apply the articulation convolution stack to x."""
        return self.articulation_layer(x)

class OutputLayer(nn.Module):
    """Final projection head.

    Maps 256-dim features to the target output vector through two linear
    layers with dropout in between, squashed to [-1, 1] by Tanh.

    Args:
        output_size: int, dimensionality of the prediction
        keep_pro: float, probability handed to nn.Dropout
    """
    def __init__(self, output_size, keep_pro):
        super(OutputLayer, self).__init__()
        hidden = 150
        self.output_layer = nn.Sequential(
            nn.Linear(256, hidden),
            nn.Dropout(keep_pro),
            nn.Linear(hidden, output_size),
            nn.Tanh(),
        )

    def forward(self, x):
        """Project features of shape (..., 256) to (..., output_size)."""
        return self.output_layer(x)

class Audio2Face(nn.Module):
    """Audio2Face model: formant-analysis convs -> articulation convs -> output head.

    Args:
        output_size: int, size of the prediction vector; values 51 and 37
            trigger different input reshaping in forward
        keep_pro: float, dropout probability forwarded to the sub-layers
            (NOTE(review): despite the name it is used as a DROP probability)
        mfcc: bool, selects a wider stride for the last formant conv
    """
    def __init__(self, output_size, keep_pro,mfcc):
        super(Audio2Face, self).__init__()
        self.output_size = output_size
        self.keep_pro = keep_pro
        # NOTE(review): FormantLayer is constructed here but forward() uses
        # self.formantAnalysis instead; its conv2d_layer blocks are placed on
        # CUDA, so constructing this model requires a CUDA device — confirm
        # whether FormantLayer is still needed.
        self.FormantLayer = FormantLayer(mfcc=mfcc,keep_pro = self.keep_pro)
        self.ArticulationLayer = ArticulationLayer(keep_pro = self.keep_pro)
        self.OutputLayer = OutputLayer(self.output_size, self.keep_pro)
        
        # Last formant conv: MFCC input uses stride 3 on the frequency axis
        # so the final feature width matches the non-MFCC path.
        if mfcc:            
            conv = nn.Conv2d(243, 256, (1, 2), (1, 3))
        else:            
            conv = nn.Conv2d(243, 256, (1, 2), (1, 2))
    
    
        # Five conv blocks that collapse the frequency axis:
        # channels 1 -> 72 -> 108 -> 162 -> 243 -> 256, each halving width.
        self.formantAnalysis = nn.Sequential(
            nn.Conv2d(1, 72, (1, 3), (1, 2), (0, 1), 1),
            nn.LeakyReLU(),
            nn.Conv2d(72, 108, (1, 3), (1, 2), (0, 1), 1),
            nn.LeakyReLU(),
            nn.Conv2d(108, 162, (1, 3), (1, 2), (0, 1), 1),
            nn.LeakyReLU(),
            nn.Conv2d(162, 243, (1, 3), (1, 2), (0, 1), 1),
            nn.LeakyReLU(),
            conv,
            nn.LeakyReLU(),
        )
        
        # NOTE(review): self.articulation is built but forward() uses
        # self.ArticulationLayer instead — it contributes parameters to the
        # state_dict without being exercised; confirm whether it can go.
        self.articulation = nn.Sequential(
            nn.Conv2d(
                256 , 256 , (3, 1), (2, 1), (1, 0), 1
            ),
            nn.LeakyReLU(),
            nn.Conv2d(
                256 , 256 , (3, 1), (2, 1), (1, 0), 1
            ),
            nn.LeakyReLU(),
            nn.Conv2d(
                256 , 256 , (3, 1), (2, 1), (1, 0), 1
            ),
            nn.LeakyReLU(),
            nn.Conv2d(
                256 , 256 , (3, 1), (2, 1), (1, 0), 1
            ),
            nn.LeakyReLU(),
            nn.Conv2d(
                256 , 256 , (4, 1), (4, 1), (1, 0), 1
            ),
            nn.LeakyReLU(),
        )
        
    def forward(self, x):
        # Per-head input reshaping.
        # NOTE(review): assumes the 51-dim head receives (batch, H, W) needing
        # a channel dim, and the 37-dim head receives a channel-last layout
        # needing a (1, 3) transpose — confirm against the data loader.
        if self.output_size == 51:
            x = torch.unsqueeze(x,1)
            # input_var = torch.transpose(input_var, 2, 3)
            
        elif self.output_size == 37:
            x = torch.transpose(x, 1, 3)
        # x = self.FormantLayer(x)
        x2 = self.formantAnalysis(x)
        x3 = self.ArticulationLayer(x2)
        
        # x3 = self.articulation(x2)
        # Flatten to (N, 256) feature rows and project to the output vector.
        x4 = self.OutputLayer(x3.view(-1, 256))
        # return (x, emotion_input)
        return x4

def losses(output, y):
    """Training loss: position term plus motion term.

    loss_P is the MSE between prediction and target. loss_M compares the
    difference of the two batch halves in the target against the same
    difference in the prediction, penalizing inconsistent frame-to-frame
    motion.

    Args:
        output: tensor, the model prediction
        y: tensor, the ground truth
    Returns:
        0-dim float32 tensor, loss_P + loss_M.
    Note:
        Assumes rows [0, B//2) and [B//2, 2*(B//2)) form paired halves of the
        batch; with an odd batch size the last row is ignored by the motion
        term (matching the original torch.split behavior).
    """
    # Cast both sides to float32 so mixed-precision inputs compare cleanly.
    y = y.to(dtype=torch.float32)
    y_ = output.to(dtype=torch.float32)

    # Position term: plain mean squared error.
    loss_P = torch.mean(torch.square(y - y_))

    # Motion term: split each tensor into its first and second half.
    half = y.shape[0] // 2
    y0, y1 = y[:half], y[half:2 * half]
    y_0, y_1 = y_[:half], y_[half:2 * half]
    loss_M = 2 * torch.mean(torch.square((y0 - y1) - (y_0 - y_1)))

    return loss_P + loss_M
