import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

class DoubleConv(nn.Module):
    """(Conv2d -> BatchNorm -> ReLU) twice, with 'same' padding for odd kernels.

    Fix: ``dropout_rate`` was previously accepted but silently ignored. A
    ``Dropout2d`` layer is now appended after each activation when
    ``dropout_rate > 0``; the default of 0 reproduces the old module exactly
    (no extra layers, identical state_dict keys).
    """

    def __init__(self, in_channels, out_channels, dropout_rate=.0, kernel_size=(7, 1)):
        super().__init__()
        # 'same' padding assuming odd kernel dimensions.
        padding = (kernel_size[0] // 2, kernel_size[1] // 2)
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        if dropout_rate > 0:
            layers.append(nn.Dropout2d(dropout_rate))
        layers += [
            nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        if dropout_rate > 0:
            layers.append(nn.Dropout2d(dropout_rate))
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.double_conv(x)


class SingleConv(nn.Module):
    """Conv2d -> BatchNorm -> ReLU, with 'same' padding for odd kernels.

    Fix: ``dropout_rate`` was previously accepted but silently ignored. A
    ``Dropout2d`` layer is now appended after the activation when
    ``dropout_rate > 0``; the default of 0 reproduces the old module exactly
    (no extra layer, identical state_dict keys).
    """

    def __init__(self, in_channels, out_channels, dropout_rate=.0, kernel_size=(7, 1)):
        super().__init__()
        # 'same' padding assuming odd kernel dimensions.
        padding = (kernel_size[0] // 2, kernel_size[1] // 2)
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        if dropout_rate > 0:
            layers.append(nn.Dropout2d(dropout_rate))
        self.single_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.single_conv(x)


class FinalConv(nn.Module):
    """Output head: Conv2d -> BatchNorm -> Softmax over the channel (class) dim.

    NOTE(review): the attribute holds a single conv but is named
    ``double_conv``; it is kept as-is because renaming it would change the
    state_dict keys and invalidate existing checkpoints.
    """

    def __init__(self, in_channels, out_channels, kernel_size=(7, 1)):
        super().__init__()
        # 'same' padding assuming odd kernel dimensions.
        pad_h = kernel_size[0] // 2
        pad_w = kernel_size[1] // 2
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size,
                      padding=(pad_h, pad_w)),
            nn.BatchNorm2d(out_channels),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        return self.double_conv(x)


class Down(nn.Module):
    """Downsampling block: strided conv (learned pooling) -> ReLU -> SingleConv.

    Fix: ``dropout_rate`` was previously accepted but never forwarded to the
    inner ``SingleConv``; it is now passed through. The default of 0 keeps the
    old behavior.
    """

    def __init__(self, in_channels, out_channels, dropout_rate=.0, stride_size=(4, 1), kernel_size=(7, 1)):
        super().__init__()
        self.down_conv = nn.Sequential(
            # Strided conv used instead of max-pooling; padding keeps the
            # 'same' size before the stride is applied (odd kernel assumed).
            nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride_size, padding=(
                int(kernel_size[0]/2), int(kernel_size[1]/2))),
            nn.ReLU(inplace=True),
            SingleConv(in_channels, out_channels, dropout_rate=dropout_rate)
        )

    def forward(self, x):
        return self.down_conv(x)


class Up(nn.Module):
    """Upsampling block: transposed conv -> ReLU -> pad to match the skip
    connection -> concat -> SingleConv.

    Fix: ``dropout_rate`` was previously accepted but never forwarded to the
    inner ``SingleConv``; it is now passed through. The default of 0 keeps the
    old behavior.
    """

    def __init__(self, in_channels, out_channels, dropout_rate=.0, stride_size=(4, 1), kernel_size=(7, 1)):
        super().__init__()
        self.up = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride_size)
        self.act = nn.ReLU()
        self.conv = SingleConv(2*out_channels, out_channels, dropout_rate=dropout_rate)

    def forward(self, x1, x2):
        """x1: features from the deeper layer; x2: skip connection."""
        x1 = self.up(x1)
        x1 = self.act(x1)

        # Zero-pad x1 so its spatial size matches x2 before concatenation.
        # NOTE(review): if x1 is larger than x2, F.pad receives negative
        # values and crops instead — presumably intentional; confirm.
        dy = (x2.size()[2] - x1.size()[2])
        dx = (x2.size()[3] - x1.size()[3])
        x1 = F.pad(x1, [int(dx/2), dx-int(dx/2), int(dy/2), dy-int(dy/2)])

        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


class Up_stft(nn.Module):
    """Upsampling block that additionally fuses STFT features: transposed conv
    -> ReLU -> pad -> concat with skip -> SingleConv -> concat with STFT
    features -> SingleConv.

    Fix: ``dropout_rate`` was previously accepted but never forwarded to the
    inner ``SingleConv`` layers; it is now passed through. The default of 0
    keeps the old behavior.
    """

    def __init__(self, in_channels, out_channels, dropout_rate=.0, stride_size=(4, 1), kernel_size=(7, 1)):
        super().__init__()
        self.up = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride_size)
        self.act = nn.ReLU()
        self.conv1 = SingleConv(2*out_channels, out_channels, dropout_rate=dropout_rate)
        self.conv2 = SingleConv(2*out_channels, out_channels, dropout_rate=dropout_rate)

    def forward(self, x1, x2, x3):
        '''
        x1: from deeper layer
        x2: skip connection
        x3: stft conv results
        '''
        x1 = self.up(x1)
        x1 = self.act(x1)

        # Zero-pad x1 and x3 so their spatial sizes match x2 before the
        # concatenations (negative diffs crop via F.pad).
        dy = (x2.size()[2] - x1.size()[2])
        dx = (x2.size()[3] - x1.size()[3])
        x1 = F.pad(x1, [int(dx/2), dx-int(dx/2), int(dy/2), dy-int(dy/2)])
        dy = (x2.size()[2] - x3.size()[2])
        dx = (x2.size()[3] - x3.size()[3])
        x3 = F.pad(x3, [int(dx/2), dx-int(dx/2), int(dy/2), dy-int(dy/2)])
        # Fuse the skip connection first, then the STFT features.
        x = self.conv1(torch.cat([x1, x2], dim=1))
        x = self.conv2(torch.cat([x, x3], dim=1))
        return x


class STFT_conv(nn.Module):
    # Computes an STFT magnitude spectrogram of the raw multi-channel signal
    # and passes it through two conv layers; the output is fused into the
    # decoder by Up_stft.
    def __init__(self, out_channels=22, n_fft=64):
        super().__init__()
        self.n_fft = n_fft
        # First conv spans the full frequency axis (n_fft//2 bins after the
        # DC bin is dropped in forward), collapsing it to size 1.
        kernel_size = (7, n_fft//2)  # TODO test to find better ks
        self.doubleconv = nn.Sequential(
            # No padding on the frequency axis; 'same' padding (3) on the
            # time axis for the 7-wide kernel.
            nn.Conv2d(3, 2*out_channels,
                      kernel_size=kernel_size, padding=(3, 0)),
            nn.BatchNorm2d(2*out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(2*out_channels, out_channels,
                      kernel_size=(7, 1), padding=(3, 0)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # x: (batch, 3, samples) raw signal — assumed from the view() below;
        # TODO confirm against callers.
        batch_size = x.shape[0]
        # Merge batch and channel dims: torch.stft expects a (N, T) input.
        stft_input = x.view(3*batch_size, x.shape[-1])
        # use [:, 1:] to remove first freq
        # NOTE(review): no window is passed to torch.stft (rectangular
        # window); newer PyTorch versions warn about this — confirm intended.
        x_stft = torch.stft(stft_input, n_fft=self.n_fft, hop_length=16,
                            return_complex=True, normalized=True).abs()[:, 1:]
        # Normalize each frequency bin to sum to 1 across time frames.
        # NOTE(review): divides by zero if a bin is all-zero (silent input).
        x_stft = x_stft / x_stft.sum(dim=2, keepdim=True)
        # Split batch/channel back apart for the conv stack:
        # (batch, 3, n_fft//2 freq bins, frames).
        x_stft = x_stft.view(batch_size, 3, self.n_fft//2, x_stft.shape[2])
        # Put time on the H axis and frequency on the W axis:
        # (batch, 3, frames, n_fft//2).
        x_stft = x_stft.permute(0, 1, 3, 2)
        return self.doubleconv(x_stft)


class Model(nn.Module):
    """U-Net-style segmentation network over 1-D signals with an auxiliary
    STFT feature branch fused into the decoder at the up2 stage.

    Input is (batch, 3, samples); output is (batch, 3, samples') class
    probabilities (FinalConv applies a softmax over dim 1).
    """

    def __init__(self, dropout_rate=.0):
        super().__init__()
        print("initializing unetstft2")
        self.n_channels = 3
        self.n_classes = 3
        # Attribute names are state_dict keys — do not rename.
        self.stft = STFT_conv(32)  # out_channels matches up2's output
        self.inc = DoubleConv(self.n_channels, 16, dropout_rate=dropout_rate)
        self.down1 = Down(16, 22, dropout_rate=dropout_rate)
        self.down2 = Down(22, 32, dropout_rate=dropout_rate)
        self.down3 = Down(32, 44, dropout_rate=dropout_rate)
        self.down4 = Down(44, 64, dropout_rate=dropout_rate)
        self.up1 = Up(64, 44, dropout_rate=dropout_rate)
        self.up2 = Up_stft(44, 32, dropout_rate=dropout_rate)
        self.up3 = Up(32, 22, dropout_rate=dropout_rate)
        self.up4 = Up(22, 16, dropout_rate=dropout_rate)
        self.outc = FinalConv(16, self.n_classes)

    def forward(self, x):
        # Auxiliary STFT features from the raw signal (before unsqueeze).
        spec = self.stft(x)
        # Add a singleton W dim so the 2-D convs see (batch, C, time, 1).
        x = x.unsqueeze(3)
        # Encoder path, keeping each scale for the skip connections.
        enc0 = self.inc(x)
        enc1 = self.down1(enc0)
        enc2 = self.down2(enc1)
        enc3 = self.down3(enc2)
        enc4 = self.down4(enc3)
        # Decoder path; up2 additionally fuses the STFT features.
        dec = self.up1(enc4, enc3)
        dec = self.up2(dec, enc2, spec)
        dec = self.up3(dec, enc1)
        dec = self.up4(dec, enc0)
        out = self.outc(dec)
        # Drop the singleton W dim again: (batch, n_classes, time).
        return out.squeeze(dim=3)


if __name__ == "__main__":
    # Smoke test: print a layer summary, then run one forward pass on CPU.
    net = Model()
    summary(net, (3, 6144), device='cpu')
    batch = torch.randn([88, 3, 6144])
    net(batch)
