# -*- coding: utf-8 -*-

# Module SCSE from paper "Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks"
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

def gaussian_weight(ksize, sigma=None):
    """Return a normalized (ksize, ksize) Gaussian kernel as a float32 tensor.

    Args:
        ksize: side length of the square kernel.
        sigma: standard deviation; if None, derived from ksize with the
            OpenCV rule sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8.
    """
    if sigma is None:
        sigma = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8

    center = ksize // 2
    x = (np.arange(ksize, dtype=np.float32) - center)
    kernel_1d = np.exp(-(x ** 2) / (2 * sigma ** 2))
    # Separable construction: outer product of the 1-D kernel with itself.
    kernel = kernel_1d[..., None] @ kernel_1d[None, ...]
    kernel = torch.from_numpy(kernel)
    kernel = kernel / kernel.sum()  # normalize so the kernel sums to 1
    return kernel


def gaussian_filter(img, ksize, sigma=None):
    """Depthwise Gaussian blur of a batched image tensor.

    Args:
        img: (N, C, H, W) tensor.
        ksize: odd kernel size.
        sigma: optional std; defaults as in gaussian_weight.

    Returns:
        Blurred tensor of the same shape. Note: uses zero padding, so border
        pixels are darkened relative to the interior.
    """
    kernel = gaussian_weight(ksize, sigma)
    # Fix: match the kernel to the input's device/dtype so CUDA and
    # non-float32 inputs work (the CPU float32 kernel previously made
    # F.conv2d raise a device/dtype mismatch for such inputs).
    kernel = kernel.to(device=img.device, dtype=img.dtype)
    kernel = kernel.view(1, 1, ksize, ksize).repeat(img.shape[1], 1, 1, 1)
    pad = (ksize - 1) // 2
    img = F.conv2d(img, weight=kernel, stride=1, padding=pad, groups=img.shape[1])
    return img


def bilateral_filter(img, ksize, sigma_space=None, sigma_density=None):
    """Edge-preserving bilateral filter over a batched (N, C, H, W) tensor.

    Each output pixel is a weighted mean of its ksize x ksize neighborhood,
    where the weight is the product of a spatial Gaussian and a Gaussian on
    the intensity difference to the center pixel.
    """
    device = img.device
    if sigma_space is None:
        sigma_space = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
    if sigma_density is None:
        sigma_density = sigma_space

    radius = (ksize - 1) // 2
    padded = F.pad(img, pad=[radius, radius, radius, radius], mode='reflect')
    # Sliding patches: (N, C, H, W, ksize, ksize).
    patches = padded.unfold(2, ksize, 1).unfold(3, ksize, 1)
    ndim = patches.dim()

    # Range ("density") weight: Gaussian on difference to the center pixel.
    delta = patches - img.unsqueeze(-1).unsqueeze(-1)
    range_weight = torch.exp(-(delta ** 2) / (2 * sigma_density ** 2))
    range_weight /= range_weight.sum(dim=(-1, -2), keepdim=True)

    # Spatial weight, broadcast over the leading patch dimensions.
    space_weight = gaussian_weight(ksize, sigma_space).to(device=device)
    broadcast_shape = (ndim - 2) * (1,) + (ksize, ksize)
    space_weight = space_weight.view(*broadcast_shape).expand_as(range_weight)

    combined = range_weight * space_weight
    normalizer = combined.sum(dim=(-1, -2))
    return (combined * patches).sum(dim=(-1, -2)) / normalizer


######################################################
######################################################
######################################################

class GaussianFilter(nn.Module):
    """Fixed (non-trainable) depthwise Gaussian blur implemented as a Conv2d.

    Args:
        ksize: odd kernel size of the square Gaussian kernel.
        sigma: standard deviation; if None, derived from ksize with the
            OpenCV rule sigma = 0.3 * ((ksize - 1) / 2 - 1) + 0.8.
        channels: number of input/output channels the depthwise filter
            operates on (default 3, preserving the original hard-coded RGB
            behavior; new parameter generalizes to any channel count).
    """

    def __init__(self, ksize=5, sigma=None, channels=3):
        super(GaussianFilter, self).__init__()
        if sigma is None:
            sigma = 0.3 * ((ksize - 1) / 2.0 - 1) + 0.8

        # Build a (ksize, ksize, 2) grid of x/y coordinates.
        x_coord = torch.arange(ksize)
        x_grid = x_coord.repeat(ksize).view(ksize, ksize)
        y_grid = x_grid.t()
        xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()

        # 2-D Gaussian evaluated on the grid, normalized to sum to 1.
        center = ksize // 2
        weight = torch.exp(-torch.sum((xy_grid - center) ** 2., dim=-1) / (2 * sigma ** 2))
        weight /= torch.sum(weight)
        # Exposed raw (ksize, ksize) kernel for reuse (e.g. BilateralFilter).
        self.gaussian_weight = weight

        # Reshape to a depthwise convolution weight: (channels, 1, k, k).
        weight = weight.view(1, 1, ksize, ksize)
        weight = weight.repeat(channels, 1, 1, 1)

        # Fixed depthwise conv with reflect padding so shape is preserved.
        pad = (ksize - 1) // 2
        self.filter = nn.Conv2d(channels, channels, ksize, stride=1, padding=pad,
                                groups=channels, bias=False, padding_mode='reflect')
        # Overwrite the random init with the Gaussian kernel and freeze it.
        with torch.no_grad():
            self.filter.weight.copy_(weight)
        self.filter.weight.requires_grad = False

    def forward(self, x):
        """Blur x of shape (N, channels, H, W); shape is preserved."""
        return self.filter(x)


class BilateralFilter(nn.Module):
    """Edge-preserving bilateral filter as a parameter-free nn.Module.

    Combines a fixed spatial Gaussian with a per-pixel intensity ("density")
    Gaussian over each ksize x ksize neighborhood.

    Args:
        user_device: kept for backward compatibility; the computation now
            simply follows the device/dtype of the input tensor.
        ksize: odd neighborhood size.
        sigma_space: spatial std; defaults to 0.3 * ((ksize-1) * 0.5 - 1) + 0.8.
        sigma_density: intensity std; defaults to sigma_space.
    """

    def __init__(self, user_device="cuda", ksize=5, sigma_space=None, sigma_density=None):
        super(BilateralFilter, self).__init__()
        # Fix: the sigmas were previously only assigned when the arguments
        # were None, so passing an explicit sigma_space/sigma_density raised
        # AttributeError later. Normalize the values first, then store them.
        if sigma_space is None:
            sigma_space = 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8
        if sigma_density is None:
            sigma_density = sigma_space
        self.sigma_space = sigma_space
        self.sigma_density = sigma_density

        self.pad = (ksize - 1) // 2
        self.ksize = ksize

        # Fixed spatial Gaussian weight, (ksize, ksize), normalized to sum 1
        # (same construction as GaussianFilter, inlined here).
        coords = torch.arange(ksize)
        x_grid = coords.repeat(ksize).view(ksize, ksize)
        y_grid = x_grid.t()
        xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
        center = ksize // 2
        weight = torch.exp(-torch.sum((xy_grid - center) ** 2., dim=-1) / (2 * self.sigma_space ** 2))
        self.weight_space = weight / weight.sum()

        # NOTE: attribute name keeps the original typo for backward compatibility.
        self.user_divce = user_device

    def forward(self, x):
        """Filter x of shape (N, C, H, W); output has the same shape/device/dtype."""
        # Sliding ksize x ksize patches: (N, C, H, W, ksize, ksize).
        x_pad = F.pad(x, pad=[self.pad, self.pad, self.pad, self.pad], mode='reflect')
        x_patches = x_pad.unfold(2, self.ksize, 1).unfold(3, self.ksize, 1)
        patch_dim = x_patches.dim()

        # Intensity weight: Gaussian on difference to the center pixel.
        # Fix: computed directly on x's device/dtype instead of the old
        # cpu/cuda branches built on the deprecated torch.cuda.FloatTensor
        # and torch.Tensor(...) re-wrapping APIs.
        diff_density = x_patches - x.unsqueeze(-1).unsqueeze(-1)
        weight_density = torch.exp(-(diff_density ** 2) / (2 * self.sigma_density ** 2))
        weight_density = weight_density / weight_density.sum(dim=(-1, -2), keepdim=True)

        # Broadcast the spatial weight to the patch layout.
        weight_space_dim = (patch_dim - 2) * (1,) + (self.ksize, self.ksize)
        weight_space = (self.weight_space.to(device=x.device, dtype=x.dtype)
                        .view(*weight_space_dim).expand_as(weight_density))

        # Final kernel: product of the two weights, renormalized per pixel.
        weight = weight_density * weight_space
        weight_sum = weight.sum(dim=(-1, -2))
        return (weight * x_patches).sum(dim=(-1, -2)) / weight_sum

######################################################
######################################################
######################################################
# https://zhuanlan.zhihu.com/p/102036086

class sSE(nn.Module):
    """Spatial squeeze-and-excitation: a 1x1 conv collapses all channels to a
    single sigmoid attention map, which rescales every channel of the input.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.Conv1x1 = nn.Conv2d(in_channels, 1, kernel_size=1, bias=False)
        self.norm = nn.Sigmoid()

    def forward(self, U):
        # U: [bs, c, h, w] -> attention map [bs, 1, h, w], broadcast over c.
        attention_map = self.norm(self.Conv1x1(U))
        return U * attention_map


class cSE(nn.Module):
    """Channel squeeze-and-excitation: global average pooling followed by a
    1x1-conv bottleneck (c -> c/2 -> c) and a per-channel sigmoid gate.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.Conv_Squeeze = nn.Conv2d(in_channels, in_channels // 2, kernel_size=1, bias=False)
        self.Conv_Excitation = nn.Conv2d(in_channels // 2, in_channels, kernel_size=1, bias=False)
        self.norm = nn.Sigmoid()

    def forward(self, U):
        # Global descriptor per channel: [bs, c, h, w] -> [bs, c, 1, 1].
        gate = self.avgpool(U)
        # Bottleneck excitation: squeeze to c/2, ReLU, expand back to c.
        gate = F.relu(self.Conv_Squeeze(gate))
        gate = self.norm(self.Conv_Excitation(gate))
        # Rescale each channel of U by its gate value.
        return U * gate.expand_as(U)


class scSE(nn.Module):
    """Concurrent spatial and channel squeeze-and-excitation (scSE).

    Applies sSE and cSE to the same input and sums the two recalibrated
    feature maps, per "Concurrent Spatial and Channel Squeeze & Excitation
    in Fully Convolutional Networks".
    """

    def __init__(self, in_channels):
        super().__init__()
        self.cSE = cSE(in_channels)
        self.sSE = sSE(in_channels)

    def forward(self, U):
        spatial_branch = self.sSE(U)
        channel_branch = self.cSE(U)
        return channel_branch + spatial_branch


class BilateralFSUnet(nn.Module):
    """U-Net-style encoder for single-channel (N, 1, H, W) inputs.

    Pipeline: bilateral pre-filter -> conv1 -> conv2 -> scSE-gated skip ->
    maxpool -> conv3 -> transposed-conv upsample -> additive skip -> conv4 ->
    conv5 -> relu -> maxpool. For even H, W the output is (N, 128, H/2, W/2).

    NOTE(review): relu3/maxpool3, avgpool and fc are constructed but never
    called in forward(); fc expects 256 input features while conv5 produces
    128 channels — confirm the intended head before re-enabling it.
    """

    def __init__(self, user_device="cuda"):
        super().__init__()
        self.device = user_device
        # Edge-preserving smoothing applied before any learned layer.
        self.b_filter_m = BilateralFilter(ksize=3, user_device=self.device)
        self.conv1 = nn.Conv2d(1, 16, (3, 3), stride=1, padding=1, bias=False)
        # self.relu1 = nn.ReLU(inplace=True)
        # self.maxpool1 = nn.MaxPool2d((2, 2))

        self.conv2 = nn.Conv2d(16, 32, (3, 3), stride=1, padding=1, bias=False)
        self.relu2 = nn.ReLU(inplace=True)
        self.maxpool2 = nn.MaxPool2d((2, 2))

        self.conv3 = nn.Conv2d(32, 64, (3, 3), stride=1, padding=1, bias=False)
        self.relu3 = nn.ReLU(inplace=True)  # unused in forward()
        self.maxpool3 = nn.MaxPool2d((2, 2))  # unused in forward()

        # Attention gate applied to the conv2 activations feeding the skip.
        self.sc_se = scSE(in_channels=32)

        # stride=2 with output_padding=1 exactly doubles the spatial size, so
        # the upsampled tensor matches the pre-maxpool2 skip tensor.
        self.up_conv1 = nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=3,
                                           stride=2, padding=1, bias=False,  output_padding=1)
        # self.up_conv2 = nn.ConvTranspose2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0)

        self.conv4 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False)

        self.conv5 = nn.Conv2d(64, 128, (3, 3), padding=1, bias=False)
        self.relu5 = nn.ReLU(inplace=True)
        self.maxpool5 = nn.MaxPool2d((2, 2))
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # unused in forward()
        self.fc = nn.Linear(256, 6)  # unused in forward(); see class NOTE


    def forward(self, x):
        """Encode x: (N, 1, H, W) -> (N, 128, H/2, W/2) feature map."""
        x = self.b_filter_m(x)
        x = self.conv1(x)
        # x = self.relu1(x)
        # x = self.maxpool1(x)

        conv_out_2 = self.conv2(x)

        # Gated skip branch kept at full resolution: (N, 32, H, W).
        skip_tensor = self.sc_se(conv_out_2)

        # x = self.relu2(x)
        x = self.maxpool2(conv_out_2)

        x = self.conv3(x)
        # x = self.relu3(x)
        # x = self.maxpool3(x)

        # Upsample back to (N, 32, H, W) to match the skip tensor.
        x = self.up_conv1(x)
        # x = self.up_conv2(x)

        x += skip_tensor  # additive (not concatenated) skip connection
        x = self.conv4(x)

        x = self.conv5(x)
        x = self.relu5(x)
        x = self.maxpool5(x)

        return x


class FSDeConvDecoder(nn.Module):
    """Single-step transposed-convolution decoder.

    Maps an encoder feature map (N, 128, H, W) to a one-channel
    reconstruction (N, 1, 2H, 2W) via a 2x2, stride-2 ConvTranspose2d
    followed by a ReLU.
    """

    def __init__(self):
        super().__init__()
        # 2x spatial upsampling while collapsing 128 channels to 1.
        self.deconv1 = nn.ConvTranspose2d(128, 1, (2, 2), stride=(2, 2))
        self.relu1 = nn.ReLU(inplace=True)

    def forward(self, x):
        upsampled = self.deconv1(x)
        return self.relu1(upsampled)


class bilateralfsunet_model(nn.Module):
    """Full encoder-decoder: BilateralFSUnet encoder + FSDeConvDecoder.

    Args:
        user_device: "cuda" or "cpu"; forwarded to the encoder's bilateral filter.
        trace_patch_size: side length of the square input patches, stored as
            self.trace_width. Fix: this was previously read from an undefined
            module-level global, which raised NameError whenever the class was
            instantiated outside the __main__ script; it is now a parameter
            with the script's historical default of 250.
    """

    def __init__(self, user_device="cuda", trace_patch_size=250):
        super(bilateralfsunet_model, self).__init__()
        self.device = user_device
        self.trace_width = trace_patch_size  # not used in forward(); kept for compat
        self.encoder = BilateralFSUnet(user_device=self.device)
        self.decoder = FSDeConvDecoder()

    def forward(self, x):
        """Encode then decode x; returns the reconstruction tensor."""
        enc_out = self.encoder(x)
        dec_out = self.decoder(enc_out)
        return dec_out


if __name__ == "__main__":
    # Smoke test: run the encoder, the decoder, and the combined model on a
    # random single-channel patch, then visualize input and reconstruction.
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    print(device)

    trace_patch_size = 250  # 65, 130, 260, 650
    patch_size_number = 10  # 20, 10, 5, 2

    img_random = torch.randn(1, 1, trace_patch_size, trace_patch_size)
    print("img_random.shape:", img_random.shape)

    enc = BilateralFSUnet(user_device=device)
    enc.to(device)
    # Fix: Tensor.to() is not in-place — the result must be reassigned,
    # otherwise the input stays on the CPU and the forward pass fails on CUDA.
    img_random = img_random.to(device)

    enc_out = enc(img_random)
    print("enc_out.shape:", enc_out.shape)

    dec = FSDeConvDecoder()
    dec.to(device)
    dec_out = dec(enc_out)
    print("dec_out.shape:", dec_out.shape)

    # Fix: pass the detected device instead of relying on the hard-coded
    # "cuda" default, which crashed on CPU-only machines.
    model = bilateralfsunet_model(user_device=device)
    model.to(device)
    dec_out = model(img_random)
    print("dec_out.shape:", dec_out.shape)
    dec_out_img = dec_out.reshape(trace_patch_size, trace_patch_size)

    from matplotlib import pyplot as plt

    fig = plt.figure()
    plt.title("Origin Data")
    # Move to CPU before imshow so the CUDA path also plots correctly.
    plt.imshow(img_random[0].detach().cpu().reshape(trace_patch_size, trace_patch_size),
               cmap='YlOrBr')
    plt.show()

    fig2 = plt.figure()
    plt.imshow(dec_out_img.detach().cpu().numpy(), cmap='YlOrBr')
    plt.title("Reconstruction")
    plt.show()