import torch
import torch.nn as nn
from torch.nn import PairwiseDistance

# from utils import init_weights


class _DomainSpecificBatchNorm(nn.Module):
    """Bank of per-domain BatchNorm2d layers; forward dispatches on the label.

    Each domain gets its own BatchNorm2d (own affine params and running
    statistics). forward(x, domain_label) normalizes x with the BN of the
    first sample's domain.
    """
    _version = 2

    def __init__(self, num_features, num_domains, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_DomainSpecificBatchNorm, self).__init__()
        layers = [
            nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats)
            for _ in range(num_domains)
        ]
        self.bns = nn.ModuleList(layers)

    def reset_running_stats(self):
        # Delegate to every per-domain normalizer.
        for layer in self.bns:
            layer.reset_running_stats()

    def reset_parameters(self):
        # Delegate to every per-domain normalizer.
        for layer in self.bns:
            layer.reset_parameters()

    def _check_input_dim(self, input):
        # Subclasses enforce the expected tensor rank.
        raise NotImplementedError

    def forward(self, x, domain_label):
        self._check_input_dim(x)
        # All samples in the batch are assumed to share one domain:
        # only the first label is consulted.
        return self.bns[domain_label[0]](x), domain_label


class DomainSpecificBatchNorm2d(_DomainSpecificBatchNorm):
    """Domain-specific batch norm for NCHW feature maps."""

    def _check_input_dim(self, input):
        # BatchNorm2d operates on rank-4 (N, C, H, W) tensors only.
        if input.dim() != 4:
            raise ValueError(f'expected 4D input (got {input.dim()}D input)')


class DownBlock(nn.Module):
    """Encoder block: optional 2x max-pool, then nConv conv(+DSBN)+ReLU layers.

    The first block of the network (is_first_block=True) skips the pooling.
    DSBN layers need the extra domain argument, so forward() tracks their
    indices and calls them with (x, domain).
    """

    def __init__(self, in_ch, out_ch, is_first_block=False, nConv=2, kernel_size=3, stride=1, padding=1, do_batchnorm=True, num_domains=3):
        super(DownBlock, self).__init__()

        # Indices (within self.block) of the DSBN layers.
        self.num_bn = set()
        block = [] if is_first_block else [nn.MaxPool2d(kernel_size=2)]
        for _ in range(nConv):
            block += [nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding)]
            if do_batchnorm:
                # Bug fix: record the index only when a DSBN layer is actually
                # inserted. Previously the index was recorded unconditionally,
                # so with do_batchnorm=False forward() passed the domain label
                # to a ReLU and crashed.
                self.num_bn.add(len(block))
                block += [DomainSpecificBatchNorm2d(out_ch, num_domains)]
            block += [nn.ReLU(inplace=True)]
            in_ch = out_ch

        self.block = nn.Sequential(*block)

    def forward(self, x, domain):
        """Apply the block; `domain` is the per-sample domain label tensor."""
        for j in range(len(self.block)):
            if j in self.num_bn:
                # DSBN layer: returns (tensor, domain_label).
                x, _ = self.block[j](x, domain)
            else:
                x = self.block[j](x)
        return x


class UpBlock(nn.Module):
    """Decoder block: bilinear 2x upsample, concat the skip connection, then
    nConv conv(+DSBN)+ReLU layers stepping the channel count down to out_ch.

    `in_ch` is the channel count AFTER concatenation with the skip tensor.
    """

    def __init__(self, in_ch, out_ch, nConv=2, kernel_size=3, stride=1, padding=1, do_batchnorm=True, num_domains=3):
        super(UpBlock, self).__init__()

        # Halve the channel count at each conv, but never go below out_ch.
        out_chs = []
        out_ch_ = in_ch
        for _ in range(nConv):
            out_ch_ = out_ch_ // 2
            if out_ch_ >= out_ch:
                out_chs.append(out_ch_)
            else:
                out_chs.append(out_ch)

        # Indices (within self.block) of the DSBN layers.
        self.num_bn = set()
        block = []
        for n in range(nConv):
            block += [nn.Conv2d(in_ch, out_chs[n], kernel_size, stride, padding)]
            if do_batchnorm:
                # Bug fix: record the index only when a DSBN layer is actually
                # inserted. Previously the index was recorded unconditionally,
                # so with do_batchnorm=False forward() passed the domain label
                # to a ReLU and crashed.
                self.num_bn.add(len(block))
                block += [DomainSpecificBatchNorm2d(out_chs[n], num_domains)]
            block += [nn.ReLU(inplace=True)]
            in_ch = out_chs[n]

        self.block = nn.Sequential(*block)

    def forward(self, x, x_skip, domain):
        """Upsample x, concat with x_skip along channels, apply the convs."""
        x = nn.UpsamplingBilinear2d(scale_factor=2)(x)
        x = torch.cat([x, x_skip], dim=1)

        for j in range(len(self.block)):
            if j in self.num_bn:
                # DSBN layer: returns (tensor, domain_label).
                x, _ = self.block[j](x, domain)
            else:
                x = self.block[j](x)
        return x


class UNet_ds(nn.Module):
    """U-Net whose Down/Up blocks use domain-specific batch norm (DSBN).

    forward(x, domain) routes the batch through the BN bank of `domain`;
    with myds=True each block instead selects, per block, the source-domain
    BN whose running statistics best match the current batch (getResult).
    """

    def __init__(self, in_ch=3, out_ch=1, nConv=2, nBlock=5, ch=64, kernel_size=3, stride=1, padding=1, **kwargs):
        super(UNet_ds, self).__init__()
        self.nBlock = nBlock
        self.out_ch = out_ch

        # Per-block channel plan: encoder doubles channels each block,
        # decoder halves them back down (tables below for nBlock=5).
        in_chs = [in_ch] + [ch*2**n for n in range(nBlock-1)] + [ch*2**n for n in range(nBlock-1,0,-1)] + [ch]
        out_chs = [ch*2**n for n in range(nBlock-1)] + [ch*2**n for n in range(nBlock-2,-1,-1)] + [ch, out_ch]
        # [3,  64,  128, 256, 512, 1024, 512, 256, 128, 64]
        # [64, 128, 256, 512, 512, 256,  128, 64,  64,  10]
        # 256  128  64   32   16   32    64   128  256  256

        # [3,  32,  64, 128, 256, 512, 256, 128, 64, 32]
        # [32, 64, 128, 256, 512, 256, 128, 64,  32,  n]
        # 160  80   40   20   10   20   40  80  160  160

        self.downBlocks, self.upBlocks = [], []
        for i in range(nBlock):
            self.downBlocks += [DownBlock(in_chs[i], out_chs[i], is_first_block=i==0, nConv=nConv, kernel_size=kernel_size, stride=stride, padding=padding)]
        for i in range(nBlock-1):
            self.upBlocks += [UpBlock(in_chs[i+nBlock], out_chs[i+nBlock], nConv=nConv, kernel_size=kernel_size, stride=stride, padding=padding)]
        
        self.downBlocks = nn.ModuleList(self.downBlocks)
        self.upBlocks = nn.ModuleList(self.upBlocks)
        # 1x1 conv head producing out_ch channels.
        self.last_conv = nn.Conv2d(in_chs[-1], out_chs[-1], 1, bias=False)

        # parameters count
        self.param_count = 0
        for p in self.parameters():
            self.param_count += p.numel()
        print(f'U-Net param_count: {self.param_count}')

    def getResult(self, x, y, block):
        """Run `block` with the source-domain BN bank that best matches x.

        x: block input; y: skip-connection tensor for UpBlocks (None for
        DownBlocks); block: a DownBlock or UpBlock.

        Assumes domain ids 0 and 1 hold source-domain statistics and slot 2
        is scratch space for the current batch (blocks default to
        num_domains=3) — TODO confirm this matches the training setup.
        """
        means_list = []
        vars_list = []
        dis = 99999999  # sentinel: larger than any expected distance
        best_out = None
        # Snapshot the stored running statistics of both source domains.
        for j in range(2):
            means, vars = get_bn_statis(block, j)
            means_list.append(means)
            vars_list.append(vars)

        # Set 'train' mode for computing target BN statistics and better results
        # NOTE(review): the BN layers are left in train mode afterwards — confirm
        # callers restore eval mode if needed.
        for m in block.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.train()
        
        # Forward once through BN slot 2 so its running stats absorb this batch.
        if y is not None:
            _ = block(x, y, 2* torch.ones(x.shape[0], dtype=torch.long, device=x.device))
        else:
            _ = block(x, 2* torch.ones(x.shape[0], dtype=torch.long, device=x.device))
        means, vars = get_bn_statis(block, 2)
        reset_bn_statis(block, 2)  # slot 2 is scratch: restore mean=0, var=1

        # Select best result
        for domain_id in range(2):
            new_dis = cal_distance(means, means_list[domain_id], vars, vars_list[domain_id])
            if new_dis < dis:
                # Closer statistics than any domain seen so far: rerun the
                # block through this domain's BN bank.
                if y is not None:
                    best_out = block(x, y, domain_id* torch.ones(x.shape[0], dtype=torch.long, device=x.device))
                else:
                    best_out = block(x, domain_id* torch.ones(x.shape[0], dtype=torch.long, device=x.device))
                dis = new_dis
        return best_out

    # def forward(self, x, domain):
    #     domain *= torch.ones(x.shape[0], dtype=torch.long, device=x.device)

    #     x_skip = []
    #     i = 0
    #     for downBlock in self.downBlocks:
    #         x = downBlock(x, domain)
    #         if i<self.nBlock-1:
    #             x_skip.append(x)
    #         i += 1
    #     for upBlock in self.upBlocks:
    #         x = upBlock(x, x_skip.pop(), domain)
            
    #     return self.last_conv(x)

    # block-wise ds
    def forward(self, x, domain, myds=False):
        """Segment x.

        domain: integer domain id, broadcast to a per-sample label tensor.
        myds: if True, ignore `domain` inside each block and use getResult's
            statistics-matching domain selection instead.
        """
        domain *= torch.ones(x.shape[0], dtype=torch.long, device=x.device)

        x_skip = []
        i = 0

        for downBlock in self.downBlocks:
            # ds
            if myds:
                x = self.getResult(x, None, downBlock)
            else:
                x = downBlock(x, domain)

            # All but the deepest encoder output feed the decoder as skips.
            if i<self.nBlock-1:
                x_skip.append(x)
            i += 1
        for upBlock in self.upBlocks:
            # ds
            if myds:
                x = self.getResult(x, x_skip.pop(), upBlock)
            else:
                x = upBlock(x, x_skip.pop(), domain)
            
        return self.last_conv(x)


def get_bn_statis(model, domain_id):
    """Collect cloned running_mean / running_var buffers of one DSBN domain.

    Matches state-dict entries named 'bns.<domain_id>.running_mean' /
    'bns.<domain_id>.running_var' anywhere inside `model`.
    Returns (list_of_means, list_of_vars).
    """
    mean_key = 'bns.{}.running_mean'.format(domain_id)
    var_key = 'bns.{}.running_var'.format(domain_id)
    means, variances = [], []
    for name, buf in model.state_dict().items():
        if mean_key in name:
            means.append(buf.clone())
        elif var_key in name:
            variances.append(buf.clone())
    return means, variances


def reset_bn_statis(model, domain_id):
    """Reset one DSBN domain's running stats in place (mean -> 0, var -> 1).

    The tensors returned by state_dict() share storage with the module
    buffers, so the in-place ops below update the model itself.
    """
    mean_key = 'bns.{}.running_mean'.format(domain_id)
    var_key = 'bns.{}.running_var'.format(domain_id)
    for name, buf in model.state_dict().items():
        if mean_key in name:
            buf.zero_()
        elif var_key in name:
            buf.fill_(1)


def cal_distance(means_1, means_2, vars_1, vars_2):
    """Sum of L2 distances between paired mean vectors and var vectors.

    Each element of the input lists is a 1D tensor (a BN running statistic).
    Returns a Python float.
    """
    pdist = PairwiseDistance(p=2)
    total = 0
    for mean_a, mean_b, var_a, var_b in zip(means_1, means_2, vars_1, vars_2):
        total = total + pdist(mean_a.reshape(1, -1), mean_b.reshape(1, -1))
        total = total + pdist(var_a.reshape(1, -1), var_b.reshape(1, -1))
    return total.item()


import torch.autograd as autograd
import torch.nn.functional as F
from torch.autograd import Variable


def linear(inputs, weight, bias, meta_step_size=0.001, meta_loss=None, stop_gradient=False):
    """Functional linear layer with an optional one-step meta adaptation.

    With meta_loss given, the layer is applied with parameters adapted by a
    single gradient step on meta_loss (MAML-style inner update):
    w' = w - meta_step_size * d(meta_loss)/dw, likewise for bias.

    stop_gradient=True detaches the inner gradients so the meta-gradient does
    not flow through them (first-order approximation).

    Fix: the deprecated `Variable(grad.data, requires_grad=False)` wrapper is
    replaced by the equivalent `.detach()`, and the duplicated branches are
    collapsed; behavior is unchanged.
    """
    if meta_loss is None:
        # Plain linear layer.
        return F.linear(inputs, weight, bias)

    # create_graph=True keeps the grad graph so gradients can flow through
    # the update when stop_gradient is False.
    grad_weight = autograd.grad(meta_loss, weight, create_graph=True)[0]
    grad_bias = autograd.grad(meta_loss, bias, create_graph=True)[0] if bias is not None else None

    if stop_gradient:
        # First-order variant: treat the inner gradients as constants.
        grad_weight = grad_weight.detach()
        if grad_bias is not None:
            grad_bias = grad_bias.detach()

    bias_adapt = bias - grad_bias * meta_step_size if bias is not None else None
    return F.linear(inputs,
                    weight - grad_weight * meta_step_size,
                    bias_adapt)

def conv2d(inputs, weight, bias, stride=1, padding=1, dilation=1, groups=1, kernel_size=3):
    """Functional 2D convolution.

    `kernel_size` is unused: the kernel shape is implied by `weight`.
    """
    return F.conv2d(inputs, weight, bias,
                    stride=stride, padding=padding,
                    dilation=dilation, groups=groups)


def deconv2d(inputs, weight, bias, stride=2, padding=0, dilation=0, groups=1, kernel_size=None):
    """Functional transposed 2D convolution.

    Bug fix: F.conv_transpose2d's positional order is
    (input, weight, bias, stride, padding, output_padding, groups, dilation);
    the old call passed `dilation` positionally into the output_padding slot
    and always ran with dilation=1. Arguments are now bound by keyword.
    `dilation` keeps its default of 0 for interface compatibility and is
    coerced to the effective value 1 that the old default call produced.
    """
    return F.conv_transpose2d(inputs, weight, bias,
                              stride=stride, padding=padding,
                              output_padding=0, groups=groups,
                              dilation=dilation if dilation else 1)

def relu(inputs):
    """In-place ReLU: clamps negatives to zero, modifying `inputs`."""
    return torch.relu_(inputs)


def maxpool(inputs, kernel_size, stride=None, padding=0):
    """2D max pooling; stride=None falls back to kernel_size (PyTorch default)."""
    return F.max_pool2d(inputs, kernel_size, stride=stride, padding=padding)


def dropout(inputs):
    """Dropout with p=0.5 hard-wired to eval mode — effectively an identity op."""
    # training=False disables the mask; kept for API symmetry with the other ops.
    return F.dropout(inputs, 0.5, False, False)

def batchnorm(inputs, running_mean, running_var):
    """Inference-mode batch norm using the given running statistics
    (no affine weight/bias, default momentum and eps)."""
    return F.batch_norm(inputs, running_mean, running_var,
                        weight=None, bias=None, training=False)


"""
The following are the new methods for 2D-Unet:
Conv2d, batchnorm2d, GroupNorm, InstanceNorm2d, MaxPool2d, UpSample
"""
#as per the 2D Unet:  kernel_size, stride, padding

def instancenorm(input):
    """Instance normalization (per-sample, per-channel) with default settings:
    no affine parameters, no running statistics."""
    normalized = F.instance_norm(input)
    return normalized

def groupnorm(input, num_groups=1):
    """Group normalization.

    Bug fix: F.group_norm requires a num_groups argument; the old call
    omitted it and always raised TypeError. Defaults to a single group
    (normalizing over all of C, H, W), matching the nn.GroupNorm(1, planes)
    used by `normalization` elsewhere in this file.
    """
    return F.group_norm(input, num_groups)

def dropout2D(inputs):
    """Channel-wise dropout (p=0.5) hard-wired to eval mode — identity op."""
    return F.dropout2d(inputs, 0.5, False, False)

def maxpool2D(inputs, kernel_size, stride=None, padding=0):
    """2D max pooling (duplicate of `maxpool`, kept for the 2D-UNet API)."""
    return F.max_pool2d(inputs, kernel_size, stride=stride, padding=padding)

def upsample(input):
    """Bilinear 2x upsampling.

    Fix: F.upsample is deprecated; F.interpolate is its direct replacement
    with identical semantics for these arguments.
    """
    return F.interpolate(input, scale_factor=2, mode='bilinear', align_corners=False)


class MyUpsample2(nn.Module):
    """2x nearest-neighbor upsampling via expand (no interpolation weights)."""

    def forward(self, x):
        n, c, h, w = x.size(0), x.size(1), x.size(2), x.size(3)
        # Insert singleton axes after H and W, broadcast each to 2, then fold
        # them back in: every input pixel becomes a 2x2 block in the output.
        expanded = x[:, :, :, None, :, None].expand(-1, -1, -1, 2, -1, 2)
        return expanded.reshape(n, c, h * 2, w * 2)


def normalization(planes, norm='gn', num_domains=None, momentum=0.1):
    """Build a 2D normalization layer for `planes` channels.

    norm: 'dsbn' (domain-specific BN, needs num_domains), 'bn', 'gn'
    (single-group GroupNorm, i.e. layer norm over C/H/W), or 'in'.
    Raises ValueError for anything else.

    Fix: error message typo 'supporter' -> 'supported'.
    """
    if norm == 'dsbn':
        m = DomainSpecificBatchNorm2d(planes, num_domains=num_domains, momentum=momentum)
    elif norm == 'bn':
        m = nn.BatchNorm2d(planes)
    elif norm == 'gn':
        # Single group == normalize jointly over all channels and pixels.
        m = nn.GroupNorm(1, planes)
    elif norm == 'in':
        m = nn.InstanceNorm2d(planes)
    else:
        raise ValueError('Normalization type {} is not supported'.format(norm))
    return m

#### Note: All are functional units except the norms, which are sequential
class ConvD(nn.Module):
    """Encoder conv block: optional 2x max-pool, then three 3x3 convs with
    normalization. The first conv's output is not activated; the second and
    third are followed by ReLU.

    `weights`/`layer_idx` allow meta-learning style forward passes with
    externally supplied conv parameters; `domain_label` routes DSBN layers.
    """

    def __init__(self, inplanes, planes, norm='bn', first=False, num_domains=None, momentum=0.1):
        super(ConvD, self).__init__()

        self.first = first  # the very first block skips the max-pool
        self.conv1 = nn.Conv2d(inplanes, planes, 3, 1, 1, bias=True)
        self.bn1 = normalization(planes, norm, num_domains, momentum=momentum)

        self.conv2 = nn.Conv2d(planes, planes, 3, 1, 1, bias=True)
        self.bn2 = normalization(planes, norm, num_domains, momentum=momentum)

        self.conv3 = nn.Conv2d(planes, planes, 3, 1, 1, bias=True)
        self.bn3 = normalization(planes, norm, num_domains, momentum=momentum)

    @staticmethod
    def _norm(bn, t, domain_label):
        # DSBN layers take (tensor, domain_label) and return a pair;
        # plain norm layers take the tensor alone.
        if domain_label is not None:
            t, _ = bn(t, domain_label)
            return t
        return bn(t)

    def forward(self, x, weights=None, layer_idx=None, domain_label=None):
        # Pick conv parameters: the module's own, or externally supplied ones
        # keyed by '<layer_idx>.convN.weight' / '.bias'.
        if weights is None:
            w1, b1 = self.conv1.weight, self.conv1.bias
            w2, b2 = self.conv2.weight, self.conv2.bias
            w3, b3 = self.conv3.weight, self.conv3.bias
        else:
            w1, b1 = weights[layer_idx + '.conv1.weight'], weights[layer_idx + '.conv1.bias']
            w2, b2 = weights[layer_idx + '.conv2.weight'], weights[layer_idx + '.conv2.bias']
            w3, b3 = weights[layer_idx + '.conv3.weight'], weights[layer_idx + '.conv3.bias']

        if not self.first:
            x = maxpool2D(x, kernel_size=2)

        # layer 1: conv + norm (no activation here)
        x = self._norm(self.bn1, conv2d(x, w1, b1), domain_label)
        # layer 2: conv + norm + relu
        y = relu(self._norm(self.bn2, conv2d(x, w2, b2), domain_label))
        # layer 3: conv + norm + relu
        z = relu(self._norm(self.bn3, conv2d(y, w3, b3), domain_label))

        return z

class ConvU(nn.Module):
    """Decoder conv block: (optional) 3x3 conv, 2x upsample + 1x1 conv that
    halves the channels, concat with the encoder skip tensor, final 3x3 conv.

    `weights`/`layer_idx` allow meta-learning style forward passes with
    externally supplied conv parameters; `domain_label` routes DSBN layers.
    """

    def __init__(self, planes, norm='bn', first=False, num_domains=None, momentum=0.1):
        super(ConvU, self).__init__()

        self.first = first  # the topmost decoder block has no input conv
        if not self.first:
            self.conv1 = nn.Conv2d(2 * planes, planes, 3, 1, 1, bias=True)
            self.bn1 = normalization(planes, norm, num_domains, momentum=momentum)

        self.pool = MyUpsample2()
        self.conv2 = nn.Conv2d(planes, planes // 2, 1, 1, 0, bias=True)
        self.bn2 = normalization(planes // 2, norm, num_domains, momentum=momentum)

        self.conv3 = nn.Conv2d(planes, planes, 3, 1, 1, bias=True)
        self.bn3 = normalization(planes, norm, num_domains, momentum=momentum)

        # Note: forward() uses the module-level relu(), not this attribute.
        self.relu = nn.ReLU(inplace=True)

    @staticmethod
    def _norm(bn, t, domain_label):
        # DSBN layers take (tensor, domain_label) and return a pair;
        # plain norm layers take the tensor alone.
        if domain_label is not None:
            t, _ = bn(t, domain_label)
            return t
        return bn(t)

    def forward(self, x, prev, weights=None, layer_idx=None, domain_label=None):
        # Pick conv parameters: the module's own, or externally supplied ones
        # keyed by '<layer_idx>.convN.weight' / '.bias'.
        if weights is None:
            if not self.first:
                w1, b1 = self.conv1.weight, self.conv1.bias
            w2, b2 = self.conv2.weight, self.conv2.bias
            w3, b3 = self.conv3.weight, self.conv3.bias
        else:
            if not self.first:
                w1, b1 = weights[layer_idx + '.conv1.weight'], weights[layer_idx + '.conv1.bias']
            w2, b2 = weights[layer_idx + '.conv2.weight'], weights[layer_idx + '.conv2.bias']
            w3, b3 = weights[layer_idx + '.conv3.weight'], weights[layer_idx + '.conv3.bias']

        # layer 1 (skipped for the first/deepest decoder block): conv + norm + relu
        if not self.first:
            x = relu(self._norm(self.bn1, conv2d(x, w1, b1), domain_label))

        # 2x upsample, then 1x1 conv halving the channels: conv + norm + relu
        y = self.pool(x)
        y = relu(self._norm(self.bn2, conv2d(y, w2, b2, kernel_size=1, stride=1, padding=0), domain_label))

        # concatenate the encoder skip connection along the channel axis
        y = torch.cat([prev, y], 1)

        # layer 3: conv + norm + relu
        y = relu(self._norm(self.bn3, conv2d(y, w3, b3), domain_label))

        return y


class Unet2D(nn.Module):
    """2D U-Net built from ConvD/ConvU blocks with selectable normalization
    ('bn', 'gn', 'in' or domain-specific 'dsbn'); outputs per-pixel sigmoid
    probabilities with `num_classes` channels.
    """

    def __init__(self, c=1, n=16, norm='bn', num_classes=2, num_domains=4, momentum=0.1):
        super(Unet2D, self).__init__()

        # Encoder: n -> 16n channels, spatial size halved per block.
        self.convd1 = ConvD(c,     n, norm, first=True, num_domains=num_domains, momentum=momentum)
        self.convd2 = ConvD(n,   2*n, norm, num_domains=num_domains, momentum=momentum)
        self.convd3 = ConvD(2*n, 4*n, norm, num_domains=num_domains, momentum=momentum)
        self.convd4 = ConvD(4*n, 8*n, norm, num_domains=num_domains, momentum=momentum)
        self.convd5 = ConvD(8*n,16*n, norm, num_domains=num_domains, momentum=momentum)

        # Decoder with skip connections from the matching encoder level.
        self.convu4 = ConvU(16*n, norm, first=True, num_domains=num_domains, momentum=momentum)
        self.convu3 = ConvU(8*n, norm, num_domains=num_domains, momentum=momentum)
        self.convu2 = ConvU(4*n, norm, num_domains=num_domains, momentum=momentum)
        self.convu1 = ConvU(2*n, norm, num_domains=num_domains, momentum=momentum)

        # 1x1 segmentation head.
        self.seg1 = nn.Conv2d(2*n, num_classes, 1)

        # Kaiming init for convs; unit weight / zero bias for affine norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        params_num = sum(p.numel() for p in self.parameters())
        print("\nModle's Params: %.3fM" % (params_num / 1e6))

    def forward(self, x, domain=None, myds=None):
        """Run the network.

        domain: integer domain id used to build the per-sample DSBN label,
            or None to bypass domain-specific normalization entirely.
        myds: unused; kept for call-compatibility with UNet_ds.forward.
        """
        # Bug fix: the old code always evaluated domain * torch.ones(...),
        # which raised TypeError for the default domain=None. The label is
        # now also created on x's device (matching UNet_ds.forward), so CUDA
        # inputs do not mix devices.
        if domain is None:
            domain_label = None
        else:
            domain_label = domain * torch.ones(x.shape[0], dtype=torch.long, device=x.device)

        x1 = self.convd1(x, domain_label=domain_label)
        x2 = self.convd2(x1, domain_label=domain_label)
        x3 = self.convd3(x2, domain_label=domain_label)
        x4 = self.convd4(x3, domain_label=domain_label)
        x5 = self.convd5(x4, domain_label=domain_label)

        y4 = self.convu4(x5, x4, domain_label=domain_label)
        y3 = self.convu3(y4, x3, domain_label=domain_label)
        y2 = self.convu2(y3, x2, domain_label=domain_label)
        y1 = self.convu1(y2, x1, domain_label=domain_label)

        # Functional 1x1 conv head, then sigmoid activation.
        y1_pred = conv2d(y1, self.seg1.weight, self.seg1.bias, kernel_size=None, stride=1, padding=0)
        predictions = torch.sigmoid(input=y1_pred)

        return predictions

if __name__ == '__main__':
    # Smoke test: push a dummy batch through UNet_ds, then exercise the
    # DSBN statistics helpers for domain id 2.
    # x = torch.randn(1,12,256,256)
    # net = SegNet_S(12)
    # x = net(x)
    # print(x.shape)
    a = UNet_ds(1, 17)
    y = torch.randn(10,1,256,256)
    print(a(y, 2, False).shape)
    print(get_bn_statis(a, 2)[0][0])  # running mean of the first DSBN layer (domain 2)
    reset_bn_statis(a, 2)
    print(get_bn_statis(a, 2)[0][0])  # all zeros after the reset
