# -*- coding: utf-8 -*-
import copy
import torch
from torch import nn
from torch import Tensor
import torch.nn.functional as F


class SEBlock(nn.Module):
    """Squeeze-and-Excitation block: channel-wise re-weighting.

    Global-average-pools the input to 1x1, squeezes to
    ``internal_neurons`` channels, expands back to ``input_channels``,
    and gates the input with a sigmoid in (0, 1).
    """

    def __init__(self, input_channels, internal_neurons):
        super(SEBlock, self).__init__()
        # 1x1 convolutions implement the squeeze/excite bottleneck.
        self.down = nn.Conv2d(in_channels=input_channels, out_channels=internal_neurons, kernel_size=1, stride=1, bias=True)
        self.up = nn.Conv2d(in_channels=internal_neurons, out_channels=input_channels, kernel_size=1, stride=1, bias=True)
        self.input_channels = input_channels

    def forward(self, inputs):
        # Global average pooling over the full spatial extent.
        # The original hard-coded kernel_size=15, which raises for any
        # other spatial size (e.g. the 8x8 feature maps this net's
        # valid 3x3 stem produces); pooling over the actual H/W
        # generalizes to all input sizes.
        x = F.avg_pool2d(inputs, kernel_size=(inputs.size(2), inputs.size(3)))
        x = self.down(x)
        x = F.relu_(x)
        x = self.up(x)
        x = torch.sigmoid(x)
        x = x.view(-1, self.input_channels, 1, 1)
        return inputs * x

class Block(nn.Module):
    """Conv(5x5) -> BatchNorm -> ReLU -> SE block, fuse-able for inference."""

    def __init__(self, D_in, D, train):
        # D_in / D: input / output channel counts.
        # ``train`` is currently unused; kept for interface compatibility.
        super().__init__()
        self.h1 = nn.Conv2d(D_in, D, 5, 1, 2, bias=False)
        self.bn1 = nn.BatchNorm2d(D)
        self.se = SEBlock(D, D // 16)
        self.relu1 = nn.ReLU(True)

    def fuse(self):
        """Fold the BatchNorm into the conv for inference.

        Standard BN-fold using running statistics:
            w' = w * gamma / sqrt(running_var + eps)
            b' = beta - gamma * running_mean / sqrt(running_var + eps)
        Afterwards ``bn1`` is replaced by an Identity.  Only valid after
        training, since it relies on the accumulated running stats.
        """
        # Use the layer's own eps instead of a hard-coded 1e-5 so the
        # fold stays exact even if bn1 is ever built with a different eps.
        tmp = self.bn1.weight / torch.sqrt(self.bn1.running_var + self.bn1.eps)
        self.h1.weight.data = tmp.view(tmp.size()[0], 1, 1, 1) * self.h1.weight
        self.h1.bias = nn.Parameter(-tmp * self.bn1.running_mean + self.bn1.bias)
        self.bn1 = nn.Identity()

    def forward(self, x):
        x = self.h1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.se(x)
        return x


class ReversiNet(nn.Module):
    """Small convolutional net for Reversi.

    Input: a 2-plane board tensor.  Output: a 13-channel map from a
    1x1 head (same spatial size as the input after the pad+valid-conv
    stem cancels out).
    """

    def __init__(self, device='cpu', cfg=((3, 128),), train=False):
        """Build the network.

        cfg: sequence of (num_blocks, channels) stages.  The default is
        an immutable tuple rather than a list so the shared-mutable-
        default pitfall cannot bite; any sequence of pairs is accepted,
        so callers passing lists keep working.  ``device`` and
        ``train`` are kept for interface compatibility.
        """
        super(ReversiNet, self).__init__()
        # Stem: valid 3x3 conv; forward() pads the board by 1 first,
        # so spatial size is preserved end-to-end.
        blocks = [nn.Conv2d(2, cfg[0][1], 3), nn.ReLU(True)]
        last_c = cfg[0][1]
        for n, c in cfg:
            for i in range(n):
                blocks.append(Block(last_c, c, train))
                last_c = c
        self.resblocks = nn.Sequential(*blocks)
        self.out = nn.Conv2d(last_c, 13, 1, bias=False)
        # Scale down the head's initial weights so early outputs start
        # near zero.
        self.out.weight.data.mul_(0.01)

    def forward(self, x):
        # Border padded with 1s (not 0s) before the valid 3x3 stem;
        # presumably 1 encodes off-board cells — confirm with the data
        # pipeline.
        x = F.pad(x, (1, 1, 1, 1), value=1.)
        x = self.resblocks(x)
        x = self.out(x)
        return x


class EMAModel(nn.Module):
    """Exponential-moving-average shadow of a model.

    Holds a deep copy whose parameters (and floating-point buffers)
    track the source model via ``ema = decay * ema + (1 - decay) * live``.
    """

    def __init__(self, model: nn.Module, decay: float = 0.999):
        super().__init__()
        self.model = copy.deepcopy(model)
        self.decay = decay

    @torch.no_grad()
    def update(self, model: nn.Module) -> None:
        """Blend ``model``'s current state into the shadow copy.

        Relies on ``parameters()``/``buffers()`` of both modules
        iterating in the same order, which holds for a deepcopy of the
        same architecture.
        """
        alpha = 1.0 - self.decay
        for p_ema, p_m in zip(self.model.parameters(), model.parameters()):
            p_ema.add_(p_m - p_ema, alpha=alpha)
        for b_ema, b_m in zip(self.model.buffers(), model.buffers()):
            # Non-float buffers (e.g. BatchNorm's int64
            # num_batches_tracked) cannot take a fractional in-place
            # update.  The original skipped only torch.long, which
            # would still crash on any other integer/bool buffer dtype;
            # skipping everything non-floating is strictly safer and
            # identical for the long case.
            if not b_ema.dtype.is_floating_point:
                continue
            b_ema.add_(b_m - b_ema, alpha=alpha)

    def forward(self, x):
        # Inference runs through the shadow (EMA) weights.
        return self.model(x)
