import torch
import torch.nn as nn
import numpy as np

# Expected square input resolution; matches the hard-coded 128 in the
# classifier's first Linear layer below.
IMG_SIZE = 128
# NOTE(review): NUM_CLASSES (5) matches the classifier's final Linear(64, 5),
# but neither IMG_SIZE nor NUM_CLASSES is referenced directly in this chunk —
# confirm usage against callers.
NUM_CLASSES = 5
N_CHANNELS = 3  # R,G,B


class DownsampleLayer(nn.Module):
    """One U-Net encoder stage.

    Applies two 3x3 conv + BatchNorm + ReLU blocks, then a stride-2
    convolution that halves the spatial resolution.  Returns both the
    pre-downsample feature map (kept as the skip connection) and the
    downsampled map fed to the next stage.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        feature_blocks = [
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
        ]
        self.conv_bn_relu_2 = nn.Sequential(*feature_blocks)
        # Stride-2 convolution halves H and W (128 -> 64, etc.).
        self.downsample = nn.Sequential(
            nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
        )

    def forward(self, x):
        features = self.conv_bn_relu_2(x)
        pooled = self.downsample(features)
        return features, pooled


class UpSampleLayer(nn.Module):
    """One U-Net decoder stage.

    Applies two 3x3 conv + BatchNorm + ReLU blocks at doubled width
    (out_ch * 2), upsamples back to out_ch channels with a stride-2
    transposed convolution, then concatenates the matching encoder skip
    tensor along the channel axis, so the output has out_ch * 2 channels.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        wide = out_ch * 2
        self.conv_bn_relu_2 = nn.Sequential(
            nn.Conv2d(in_ch, wide, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(wide),
            nn.ReLU(),
            nn.Conv2d(wide, wide, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(wide),
            nn.ReLU(),
        )
        # Stride-2 transposed conv doubles H and W; output_padding=1 makes
        # the output exactly twice the input size.
        self.upsample = nn.Sequential(
            nn.ConvTranspose2d(wide, out_ch, kernel_size=3, stride=2,
                               padding=1, output_padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
        )

    def forward(self, x, out):
        upsampled = self.upsample(self.conv_bn_relu_2(x))
        return torch.cat((upsampled, out), dim=1)


out_channels = [2 ** (i + 4) for i in range(5)]


class UNet(nn.Module):
    """U-Net producing a single-channel map from a 3-channel input.

    Four encoder stages (DownsampleLayer) record skip tensors while
    halving resolution; four decoder stages (UpSampleLayer) upsample and
    concatenate the skips; two final 3x3 convolutions reduce 32 channels
    to 1.  Input H and W must be divisible by 16 (four halvings).
    """

    def __init__(self):
        super().__init__()
        ch = [2 ** (i + 4) for i in range(5)]  # [16, 32, 64, 128, 256]
        # Encoder: each stage returns (skip, downsampled).
        self.d1 = DownsampleLayer(3, ch[0])      # 3 -> 16
        self.d2 = DownsampleLayer(ch[0], ch[1])  # 16 -> 32
        self.d3 = DownsampleLayer(ch[1], ch[2])  # 32 -> 64
        self.d4 = DownsampleLayer(ch[2], ch[3])  # 64 -> 128
        # Decoder: each stage widens, upsamples, then concatenates a skip.
        self.u1 = UpSampleLayer(ch[3], ch[3])  # 128 -> 256 -> 128, cat -> 256
        self.u2 = UpSampleLayer(ch[4], ch[2])  # 256 -> 128 -> 64, cat -> 128
        self.u3 = UpSampleLayer(ch[3], ch[1])  # 128 -> 64 -> 32, cat -> 64
        self.u4 = UpSampleLayer(ch[2], ch[0])  # 64 -> 32 -> 16, cat -> 32
        # Output head: 32 -> 16 -> 1 channels.
        self.conv1 = nn.Conv2d(ch[1], ch[0], (3, 3), padding=1)
        self.conv2 = nn.Conv2d(ch[0], 1, (3, 3), padding=1)

    def forward(self, x):
        skip1, x = self.d1(x)
        skip2, x = self.d2(x)
        skip3, x = self.d3(x)
        skip4, x = self.d4(x)
        x = self.u1(x, skip4)
        x = self.u2(x, skip3)
        x = self.u3(x, skip2)
        x = self.u4(x, skip1)
        return self.conv2(torch.relu(self.conv1(x)))


# %%
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)


# Stand-alone CNN classifier: four conv(3x3)-BN-ReLU-MaxPool(2) stages shrink
# a 1-channel 128x128 input by a factor of 2**4, then a 3-layer MLP produces
# 5 class logits.
classifier = nn.Sequential(
    nn.Conv2d(1, out_channels[0], 3, 1, 1),
    nn.BatchNorm2d(out_channels[0]),
    nn.ReLU(),
    nn.MaxPool2d(2),

    nn.Conv2d(out_channels[0], out_channels[1], 3, 1, 1),
    nn.BatchNorm2d(out_channels[1]),
    nn.ReLU(),
    nn.MaxPool2d(2),

    nn.Conv2d(out_channels[1], out_channels[1], 3, 1, 1),
    nn.BatchNorm2d(out_channels[1]),
    nn.ReLU(),
    nn.MaxPool2d(2),

    nn.Conv2d(out_channels[1], out_channels[1], 3, 1, 1),
    nn.BatchNorm2d(out_channels[1]),
    nn.ReLU(),
    nn.MaxPool2d(2),

    Flatten(),
    # After four halvings: (128 // 2**4)**2 spatial positions per channel.
    nn.Linear((128 // 2 ** 4) ** 2 * out_channels[1], 128),
    nn.ReLU(),
    nn.Linear(128, 64),
    nn.ReLU(),
    nn.Linear(64, 5),
)


# %%
def train_model_(epochs: int, model,
                 opt: torch.optim.Optimizer,
                 training_loader, test_loader,
                 loss_fn, out_params, out_hists,
                 dev='cpu'):
    """Train `model` for `epochs` epochs, tracking per-sample average losses.

    After each epoch the model is evaluated on `test_loader`; whenever the
    total test loss reaches a new minimum, the model's state_dict is saved
    to `out_params`.  The per-epoch training/test loss histories are saved
    to `out_hists[0]` and `out_hists[1]` when training finishes.

    Args:
        epochs: number of passes over the training data.
        model: the nn.Module to optimize (already moved to `dev` by caller).
        opt: optimizer bound to `model`'s parameters.
        training_loader: iterable of (input, target) training batches.
        test_loader: iterable of (input, target) evaluation batches.
        loss_fn: criterion returning a scalar loss for a batch.
        out_params: file path for the best model weights.
        out_hists: pair (train_hist_path, test_hist_path) for torch.save.
        dev: device string/object the batches are moved to.
    """
    training_hist = []
    test_hist = []
    min_test_loss = float('inf')
    for _ in range(epochs):
        # --- training pass ---
        model.train()
        t_loss = 0.0
        n_train = 0
        for dt in training_loader:
            inp, targ = dt[0], dt[1]
            inp = inp.to(dev)
            targ = targ.to(dev)
            out = model(inp)
            loss = loss_fn(out, targ)
            opt.zero_grad()
            loss.backward()
            opt.step()
            # Weight each batch's loss by its size for a true per-sample mean
            # (replaces the old hard-coded division by 300).
            t_loss += loss.item() * inp.shape[0]
            n_train += inp.shape[0]
        training_hist.append(t_loss / max(n_train, 1))
        # --- evaluation pass ---
        # eval() freezes BatchNorm/Dropout; no_grad() skips gradient tracking.
        model.eval()
        t_loss = 0.0
        n_test = 0
        with torch.no_grad():
            for dt in test_loader:
                inp, targ = dt[0], dt[1]
                inp = inp.to(dev)
                targ = targ.to(dev)
                out = model(inp)
                loss = loss_fn(out, targ)
                t_loss += loss.item() * inp.shape[0]
                n_test += inp.shape[0]
        test_hist.append(t_loss / max(n_test, 1))
        # Checkpoint on a new best (lowest) total test loss.
        if t_loss < min_test_loss:
            min_test_loss = t_loss
            torch.save(model.state_dict(), out_params)
    torch.save(training_hist, out_hists[0])
    torch.save(test_hist, out_hists[1])