
import torch.nn as nn
import torch.nn.functional as F
import torch
class VGG19_pytorch(nn.Module):
    """VGG19 convolutional feature extractor (no classifier head).

    ``forward`` runs the network only up through ``relu4_4`` and returns that
    activation.  The ``conv5_x`` layers and ``pool4``/``pool5`` are still
    registered even though ``forward`` never uses them — presumably so a
    pretrained VGG19 state dict loads without missing/unexpected keys
    (TODO confirm against the checkpoint this is loaded from).
    """

    def __init__(self, pool="max"):
        """Build the VGG19 conv stack.

        Args:
            pool: Pooling variant, either ``"max"`` or ``"avg"``.

        Raises:
            ValueError: If ``pool`` is neither ``"max"`` nor ``"avg"``.
                (Previously an unknown value was silently ignored and the
                missing ``pool1..pool5`` attributes crashed later in
                ``forward`` with an opaque ``AttributeError``.)
        """
        super(VGG19_pytorch, self).__init__()
        # Standard VGG19 3x3 convs, padding=1 keeps spatial size unchanged.
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        # Pooling layers carry no parameters, so building them through a
        # shared class reference leaves the state dict unchanged.
        if pool == "max":
            pool_cls = nn.MaxPool2d
        elif pool == "avg":
            pool_cls = nn.AvgPool2d
        else:
            raise ValueError(f"pool must be 'max' or 'avg', got {pool!r}")
        self.pool1 = pool_cls(kernel_size=2, stride=2)
        self.pool2 = pool_cls(kernel_size=2, stride=2)
        self.pool3 = pool_cls(kernel_size=2, stride=2)
        self.pool4 = pool_cls(kernel_size=2, stride=2)
        self.pool5 = pool_cls(kernel_size=2, stride=2)

    def forward(self, x):
        """Return the relu4_4 activation of ``x``.

        NOTE: input tensor should range in [0,1]

        Args:
            x: Image batch of shape ``(N, 3, H, W)``; H and W must each be
               divisible by 8 to survive the three 2x2 poolings cleanly.

        Returns:
            Tensor of shape ``(N, 512, H/8, W/8)`` — the relu4_4 features.
        """
        out = {}
        out["r11"] = F.relu(self.conv1_1(x))
        out["r12"] = F.relu(self.conv1_2(out["r11"]))
        out["p1"] = self.pool1(out["r12"])
        out["r21"] = F.relu(self.conv2_1(out["p1"]))
        out["r22"] = F.relu(self.conv2_2(out["r21"]))
        out["p2"] = self.pool2(out["r22"])
        out["r31"] = F.relu(self.conv3_1(out["p2"]))
        out["r32"] = F.relu(self.conv3_2(out["r31"]))
        out["r33"] = F.relu(self.conv3_3(out["r32"]))
        out["r34"] = F.relu(self.conv3_4(out["r33"]))
        out["p3"] = self.pool3(out["r34"])
        out["r41"] = F.relu(self.conv4_1(out["p3"]))
        out["r42"] = F.relu(self.conv4_2(out["r41"]))
        out["r43"] = F.relu(self.conv4_3(out["r42"]))
        out["r44"] = F.relu(self.conv4_4(out["r43"]))
        # Only the final relu4_4 feature map is returned; the intermediate
        # dict is internal bookkeeping.
        return out["r44"]


class Decoder(nn.Module):
    def __init__(self, ic, max_channels=512):
        super(Decoder, self).__init__()
        self.conv1_1 = nn.Sequential(nn.Conv2d(ic, min(32, max_channels), 3, 1, 1), nn.ReLU(), nn.Conv2d(min(32, max_channels), min(64, max_channels), 3, 1, 1))
        self.conv1_2 = nn.Conv2d(min(64, max_channels), min(64, max_channels), 3, 1, 1)
        self.conv1_2norm_ss = nn.Conv2d(min(64, max_channels), min(64, max_channels), 1, 2, bias=False, groups=min(64, max_channels))
        self.conv2_1 = nn.Conv2d(min(64, max_channels), min(128, max_channels), 3, 1, 1)
        self.conv2_2 = nn.Conv2d(min(128, max_channels), min(128, max_channels), 3, 1, 1)
        self.conv2_2norm_ss = nn.Conv2d(min(128, max_channels), min(128, max_channels), 1, 2, bias=False, groups=min(128, max_channels))
        self.conv3_1 = nn.Conv2d(min(128, max_channels), min(256, max_channels), 3, 1, 1)
        self.conv3_2 = nn.Conv2d(min(256, max_channels), min(256, max_channels), 3, 1, 1)
        self.conv3_3 = nn.Conv2d(min(256, max_channels), min(256, max_channels), 3, 1, 1)
        self.conv3_3norm_ss = nn.Conv2d(min(256, max_channels), min(256, max_channels), 1, 2, bias=False, groups=min(256, max_channels))
        self.conv4_1 = nn.Conv2d(min(256, max_channels), min(512, max_channels), 3, 1, 1)
        self.conv4_2 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 1)
        self.conv4_3 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 1)
        self.conv5_1 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 2, 2)
        self.conv5_2 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 2, 2)
        self.conv5_3 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 2, 2)
        self.conv6_1 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 2, 2)
        self.conv6_2 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 2, 2)
        self.conv6_3 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 2, 2)
        self.conv7_1 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 1)
        self.conv7_2 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 1)
        self.conv7_3 = nn.Conv2d(min(512, max_channels), min(512, max_channels), 3, 1, 1)
        self.conv3_3_short = nn.Conv2d(min(256, max_channels), min(256, max_channels), 3, 1, 1)
        self.conv8_2 = nn.Conv2d(min(256, max_channels), min(256, max_channels), 3, 1, 1)
        self.conv8_3 = nn.Conv2d(min(256, max_channels), min(256, max_channels), 3, 1, 1)
        self.conv2_2_short = nn.Conv2d(min(128, max_channels), min(128, max_channels), 3, 1, 1)
        self.conv9_2 = nn.Conv2d(min(128, max_channels), min(128, max_channels), 3, 1, 1)
        self.conv1_2_short = nn.Conv2d(min(64, max_channels), min(128, max_channels), 3, 1, 1)
        self.conv10_2 = nn.Conv2d(min(128, max_channels), min(128, max_channels), 3, 1, 1)
        self.conv10_ab = nn.Conv2d(min(128, max_channels), 3, 1, 1)

        # add self.relux_x
        self.relu1_1 = nn.ReLU()
        self.relu1_2 = nn.ReLU()
        self.relu2_1 = nn.ReLU()
        self.relu2_2 = nn.ReLU()
        self.relu3_1 = nn.ReLU()
        self.relu3_2 = nn.ReLU()
        self.relu3_3 = nn.ReLU()
        self.relu4_1 = nn.ReLU()
        self.relu4_2 = nn.ReLU()
        self.relu4_3 = nn.ReLU()
        self.relu5_1 = nn.ReLU()
        self.relu5_2 = nn.ReLU()
        self.relu5_3 = nn.ReLU()
        self.relu6_1 = nn.ReLU()
        self.relu6_2 = nn.ReLU()
        self.relu6_3 = nn.ReLU()
        self.relu7_1 = nn.ReLU()
        self.relu7_2 = nn.ReLU()
        self.relu7_3 = nn.ReLU()
        self.relu8_1_comb = nn.ReLU()
        self.relu8_2 = nn.ReLU()
        self.relu8_3 = nn.ReLU()
        self.relu9_1_comb = nn.ReLU()
        self.relu9_2 = nn.ReLU()
        self.relu10_1_comb = nn.ReLU()
        self.relu10_2 = nn.LeakyReLU(0.2, True)

        # print("replace all deconv with [nearest + conv]")
        self.conv8_1 = nn.Sequential(nn.Upsample(scale_factor=2, mode="nearest"), nn.Conv2d(min(512, max_channels), min(256, max_channels), 3, 1, 1))
        self.conv9_1 = nn.Sequential(nn.Upsample(scale_factor=2, mode="nearest"), nn.Conv2d(min(256, max_channels), min(128, max_channels), 3, 1, 1))
        self.conv10_1 = nn.Sequential(nn.Upsample(scale_factor=2, mode="nearest"), nn.Conv2d(min(128, max_channels), min(128, max_channels), 3, 1, 1))

        # print("replace all batchnorm with instancenorm")
        self.conv1_2norm = nn.InstanceNorm2d(min(64, max_channels))
        self.conv2_2norm = nn.InstanceNorm2d(min(128, max_channels))
        self.conv3_3norm = nn.InstanceNorm2d(min(256, max_channels))
        self.conv4_3norm = nn.InstanceNorm2d(min(512, max_channels))
        self.conv5_3norm = nn.InstanceNorm2d(min(512, max_channels))
        self.conv6_3norm = nn.InstanceNorm2d(min(512, max_channels))
        self.conv7_3norm = nn.InstanceNorm2d(min(512, max_channels))
        self.conv8_3norm = nn.InstanceNorm2d(min(256, max_channels))
        self.conv9_2norm = nn.InstanceNorm2d(min(128, max_channels))

    def forward(self, x):
        """ x: gray image (1 channel), ab(2 channel), ab_err, ba_err"""
        conv1_1 = self.relu1_1(self.conv1_1(x))
        conv1_2 = self.relu1_2(self.conv1_2(conv1_1))
        conv1_2norm = self.conv1_2norm(conv1_2)
        conv1_2norm_ss = self.conv1_2norm_ss(conv1_2norm)
        conv2_1 = self.relu2_1(self.conv2_1(conv1_2norm_ss))
        conv2_2 = self.relu2_2(self.conv2_2(conv2_1))
        conv2_2norm = self.conv2_2norm(conv2_2)
        conv2_2norm_ss = self.conv2_2norm_ss(conv2_2norm)
        conv3_1 = self.relu3_1(self.conv3_1(conv2_2norm_ss))
        conv3_2 = self.relu3_2(self.conv3_2(conv3_1))
        conv3_3 = self.relu3_3(self.conv3_3(conv3_2))
        conv3_3norm = self.conv3_3norm(conv3_3)
        conv3_3norm_ss = self.conv3_3norm_ss(conv3_3norm)
        conv4_1 = self.relu4_1(self.conv4_1(conv3_3norm_ss))
        conv4_2 = self.relu4_2(self.conv4_2(conv4_1))
        conv4_3 = self.relu4_3(self.conv4_3(conv4_2))
        conv4_3norm = self.conv4_3norm(conv4_3)
        conv5_1 = self.relu5_1(self.conv5_1(conv4_3norm))
        conv5_2 = self.relu5_2(self.conv5_2(conv5_1))
        conv5_3 = self.relu5_3(self.conv5_3(conv5_2))
        conv5_3norm = self.conv5_3norm(conv5_3)
        conv6_1 = self.relu6_1(self.conv6_1(conv5_3norm))
        conv6_2 = self.relu6_2(self.conv6_2(conv6_1))
        conv6_3 = self.relu6_3(self.conv6_3(conv6_2))
        conv6_3norm = self.conv6_3norm(conv6_3)
        conv7_1 = self.relu7_1(self.conv7_1(conv6_3norm))
        conv7_2 = self.relu7_2(self.conv7_2(conv7_1))
        conv7_3 = self.relu7_3(self.conv7_3(conv7_2))
        conv7_3norm = self.conv7_3norm(conv7_3)
        conv8_1 = self.conv8_1(conv7_3norm)
        conv3_3_short = self.conv3_3_short(conv3_3norm)
        conv8_1_comb = self.relu8_1_comb(conv8_1 + conv3_3_short)
        conv8_2 = self.relu8_2(self.conv8_2(conv8_1_comb))
        conv8_3 = self.relu8_3(self.conv8_3(conv8_2))
        conv8_3norm = self.conv8_3norm(conv8_3)
        conv9_1 = self.conv9_1(conv8_3norm)
        conv2_2_short = self.conv2_2_short(conv2_2norm)
        conv9_1_comb = self.relu9_1_comb(conv9_1 + conv2_2_short)
        conv9_2 = self.relu9_2(self.conv9_2(conv9_1_comb))
        conv9_2norm = self.conv9_2norm(conv9_2)
        conv10_1 = self.conv10_1(conv9_2norm)
        conv1_2_short = self.conv1_2_short(conv1_2norm)
        conv10_1_comb = self.relu10_1_comb(conv10_1 + conv1_2_short)
        conv10_2 = self.relu10_2(self.conv10_2(conv10_1_comb))
        conv10_ab = self.conv10_ab(conv10_2)

        return torch.tanh(conv10_ab)
