from __future__ import print_function

__all__ = ["ConvEncoder", "ConvDecoder", "ConvClassifyWithReconstructionNet", "ConvClassifyNet"]

import torch
import torch.nn as nn
import time
import config_conv_encoder_decoder_oil as config

# from omegaconf import DictConfig
from functools import partial
# from torchsummary import summary
from torchvision.models import resnet18

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchinfo import summary

from matplotlib import pyplot as plt
import os
import cv2 as cv

plt.rcParams['font.sans-serif']=['SimHei'] # use the SimHei font so CJK labels render correctly
plt.rcParams['font.size']=18 # default font size for all plot text
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly when using CJK fonts

from resnet import BasicBlock, ResNetEncoding

# --------------------------------------------------------------------
# https://github.com/KushajveerSingh/resize_network_cv
# Post-processing applied after the "Learning to Resize Images for Computer Vision Tasks" resizer
class ResBlock(nn.Module):
    """Residual block: two 3x3 conv+BN stages with a LeakyReLU in between,
    added back onto the identity shortcut (`x + f(x)`).

    Args:
        channel_size: number of input/output channels (unchanged by the block).
        negative_slope: LeakyReLU slope for negative inputs.
    """

    def __init__(self, channel_size: int, negative_slope: float = 0.2):
        super().__init__()
        layers = [
            nn.Conv2d(channel_size, channel_size, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(channel_size),
            nn.LeakyReLU(negative_slope, inplace=True),
            nn.Conv2d(channel_size, channel_size, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(channel_size),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut around the conv stack.
        return x + self.block(x)


class Resizer(nn.Module):
    """Learnable image resizer ("Learning to Resize Images for Computer
    Vision Tasks", https://github.com/KushajveerSingh/resize_network_cv).

    `config` is the project config module (e.g. config_efficient_capsule_oil);
    it must provide `interpolate_mode`, `image_size`, `resizer_image_size`,
    `out_channels` and a nested `resizer` object with `num_kernels`,
    `num_resblocks`, `negative_slope` and `in_channels`.
    """

    def __init__(self, config):
        super().__init__()
        self.interpolate_mode = config.interpolate_mode  # e.g. "bilinear"
        # Ratio between the model's working size and the raw input size,
        # e.g. 224 / 448.
        self.scale_factor = config.image_size / config.resizer_image_size

        num_kernels = config.resizer.num_kernels        # e.g. 16
        num_resblocks = config.resizer.num_resblocks    # e.g. 2
        slope = config.resizer.negative_slope           # e.g. 0.2

        # Initial feature extraction at the original resolution.
        self.module1 = nn.Sequential(
            nn.Conv2d(config.resizer.in_channels, num_kernels,
                      kernel_size=7, padding=3),
            nn.LeakyReLU(slope, inplace=True),
            nn.Conv2d(num_kernels, num_kernels, kernel_size=1),
            nn.LeakyReLU(slope, inplace=True),
            nn.BatchNorm2d(num_kernels),
        )

        # Residual refinement at the target resolution.
        self.resblocks = nn.Sequential(
            *[ResBlock(num_kernels, slope) for _ in range(num_resblocks)]
        )

        self.module3 = nn.Sequential(
            nn.Conv2d(num_kernels, num_kernels, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(num_kernels),
        )

        # Projection back to the desired number of image channels.
        self.module4 = nn.Conv2d(num_kernels, config.out_channels,
                                 kernel_size=7, padding=3)

        self.interpolate = partial(F.interpolate,
                                   scale_factor=self.scale_factor,
                                   mode=self.interpolate_mode,
                                   align_corners=False,
                                   recompute_scale_factor=False)

    def forward(self, x):
        # Plain resized input acts as the outermost skip connection.
        skip = self.interpolate(x)

        feats = self.interpolate(self.module1(x))
        refined = self.module3(self.resblocks(feats)) + feats

        return self.module4(refined) + skip
#----------------------------------------------------------------------------

# -----------------------------------
# Original source: https://blog.csdn.net/weixin_36979214/article/details/108879684

class RestNetBasicBlock(nn.Module):
    """ResNet basic block with an identity shortcut.

    NOTE(review): both convs use the same `stride`, and the shortcut is the
    raw input, so the residual add only matches shapes when stride == 1 —
    downsampling is handled by RestNetDownBlock instead. Confirm callers
    never pass stride != 1.
    """

    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Residual add, then the final activation.
        return F.relu(x + y)


class RestNetDownBlock(nn.Module):
    """ResNet downsampling block.

    `stride` is a pair: stride[0] is applied by the first conv (and by the
    1x1 projection shortcut, keeping shapes aligned), stride[1] by the second.
    """

    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride[0], padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride[1], padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # 1x1 projection that matches both channel count and spatial size.
        self.extra = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride[0], padding=0),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        shortcut = self.extra(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(shortcut + y)

# ------------------------------------------------------------------

def squash(input, eps=10e-21):
    """Capsule squashing non-linearity (Efficient-CapsNet variant).

    Scales each capsule vector (last dim) by 1 - 1/(e^||v|| + eps), so long
    vectors approach unit length and short ones shrink toward zero. `eps`
    guards against division by a zero norm.
    """
    norm = torch.norm(input, dim=-1, keepdim=True)
    scale = 1 - 1 / (torch.exp(norm) + eps)
    return scale * (input / (norm + eps))


def length(input):
    """Euclidean norm of each capsule (last dim); 1e-8 keeps sqrt's gradient finite at zero."""
    squared_norm = torch.sum(input**2, dim=-1)
    return torch.sqrt(squared_norm + 1e-8)


def mask(input):
    """Zero out all capsules except one per sample and flatten.

    If `input` is a [tensor, mask] pair, the supplied one-hot mask selects
    the capsule; otherwise the capsule with the largest norm wins.
    Returns a (batch, num_capsules * capsule_dim) tensor.
    """
    if type(input) is list:
        input, capsule_mask = input
    else:
        norms = torch.sqrt(torch.sum(input**2, dim=-1))
        winners = torch.argmax(norms, dim=1)
        capsule_mask = F.one_hot(winners, num_classes=norms.shape[1]).float()

    selected = input * capsule_mask.unsqueeze(-1)
    return selected.view(input.shape[0], -1)


class ConvEncoder(nn.Module):
    """Five-stage convolutional encoder for single-channel images.

    Each stage is conv(3x3, pad 1) -> ReLU -> maxpool(2x2), doubling channels
    (1 -> 16 -> 32 -> 64 -> 128 -> 256) while halving the spatial size.
    Returns both the final feature map and a globally-pooled feature vector.
    """

    # Channel progression; stage i maps _CHANNELS[i-1] -> _CHANNELS[i].
    _CHANNELS = (1, 16, 32, 64, 128, 256)

    def __init__(self):
        super().__init__()
        # Register stages under the same attribute names (conv1..conv5, ...)
        # so checkpoints/state_dicts stay compatible.
        for stage in range(1, 6):
            setattr(self, f"conv{stage}",
                    nn.Conv2d(self._CHANNELS[stage - 1], self._CHANNELS[stage],
                              (3, 3), padding=(1, 1)))
            setattr(self, f"relu{stage}", nn.ReLU(inplace=True))
            setattr(self, f"maxpool{stage}", nn.MaxPool2d((2, 2)))
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        """Return (feature_map, feature_vector) for input x of shape (N, 1, H, W)."""
        for stage in range(1, 6):
            x = getattr(self, f"conv{stage}")(x)
            x = getattr(self, f"relu{stage}")(x)
            x = getattr(self, f"maxpool{stage}")(x)

        pooled = self.avgpool(x)
        features = torch.flatten(pooled, 1)
        return x, features

class ConvEncoder2(nn.Module):
    """Six-stage convolutional encoder for single-channel images.

    Like ConvEncoder but with one extra stage: channels grow
    1 -> 16 -> 32 -> 64 -> 128 -> 256 -> 512, each stage halving the
    spatial size via maxpool. Returns the final feature map plus a
    globally-pooled 512-dim feature vector.
    """

    # Channel progression; stage i maps _CHANNELS[i-1] -> _CHANNELS[i].
    _CHANNELS = (1, 16, 32, 64, 128, 256, 512)

    def __init__(self):
        super().__init__()
        # Same attribute names as the original (conv1..conv6, relu*, maxpool*)
        # so existing checkpoints keep loading.
        for stage in range(1, 7):
            setattr(self, f"conv{stage}",
                    nn.Conv2d(self._CHANNELS[stage - 1], self._CHANNELS[stage],
                              (3, 3), padding=(1, 1)))
            setattr(self, f"relu{stage}", nn.ReLU(inplace=True))
            setattr(self, f"maxpool{stage}", nn.MaxPool2d((2, 2)))
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        """Return (feature_map, feature_vector) for input x of shape (N, 1, H, W)."""
        for stage in range(1, 7):
            x = getattr(self, f"conv{stage}")(x)
            x = getattr(self, f"relu{stage}")(x)
            x = getattr(self, f"maxpool{stage}")(x)

        pooled = self.avgpool(x)
        features = torch.flatten(pooled, 1)
        return x, features


class ConvDecoder2(nn.Module):
    """Six-stage transposed-convolution decoder, the counterpart of ConvEncoder2.

    Channels shrink 512 -> 256 -> 128 -> 64 -> 32 -> 16 -> 1; stages use
    kernel 2 with strides (2, 3, 3, 2, 2, 2), so an input map of size S grows
    per stage as (S-1)*stride + 2. Every stage ends with an in-place ReLU.
    """

    # (in_channels, out_channels, stride) per stage, in order.
    _STAGES = ((512, 256, 2), (256, 128, 3), (128, 64, 3),
               (64, 32, 2), (32, 16, 2), (16, 1, 2))

    def __init__(self):
        super().__init__()
        # Keep attribute names deconv1..deconv6 / relu1..relu6 for
        # state_dict compatibility with the original layout.
        for idx, (cin, cout, stride) in enumerate(self._STAGES, start=1):
            setattr(self, f"deconv{idx}",
                    nn.ConvTranspose2d(cin, cout, (2, 2), stride=(stride, stride)))
            setattr(self, f"relu{idx}", nn.ReLU(inplace=True))

    def forward(self, x):
        """Upsample a (N, 512, h, w) feature map to a single-channel image."""
        for idx in range(1, 7):
            x = getattr(self, f"deconv{idx}")(x)
            x = getattr(self, f"relu{idx}")(x)
        return x


class ConvDecoder(nn.Module):
    """Five-stage transposed-convolution decoder, the counterpart of ConvEncoder.

    Channels shrink 256 -> 128 -> 64 -> 32 -> 16 -> 1; every stage uses a
    2x2 kernel with stride 2 (exactly doubling the spatial size) followed by
    an in-place ReLU.
    """

    # (in_channels, out_channels) per stage, in order; stride is always 2.
    _STAGES = ((256, 128), (128, 64), (64, 32), (32, 16), (16, 1))

    def __init__(self):
        super().__init__()
        # Attribute names deconv1..deconv5 / relu1..relu5 match the original
        # layout so state_dicts remain loadable.
        for idx, (cin, cout) in enumerate(self._STAGES, start=1):
            setattr(self, f"deconv{idx}",
                    nn.ConvTranspose2d(cin, cout, (2, 2), stride=(2, 2)))
            setattr(self, f"relu{idx}", nn.ReLU(inplace=True))

    def forward(self, x):
        """Upsample a (N, 256, h, w) feature map to a (N, 1, 32h, 32w) image."""
        for idx in range(1, 6):
            x = getattr(self, f"deconv{idx}")(x)
            x = getattr(self, f"relu{idx}")(x)
        return x


class ConvClassifyNet(nn.Module):
    """Linear classification head mapping encoder features to class logits.

    Args:
        feature_dim: size of the flattened input feature vector.
        num_classes: number of output classes (logit dimension).
    """

    def __init__(self, feature_dim, num_classes):
        super().__init__()
        self.fc = nn.Linear(feature_dim, num_classes)

    def forward(self, x):
        """Return raw (unnormalised) class logits of shape (N, num_classes).

        Fix: the original stored the result in `self.logits` before returning
        it, mutating module state on every forward pass — not re-entrant and
        it keeps the last batch's autograd graph alive. Return directly.
        """
        return self.fc(x)


class ConvClassifyWithReconstructionNet(nn.Module):
    """Encoder + decoder + classifier: classifies an image while also
    reconstructing it from the encoder's feature map.

    Fix: the original defined `__init__` twice; Python keeps only the second
    definition, so the dependency-injecting constructor was silently dead
    code. The two are merged: pass sub-networks explicitly, or omit them to
    get the original no-arg defaults (ConvEncoder2 / ConvDecoder2 /
    ConvClassifyNet sized from the config module).

    Args:
        conv_encoder_net: module returning (feature_map, feature_vector);
            defaults to ConvEncoder2().
        conv_decoder_net: module reconstructing an image from the feature
            map; defaults to ConvDecoder2().
        conv_classify_net: module mapping the feature vector to logits;
            defaults to ConvClassifyNet(config.FEATURE_DIM, config.NUM_CLASSES).
    """

    def __init__(self, conv_encoder_net=None, conv_decoder_net=None, conv_classify_net=None):
        super().__init__()
        self.conv_encoder_net = conv_encoder_net if conv_encoder_net is not None else ConvEncoder2()
        self.conv_decoder_net = conv_decoder_net if conv_decoder_net is not None else ConvDecoder2()
        if conv_classify_net is not None:
            self.conv_classify_net = conv_classify_net
        else:
            self.conv_classify_net = ConvClassifyNet(config.FEATURE_DIM, config.NUM_CLASSES)

    def forward(self, x):
        """Return (feature_vector, reconstruction, logits) for input x."""
        enc_x, features = self.conv_encoder_net(x)
        dec_x = self.conv_decoder_net(enc_x)
        probs = self.conv_classify_net(features)
        return features, dec_x, probs


class ConvWithReconstructionNet(nn.Module):
    """Autoencoder wrapper: encode an image, decode the feature map back.

    Args:
        conv_encoder_net: module returning (feature_map, feature_vector).
        conv_decoder_net: module reconstructing an image from the feature map.
    """

    def __init__(self, conv_encoder_net, conv_decoder_net):
        super().__init__()
        self.conv_encoder_net = conv_encoder_net
        self.conv_decoder_net = conv_decoder_net

    def forward(self, x):
        """Return (feature_vector, reconstruction) for input x."""
        feature_map, features = self.conv_encoder_net(x)
        reconstruction = self.conv_decoder_net(feature_map)
        return features, reconstruction


class MarginLoss(nn.Module):
    """Capsule-network margin loss (Sabour et al., "Dynamic Routing Between
    Capsules"):

        L_k = T_k * max(0, m_pos - y_k)^2
              + lambda * (1 - T_k) * max(0, y_k - m_neg)^2

    Args:
        m_pos: margin the present class's score should exceed (default 0.9).
        m_neg: margin absent classes' scores should stay below (default 0.1).
        lambda_: down-weighting of the absent-class term (default 0.5).
    """

    def __init__(self, m_pos=0.9, m_neg=0.1, lambda_=0.5):
        super(MarginLoss, self).__init__()
        self.m_pos = m_pos
        self.m_neg = m_neg
        self.lambda_ = lambda_

    def forward(self, y_pred, y_true, size_average=True):
        """Compute the margin loss.

        Args:
            y_pred: (batch, num_classes) class scores, e.g. shape [16, 10].
            y_true: integer class labels, shape [batch] (or [batch, 1]).
            size_average: return the mean over all elements if True, else the sum.
        """
        # Fix: replaced the deprecated torch.autograd.Variable wrapper and the
        # manual zeros/.cuda()/.data/scatter_ dance with F.one_hot, which
        # follows y_true's device automatically; behaviour is unchanged.
        labels = y_true.view(-1)
        targets = F.one_hot(labels, num_classes=y_pred.size(1)).to(dtype=y_pred.dtype)
        present = targets * torch.pow(
            torch.clamp(self.m_pos - y_pred, min=0.0), 2
        )
        absent = self.lambda_ * (1 - targets) * torch.pow(
            torch.clamp(y_pred - self.m_neg, min=0.0), 2
        )
        losses = present + absent
        return losses.mean() if size_average else losses.sum()




if __name__ == "__main__":
    '''针对彩色图像数据'''

    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # img_random = torch.randn(1, 1, 250, 250)
    # img_random2 = torch.randn(1, 1, 250, 250)
    image = cv.imread("../data/small_data_oil_for_classification/images/liefeng/0000.png", cv.IMREAD_GRAYSCALE)
    img_input_size = (224,224)
    image = cv.resize(image, img_input_size)
    # image = cv.cvtColor(image, cv.COLOR_GRAY2RGB)
    # plt.axis('off')  # 去掉坐标轴
    # x_major_locator = plt.MultipleLocator(10)  # 把x轴的刻度间隔设置为1，并存在变量里
    # y_major_locator = plt.MultipleLocator(10)  # 把y轴的刻度间隔设置为1，并存在变量里
    # ax = plt.gca()  # ax为两条坐标轴的实例
    # ax.xaxis.set_major_locator(x_major_locator)  # 把x轴的主刻度设置为1的倍数
    # ax.yaxis.set_major_locator(y_major_locator)  # 把y轴的主刻度设置为1的倍数
    # plt.imshow(image, cmap="gray", vmin=0, vmax=255)  # 显示原始图像灰度vmin = 0, vmax = 255
    # plt.imsave("1-plt.bmp", image, cmap="gray", vmin=0, vmax=255)
    # https://matplotlib.org/stable/tutorials/colors/colormaps.html
    # plt.imshow(image, cmap=plt.cm.get_cmap('YlOrBr'))
    # plt.imshow(cv.cvtColor(image, cv.COLOR_RGB2BGR))
    # plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2GRAY))
    # plt.title('原图')
    # plt.imshow(image, cmap=plt.cm.get_cmap('YlOrBr'))
    # plt.show()

    # 原文链接：https: // blog.csdn.net / L888666Q / article / details / 127112683
    # image = np.transpose(image, (2, 0, 1))  # 将通道数转到前面
    img_random = np.array(image)

    img_random = np.expand_dims(img_random, axis=0)  # 当image是灰度图像时候
    img_random = np.expand_dims(img_random, axis=0)
    print("img_random.shape:", img_random.shape)
    img_random = torch.from_numpy(img_random).float()
    # img_random = img_random.permute(0, 3, 1, 2)
    print(img_random.shape)
    use_cuda = False #False
    if use_cuda == True:
        if torch.cuda.is_available():
            device = "cuda"
            use_cuda = True
        else:
            device = "cpu"
            use_cuda = False
    else:
        device = "cpu"
        use_cuda = False
    print("device:", device)
    img_random = img_random.to(device)
    img_random2 = img_random.to(device)
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = ConvClassifyWithReconstructionNet()
    input_data_shape = (1, 224, 224)
    # summary(enc,(1, 250, 250))
    model.to(device)
    # summary(model.conv_encoder_net, input_data_shape, device=device)
    start_time = time.time()
    features, dec_out, prob_out = model(img_random)
    end_time = time.time()
    print("time use: {:.5f} s".format(end_time - start_time))

    print("probs：", prob_out)
    dec_out_img = dec_out.detach().numpy().reshape(224,224)
    print("dec_out_img.shape:", dec_out_img.shape)



    fig = plt.figure()
    # img_random *= 255
    # img_random_show = img_random[0, :, :, :].reshape(250, 250, 3)
    plt.imshow(image, cmap=plt.cm.get_cmap('YlOrBr'))
    plt.title('编码前的图')
    plt.show()

    # dec_out_img *= 255
    fig2 = plt.figure()
    plt.imshow(dec_out_img, cmap=plt.cm.get_cmap('YlOrBr'))
    plt.title('解码后的图')
    plt.show()


