from __future__ import print_function

# __all__ = ["EfficientResNet18CapsNet", "ResNet18CapsNetReconstructionNet", "EfficientResNet18CapsNetWithReconstruction"]

import torch
import torch.nn as nn
import time
import config_efficient_resnet18_capsule_oil as config

# from omegaconf import DictConfig
from functools import partial
# from torchsummary import summary
from torchvision.models import VisionTransformer

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# from torchsummary import summary

from matplotlib import pyplot as plt
import os
import cv2 as cv

plt.rcParams['font.sans-serif']=['SimHei'] # use SimHei so Chinese (CJK) labels render correctly
plt.rcParams['font.size']=18 # default font size for plot text
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly when using CJK fonts

from resnet import BasicBlock, Bottleneck, ResNetEncoding, ResNetEncodingFlatten

from torchvision.models import resnet50, resnet18, resnet34, resnet101, resnet152
from torchvision.models import vgg16, vgg16_bn
from torchvision.models import mobilenet_v3_small, mobilenet_v3_large
# from torchvision.models import vit_b_16, vit_b_32, vit_l_16, vit_l_32
# from torchvision.models import maxvit_t
from resnet import ResNet
from resizer_efficient_resnet18_capsule_model import ResizerEfficientResNet18CapsNet
from model.glom_pytorch import Glom
from model.crate_encoder import CRATE_small, CRATE_tiny, CRATE_base, CRATE_large, CRATE_tiny_small, CRATE
from model.bilateralfsunet import BilateralFSUnet
from torch_classical_features_model_oil import ClassicalFeaturesEncoder
# from model.fastetvit.registry import create_model
# from model.fastetvit.faster_vit import PatchEmbed
# --------------------------------------------------------------------
from scipy import stats
# from sklearn.metrics import pairwise
from scipy.spatial import distance as scipy_distance


def compute_similarity(array_X, array_Y, metric_method):
    """Compute a similarity score between two 1-D arrays.

    Args:
        array_X: first 1-D sequence of numbers.
        array_Y: second 1-D sequence, same length as ``array_X``.
        metric_method (str): one of ``"pearson"``, ``"cosine"``,
            ``"euclidean"``, ``"jaccard"``, ``"corr"``. Any other value
            falls back to the Pearson correlation.

    Returns:
        float: similarity in which larger means "more similar". Distance
        metrics are converted via ``1 - d`` (cosine, jaccard) or
        ``1 / (1 + d)`` (euclidean, correlation).
    """
    if metric_method == "cosine":
        return 1 - scipy_distance.cosine(array_X, array_Y)
    if metric_method == "euclidean":
        return 1 / (1 + scipy_distance.euclidean(array_X, array_Y))
    if metric_method == "jaccard":
        return 1 - scipy_distance.jaccard(array_X, array_Y)
    if metric_method == "corr":
        return 1 / (1 + scipy_distance.correlation(array_X, array_Y))
    # "pearson" and any unrecognised metric use the Pearson correlation
    # coefficient directly (the accompanying p-value is discarded).
    corr_coefficient, _p_value = stats.pearsonr(array_X, array_Y)
    return corr_coefficient


# https://github.com/KushajveerSingh/resize_network_cv
# Applied after the resizer from "Learning to Resize Images for Computer Vision Tasks".
class ResBlock(nn.Module):
    """Residual block used by the learnable resizer.

    Two 3x3 conv + BatchNorm stages with a LeakyReLU between them; the
    input is added back onto the result (identity shortcut, no projection,
    so the channel count is unchanged).
    """

    def __init__(self, channel_size: int, negative_slope: float = 0.2):
        super().__init__()
        layers = [
            nn.Conv2d(channel_size, channel_size, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(channel_size),
            nn.LeakyReLU(negative_slope, inplace=True),
            nn.Conv2d(channel_size, channel_size, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(channel_size),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.block(x)
        return residual + x


class Resizer(nn.Module):
    """Learnable image resizer ("Learning to Resize Images for Computer
    Vision Tasks"): resamples the input and adds a learned residual
    correction on top of the plain interpolation.

    ``config`` is the config_efficient_capsule_oil object; it supplies
    ``interpolate_mode``, ``image_size``, ``resizer_image_size`` and the
    ``resizer.*`` fields (num_kernels, num_resblocks, negative_slope,
    in_channels) plus ``out_channels``.
    """

    def __init__(self, config):
        super().__init__()
        self.interpolate_mode = config.interpolate_mode  # e.g. "bilinear"
        # e.g. 224 / 448 -> downscale by a factor of 2
        self.scale_factor = config.image_size / config.resizer_image_size

        num_kernels = config.resizer.num_kernels        # e.g. 16
        num_resblocks = config.resizer.num_resblocks    # e.g. 2
        slope = config.resizer.negative_slope           # e.g. 0.2

        # Stem: a 7x7 then a 1x1 conv, LeakyReLU after each, BN last.
        self.module1 = nn.Sequential(
            nn.Conv2d(config.resizer.in_channels, num_kernels, kernel_size=7,
                      padding=3),
            nn.LeakyReLU(slope, inplace=True),
            nn.Conv2d(num_kernels, num_kernels, kernel_size=1),
            nn.LeakyReLU(slope, inplace=True),
            nn.BatchNorm2d(num_kernels),
        )

        self.resblocks = nn.Sequential(
            *(ResBlock(num_kernels, slope) for _ in range(num_resblocks))
        )

        self.module3 = nn.Sequential(
            nn.Conv2d(num_kernels, num_kernels, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(num_kernels),
        )

        self.module4 = nn.Conv2d(num_kernels, config.out_channels,
                                 kernel_size=7, padding=3)

        # Fixed (non-learned) resampling shared by both residual paths.
        self.interpolate = partial(F.interpolate,
                                   scale_factor=self.scale_factor,
                                   mode=self.interpolate_mode,
                                   align_corners=False,
                                   recompute_scale_factor=False)

    def forward(self, x):
        skip = self.interpolate(x)                       # plainly-resized input
        features = self.interpolate(self.module1(x))     # resized stem features
        refined = self.module3(self.resblocks(features)) + features
        return self.module4(refined) + skip
#----------------------------------------------------------------------------

# -----------------------------------
# Adapted from (Chinese-language reference): https://blog.csdn.net/weixin_36979214/article/details/108879684

class RestNetBasicBlock(nn.Module):
    """Plain residual block with an identity shortcut (no projection).

    NOTE(review): `stride` is applied to BOTH convolutions; the canonical
    ResNet basic block strides only the first. With ``stride != 1`` the
    identity add would also fail on shape — this block is only safe to use
    with ``stride=1`` and ``in_channels == out_channels``; confirm callers.
    """

    def __init__(self, in_channels, out_channels, stride):
        super(RestNetBasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Identity shortcut followed by the post-addition ReLU.
        return F.relu(y + x)


class RestNetDownBlock(nn.Module):
    """Downsampling residual block with a 1x1-conv projection shortcut.

    ``stride`` is a two-element sequence: ``stride[0]`` is used by the first
    3x3 conv and by the shortcut projection, ``stride[1]`` by the second conv.
    """

    def __init__(self, in_channels, out_channels, stride):
        super(RestNetDownBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride[0], padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride[1], padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Projection shortcut: matches channels and spatial size of the main path.
        self.extra = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride[0], padding=0),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        shortcut = self.extra(x)
        main = F.relu(self.bn1(self.conv1(x)))
        main = self.bn2(self.conv2(main))
        return F.relu(main + shortcut)

# ------------------------------------------------------------------

def squash(input, eps=10e-21):
    """Efficient-CapsNet squashing non-linearity.

    Scales each capsule vector (last dim) by ``1 - 1/exp(||v||)`` so long
    vectors approach unit length and short ones shrink toward zero; ``eps``
    guards against division by zero.
    """
    norm = torch.norm(input, dim=-1, keepdim=True)
    scale = 1 - 1 / (torch.exp(norm) + eps)
    return scale * (input / (norm + eps))


def length(input):
    """Capsule lengths: L2 norm over the last dim (1e-8 keeps sqrt differentiable at 0)."""
    squared_sum = torch.sum(input ** 2, dim=-1)
    return torch.sqrt(squared_sum + 1e-8)


def mask(input):
    """Keep only one capsule per sample and flatten the result.

    If ``input`` is a ``[capsules, mask]`` pair the given one-hot mask is
    used (training with labels); otherwise the capsule with the largest L2
    norm is selected (inference). Returns shape ``(batch, num_caps * dim)``.
    """
    if type(input) is list:
        capsules, one_hot = input
    else:
        capsules = input
        norms = torch.sqrt(torch.sum(capsules ** 2, dim=-1))
        one_hot = F.one_hot(torch.argmax(norms, dim=1),
                            num_classes=norms.shape[1]).float()

    selected = capsules * one_hot.unsqueeze(-1)
    return selected.view(capsules.shape[0], -1)


class PrimaryCapsLayer(nn.Module):
    """Primary capsule layer: a 2D depthwise convolution whose output map is
    reshaped into ``num_capsules`` capsules of ``dim_capsules`` each, then
    squashed.

    Args:
        in_channels (int): depthwise convolution's number of features
        kernel_size (int): depthwise convolution's kernel dimension
        num_capsules (int): number of primary capsules
        dim_capsules (int): primary capsule dimension
        stride (int, optional): depthwise convolution's strides. Defaults to 1.
    """

    def __init__(self, in_channels, kernel_size, num_capsules, dim_capsules, stride=1):
        super(PrimaryCapsLayer, self).__init__()
        self.num_capsules = num_capsules
        self.dim_capsules = dim_capsules
        # groups=in_channels makes this a depthwise convolution.
        self.depthwise_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            groups=in_channels,
            padding="valid",
        )

    def forward(self, input):
        # Requires channels * H_out * W_out == num_capsules * dim_capsules.
        features = self.depthwise_conv(input)
        capsules = features.reshape(
            features.size(0), self.num_capsules, self.dim_capsules
        )
        return squash(capsules)


class RoutingLayer(nn.Module):
    """Self-attention routing layer using a fully-connected network, to create
    a parent layer of capsules.

    The (64, 8) dimensions of ``W``/``b`` must match the incoming capsule
    grid — i.e. PrimaryCapsLayer's (num_capsules, dim_capsules); adjust them
    together with ``get_backbone``.

    Args:
        user_divce (str): "cpu" or "cuda". Kept for backward compatibility
            (the name is the original, misspelled, public parameter); the
            scaling below is now device-agnostic and no longer branches on it.
        num_capsules (int): number of output (parent) capsules
        dim_capsules (int): output capsule dimension
    """

    def __init__(self, user_divce, num_capsules, dim_capsules):
        super(RoutingLayer, self).__init__()
        self.W = nn.Parameter(torch.Tensor(num_capsules, 64, 8, dim_capsules))
        self.b = nn.Parameter(torch.zeros(num_capsules, 64, 1))
        self.num_capsules = num_capsules
        self.dim_capsules = dim_capsules
        self.user_divce = user_divce
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init the vote weights, zero the routing bias."""
        nn.init.kaiming_normal_(self.W)
        nn.init.zeros_(self.b)

    def forward(self, input):
        """input: (..., 64, 8) primary capsules -> (..., num_capsules, dim_capsules)."""
        # u: per-parent "votes", shape (..., num_capsules, 64, dim_capsules)
        u = torch.einsum("...ji,kjiz->...kjz", input, self.W)
        # c: self-attention agreement logits, shape (..., num_capsules, 64, 1)
        c = torch.einsum("...ij,...kj->...i", u, u)[..., None]
        # Scale by sqrt(d) as in dot-product attention. Using a plain Python
        # scalar works on any device — this replaces the old cpu/cuda
        # branching that allocated a fresh 1-element tensor every forward.
        c = c / (self.dim_capsules ** 0.5)
        c = torch.softmax(c, dim=1)  # dim=, not the deprecated axis= alias
        c = c + self.b
        # Weighted sum of votes -> (..., num_capsules, dim_capsules)
        s = torch.sum(torch.mul(u, c), dim=-2)
        final_v = squash(s)
        return final_v


class EfficientResNet18CapsNet(nn.Module):
    """Efficient-CapsNet with a ResNet-18-style grayscale encoder.

    Pipeline: ResNet encoding -> primary capsules -> self-attention routing.
    ``forward`` returns ``(digit_capsules, class_probabilities)`` where the
    probabilities are the capsule lengths.
    """

    def __init__(self, user_device="cuda"):
        super(EfficientResNet18CapsNet, self).__init__()
        self.resnet_backbone = self.get_backbone()
        # Capsule shapes below match the 512-channel encoder built by
        # get_backbone ("method 2" configuration).
        self.primary_caps = PrimaryCapsLayer(
            in_channels=512, kernel_size=8, num_capsules=64, dim_capsules=8
        )
        self.device = user_device
        self.digit_caps = RoutingLayer(
            self.device, num_capsules=config.NUM_CLASSES, dim_capsules=128
        )
        self.probs = None  # updated on every forward pass
        self.reset_parameters()

    def get_backbone(self):
        """Build the single-channel ResNet-18-style encoder (BasicBlock x [2,2,2,2])."""
        stage_config = [[64, 2], [128, 2], [256, 2], [512, 2]]
        stem_config = [1, 64, 5, 2]  # in_channels, out_channels, kernel, stride
        return ResNetEncoding(BasicBlock, stage_config, stem_config, first_pool=True)

    def reset_parameters(self):
        nn.init.kaiming_normal_(self.resnet_backbone.conv1.weight)

    def forward(self, x):
        features = self.resnet_backbone(x)
        primary = self.primary_caps(features)
        digits = self.digit_caps(primary)
        probs = length(digits)
        self.probs = probs
        return digits, probs


class ResNet18CapsNetReconstructionNet(nn.Module):
    """Decoder that reconstructs a 250x250 grayscale image from masked capsules.

    Input: digit capsules of shape (batch, n_classes, n_dim); only the
    winning (or labelled) capsule is kept via ``mask`` before decoding.
    """

    def __init__(self, n_dim=16, n_classes=10):
        super(ResNet18CapsNetReconstructionNet, self).__init__()
        self.fc1 = nn.Linear(n_dim * n_classes, 1024)
        self.fc2 = nn.Linear(1024, 2048)
        self.fc3 = nn.Linear(2048, 62500)  # 62500 = 250 * 250 output pixels
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_normal_(self.fc1.weight)
        nn.init.kaiming_normal_(self.fc2.weight)
        nn.init.xavier_normal_(self.fc3.weight)

    def forward(self, x):
        hidden = torch.relu(self.fc1(mask(x)))
        hidden = torch.relu(self.fc2(hidden))
        pixels = torch.sigmoid(self.fc3(hidden))
        return pixels.view(-1, 1, 250, 250)


class DecReconstructionNet(nn.Module):
    """Decoder that reconstructs a 250x250 grayscale image from a flat encoding.

    Unlike ResNet18CapsNetReconstructionNet this takes a plain feature vector
    of size ``encoding_dim`` (no capsule masking).
    """

    def __init__(self, encoding_dim):
        super(DecReconstructionNet, self).__init__()
        self.fc1 = nn.Linear(encoding_dim, 1024)
        self.fc2 = nn.Linear(1024, 2048)
        self.fc3 = nn.Linear(2048, 62500)  # 62500 = 250 * 250 output pixels
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_normal_(self.fc1.weight)
        nn.init.kaiming_normal_(self.fc2.weight)
        nn.init.xavier_normal_(self.fc3.weight)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        pixels = torch.sigmoid(self.fc3(hidden))
        return pixels.view(-1, 1, 250, 250)


class EfficientResNet18CapsNetWithReconstruction(nn.Module):
    """Pairs a capsule encoder with a reconstruction decoder.

    ``efficient_capsnet`` must return ``(capsules, probs)``; the capsules are
    fed to ``reconstruction_net`` and ``(reconstruction, probs)`` is returned.
    """

    def __init__(self, efficient_capsnet, reconstruction_net):
        super(EfficientResNet18CapsNetWithReconstruction, self).__init__()
        self.efficient_capsnet = efficient_capsnet
        self.reconstruction_net = reconstruction_net

    def forward(self, x):
        capsules, probs = self.efficient_capsnet(x)
        reconstruction = self.reconstruction_net(capsules)
        return reconstruction, probs


class MarginLoss(nn.Module):
    """Capsule-network margin loss (Sabour et al., 2017).

    L_k = T_k * max(0, m_pos - ||v_k||)^2
          + lambda * (1 - T_k) * max(0, ||v_k|| - m_neg)^2
    """

    def __init__(self, m_pos=0.9, m_neg=0.1, lambda_=0.5):
        super(MarginLoss, self).__init__()
        self.m_pos = m_pos    # margin a present class should exceed
        self.m_neg = m_neg    # margin an absent class should stay below
        self.lambda_ = lambda_  # down-weight for absent-class terms

    def forward(self, y_pred, y_true, size_average=True):
        """y_pred: (B, num_classes) capsule lengths; y_true: (B,) integer labels.

        Returns the mean loss if ``size_average`` else the sum.
        """
        # One-hot targets built directly on y_pred's device/dtype. This
        # replaces the deprecated torch.autograd.Variable / .data / manual
        # .cuda() handling of the original implementation.
        targets = torch.zeros_like(y_pred).scatter_(1, y_true.view(-1, 1), 1.0)
        losses = targets * torch.pow(
            torch.clamp(self.m_pos - y_pred, min=0.0), 2
        ) + self.lambda_ * (1 - targets) * torch.pow(
            torch.clamp(y_pred - self.m_neg, min=0.0), 2
        )
        return losses.mean() if size_average else losses.sum()

# Architectures whose forward() yields capsule outputs — handled downstream
# with (num_capsules, capsule_dim) feature shapes and a capsule decoder.
capsule_network_family_list = ['efficient-capsule-orgin', 'efficient-capsule', 'efficient-res-capsule',
                               'res-capsule', 'efficient-resnet18-capsule', 'simple_efficient_res_capsule',
                               'resizer_efficient-resnet18-capsule']
# CRATE transformer variants. NOTE(review): 'crate__tiny_small' (double
# underscore) does not match the 'crate_tiny_small' branch in
# get_classical_vision_model — confirm the intended spelling.
crate_network_family_list = ['crate_small' ,'crate_base' ,'crate_large', 'crate_tiny', 'crate__tiny_small']

# Vision Transformer variants (patch size 50 or 125 on 250x250 inputs).
vit_network_family_list = ['vit-b-50', 'vit-b-125', 'vit-l-50', 'vit-l-125']


def get_classical_vision_model(arch, user_device):
    """Construct a single-channel (grayscale) feature-extraction backbone by name.

    Every branch swaps the stem to accept 1-channel input and replaces the
    classification head with ``nn.Identity`` so the model emits features.

    Args:
        arch (str): architecture key; see ``models_list`` for known names.
        user_device (str): "cpu" or "cuda"; forwarded to backbones that take it.

    Returns:
        ``(backbone, feature_dim)`` for flat-feature models, or
        ``(backbone, num_capsules, capsule_dim)`` for capsule/GLOM-style models.

    Raises:
        NameError: if ``arch`` is not recognised.
    """
    if arch == "vgg16":
        # NOTE(review): `pretrained=` is deprecated in newer torchvision
        # (use `weights=`) — confirm against the installed version.
        backbone = vgg16(pretrained=False)
        backbone.features[0] = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        backbone.classifier[-1] = nn.Identity()
        return backbone, 4096
    elif arch == "vgg16_bn":
        backbone = vgg16_bn(pretrained=False)
        backbone.features[0] = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        backbone.classifier[-1] = nn.Identity()
        return backbone, 4096
    elif arch == "resnet18":
        backbone = resnet18(pretrained=False)
        # NOTE(review): unlike the other resnet branches (stride=2, padding=3),
        # this stem uses stride=1, padding=1 — confirm this is intentional.
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=1, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet34":
        backbone = resnet34(pretrained=False)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet50":
        backbone = resnet50(pretrained=False)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 2048
    elif arch == "resnet101":
        backbone = resnet101(pretrained=False)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 2048
    elif arch == "resnet152":
        backbone = resnet152(pretrained=False)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 2048
    elif arch == "mobilenet_v3":
        # NOTE(review): torchvision's mobilenet_v3_small accepts no `in_dim`
        # keyword — this call likely raises TypeError at runtime; confirm and
        # drop the kwarg if so.
        backbone = mobilenet_v3_small(in_dim=1,pretrained=False)
        backbone.features[0] = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        backbone.features[-1] =torch.nn.Identity()
        return backbone, 1024
    elif arch == "efficient-resnet18-capsule":
        backbone = EfficientResNet18CapsNet(user_device)
        backbone.fc = nn.Identity()
        # (num_capsules, capsule_dim) — presumably 200 == config.NUM_CLASSES
        # used by EfficientResNet18CapsNet's RoutingLayer; confirm.
        return backbone, 200, 128
    elif arch == 'resizer_efficient-resnet18-capsule':
        backbone = ResizerEfficientResNet18CapsNet(user_device)
        backbone.fc = nn.Identity()
        # (num_capsules, capsule_dim)
        return backbone, 256, 16
    elif arch == "bilateralfsunet":
        backbone = BilateralFSUnet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 128
    elif arch == 'crate_tiny':
        backbone = CRATE(image_size=250,
                    patch_size=50,
                    num_classes=1000,
                    dim=384,
                    depth=12,
                    heads=6,
                    channels=1,
                    dropout=0.0,
                    emb_dropout=0.0,
                    dim_head=384//6)
        backbone.fc = nn.Identity()
        return backbone, 384
    elif arch == "crate_small":
        backbone = CRATE(image_size=250,
                    patch_size=25,
                    num_classes=1000,
                    dim=576,
                    depth=12,
                    heads=12,
                    channels=1,
                    dropout=0.0,
                    emb_dropout=0.0,
                    dim_head=576//12)
        backbone.fc = nn.Identity()
        return backbone, 576
    elif arch == "crate_base":
        backbone = CRATE(image_size=250,
                patch_size=50,
                num_classes=1000,
                dim=768,
                depth=12,
                heads=12,
                channels=1,
                dropout=0.0,
                emb_dropout=0.0,
                dim_head=768//12)
        backbone.fc = nn.Identity()
        return backbone, 768
    elif arch == "crate_large":
        backbone = CRATE(image_size=250,
                patch_size=125,
                num_classes=1000,
                dim=1024,
                depth=24,
                heads=16,
                channels=1,
                dropout=0.0,
                emb_dropout=0.0,
                dim_head=1024//16)
        backbone.fc = nn.Identity()
        return backbone, 1024
    elif arch == "crate_tiny_small":
        backbone = CRATE(image_size=250,
                    patch_size=50,
                    num_classes=1000,
                    dim=128,
                    depth=4,
                    heads=8,
                    channels=1,
                    dropout=0.0,
                    emb_dropout=0.0,
                    dim_head=128//8)
        backbone.fc = nn.Identity()
        return backbone, 128
    elif arch == "vit-b-50":
        hidden_dim = 768
        patch_size = 50
        backbone = VisionTransformer(
            image_size=250,
            patch_size=50,
            num_layers=12,
            num_heads=12,
            hidden_dim=768,
            mlp_dim=3072)
        # Re-project patches from a single-channel (grayscale) input.
        backbone.conv_proj = nn.Conv2d(
            in_channels=1, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
        )
        # Remove the classification head.
        backbone.heads = torch.nn.Identity()
        return backbone, 768
    elif arch == "vit-b-125":
        hidden_dim = 768
        patch_size = 125
        backbone = VisionTransformer(
            image_size=250,
            patch_size=125,
            num_layers=12,
            num_heads=12,
            hidden_dim=768,
            mlp_dim=3072)
        backbone.conv_proj = nn.Conv2d(
            in_channels=1, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
        )
        backbone.heads = torch.nn.Identity()
        return backbone, 768
    elif arch == "vit-l-50":
        hidden_dim = 1024
        patch_size = 50
        backbone = VisionTransformer(
            image_size=250,
            patch_size=50,
            num_layers=24,
            num_heads=16,
            hidden_dim=1024,
            mlp_dim=4096)
        backbone.conv_proj = nn.Conv2d(
            in_channels=1, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
        )
        backbone.heads = torch.nn.Identity()
        return backbone, 1024
    elif arch == "vit-l-125":
        hidden_dim = 1024
        patch_size = 125
        backbone = VisionTransformer(
            image_size=250,
            patch_size=125,
            num_layers=24,
            num_heads=16,
            hidden_dim=1024,
            mlp_dim=4096)
        backbone.conv_proj = nn.Conv2d(
            in_channels=1, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
        )
        backbone.heads = torch.nn.Identity()
        return backbone, 1024
    elif arch == "glom":
        backbone = Glom(
            dim=256,  # dimension
            levels=4,  # number of levels
            image_size=250,  # image size
            patch_size=125,  # patch size
            consensus_self=False,
            local_consensus_radius=0,
            image_chanel=1,
            return_state=2,
            device=user_device
        )
        return backbone, 4, 256  # (image_size/patch_size)^2, dim
    elif arch == "classical_features":
        # NOTE(review): 'Hus'/'CLCM' look like 'Hu' moments and 'GLCM' — confirm
        # these are the keys ClassicalFeaturesEncoder expects.
        feature_list = ["Hus", "CLCM", "hog"]
        backbone = ClassicalFeaturesEncoder(feature_list)
        return backbone, 2364
    else:
        raise NameError("{} not found in network architecture".format(arch))


# All architecture names this module advertises for get_classical_vision_model.
# NOTE(review): 'resnet-34' / 'resnet-50' (hyphenated) do not match the
# 'resnet34' / 'resnet50' branches above and would raise NameError — confirm
# the intended spelling.
models_list = ['vgg16', 'vgg16_bn', 'resnet18', 'resnet-34', 'resnet-50',
               'resnet101', 'resnet152','mobilenet_v3',
               'efficient-resnet18-capsule', 'resizer_efficient-resnet18-capsule',
               'crate_small' ,'crate_base' ,'crate_large', 'crate_tiny', 'crate_tiny_small',
               'vit-b-50', 'vit-b-125', 'vit-l-50', 'vit-l-125','glom', 'classical_features',
               'faster_vit_4_21k_224']





if __name__ == "__main__":

    import cv2
    import torchmetrics
    # arch = "vit-l-50"
    # arch = "resnet18"
    # arch = "classical_features"
    # arch = "efficient-resnet18-capsule"
    # arch = "crate_base"
    arch = "glom"
    # arch = "faster_vit_4_21k_224"
    '''针对彩色图像数据'''


    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    use_cuda = False
    if use_cuda == True:
        if torch.cuda.is_available():
            device = "cuda"
            use_cuda = True
        else:
            device = "cpu"
            use_cuda = False
    else:
        device = "cpu"
        use_cuda = False
    # device = "cpu"
    print("device:", device)
    # img_random = img_random.to(device)

    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = arch
    feature_dim = 0
    if arch in capsule_network_family_list:
        backbone, num_capsules, capsules_feature_dim = get_classical_vision_model(arch, device)
        feature_dim = num_capsules * capsules_feature_dim
        reconstruction_model = ResNet18CapsNetReconstructionNet(num_capsules, capsules_feature_dim)
    elif arch in crate_network_family_list:
        backbone, feature_dim = get_classical_vision_model(arch, device)
        reconstruction_model = DecReconstructionNet(feature_dim)
    elif arch == "glom":
        backbone, num_capsules, capsules_feature_dim = get_classical_vision_model(arch, device)
        feature_dim = num_capsules * capsules_feature_dim
        reconstruction_model = ResNet18CapsNetReconstructionNet(num_capsules, capsules_feature_dim)
    else:
        backbone, feature_dim = get_classical_vision_model(arch, device)
        reconstruction_model = DecReconstructionNet(feature_dim)


    input_data_shape = (1, 250, 250)
    if "224" in arch:
        input_data_shape = (1, 224, 224)

    # summary(enc, input_data_shape, device=device)
    # input_data_shape = [(1, 250, 250)]
    from torchinfo import summary
    enc = backbone
    if arch != "classical_features":
        summary(enc, input_data_shape, batch_dim=1, device=device)

    enc.to(device)


    # img_random = torch.randn(32, 3, 256, 256)
    # img_random2 = torch.randn(32, 3, 256, 256)
    # Two sample images (from two different defect classes); the similarity of
    # their embeddings is computed below.
    img_list = ["../data/data_oil_for_classification/images/liefeng/0040.png",
                # "../data/data_oil_for_classification/images/liefeng/0060.png"]
                # "../data/data_oil_for_classification/images/rongkong/1022.png"]
                "../data/data_oil_for_classification/images/wenceng/2000.png"]
                # "../data/data_oil_for_classification/images/lishi/3040.png"]
                # "../data/data_oil_for_classification/images/ansetiaodai/4002.png"]
                # "../data/data_oil_for_classification/images/youdaofeng/5175.png"]

    img_input_size = (250, 250)
    # img_input_size = (224, 224)
    # NOTE(review): the visible file header imports OpenCV as `cv`
    # (`import cv2 as cv`), while the bare name `cv2` is used here — confirm
    # `import cv2` also exists elsewhere in this file.
    input_image_x = cv2.imread(img_list[0], cv2.IMREAD_GRAYSCALE)
    input_image_y = cv2.imread(img_list[1], cv2.IMREAD_GRAYSCALE)

    if arch != "classical_features":
        # Learned encoders take float32 images min-max scaled into [0, 1].
        img_normalized_x = cv2.normalize(input_image_x, None, 0, 1.0, cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        img_normalized_y = cv2.normalize(input_image_y, None, 0, 1.0, cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        input_x = cv2.resize(img_normalized_x, img_input_size)
        input_y = cv2.resize(img_normalized_y, img_input_size)
    else:
        # Classical feature extraction works on the raw 8-bit grayscale image.
        input_x = cv2.resize(input_image_x, img_input_size)
        input_y = cv2.resize(input_image_y, img_input_size)
    # Classical feature extraction expects uint8 arrays; learned encoders keep
    # the float32 arrays prepared above.
    # BUGFIX: this previously compared against "classical_feature" (missing the
    # trailing "s") while every other comparison in this file uses
    # "classical_features", so the uint8-conversion branch could never run.
    if arch == "classical_features":
        image_x = np.array(input_x, dtype=np.uint8)
        image_y = np.array(input_y, dtype=np.uint8)
    else:
        image_x = input_x
        image_y = input_y

    image_x = np.expand_dims(image_x, axis=0)  # 当image是灰度图像时候
    image_x = np.expand_dims(image_x, axis=0)  # 彩色图注释,灰度图解除注释
    image_y = np.expand_dims(image_y, axis=0)  # 当image是灰度图像时候
    image_y = np.expand_dims(image_y, axis=0)  # 彩色图注释,灰度图解除注释
    # img_tensor_x = torch.randn(1, 1, 250, 250)
    # img_tensor_y = torch.randn(1, 1, 250, 250)
    img_tensor_x = torch.from_numpy(image_x)
    img_tensor_y = torch.from_numpy(image_y)
    img_tensor_x = img_tensor_x.to(device)
    img_tensor_y = img_tensor_y.to(device)

    # Similarity/distance metrics evaluated between the two flattened embeddings
    # (dispatched by name through compute_similarity).
    metric_method_list = [
        "pearson",
        "cosine",
        "euclidean",
        "jaccard",
        "corr"
    ]

    # Time the encoder forward passes plus the conversion to flat 1-D vectors.
    start_time = time.time()
    enc_out_x = enc(img_tensor_x)
    enc_out_y = enc(img_tensor_y)

    if arch == "classical_features":
        # Classical features come back as numpy arrays already.
        array_x = enc_out_x.flatten()
        array_y = enc_out_y.flatten()
    elif arch in capsule_network_family_list or arch == "glom":
        # Capsule/GLOM encoders return a tuple; element 0 is the feature tensor.
        array_x = enc_out_x[0].detach().cpu().numpy().flatten()
        array_y = enc_out_y[0].detach().cpu().numpy().flatten()
    else:
        array_x = enc_out_x.detach().cpu().numpy().flatten()
        array_y = enc_out_y.detach().cpu().numpy().flatten()
    metric_method_list_size = len(metric_method_list)
    end_time = time.time()
    enc_to_vec_time = end_time - start_time
    print("arch enc time use: {:.5f} s".format(enc_to_vec_time))
    start_time_eval = time.time()
    scores = [compute_similarity(array_x, array_y, method) for method in metric_method_list]
    end_time_eval = time.time()
    # Average similarity-computation time per metric.
    # BUGFIX: previously divided by a hard-coded 5.; use the actual list length
    # so the average stays correct if metrics are added or removed.
    eval_time = (end_time_eval - start_time_eval) / metric_method_list_size
    print("arch eval time use: {:.5f} s".format(eval_time))
    print("arch time use: {:.5f} s".format(enc_to_vec_time + eval_time))

    for method, score in zip(metric_method_list, scores):
        print("method {} score is : {:.4f} ".format(method, score))

    # eval ssim
    # Structural similarity between the two embeddings, reshaped into 2-D maps.
    # NOTE(review): `torchmetrics` is not imported in the visible file header —
    # confirm it is imported elsewhere in this file.
    ssim = torchmetrics.image.StructuralSimilarityIndexMeasure(data_range=1.0)
    if arch not in capsule_network_family_list and arch != "glom":
        # Each supported feature_dim gets a fixed 2-D layout whose product
        # equals feature_dim (e.g. 1024 -> 32x32).
        if feature_dim == 1024:
            ssim_score = ssim(enc_out_x.view(1, 1, 32, 32), enc_out_y.view(1, 1, 32, 32))
        elif feature_dim == 512:
            ssim_score = ssim(enc_out_x.view(1, 1, 32, 16), enc_out_y.view(1, 1, 32, 16))
        elif feature_dim == 1568:
            ssim_score = ssim(enc_out_x.view(1, 1, 56, 28), enc_out_y.view(1, 1, 56, 28))
        elif feature_dim == 768:
            ssim_score = ssim(enc_out_x.view(1, 1, 24, 32), enc_out_y.view(1, 1, 24, 32))
        elif feature_dim == 2364:
            # For this dim the output is treated as a numpy array (reshaped
            # 197x12; presumably ViT-style token embeddings — TODO confirm):
            # add batch/channel axes and convert back to a tensor for SSIM.
            enc_out_x = np.expand_dims(enc_out_x, axis=0)
            enc_out_x = np.expand_dims(enc_out_x, axis=0)
            enc_out_y = np.expand_dims(enc_out_y, axis=0)
            enc_out_y = np.expand_dims(enc_out_y, axis=0)
            enc_out_tensor_x = torch.from_numpy(enc_out_x)
            enc_out_tensor_y = torch.from_numpy(enc_out_y)
            ssim_score = ssim(enc_out_tensor_x.view(1, 1, 197, 12), enc_out_tensor_y.view(1, 1, 197, 12))
        else:
            # Unsupported feature_dim: sentinel value meaning "SSIM not computed".
            ssim_score = -1
    else:
        if arch == "glom":
            # NOTE(review): earlier in this script the GLOM output is indexed
            # with [0] before use; here it is reshaped directly — verify the
            # output type is a plain tensor on this path.
            ssim_score = ssim(enc_out_x.reshape(1, 1, 32, 32), enc_out_y.reshape(1, 1, 32, 32))
        elif arch in capsule_network_family_list:
            # Capsule output tuple: element 0 is viewed as a 200x128 map.
            ssim_score = ssim(enc_out_x[0].view(1, 1, 200, 128), enc_out_y[0].view(1, 1, 200, 128))
        else:
            # Unreachable given the outer condition; kept as a fallback.
            ssim_score = ssim(enc_out_x.view(1, 1, 32, 16), enc_out_y.view(1, 1, 32, 16))
    print("ssim: ", ssim_score)
