import torch
import torch.nn as nn
import torch.nn.functional as F

from torchvision.models import resnet50, resnet18, resnet34, resnet101, resnet152
from torchvision.models import vgg16, vgg16_bn
from torchvision.models import mobilenet_v3_small, mobilenet_v3_large
from torchvision.models import VisionTransformer

import config
# Single-channel (grayscale) capsule models
# from efficient_capsule_model_oil import ConvEncoder, EfficientCapsNet, EfficientCapsNetWithReconstruction
from efficient_capsnet_orgin_oil_gray import EfficientCapsNet, EfficientCapsNetWithReconstruction
from efficient_capsnet_orgin import EfficientCapsOrginNet
# Three-channel capsule models
from efficient_res_capsule_model import ConvEncoder, EfficientResCapsNet, EfficientResCapsNetWithReconstruction
from simple_efficient_res_capsule_model import SimpleEfficientResCapsReconstructionNet
from bilateralfsunet import BilateralFSUnet
from efficient_resnet18_capsule_model import EfficientResNet18CapsNet
from resizer_efficient_resnet18_capsule_model import ResizerEfficientResNet18CapsNet
from simple_efficient_res_capsule_model import SimpleEfficientResCapsNet
from torch_classical_features_model_oil import ClassicalFeaturesEncoder
# from resnet import Resnet10CIFAR
from glom_pytorch import Glom
from crate_encoder import *  # provides CRATE; avoid relying on it for torch/nn/F

# Architectures accepted by ``getmodel`` (names must match its dispatch
# strings exactly).  'resnet-34'/'resnet-50' were unreachable spellings —
# ``getmodel`` checks "resnet34"/"resnet50" — and are fixed here.
# NOTE(review): 'faster_vit_4_21k_224' is listed but its getmodel branch is
# currently disabled, so selecting it raises NameError — confirm intent.
models_list = ['vgg16', 'vgg16_bn', 'resnet18', 'resnet34', 'resnet50',
               'resnet101', 'resnet152', 'mobilenet_v3',
               'efficient-resnet18-capsule', 'resizer_efficient-resnet18-capsule',
               'crate_small', 'crate_base', 'crate_large', 'crate_tiny', 'crate_tiny_small',
               'vit-b-50', 'vit-b-125', 'vit-l-50', 'vit-l-125', 'glom', 'classical_features',
               'faster_vit_4_21k_224']

# Backbones whose getmodel() returns (backbone, num_capsules, capsule_dim).
capsule_network_family_list = ['efficient-capsule-orgin', 'efficient-capsule', 'efficient-res-capsule',
                               'res-capsule', 'efficient-resnet18-capsule', 'simple_efficient_res_capsule',
                               'resizer_efficient-resnet18-capsule']

# CRATE variants; 'crate__tiny_small' (double underscore) was a typo that
# could never match the "crate_tiny_small" arch string.
crate_network_family_list = ['crate_small', 'crate_base', 'crate_large', 'crate_tiny', 'crate_tiny_small']

def getmodel(arch, user_device):
    """Build a feature-extractor backbone for the given architecture name.

    Every torchvision backbone is patched for single-channel (grayscale)
    input and has its classification head replaced with ``nn.Identity`` so
    the model yields features instead of class logits.

    Args:
        arch: Architecture identifier (see ``models_list``).
        user_device: Device handle forwarded to capsule/Glom models that
            take it at construction time; ignored by the other backbones.

    Returns:
        ``(backbone, feature_dim)`` for plain encoders, or
        ``(backbone, num_capsules, capsule_dim)`` for capsule-family and
        Glom models.

    Raises:
        NameError: If ``arch`` is not a recognized architecture.
    """
    if arch == "vgg16":
        backbone = vgg16(weights=None)
        backbone.features[0] = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        backbone.classifier[-1] = nn.Identity()
        return backbone, 4096
    elif arch == "vgg16_bn":
        backbone = vgg16_bn(weights=None)
        backbone.features[0] = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        backbone.classifier[-1] = nn.Identity()
        return backbone, 4096
    elif arch == "resnet18-cifar":
        # CIFAR-style stem: 3x3 stride-1 conv and no max-pool, so small
        # inputs are not downsampled too aggressively.
        backbone = resnet18()
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        backbone.maxpool = nn.Identity()
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet18-imagenet":
        backbone = resnet18()
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet18-tinyimagenet":
        backbone = resnet18()
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet18":
        backbone = resnet18(weights=None)
        # NOTE(review): kernel_size=7 with stride=1, padding=1 differs from
        # the other resnet stems (stride=2, padding=3) — confirm intentional.
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=1, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet34":
        backbone = resnet34(weights=None)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 512
    elif arch == "resnet50":
        backbone = resnet50(weights=None)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 2048
    elif arch == "resnet101":
        backbone = resnet101(weights=None)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 2048
    elif arch == "resnet152":
        backbone = resnet152(weights=None)
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Identity()
        return backbone, 2048
    elif arch == "mobilenet_v3":
        # Fixed: ``in_dim`` is not a torchvision MobileNetV3 constructor
        # parameter; single-channel input is handled by replacing
        # ``features[0]`` below instead.
        backbone = mobilenet_v3_small(weights=None)
        backbone.features[0] = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        backbone.features[-1] = torch.nn.Identity()
        # NOTE(review): replacing features[-1] changes the channel count fed
        # to the stock classifier — confirm callers use only the returned
        # 1024-dim feature path.
        return backbone, 1024
    elif arch == "conv-encoder":
        backbone = ConvEncoder()
        backbone.fc = nn.Identity()
        return backbone, 256
    elif arch == "efficient-capsule-orgin":
        backbone = EfficientCapsOrginNet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 10, 16
    elif arch == "efficient-res-capsule":
        backbone = EfficientResCapsNet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 256, 16
    elif arch == "simple_efficient_res_capsule":
        backbone = SimpleEfficientResCapsNet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 288, 16
    elif arch == "efficient-capsule":
        backbone = EfficientCapsNet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 512, 16
    elif arch == "efficient-resnet18-capsule":
        backbone = EfficientResNet18CapsNet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 6, 128
    elif arch == 'resizer_efficient-resnet18-capsule':
        backbone = ResizerEfficientResNet18CapsNet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 256, 16
    elif arch == "bilateralfsunet":
        backbone = BilateralFSUnet(user_device)
        backbone.fc = nn.Identity()
        return backbone, 128
    elif arch in ("crate_tiny", "crate_small", "crate_base", "crate_large", "crate_tiny_small"):
        # (patch_size, dim, depth, heads) for each CRATE variant; the five
        # branches differed only in these four numbers.
        patch_size, dim, depth, heads = {
            "crate_tiny":       (50, 384, 12, 6),
            "crate_small":      (25, 576, 12, 12),
            "crate_base":       (50, 768, 12, 12),
            "crate_large":      (125, 1024, 24, 16),
            "crate_tiny_small": (50, 128, 4, 8),
        }[arch]
        backbone = CRATE(image_size=250,
                         patch_size=patch_size,
                         num_classes=1000,
                         dim=dim,
                         depth=depth,
                         heads=heads,
                         channels=1,
                         dropout=0.0,
                         emb_dropout=0.0,
                         dim_head=dim // heads)
        backbone.fc = nn.Identity()
        return backbone, dim
    elif arch in ("vit-b-50", "vit-b-125", "vit-l-50", "vit-l-125"):
        # (patch_size, num_layers, num_heads, hidden_dim, mlp_dim) per
        # ViT variant; b = base, l = large, suffix = patch size.
        patch_size, num_layers, num_heads, hidden_dim, mlp_dim = {
            "vit-b-50":  (50, 12, 12, 768, 3072),
            "vit-b-125": (125, 12, 12, 768, 3072),
            "vit-l-50":  (50, 24, 16, 1024, 4096),
            "vit-l-125": (125, 24, 16, 1024, 4096),
        }[arch]
        backbone = VisionTransformer(
            image_size=250,
            patch_size=patch_size,
            num_layers=num_layers,
            num_heads=num_heads,
            hidden_dim=hidden_dim,
            mlp_dim=mlp_dim)
        # Re-create the patch projection for single-channel input.
        backbone.conv_proj = nn.Conv2d(
            in_channels=1, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
        )
        # Drop the classification head.
        backbone.heads = torch.nn.Identity()
        return backbone, hidden_dim
    # "faster_vit_4_21k_224" (timm-based) support is currently disabled;
    # selecting it falls through to NameError below.  See git history.
    elif arch == "glom":
        backbone = Glom(
            dim=256,              # per-level token dimension
            levels=4,             # number of hierarchy levels
            image_size=250,
            patch_size=125,
            consensus_self=False,
            local_consensus_radius=0,
            image_chanel=1,       # spelled per the Glom constructor
            return_state=2,
            device=user_device
        )
        return backbone, 4, 256  # (image_size/patch_size)^2, dim
    elif arch == "classical_features":
        feature_list = ["Hus", "CLCM", "hog"]
        backbone = ClassicalFeaturesEncoder(feature_list)
        return backbone, 2364
    else:
        raise NameError("{} not found in network architecture".format(arch))


class encoder(nn.Module):
    """Backbone plus MLP projection head.

    Wraps a backbone from ``getmodel`` and maps its features through a
    pre-feature MLP and a projection MLP, L2-normalizing the projection.

    Args:
        user_device: Device handle forwarded to backbones that need it.
        z_dim: Output dimension of the projection head.
        hidden_dim: Width of the two hidden MLP layers.
        norm_p: Order of the norm used by ``F.normalize`` on ``z``.
        arch: Backbone architecture name understood by ``getmodel``.
    """

    def __init__(self, user_device="cuda", z_dim=1024, hidden_dim=4096, norm_p=2, arch="resnet18-cifar"):
        super().__init__()
        self.backbone_name = arch
        self.device_use = user_device

        # Capsule-family and Glom backbones report (num_capsules, capsule_dim);
        # every other backbone (CRATE included — its branch was identical to
        # the generic one) reports a flat feature dimension.
        if self.backbone_name in capsule_network_family_list or self.backbone_name == "glom":
            backbone, num_capsules, capsules_feature_dim = getmodel(arch, self.device_use)
            self.backbone_feature_dim = num_capsules * capsules_feature_dim
        else:
            backbone, feature_dim = getmodel(arch, self.device_use)
            self.backbone_feature_dim = feature_dim

        self.backbone = backbone
        self.norm_p = norm_p
        self.pre_feature = nn.Sequential(nn.Linear(self.backbone_feature_dim, hidden_dim),
                                         nn.BatchNorm1d(hidden_dim),
                                         nn.ReLU())
        self.projection = nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
                                        nn.BatchNorm1d(hidden_dim),
                                        nn.ReLU(),
                                        nn.Linear(hidden_dim, z_dim))
        # Flatten everything after dim 0 (dim 0 is the batch/sample index).
        self.feature_flatten = nn.Flatten(start_dim=1, end_dim=-1)

    def forward(self, x, is_test=False):
        """Encode ``x``.

        Returns:
            Training (``is_test=False``): ``z`` — or ``(z, probs)`` for
            capsule backbones, where ``probs`` comes off the backbone.
            Test (``is_test=True``): ``(z, feature)`` — or
            ``(z, feature, probs)`` for capsule backbones.
        """
        if self.backbone_name in capsule_network_family_list:
            # Capsule nets return a tuple; element 0 is the capsule tensor.
            feature_map = self.feature_flatten(self.backbone(x)[0])
        elif self.backbone_name == "glom":
            # NOTE(review): Glom is called with return_all=2 here but was
            # built with return_state=2 — confirm the intended signature.
            feature_map = self.feature_flatten(self.backbone(x, return_all=2))
        else:
            # CRATE and plain CNN/ViT backbones already emit flat features.
            feature_map = self.backbone(x)

        feature = self.pre_feature(feature_map)
        z = F.normalize(self.projection(feature), p=self.norm_p)

        if self.backbone_name in capsule_network_family_list:
            return (z, feature, self.backbone.probs) if is_test else (z, self.backbone.probs)
        return (z, feature) if is_test else z


   
    