
from myutils.helper import get_index_last

import torch
import torch.nn as nn
import torchvision.models as models

def get_layers_before_avgpool(layers_in):
    """Return the prefix of `layers_in` up to and including the last
    AdaptiveAvgPool2d layer.

    layers_in: list of nn.Module (typically `list(model.children())`).
    Raises whatever `get_index_last` raises if no AdaptiveAvgPool2d is present.
    """
    is_avgpool = [isinstance(layer, nn.AdaptiveAvgPool2d) for layer in layers_in]
    last_pool = get_index_last(is_avgpool, True)
    return layers_in[:last_pool + 1]
        
    


def get_backbone(pre_model_name, use_transformer):
    """Build a pretrained ResNet backbone truncated before its avgpool layer.

    pre_model_name: one of 'resnet101', 'resnet50', 'resnet34'
                    (raises ValueError for anything else).
    use_transformer: accepted for interface compatibility; not used here.

    Returns (backbone_layers, features_backbone, layer_need_index,
             channels, shape) where `channels` lists per-stage channel
    counts (last entry duplicated as `features_backbone`).
    """
    shape_base = 64
    # NOTE(review): //8, //16 then //24 breaks the power-of-two pattern;
    # //32 would give the same value (2) here — confirm intent.
    shape = [shape_base // 8, shape_base // 16, shape_base // 24, 1]

    names = ['resnet101', 'resnet50', 'resnet34']
    builders = [models.resnet101, models.resnet50, models.resnet34]
    channel_bases = [256, 256, 64]

    idx = names.index(pre_model_name)  # ValueError on unknown name, as documented
    net = builders[idx](progress=True, pretrained=True)  # pretrained=True,num_classes=24
    backbone_layers = get_layers_before_avgpool(list(net.children()))

    channels = [channel_bases[idx] * (2 ** power) for power in range(1, 4)]
    features_backbone = channels[-1]
    channels.append(features_backbone)

    # Indices (from the end) of the backbone stages whose outputs feed the
    # transformer hooks in Model_one.
    layer_need_index = [-4, -3, -2, -1]

    return backbone_layers, features_backbone, layer_need_index, channels, shape

class Model_one(nn.Module):
    """Single network: pretrained-ResNet encoder plus auxiliary heads.

    Components (dims per the actual layer definitions below; the original
    docstring said 2048/1024 — the code uses 512):

    backbone     : image -> flattened feature vector (features_backbone)
    embedding_nn : features_backbone + shape_len -> 512 embedding
    location_nn  : 512 -> 3 centromere-position probabilities
    out_nn       : 512 + 3 -> 24 class probabilities

    When `use_transformer` is True, the embedding is instead produced by a
    TransformerEncoder fed with intermediate backbone activations captured
    via forward hooks.
    """

    def __init__(self,pre_model_name,use_transformer):
        # pre_model_name: backbone name passed through to get_backbone
        #                 ('resnet101' / 'resnet50' / 'resnet34').
        # use_transformer: selects the transformer embedding path in forward().
        super(Model_one, self).__init__()
        self.use_transformer = use_transformer
        # Length of the auxiliary `shape` vector concatenated to the features.
        shape_len = 4
        # Pretrained backbone, truncated before avgpool.
        backbone_temp,features_backbone,tfer_layer_need_index,tfer_channels,tfer_shape = get_backbone(pre_model_name,use_transformer) # take all stages
        self.backbone = nn.Sequential(*backbone_temp,nn.Flatten())
        # Embedding head: backbone features concatenated with `shape`.
        # features_backbone + shape_len -> 512
        self.embedding_nn = nn.Sequential(
            nn.Linear(features_backbone+shape_len,512),nn.ReLU(),
            nn.Linear(512,512),nn.ReLU(),
            )
        # Centromere-location head: embedding -> probabilities over 3 positions.
        # 512 -> 3
        self.location_nn = nn.Sequential(
            nn.Linear(512,3),nn.ReLU(),
            nn.Softmax(dim=1),
            )
        # Class head: embedding concatenated with predicted location.
        # 512 + 3 -> 24
        self.out_nn = nn.Sequential(
            nn.Linear(512+3,24), nn.ReLU(),
            nn.Softmax(dim=1),
            )

        if  self.use_transformer:
            # Capture intermediate activations of the selected backbone stages.
            # The modules in backbone_temp are the same objects wrapped by
            # self.backbone, so these hooks fire during self.backbone(input).
            for i in tfer_layer_need_index:
                backbone_temp[i].register_forward_hook(hook=self.hook)

            # Total flattened size of all hooked activations plus `shape`.
            # Assumes each hooked stage i outputs spatially square maps of
            # side tfer_shape[i] with tfer_channels[i] channels — TODO confirm
            # against get_backbone for each supported ResNet.
            value_number = [ i* j**2 for i,j in zip(tfer_channels,tfer_shape)]
            value_number_sum = sum(value_number)+shape_len
            print(f'value_number_sum={value_number_sum}')
            self.transformer_input = nn.Linear(value_number_sum,512)
            encoderLayer = torch.nn.TransformerEncoderLayer(d_model=512, nhead=8)
            self.transformer_encoder = torch.nn.TransformerEncoder(encoderLayer,6)
    



    def forward(self,input,shape):
        # input: image batch fed to the backbone.
        # shape: per-sample auxiliary vector, concatenated along dim=1
        #        (presumably (batch, 4) to match shape_len — verify at caller).
        # Returns (embedding, location_hat, Y_hat).

        if self.use_transformer:
            # Reset per-forward; self.hook appends into this list below.
            self.features_out_hook = []

        # Pretrained backbone (also triggers the forward hooks, if registered).
        backbone_out = self.backbone(input)

        if self.use_transformer:
            # Flatten every hooked activation, concatenate with `shape`,
            # project to d_model=512 and encode as a length-1 sequence.
            features_out_flatten = list(map(nn.Flatten(),self.features_out_hook))
            transformer_input_flatten = torch.cat([*features_out_flatten,shape],dim=1).squeeze(1)
            transformer_input_linear = self.transformer_input(transformer_input_flatten).reshape(-1,1,512)
            embedding = self.transformer_encoder(transformer_input_linear).reshape(-1,512)
        else:
            backbone_out_2 = torch.cat((backbone_out,shape),dim=1) # append shape features
            # Embedding via the MLP head.
            embedding = self.embedding_nn(backbone_out_2)

        # Centromere-location probabilities.
        location_hat = self.location_nn(embedding)

        # Class prediction from embedding + predicted location.
        param = torch.cat((embedding,location_hat),dim=1) # append location
        Y_hat = self.out_nn(param)
        # Return all three outputs so losses can be applied per head.
        return (embedding,location_hat,Y_hat)
    
    
    def hook(self,module, fea_in, fea_out):
        # Forward hook: records the hooked module's output for the
        # transformer path. Appends to the list reset in forward().
        self.features_out_hook.append(fea_out)
        return None
