import torch
from torchvision import models
import torch.nn as nn

'''Use ResNet-34 as the backbone.'''


class ResNet(nn.Module):
    """ResNet-34 backbone that returns a 5-level feature pyramid.

    The stock torchvision ResNet-34 stem expects 3-channel (RGB) input;
    this wrapper swaps in a stem conv that accepts ``in_channels`` inputs
    while keeping the rest of the pretrained network intact.

    Args:
        in_channels: Number of channels of the input image (default 1).

    Forward returns a list of 5 feature maps. For a (B, C, 384, 384)
    input the shapes are:
        [B, 64, 192, 192], [B, 64, 96, 96], [B, 128, 48, 48],
        [B, 256, 24, 24], [B, 512, 12, 12]
    """

    def __init__(self, in_channels=1):
        super(ResNet, self).__init__()
        model = models.resnet34(pretrained=True)
        # bias=False matches the original ResNet stem: the BatchNorm that
        # follows cancels any constant offset, so a conv bias is redundant.
        self.conv_input = nn.Conv2d(in_channels=in_channels, out_channels=64,
                                    kernel_size=7, stride=2, padding=3, bias=False)
        # Reuse the pretrained RGB stem weights instead of discarding them:
        # copy directly for 3-channel input; otherwise average the RGB
        # filters over the channel dim and rescale so that the summed
        # response to a constant input is preserved.
        with torch.no_grad():
            w = model.conv1.weight  # shape [64, 3, 7, 7]
            if in_channels == 3:
                self.conv_input.weight.copy_(w)
            else:
                adapted = w.mean(dim=1, keepdim=True).repeat(1, in_channels, 1, 1)
                self.conv_input.weight.copy_(adapted * (3.0 / in_channels))
        self.bn1 = model.bn1
        self.relu = model.relu
        self.maxpool = model.maxpool

        self.layer1 = model.layer1
        self.layer2 = model.layer2
        self.layer3 = model.layer3
        self.layer4 = model.layer4

    def forward(self, x):
        """Run the backbone and collect the feature map at every stage.

        Args:
            x: Input tensor of shape (B, in_channels, H, W).

        Returns:
            List of 5 tensors [y0, y1, y2, y3, y4], coarsest last.
        """
        out = []
        y0 = self.conv_input(x)          # stride-2 stem conv
        out.append(y0)
        y1 = self.layer1(self.maxpool(self.relu(self.bn1(y0))))
        out.append(y1)

        y2 = self.layer2(y1)
        out.append(y2)
        y3 = self.layer3(y2)
        out.append(y3)
        y4 = self.layer4(y3)
        out.append(y4)

        return out



if __name__ == '__main__':
    # Smoke test: push a single-channel 384x384 image through the backbone.
    # NOTE: torch.randn is used instead of torch.Tensor(1, 1, 384, 384) —
    # the latter returns *uninitialized* memory which may contain NaN/inf
    # and makes the check non-reproducible.
    img = torch.randn(1, 1, 384, 384)
    net = ResNet()
    feats = net(img)
    # Expected shapes for a 384x384 input:
    #   [1, 64, 192, 192], [1, 64, 96, 96], [1, 128, 48, 48],
    #   [1, 256, 24, 24], [1, 512, 12, 12]
    for f in feats:
        print(f.shape)



# class proj(nn.Module):
#     def __init__(self, in_channels, out_channels=240):
#         super(proj, self).__init__()
#         self.conv1X1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
#
#     def forward(self, x):
#         out = self.conv1X1(x)
#         return out

'''
Self-modified section: positional encoding was never added, so this is
temporarily discarded and unused.
'''

#
# class TFormer(nn.Module):
#     def __init__(self,
#                  d_model=240,
#                  in_channels=[64, 64, 128, 256, 512],
#                  proj_idxs=(2, 3, 4),
#                  n_levels=3, ):
#         super(TFormer, self).__init__()
#         self.trans = Transformer_2()
#         self.proj_idxs = proj_idxs
#         self.projs = nn.ModuleList()
#         for idx in self.proj_idxs:
#             self.projs.append(ConvModule(in_channels[2, 3, 4],
#                                          d_model,  ##将不同的特征图的通道数经过卷积变成一致的240，图片大小不变
#                                          kernel_size=3,
#                                          padding=1))
#         self.level_embed = nn.Parameter(torch.Tensor(n_levels, d_model))
#         self.position_embedding = build_position_encoding(position_embedding="sine", hidden_dim=d_model)
#
#     def projection(self, feats):
#         pos = []
#         masks = []
#         cnn_feats = []
#         tran_feats = []
#
#         for idx, feats in enumerate(feats):
#             if idx not in self.proj_idxs:
#                 cnn_feats.append(feats)
#             else:
#                 b, c, h, w = feats.shape
#                 ##一环套一环，总之位置编码就需要mask
#                 mask = torch.zeros((b, h, w)).to(torch.bool).to(feats.device)
#                 nested_feats = NestedTensor(feats, mask)
#                 masks.append(mask)
#                 pos.append(self.position_embedding(nested_feats).to(nested_feats.tensor.dtype))
#                 tran_feats.append(feats)
#         for idx, proj in enumerate(self.projs):  ### 将映射后相同通道数的特征图装入list
#             tran_feats[idx] = proj(tran_feats[idx])
#
#         return cnn_feats, tran_feats, pos, masks
#
#     def forward(self, x):
#         # project and prepare for the input没有加入CNN部分，考虑在。。。
#         cnn_feats, tran_feats, pos, masks = self.projection(x)
#         ##传入transformer##########
#         feature_shapes = []
#         features_flatten = []
#         spatial_shapes = []
#
#         for lvl, feature in enumerate(tran_feats):
#             bs, c, h, w = feature.shape
#             spatial_shapes.append((h, w))
#             feature_shapes.append(feature.shape)
#
#             feature = feature.flatten(2).transpose(1, 2)  ## feature:[bs,h*w,c]
#
#             features_flatten.append(feature)
#         features_flatten = torch.cat(features_flatten, 1)  # [bs,h*w,c]
#         ## 处理好的features传入Transformer_2
#         feats = self.trans(features_flatten)
#
#         ###
#         out = []
#         features = feats.split(spatial_shapes.prod(1).tolist(), dim=1)
#         for idx, (feats, ori_shape) in enumerate(zip(features, spatial_shapes)):
#             out.append(feats.transpose(1, 2).reshape(feature_shapes[idx]))
#
#         cnn_feats.extend(out)
#         return cnn_feats

# out1,out2,out3,out4 = ResNet(x)
# out2 = proj(out2)   # [bs,c,48,48]
# out3 = proj(out3)   # [bs,c,24,24]
# out4 = proj(out4)   # [bs,c,12,12]
#
# # B2,C,H2,W2 = out2.shape
# out2 = out2.flatten(2)  #[bs,c,48*48]
# out3 = out3.flatten(2) ## [bs,c,24*24]
# out4 = out4.flatten(2)  # [bs,c,12*12]
# out = torch.cat((out2,out3,out4),dim=2)    ##out:[bs, c, 48*48+24*24+12*12=3024]
#
# tr = self.trans(out)
