import torch
import torch.nn as nn
# from unet_pytorch.nets.vgg import VGG16
import numpy as np

class unetUp(nn.Module):
    """One U-Net decoder stage: upsample the deeper map, concatenate the
    encoder skip connection, then refine with two 3x3 convolutions."""

    def __init__(self, in_size, out_size):
        super(unetUp, self).__init__()
        # Two size-preserving 3x3 convolutions (padding=1 keeps H and W).
        self.conv1 = nn.Conv2d(in_size, out_size, kernel_size=(3, 3), padding=1)
        self.conv2 = nn.Conv2d(out_size, out_size, kernel_size=(3, 3), padding=1)
        # Bilinear upsampling doubles the spatial resolution of inputs2.
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        self.relu = nn.ReLU(inplace=True)
        self._initialize_weights()

    def forward(self, inputs1, inputs2):
        # inputs1: encoder skip connection; inputs2: deeper decoder feature map.
        merged = torch.cat([inputs1, self.up(inputs2)], 1)
        refined = self.relu(self.conv1(merged))
        return self.relu(self.conv2(refined))

    def _initialize_weights(self):
        # Kaiming init for convolutions; constant init for any batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


class Unet(nn.Module):
    """U-Net semantic-segmentation network with a VGG-16 encoder.

    Args:
        num_classes: number of output channels (per-pixel class scores).
        in_channels: channels of the input image.
        pretrained: load ImageNet VGG-16 weights into the encoder.
    """

    def __init__(self, num_classes=21, in_channels=3, pretrained=False):
        super(Unet, self).__init__()
        # Encoder: the VGG-16 built below in this file (classifier head removed).
        self.vgg = VGG16(pretrained=pretrained, in_channels=in_channels)
        # in_filters[i]: channels after concatenating skip i with the upsampled
        # decoder map; out_filters[i]: channels stage i produces.
        in_filters = [192, 384, 768, 1024]
        out_filters = [64, 128, 256, 512]
        # Decoder (upsampling) stages, deepest first.
        # Spatial sizes below assume a 512x512 input.
        self.up_concat4 = unetUp(in_filters[3], out_filters[3])  # -> 64x64x512
        self.up_concat3 = unetUp(in_filters[2], out_filters[2])  # -> 128x128x256
        self.up_concat2 = unetUp(in_filters[1], out_filters[1])  # -> 256x256x128
        self.up_concat1 = unetUp(in_filters[0], out_filters[0])  # -> 512x512x64

        # Final 1x1 conv maps the 64-channel map to per-pixel class scores
        # (no concatenation at this point).
        self.final = nn.Conv2d(out_filters[0], num_classes, (1, 1))

    def forward(self, inputs):
        # Encoding: slice the VGG feature stack at each pooling boundary so the
        # intermediate maps can serve as skip connections. Layer layout ('D'):
        # [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
        #  512, 512, 512, 'M', 512, 512, 512, 'M']
        feat1 = self.vgg.features[:4](inputs)
        feat2 = self.vgg.features[4:9](feat1)
        feat3 = self.vgg.features[9:16](feat2)
        feat4 = self.vgg.features[16:23](feat3)
        feat5 = self.vgg.features[23:-1](feat4)  # [:-1] stops before the last pool
        # Decoding: upsample and fuse with the matching encoder feature map.
        up4 = self.up_concat4(feat4, feat5)
        up3 = self.up_concat3(feat3, up4)
        up2 = self.up_concat2(feat2, up3)
        up1 = self.up_concat1(feat1, up2)

        return self.final(up1)




import torchvision.models
try:
    # torchvision < 0.13 exposed the download helper here.
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    # torchvision.models.utils was removed in 0.13; torch.hub hosts the
    # same helper with the same signature.
    from torch.hub import load_state_dict_from_url
class VGG(nn.Module):
    """Plain VGG classifier: a convolutional feature extractor followed by the
    standard three-layer fully connected head."""

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        # Pool to a fixed 7x7 map so the classifier accepts any input size.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        self._initialize_weights()

    def forward(self, x):
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)

    def _initialize_weights(self):
        """Kaiming init for convs, constants for batch norm, small-normal for
        linear layers."""
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, 0)
            elif isinstance(layer, nn.BatchNorm2d):
                nn.init.constant_(layer.weight, 1)
                nn.init.constant_(layer.bias, 0)
            elif isinstance(layer, nn.Linear):
                nn.init.normal_(layer.weight, 0, 0.01)
                nn.init.constant_(layer.bias, 0)

def make_layers(cfg, batch_norm=False, in_channels = 3):
    """Translate a VGG config list into an nn.Sequential.

    Each integer entry becomes a 3x3 same-padding convolution (optionally
    followed by BatchNorm) plus an in-place ReLU; each 'M' entry becomes a
    2x2 stride-2 max pool.
    """
    layers = []
    channels = in_channels
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        if batch_norm:
            # BatchNorm subtracts its own learned shift, so the conv bias
            # would be redundant — drop it.
            layers.append(nn.Conv2d(channels, item, kernel_size=(3, 3), padding=1, bias=False))
            layers.append(nn.BatchNorm2d(item))
        else:
            layers.append(nn.Conv2d(channels, item, kernel_size=(3, 3), padding=1))
        layers.append(nn.ReLU(inplace=True))
        channels = item
    return nn.Sequential(*layers)
# Feature-map progression of config 'D' for a 512x512x3 input (HxWxC):
# 512,512,3 -> 512,512,64 -> 256,256,64 -> 256,256,128 -> 128,128,128 -> 128,128,256 -> 64,64,256
# 64,64,512 -> 32,32,512 -> 32,32,512
cfgs = {
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']# VGG-16 ('D') layer configuration; ints are conv out-channels, 'M' is max-pool
}


def VGG16(pretrained, in_channels, **kwargs):
    """Build a VGG-16 feature extractor for the U-Net encoder.

    Args:
        pretrained: if True, load the official torchvision ImageNet weights
            (downloaded to ./model_data and cached between runs).
        in_channels: channels of the first convolution's input.
        **kwargs: forwarded to ``VGG`` (e.g. ``num_classes``).

    Returns:
        A ``VGG`` instance with ``avgpool`` and ``classifier`` removed —
        only ``model.features`` is meant to be used by callers.
    """
    model = VGG(make_layers(cfgs["D"], batch_norm=False, in_channels=in_channels), **kwargs)
    if pretrained:
        print('Loading weights into state dict...')
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # torch.hub is used directly because torchvision.models.utils (the old
        # re-export of this helper) was removed in torchvision >= 0.13. The
        # helper also caches the file, so there is no need to torch.load the
        # checkpoint from disk a second time.
        from torch.hub import load_state_dict_from_url
        pretrained_dict = load_state_dict_from_url(
            "https://download.pytorch.org/models/vgg16-397923af.pth",
            model_dir="./model_data",
            file_name="vgg16-397923af.pth",
            map_location=device,
        )
        model_dict = model.state_dict()
        # Keep only weights whose name exists in the current model and whose
        # shape matches, so a different in_channels (first conv) or a custom
        # num_classes head does not break loading.
        pretrained_dict = {
            k: v for k, v in pretrained_dict.items()
            if k in model_dict and model_dict[k].shape == v.shape
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        print('Finished!')
    # Drop the classification head: the U-Net consumes only model.features.
    del model.avgpool
    del model.classifier
    return model