import torch.nn as nn
import torch.nn.functional as F
import torch
from torchvision import models
from torch.nn import init

"""
遍历模型的每一层，判断各层属于什么类型, 例如, 是否是 nn.Conv2d、nn.BatchNorm2d、nn.Linear 等,
然后根据不同类型的层,设定不同的权值初始化方法,例如,Xavier,kaiming,normal_,uniform_等
"""
def weigth_init(net):
    """Initialize the parameters of *net* by layer type.

    Conv2d weights get Xavier-uniform init, BatchNorm2d scale factors are
    set to 1, and Linear weights are drawn from N(0, 1e-3). Biases are
    zeroed whenever they exist. Other layer types are left untouched.

    The (misspelled) function name is kept for backward compatibility with
    existing callers.

    Args:
        net: any nn.Module; all of its submodules are visited.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # Operate on the Parameter directly; init.* functions already
            # run under no_grad, so the legacy `.data` access is unneeded.
            init.xavier_uniform_(m.weight)
            if m.bias is not None:  # conv layers here use bias=False
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            if m.weight is not None:  # affine=False has no parameters
                init.constant_(m.weight, 1)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)


class MobileNet(nn.Module):
    """MobileNet-v1-style classifier with a width multiplier.

    Args:
        num_classes: size of the final logits vector.
        alpha: width multiplier; every channel count is scaled by it
            (and truncated to int).
    """

    def __init__(self, num_classes, alpha):
        super(MobileNet, self).__init__()
        self.alpha = alpha

        def conv_bn(inp, oup, stride):
            # Standard 3x3 conv -> BN -> ReLU stem block.
            return nn.Sequential(
                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True)
            )

        def conv_dw(inp, oup, stride):
            # Depthwise-separable block: 3x3 depthwise conv followed by a
            # 1x1 pointwise conv, each with BN + ReLU.
            return nn.Sequential(
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),

                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        self.model = nn.Sequential(
            conv_bn(3, int(32 * alpha), 1),
            conv_dw(int(32 * alpha), int(64 * alpha), 1),
            conv_dw(int(64 * alpha), int(128 * alpha), 2),
            conv_dw(int(128 * alpha), int(128 * alpha), 1),
            conv_dw(int(128 * alpha), int(256 * alpha), 2),
            conv_dw(int(256 * alpha), int(256 * alpha), 1),
            conv_dw(int(256 * alpha), int(512 * alpha), 2),
            conv_dw(int(512 * alpha), int(512 * alpha), 1),
            conv_dw(int(512 * alpha), int(512 * alpha), 1),
            conv_dw(int(512 * alpha), int(512 * alpha), 1),
            conv_dw(int(512 * alpha), int(512 * alpha), 1),
            conv_dw(int(512 * alpha), int(512 * alpha), 1),
            conv_dw(int(512 * alpha), int(1024 * alpha), 2),
            conv_dw(int(1024 * alpha), int(1024 * alpha), 1),
        )
        # Global average pooling makes the classifier head independent of
        # the input spatial size. The previous fixed AvgPool2d(2) was never
        # applied in forward(), so fc received a flattened C*H*W vector and
        # crashed for any input whose final feature map was not 1x1.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(int(1024 * alpha), num_classes)

    def forward(self, x):
        """Return raw logits of shape (batch, num_classes)."""
        x = self.model(x)
        x = self.avg_pool(x)           # (B, C, 1, 1)
        x = x.view(x.size(0), -1)      # flatten to (B, C)
        return self.fc(x)


def load_model_2_cpu(state_dict):
    """Strip the ``module.`` prefix that torch.nn.DataParallel adds.

    Training under DataParallel prefixes every parameter name with
    ``module.``; this rewrites the keys so the state dict loads into a
    plain (non-parallel) model.

    Args:
        state_dict: mapping of parameter name -> tensor.

    Returns:
        OrderedDict with the same values and order, with any leading
        ``module.`` removed. Keys without the prefix are kept intact
        (the old unconditional ``k[7:]`` slice corrupted them).
    """
    from collections import OrderedDict
    prefix = 'module.'
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[len(prefix):] if k.startswith(prefix) else k
        new_state_dict[name] = v
    return new_state_dict


class mobilenet_v2(nn.Module):
    """Torchvision MobileNetV2 backbone with a custom classification head."""

    def __init__(self, class_number):
        super(mobilenet_v2, self).__init__()
        backbone = models.mobilenet_v2(pretrained=False)
        # Replace the stock classifier with a single linear head sized to
        # the requested class count (1280 = MobileNetV2 feature width).
        backbone.classifier = nn.Linear(in_features=1280, out_features=class_number)
        self.feature = backbone

    def forward(self, x):
        """Return raw (un-normalized) class scores."""
        return self.feature(x)



if __name__ == '__main__':
    # Smoke test: build the torchvision-backed model, re-initialize its
    # weights, and run a dummy forward pass.
    net = mobilenet_v2(2)
    # weigth_init already iterates net.modules() itself; calling it directly
    # avoids the redundant per-submodule invocation that net.apply() caused.
    weigth_init(net)
    print(net)

    x = torch.randn((1, 3, 224, 224))
    out = net(x)
    print(out.shape)