from torch import nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import torch.nn.functional as F
from copy import deepcopy
from other_layers import *
from functools import partial
from other_layers import bilinear, cat_FcMap

from models.resnet import resnet50_cat,resnet101_cat ,resnet101_cat_dilate
from models.drn import drn_d_54_cat

class cat_Res101d_Drn54(nn.Module):
    """Two-stream classifier that concatenates globally pooled features.

    Features come from a dilated ResNet-101 branch (2048-d after pooling)
    and a DRN-D-54 branch (512-d after pooling); a multi-head linear
    classifier (`cat_linear`) sits on top of the concatenated vector.

    Args:
        num_classes: list of per-attribute class counts, one fc head each.
        resume1: checkpoint path (state dict) for the ResNet branch.
        resume2: checkpoint path (state dict) for the DRN branch.
        stage: 1 = train only the final fc (backbones run without grad);
            any other value trains end-to-end.
    """

    def __init__(self, num_classes, resume1, resume2, stage=1):
        super(cat_Res101d_Drn54, self).__init__()
        self.model1 = resnet101_cat_dilate(num_classes=num_classes)
        self.model1.load_state_dict(torch.load(resume1))

        self.model2 = drn_d_54_cat(num_classes=num_classes)
        self.model2.load_state_dict(torch.load(resume2))

        # 2048 (ResNet-101) + 512 (DRN-D-54) pooled channels.
        self.fc = cat_linear(in_features=2048 + 512, out_features_list=num_classes)
        self.stage = stage

    def _extract(self, x):
        """Return the concatenated, globally pooled two-branch feature vector."""
        fea1 = F.adaptive_avg_pool2d(self.model1(x, ret_fea=True), 1).view(x.size(0), -1)
        fea2 = F.adaptive_avg_pool2d(self.model2(x, ret_fea=True), 1).view(x.size(0), -1)
        return torch.cat([fea1, fea2], 1)

    def forward(self, x):
        if self.stage == 1:
            # Stage 1 trains only the fc head: run the frozen backbones
            # without autograd.  This replaces the original
            # `x.volatile = True/False`, which raises a RuntimeError on
            # PyTorch >= 0.4 (the volatile flag was removed).
            with torch.no_grad():
                x = self._extract(x)
        else:
            x = self._extract(x)
        return self.fc(x)


class Bil_Res101d_Drn54(nn.Module):
    """Bilinear-pooling fusion of ResNet-101-dilated and DRN-D-54 features.

    The two backbone feature maps (2048 and 512 channels) are combined with
    the `bilinear` outer-product pooling, producing a 2048*512-d vector fed
    to a multi-head linear classifier.

    Args:
        num_classes: list of per-attribute class counts, one fc head each.
        resume1: checkpoint path (state dict) for the ResNet branch.
        resume2: checkpoint path (state dict) for the DRN branch.
        stage: 1 = train only the final fc (backbones run without grad);
            any other value trains end-to-end.
    """

    def __init__(self, num_classes, resume1, resume2, stage=1):
        super(Bil_Res101d_Drn54, self).__init__()
        self.stage = stage

        self.model1 = resnet101_cat_dilate(num_classes=num_classes)
        self.model1.load_state_dict(torch.load(resume1))

        self.model2 = drn_d_54_cat(num_classes=num_classes)
        self.model2.load_state_dict(torch.load(resume2))

        # Bilinear pooling yields a 2048*512 outer-product feature.
        self.fc = cat_linear(in_features=2048 * 512, out_features_list=num_classes)

    def _extract(self, x):
        """Return the bilinear-pooled fusion of the two backbone feature maps."""
        fea1 = self.model1(x, ret_fea=True)
        fea2 = self.model2(x, ret_fea=True)
        return bilinear(fea1, fea2)

    def forward(self, x):
        if self.stage == 1:
            # Frozen backbones in stage 1: no autograd for feature
            # extraction.  Replaces the removed `Variable.volatile` flag,
            # which raises a RuntimeError on PyTorch >= 0.4.
            with torch.no_grad():
                x = self._extract(x)
        else:
            x = self._extract(x)
        return self.fc(x)


class Bil2_Res101d_Drn54(nn.Module):
    """Two-stage fusion with 1x1-conv channel reduction before combining.

    Backbone feature maps are reduced (2048->512 and 512->128 channels) by
    trainable conv-BN-ReLU heads.  Stage 1 concatenates globally pooled
    reduced features (512+128-d); stage 2 fuses them with bilinear pooling
    (512*128-d).  The reduction heads and the fc are always trainable; the
    backbones run without grad in both stages (matching the original
    `volatile` behaviour, where features were un-volatiled only after the
    backbone pass).

    Args:
        num_classes: list of per-attribute class counts, one fc head each.
        resume1: checkpoint path (state dict) for the ResNet branch.
        resume2: checkpoint path (state dict) for the DRN branch.
        stage: 1 (concat) or 2 (bilinear); anything else is rejected.

    Raises:
        ValueError: if `stage` is not 1 or 2 (the original silently built a
            model whose forward returned None).
    """

    def __init__(self, num_classes, resume1, resume2, stage=1):
        super(Bil2_Res101d_Drn54, self).__init__()
        self.stage = stage

        self.model1 = resnet101_cat_dilate(num_classes=num_classes)
        self.model1.load_state_dict(torch.load(resume1))

        self.model2 = drn_d_54_cat(num_classes=num_classes)
        self.model2.load_state_dict(torch.load(resume2))

        # Trainable channel-reduction heads on top of the frozen backbones.
        self.reduction1 = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        )
        self.reduction2 = nn.Sequential(
            nn.Conv2d(512, 128, kernel_size=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )

        if self.stage == 1:
            self.fc = cat_linear(in_features=512 + 128, out_features_list=num_classes)
        elif self.stage == 2:
            self.fc = cat_linear(in_features=512 * 128, out_features_list=num_classes)
        else:
            raise ValueError('stage must be 1 or 2, got %r' % (stage,))

    def forward(self, x):
        # Backbones are feature extractors only: no autograd for them.
        # Replaces the removed `Variable.volatile` flag (RuntimeError on
        # PyTorch >= 0.4); the original cleared volatile right after the
        # backbone pass, so the reductions/fc were always trainable.
        with torch.no_grad():
            fea1 = self.model1(x, ret_fea=True)
            fea2 = self.model2(x, ret_fea=True)

        fea1 = self.reduction1(fea1)
        fea2 = self.reduction2(fea2)

        if self.stage == 1:
            # Global average pool each branch, then concatenate.
            fea1 = F.adaptive_avg_pool2d(fea1, 1).view(x.size(0), -1)
            fea2 = F.adaptive_avg_pool2d(fea2, 1).view(x.size(0), -1)
            fused = torch.cat([fea1, fea2], 1)
        else:
            # Stage 2: outer-product (bilinear) pooling of the two maps.
            fused = bilinear(fea1, fea2)
        return self.fc(fused)



class test_Res101d_Drn54(nn.Module):
    """Fusion via per-attribute class-activation maps (CAMs).

    Each backbone's fc classifier weights are copied into a `cat_FcMap`
    head, which applies them as 1x1 convolutions to produce per-class
    activation maps.  The two map stacks are concatenated, mixed by a 1x1
    conv, globally pooled and classified by a final multi-head fc.

    Args:
        num_classes: list of per-attribute class counts, one head each.
        resume1: checkpoint path (state dict) for the ResNet branch.
        resume2: checkpoint path (state dict) for the DRN branch.
        stage: 1 = run the backbones without grad (train fusion head only).
    """

    def __init__(self, num_classes, resume1, resume2, stage=1):
        super(test_Res101d_Drn54, self).__init__()
        self.stage = stage

        self.model1 = resnet101_cat_dilate(num_classes=num_classes)
        self.model1.load_state_dict(torch.load(resume1))

        self.model2 = drn_d_54_cat(num_classes=num_classes)
        self.model2.load_state_dict(torch.load(resume2))

        # Initialise the CAM heads from the pretrained per-attribute fc
        # weights so the maps start as the backbones' own class evidence.
        self.cat_FcMap1 = cat_FcMap(in_features=2048, out_features_list=num_classes)
        for i in range(len(num_classes)):
            getattr(self.cat_FcMap1, 'FcAmp%d' % i).weight.data = \
                getattr(self.model1.fc, 'fc%d' % i).weight.data
            getattr(self.cat_FcMap1, 'FcAmp%d' % i).bias.data = \
                getattr(self.model1.fc, 'fc%d' % i).bias.data

        self.cat_FcMap2 = cat_FcMap(in_features=512, out_features_list=num_classes)
        for i in range(len(num_classes)):
            # DRN fc weights need reshaping to (classes, channels).
            getattr(self.cat_FcMap2, 'FcAmp%d' % i).weight.data = \
                getattr(self.model2.fc, 'fc%d' % i).weight.data.view(num_classes[i], -1)
            getattr(self.cat_FcMap2, 'FcAmp%d' % i).bias.data = \
                getattr(self.model2.fc, 'fc%d' % i).bias.data

        # 1x1 conv mixing the two concatenated CAM stacks back down to one.
        self.MapConv = nn.Sequential(
            nn.Conv2d(sum(num_classes) * 2, sum(num_classes), kernel_size=1),
            nn.BatchNorm2d(sum(num_classes)),
            nn.ReLU(inplace=True)
        )
        # He initialisation for the mixing conv; identity-style BN init.
        for m in self.MapConv.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = cat_linear(in_features=sum(num_classes), out_features_list=num_classes)

    def forward(self, x):
        # Backbones are frozen feature extractors: no autograd for them.
        # Replaces the removed `Variable.volatile` flag (RuntimeError on
        # PyTorch >= 0.4); the original cleared volatile right after the
        # backbone pass, so the CAM/fusion heads were always trainable.
        with torch.no_grad():
            fea1 = self.model1(x, ret_fea=True)
            fea2 = self.model2(x, ret_fea=True)

        _, fmaps_cat1 = self.cat_FcMap1(fea1)
        _, fmaps_cat2 = self.cat_FcMap2(fea2)

        x = torch.cat([fmaps_cat1, fmaps_cat2], 1)
        x = self.MapConv(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)


class testBi_Res101d_Drn54(nn.Module):
    """Bilinear pooling of the two backbones' class-activation maps.

    Like `test_Res101d_Drn54`, each backbone's fc weights are copied into a
    `cat_FcMap` head to produce per-class activation maps; the two map
    stacks are then fused with bilinear (outer-product) pooling into a
    sum(num_classes)**2 vector and classified by a multi-head fc.

    Args:
        num_classes: list of per-attribute class counts, one head each.
        resume1: checkpoint path (state dict) for the ResNet branch.
        resume2: checkpoint path (state dict) for the DRN branch.
        stage: 1 = run the backbones without grad (train fusion head only).
    """

    def __init__(self, num_classes, resume1, resume2, stage=1):
        super(testBi_Res101d_Drn54, self).__init__()
        self.stage = stage

        self.model1 = resnet101_cat_dilate(num_classes=num_classes)
        self.model1.load_state_dict(torch.load(resume1))

        self.model2 = drn_d_54_cat(num_classes=num_classes)
        self.model2.load_state_dict(torch.load(resume2))

        # Seed the CAM heads with the pretrained per-attribute fc weights.
        self.cat_FcMap1 = cat_FcMap(in_features=2048, out_features_list=num_classes)
        for i in range(len(num_classes)):
            getattr(self.cat_FcMap1, 'FcAmp%d' % i).weight.data = \
                getattr(self.model1.fc, 'fc%d' % i).weight.data
            getattr(self.cat_FcMap1, 'FcAmp%d' % i).bias.data = \
                getattr(self.model1.fc, 'fc%d' % i).bias.data

        self.cat_FcMap2 = cat_FcMap(in_features=512, out_features_list=num_classes)
        for i in range(len(num_classes)):
            # DRN fc weights need reshaping to (classes, channels).
            getattr(self.cat_FcMap2, 'FcAmp%d' % i).weight.data = \
                getattr(self.model2.fc, 'fc%d' % i).weight.data.view(num_classes[i], -1)
            getattr(self.cat_FcMap2, 'FcAmp%d' % i).bias.data = \
                getattr(self.model2.fc, 'fc%d' % i).bias.data

        # Bilinear pooling of two sum(num_classes)-channel maps.
        self.fc = cat_linear(in_features=sum(num_classes) ** 2, out_features_list=num_classes)

    def forward(self, x):
        # Backbones are frozen feature extractors: no autograd for them.
        # Replaces the removed `Variable.volatile` flag (RuntimeError on
        # PyTorch >= 0.4); the original cleared volatile right after the
        # backbone pass, so the CAM/fc heads were always trainable.
        with torch.no_grad():
            fea1 = self.model1(x, ret_fea=True)
            fea2 = self.model2(x, ret_fea=True)

        _, fmaps_cat1 = self.cat_FcMap1(fea1)
        _, fmaps_cat2 = self.cat_FcMap2(fea2)

        x = bilinear(fmaps_cat1, fmaps_cat2)
        return self.fc(x)



if __name__ == '__main__':
    # Smoke test: build the CAM-bilinear fusion model from two local
    # checkpoints and push one dummy batch through it.
    from FSdata.FSdataset import attr2length_map

    resume1 = '/media/gserver/extra/FashionAI/round2/res101_d[0567]9233/weights-9-0-[0.7557]-[0.9233].pth'
    resume2 = '/media/gserver/extra/FashionAI/round2/drn/[0,5,6,7]/round2/weights-1-5441-[0.7599]-[0.9243].pth'

    select_AttrIdx = [0, 5, 6, 7]

    model = testBi_Res101d_Drn54(
        num_classes=[attr2length_map[x] for x in select_AttrIdx],
        resume1=resume1,
        resume2=resume2,
    )

    # Plain tensors suffice on modern PyTorch (Variable is deprecated),
    # and `print y` was a Python 2 statement (SyntaxError on Python 3).
    x = torch.zeros(3, 3, 336, 336)
    y = model(x)
    print(y)
