import torch
import torchvision.models as models
from collections import OrderedDict
import numpy as np
import torch.nn as nn

# ImageNet-pretrained VGG16, used by ADEVGG16._initialize_weights to seed the
# encoder convolutions. Loading happens at import time and downloads the
# weights on first use (module-level side effect).
# NOTE(review): ``pretrained=True`` is deprecated in newer torchvision in
# favour of ``weights=models.VGG16_Weights.IMAGENET1K_V1`` — confirm the
# installed torchvision version before migrating.
vgg16_pretrained = models.vgg16(pretrained=True)


class ADEVGG16(torch.nn.Module):
    """VGG16-based convolutional autoencoder (SegNet-style) with a
    classification head.

    The encoder mirrors VGG16's 13 conv layers; its MaxPool2d layers return
    pooling indices so the decoder's MaxUnpool2d layers can invert them
    exactly. Behaviour is selected by the constructor flags:

    * ``tr=True`` — autoencoder training: ``forward`` returns
      ``(code, reconstruction, classifier_confidence)``.
    * ``ft=True`` — fine-tuning during tracking: ``forward`` returns the
      output of a chosen encoder layer, or the classifier scores.

    Args:
        n_classes: number of output classes of the classifier head.
        tr: enable autoencoder-training mode.
        ft: enable fine-tuning mode.
    """

    def __init__(self, n_classes=2, tr=False, ft=False):
        super(ADEVGG16, self).__init__()
        # Maps each decoder MaxUnpool2d position to the encoder module index
        # whose pooling indices it must reuse. Encoder module indices run
        # 0..30 (13 convs + 13 ReLUs + 5 pools); the pools sit at indices
        # 4, 9, 16, 23, 30 and are consumed by the decoder in reverse order.
        self.unpool2PoolIdx = {15: 4, 12: 9, 8: 16, 4: 23, 0: 30}
        self.tr = tr
        self.ft = ft
        # Encoder: VGG16 conv stack. Each named sub-Sequential holds exactly
        # one Conv2d (plus ReLU, and a pool at stage ends), which
        # _initialize_weights relies on via ``self.encoder[i][0]``.
        self.encoder = nn.Sequential(OrderedDict([
            ('conv1', nn.Sequential(
                            nn.Conv2d(3, 64, 3, padding=1),
                            nn.ReLU())),
            ('conv2', nn.Sequential(
                            nn.Conv2d(64, 64, 3, padding=1),
                            nn.ReLU(),
                            # return_indices=True: indices feed MaxUnpool2d
                            nn.MaxPool2d(2, stride=2, return_indices=True))),
            ('conv3', nn.Sequential(
                            nn.Conv2d(64, 128, 3, padding=1),
                            nn.ReLU())),
            ('conv4', nn.Sequential(
                            nn.Conv2d(128, 128, 3, padding=1),
                            nn.ReLU(),
                            nn.MaxPool2d(2, stride=2, return_indices=True))),
            ('conv5', nn.Sequential(
                            nn.Conv2d(128, 256, 3, padding=1),
                            nn.ReLU())),
            ('conv6', nn.Sequential(
                            nn.Conv2d(256, 256, 3, padding=1),
                            nn.ReLU())),
            ('conv7', nn.Sequential(
                            nn.Conv2d(256, 256, 3, padding=1),
                            nn.ReLU(),
                            nn.MaxPool2d(2, stride=2, return_indices=True))),
            ('conv8', nn.Sequential(
                            nn.Conv2d(256, 512, 3, padding=1),
                            nn.ReLU())),
            ('conv9', nn.Sequential(
                            nn.Conv2d(512, 512, 3, padding=1),
                            nn.ReLU())),
            ('conv10', nn.Sequential(
                            nn.Conv2d(512, 512, 3, padding=1),
                            nn.ReLU(),
                            nn.MaxPool2d(2, stride=2, return_indices=True))),
            ('conv11', nn.Sequential(
                            nn.Conv2d(512, 512, 3, padding=1),
                            nn.ReLU())),
            ('conv12', nn.Sequential(
                            nn.Conv2d(512, 512, 3, padding=1),
                            nn.ReLU())),
            ('conv13', nn.Sequential(
                            nn.Conv2d(512, 512, 3, padding=1),
                            nn.ReLU(),
                            nn.MaxPool2d(2, stride=2, return_indices=True)))]))
        # Per-module encoder activations (31 slots, indices 0..30) and the
        # pooling indices keyed by module index, filled by forward_encoder.
        self.feature_outputs = [0] * 31
        self.pool_indices = dict()
        # Decoder: mirror of the encoder; MaxUnpool2d positions match the
        # keys of self.unpool2PoolIdx above.
        self.decoder = torch.nn.ModuleList([
            torch.nn.MaxUnpool2d(2, stride=2),
            torch.nn.ConvTranspose2d(512, 512, 3, padding=1),
            torch.nn.ConvTranspose2d(512, 512, 3, padding=1),
            torch.nn.ConvTranspose2d(512, 512, 3, padding=1),
            torch.nn.MaxUnpool2d(2, stride=2),
            torch.nn.ConvTranspose2d(512, 512, 3, padding=1),
            torch.nn.ConvTranspose2d(512, 512, 3, padding=1),
            torch.nn.ConvTranspose2d(512, 256, 3, padding=1),
            torch.nn.MaxUnpool2d(2, stride=2),
            torch.nn.ConvTranspose2d(256, 256, 3, padding=1),
            torch.nn.ConvTranspose2d(256, 256, 3, padding=1),
            torch.nn.ConvTranspose2d(256, 128, 3, padding=1),
            torch.nn.MaxUnpool2d(2, stride=2),
            torch.nn.ConvTranspose2d(128, 128, 3, padding=1),
            torch.nn.ConvTranspose2d(128, 64, 3, padding=1),
            torch.nn.MaxUnpool2d(2, stride=2),
            torch.nn.ConvTranspose2d(64, 64, 3, padding=1),
            torch.nn.ConvTranspose2d(64, 3, 3, padding=1)])

        # Classifier head on the flattened bottleneck code.
        self.classifier = nn.Sequential(OrderedDict([
            ('fc1', nn.Sequential(
                            nn.Linear(512 * 7 * 7, 4096),  # 224x224 input pooled down to 7x7
                            nn.ReLU(),
                            nn.Dropout())),
            ('score', nn.Sequential(
                            nn.Linear(4096, n_classes))),]))

        # self._initialize_weights()
        self.build_param_dict()

    def build_param_dict(self):
        """Build a flat name->parameter dict (encoder + classifier) used to
        select which parameters are learnable."""
        self.params = OrderedDict()
        for name, module in self.encoder.named_children():
            append_params(self.params, module, name)
        for name, module in self.classifier.named_children():
            append_params(self.params, module, name)

    def _initialize_weights(self):
        """Copy ImageNet-pretrained VGG16 conv weights into the encoder.

        Fix: torchvision's VGG exposes its convolutional stack as
        ``features`` — there is no ``encoder`` attribute, so the original
        loop raised AttributeError whenever it was enabled.
        """
        index = 0
        for layer in vgg16_pretrained.features:
            if isinstance(layer, nn.Conv2d):
                # Each named encoder stage keeps its Conv2d at position 0.
                self.encoder[index][0].weight.data = layer.weight.data
                self.encoder[index][0].bias.data = layer.bias.data
                index += 1

    def set_learnable_params(self, layers):
        """Enable gradients only for params whose name starts with any of
        the given layer-name prefixes; freeze everything else."""
        for k, p in self.params.items():
            if any([k.startswith(l) for l in layers]):
                p.requires_grad = True
            else:
                p.requires_grad = False

    def get_learnable_params(self):
        """Return an OrderedDict of the currently trainable parameters."""
        params = OrderedDict()
        for k, p in self.params.items():
            if p.requires_grad:
                params[k] = p
        return params

    def forward_ft(self, module, x):
        """Run one encoder/classifier stage for fine-tuning; pooling indices
        are discarded since no reconstruction happens in this mode."""
        output = x
        for layer in module:
            if isinstance(layer, nn.MaxPool2d):
                # return_indices=True pools yield (output, indices).
                output, indices = layer(output)
            else:
                output = layer(output)
        return output

    def forward_encoder(self, x):
        """Run the full encoder, recording every intermediate activation in
        self.feature_outputs and every pooling index in self.pool_indices
        (keyed by flat module index) for use by forward_decoder."""
        output = x
        index = 0
        for name, module in self.encoder.named_children():
            for i, layer in enumerate(module):
                if isinstance(layer, nn.MaxPool2d):
                    output, indices = layer(output)
                    self.feature_outputs[index] = output
                    self.pool_indices[index] = indices
                    index += 1
                else:
                    output = layer(output)
                    self.feature_outputs[index] = output
                    index += 1
        return output

    def forward_decoder(self, x, pool_indices):
        """Run the decoder; each MaxUnpool2d reuses the matching encoder
        pooling indices via the unpool2PoolIdx mapping."""
        output = x
        for i, layer in enumerate(self.decoder):
            if isinstance(layer, nn.MaxUnpool2d):
                output = layer(output, pool_indices[self.unpool2PoolIdx[i]])
            else:
                output = layer(output)
        return output

    def forward(self, x, in_layer='conv1', out_layer='conv13'):
        """Dispatch on the mode flags.

        ft mode: run encoder stages from ``in_layer``; return the features at
        ``out_layer`` (flattened after conv13), or — if ``out_layer`` is not
        an encoder stage — the classifier scores.

        tr mode: return ``(code, reconstruction, confidence)``.

        NOTE(review): with both flags False this falls through and returns
        None — confirm callers never hit that path.
        """
        if self.ft:
            run = False
            for name, module in self.encoder.named_children():
                if name == in_layer:
                    run = True
                if run:
                    x = self.forward_ft(module, x)
                    if name == 'conv13':
                        # Flatten the bottleneck for the linear layers.
                        x = x.view(x.size(0), -1)
                    if name == out_layer:
                        return x

            # Not an encoder layer: continue into the classifier head.
            for name, module in self.classifier.named_children():
                x = self.forward_ft(module, x)
                if name == 'score':
                    return x
            return x
        elif self.tr:
            x = self.forward_encoder(x)
            code = x.view(x.size()[0], -1)
            conf = self.classifier(code)
            output = self.forward_decoder(x, self.pool_indices)
            return code, output, conf

def append_params(params, module, prefix):
    """Register every direct parameter of *module*'s children into *params*.

    Keys take the form ``<prefix>_<param_name>`` (``<prefix>_bn_<param_name>``
    when the child is a BatchNorm2d). ``None`` parameter slots are skipped.

    Raises:
        RuntimeError: if a generated key already exists in *params*.
    """
    for sub in module.children():
        # BatchNorm children get a distinguishing '_bn' tag in the key.
        tag = prefix + ('_bn_' if isinstance(sub, nn.BatchNorm2d) else '_')
        for pname, param in sub._parameters.items():
            if param is None:
                continue
            key = tag + pname
            if key in params:
                raise RuntimeError("Duplicated param name: %s" % (key))
            params[key] = param
