from collections import OrderedDict

import torch
import torch.functional as F
import torch.nn as nn
import torch.nn.functional as F  # fix: torch.functional has no log_softmax; rebind F to the intended nn.functional module

# Placeholder for a pretrained model used by the (currently disabled)
# weight-initialization helpers below.
# NOTE(review): as an empty list it has no `.features` attribute, so
# `_initialize_weights` would fail if re-enabled — confirm how the pretrained
# model is meant to be loaded.
model_pretrained = []
####
# 1. Convolution output-size formula
#    O = output size, I = input size, K = kernel size, N = number of kernels,
#    S = stride, P = padding
#    O = (I - K + 2P) / S + 1
# 2. Pooling output-size formula
#    O = (I + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1
####
class Encoder(nn.Module):
    """VGG-style convolutional encoder with a binary classification head
    ('score') and a bounding-box regression head ('bbreg').

    Spatial-size reference (for a 32x32 input): the three 2x2 max-pools reduce
    32 -> 16 -> 8 -> 4, so 'conv6' yields 128 * 4 * 4 features for the heads.

    Modes, selected by the constructor flags:
      * ``tr=True`` — autoencoder training: ``forward`` returns the code and
        the recorded max-pool indices (consumed by the matching ``Decoder``).
      * ``ft=True`` — tracking-time fine-tuning: ``forward`` runs the feature
        layers from ``in_layer`` through ``out_layer`` and, if features were
        not returned early, the classification head.

    Args:
        model_path: unused here; kept for interface compatibility.
        rgb: when True the input has 3 channels, otherwise 1.
        tr: enable autoencoder-training mode.
        ft: enable fine-tuning mode.
    """

    def __init__(self, model_path="", rgb=True, tr=False, ft=False):
        super(Encoder, self).__init__()
        self.tr = tr
        self.ft = ft
        self.in_ch = 3 if rgb else 1
        # Feature extractor. Max-pool layers return indices so the Decoder
        # can invert them with MaxUnpool2d.
        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Sequential(nn.Conv2d(self.in_ch, 32, kernel_size=3, padding=1),
                                    nn.ReLU())),
            ('conv2', nn.Sequential(nn.Conv2d(32, 64, kernel_size=3, padding=1),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2, stride=2, return_indices=True))),   # 32 -> 16
            ('conv3', nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1),
                                    nn.ReLU())),
            ('conv4', nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, padding=1),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2, stride=2, return_indices=True))),   # 16 -> 8
            ('conv5', nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1),
                                    nn.ReLU())),
            ('conv6', nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2, stride=2, return_indices=True))),   # 8 -> 4
        ]))
        # Binary classification head. dim=1 makes the previously implicit
        # (deprecated) softmax axis explicit; for 2-D input this is unchanged.
        # NOTE(review): BinaryLoss applies log_softmax again to these outputs;
        # feeding it raw scores may be the intent — confirm with training code.
        self.classifier = nn.Sequential(OrderedDict([
            ('score', nn.Sequential(nn.Dropout(0.5),
                                    nn.Linear(128 * 4 * 4, 2),
                                    nn.Softmax(dim=1)))
        ]))
        # Bounding-box regression head: 4 sigmoid-normalized outputs.
        self.bbreg = nn.Sequential(OrderedDict([
            ('bbreg', nn.Sequential(nn.Dropout(0.5),
                                    nn.Linear(128 * 4 * 4, 4),
                                    nn.Sigmoid()))
        ]))
        self.feature_outputs = [0] * 15   # per-layer activations, keyed by flat layer index
        self.pool_indices = dict()        # max-pool indices, reused for unpooling

        # self._initialize_weights()
        self.build_param_dict()

    def build_param_dict(self):
        """Build the name -> parameter dict used to select learnable params."""
        self.params = OrderedDict()
        for name, module in self.features.named_children():
            append_params(self.params, module, name)
        for name, module in self.classifier.named_children():
            append_params(self.params, module, name)

    def _initialize_weights(self):
        # NOTE(review): dead code — `model_pretrained` is a module-level list,
        # so `.features` would fail; left as-is because it is never called.
        for i, layer in enumerate(model_pretrained.features):
            if isinstance(layer, nn.Conv2d):
                self.features[i].weight.data = layer.weight.data
                self.features[i].bias.data = layer.bias.data

    def get_conv_layer_indices(self):
        """Flat indices of the Conv2d layers within the feature stack."""
        return [0, 2, 5, 7, 10, 12]

    def forward_features(self, module, x, index):
        """Run one named sub-module layer by layer.

        Records each layer's activation in ``self.feature_outputs`` and, for
        MaxPool2d layers, the pooling indices in ``self.pool_indices``.
        Returns (output, next flat layer index).
        """
        output = x
        for layer in module:
            if isinstance(layer, nn.MaxPool2d):
                # Pool layers were built with return_indices=True.
                output, indices = layer(output)
                self.pool_indices[index] = indices
            else:
                output = layer(output)
            self.feature_outputs[index] = output
            index += 1
        return output, index

    def forward_encoder(self, x):
        """Encode x through all feature layers (autoencoder training path).

        Pool indices are stored in ``self.pool_indices`` for the Decoder.
        """
        output = x
        index = 0
        for name, module in self.features.named_children():
            output, index = self.forward_features(module, output, index)
        return output

    def set_learnable_params(self, layers):
        """Enable gradients only for params whose name starts with one of *layers*."""
        for k, p in self.params.items():
            p.requires_grad = any(k.startswith(l) for l in layers)

    def get_learnable_params(self):
        """Return the subset of ``self.params`` that currently requires grad."""
        params = OrderedDict()
        for k, p in self.params.items():
            if p.requires_grad:
                params[k] = p
        return params

    def forward(self, x, in_layer='conv1', out_layer='conv6'):
        """Dispatch on the configured mode.

        ft mode: run feature layers from ``in_layer`` through ``out_layer``
        (flattening after 'conv6'); if ``out_layer`` did not trigger an early
        return, continue into the classification head and return its scores.
        tr mode: return (code, pool_indices) for autoencoder training.
        Returns None when neither flag is set (unchanged original behavior).
        """
        if self.ft:
            run = False
            index = 0
            for name, module in self.features.named_children():
                if name == in_layer:
                    run = True
                if run:
                    x, index = self.forward_features(module, x, index)
                    if name == 'conv6':
                        # Flatten to (N, 128*4*4) for the linear heads.
                        x = x.view(x.size(0), -1)
                    if name == out_layer:
                        return x

            # Classification head: only reached when out_layer was passed over.
            index = 0
            for name, module in self.classifier.named_children():
                x, index = self.forward_features(module, x, index)
                if name == 'score':
                    return x
            return x
        elif self.tr:
            code = self.forward_encoder(x)
            return code, self.pool_indices


def append_params(params, module, prefix):
    """Register every direct-child parameter of *module* in *params*.

    Keys are ``prefix + '_' + param_name`` (with an extra ``bn`` tag for
    BatchNorm children). Raises RuntimeError on a name collision.
    """
    for child in module.children():
        # BatchNorm children are tagged so their params are distinguishable.
        tag = '_bn_' if isinstance(child, nn.BatchNorm2d) else '_'
        for param_name, param in child._parameters.items():
            if param is None:
                continue
            key = prefix + tag + param_name
            if key in params:
                raise RuntimeError("Duplicated param name: %s" % (key))
            params[key] = param

class Decoder(nn.Module):
    """Deconvolutional mirror of the Encoder's feature stack.

    Alternates MaxUnpool2d stages — driven by the pool indices recorded by
    the Encoder — with ConvTranspose2d stages, expanding a (N, 128, 4, 4)
    code back to a (N, out_ch, 32, 32) image.
    """

    def __init__(self, rgb=True):
        super(Decoder, self).__init__()
        self.out_ch = 3 if rgb else 1
        # Encoder flat-layer indices whose pool indices feed each unpool
        # stage, in the order the unpool layers appear below.
        self.unpool2PoolIdx = [14, 9, 4]

        self.deconv_first_layers = nn.ModuleList([
            nn.MaxUnpool2d(2, stride=2),                       # 4 -> 8
            nn.ConvTranspose2d(128, 128, 3, padding=1),
            nn.ConvTranspose2d(128, 128, 3, padding=1),
            nn.MaxUnpool2d(2, stride=2),                       # 8 -> 16
            nn.ConvTranspose2d(128, 64, 3, padding=1),
            nn.ConvTranspose2d(64, 64, 3, padding=1),
            nn.MaxUnpool2d(2, stride=2),                       # 16 -> 32
            nn.ConvTranspose2d(64, 32, 3, padding=1),
            nn.ConvTranspose2d(32, self.out_ch, 3, padding=1),
        ])
        # One activation slot per deconv layer, filled during forward.
        self.feature_outputs = [0] * len(self.deconv_first_layers)
        # self._initialize_weights()

    def _initialize_weights(self):
        # NOTE(review): never called, and it references attributes
        # (deconv_features, conv2DeconvIdx, conv2DeconvBiasIdx) that this
        # class does not define — confirm before re-enabling.
        for src_idx, src_layer in enumerate(model_pretrained.features):
            if isinstance(src_layer, nn.Conv2d):
                self.deconv_features[self.conv2DeconvIdx[src_idx]].weight.data = src_layer.weight.data
                biasIdx = self.conv2DeconvBiasIdx[src_idx]
                if biasIdx > 0:
                    self.deconv_features[biasIdx].bias.data = src_layer.bias.data

    def forward_features(self, x, pool_indices):
        """Run every deconv layer in order, feeding each unpool stage its
        matching encoder pool indices; records per-layer activations."""
        out = x
        unpool_slot = 0
        for layer_idx, layer in enumerate(self.deconv_first_layers):
            if isinstance(layer, nn.MaxUnpool2d):
                out = layer(out, pool_indices[self.unpool2PoolIdx[unpool_slot]])
                unpool_slot += 1
            else:
                out = layer(out)
            self.feature_outputs[layer_idx] = out
        return out

    def forward(self, x, pool_indices):
        return self.forward_features(x, pool_indices)


# Decoder layer pattern: (1 MaxUnpool, 2 ConvTranspose) repeated three times
class BinaryLoss(nn.Module):
    """Summed negative log-likelihood over positive/negative score pairs.

    ``pos_score`` and ``neg_score`` are (N, 2) score tensors where column 1
    is the "target" class: positives are penalized for low target
    probability, negatives for low background probability.
    """

    def __init__(self):
        super(BinaryLoss, self).__init__()

    def forward(self, pos_score, neg_score):
        # dim=1 makes the previously implicit (deprecated) softmax axis
        # explicit; for (N, 2) inputs this matches the old behavior. The
        # file-level `F` must be torch.nn.functional (torch.functional has
        # no log_softmax).
        pos_loss = -F.log_softmax(pos_score, dim=1)[:, 1]
        neg_loss = -F.log_softmax(neg_score, dim=1)[:, 0]

        # Summed (not averaged) over both sample sets, as in the original.
        loss = pos_loss.sum() + neg_loss.sum()
        return loss


class Accuracy():
    """Classification accuracy for (N, 2) score tensors.

    A positive sample is correct when its class-1 score exceeds its class-0
    score; a negative sample when the opposite holds. Returns the pair
    (pos_acc, neg_acc) as Python floats.
    """

    def __call__(self, pos_score, neg_score):
        pos_correct = (pos_score[:, 1] > pos_score[:, 0]).sum().float()
        neg_correct = (neg_score[:, 1] < neg_score[:, 0]).sum().float()

        # 1e-8 guards against division by zero on empty score tensors.
        pos_acc = pos_correct / (pos_score.size(0) + 1e-8)
        neg_acc = neg_correct / (neg_score.size(0) + 1e-8)

        # .item() replaces the long-removed `.data[0]` indexing, which raises
        # IndexError on 0-dim tensors in modern PyTorch.
        return pos_acc.item(), neg_acc.item()


class Precision():
    """Precision@K with K = number of positive samples.

    Concatenates the class-1 scores of positives and negatives, takes the
    top-K, and returns the fraction of those that came from the positive
    set, as a Python float.
    """

    def __call__(self, pos_score, neg_score):
        scores = torch.cat((pos_score[:, 1], neg_score[:, 1]), 0)
        # Positives occupy indices [0, N_pos) in the concatenation, so a
        # top-K index below N_pos is a correctly ranked positive.
        topk = torch.topk(scores, pos_score.size(0))[1]
        # 1e-8 guards against division by zero on empty score tensors.
        prec = (topk < pos_score.size(0)).float().sum() / (pos_score.size(0) + 1e-8)

        # .item() replaces the long-removed `.data[0]` indexing, which raises
        # IndexError on 0-dim tensors in modern PyTorch.
        return prec.item()

