import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...backbones import *
from ..base import BaseModel
from ..pspnet import PyramidPoolingModule



def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 ``nn.Conv2d`` with padding 1.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        stride: convolution stride (default 1).
    """
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)

class PPM_conv(nn.Module):
    """Pyramid-pooling projection + classifier head.

    One 1x1 conv branch (to 512 channels) per pool scale projects each
    externally-pooled feature map; the branches are upsampled to the input
    resolution, concatenated with the raw input feature, and classified by
    a 3x3 conv -> dropout -> 1x1 conv stack.
    """

    def __init__(self, pool_scales, norm_cfg, fc_dim=2048, num_class=None):
        super(PPM_conv, self).__init__()
        # One projection branch per pyramid scale (the pooling itself is
        # done by the caller; see forward's `xs` argument).
        branches = []
        for _ in pool_scales:
            branches.append(nn.Sequential(
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                BuildNormalization(norm_cfg['type'], (512, norm_cfg['opts'])),
                nn.ReLU(inplace=True),
            ))
        self.ppm = nn.ModuleList(branches)
        # Final classifier over [input feature ++ all upsampled branches].
        self.conv_last_ = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512, 512,
                      kernel_size=3, padding=1, bias=False),
            BuildNormalization(norm_cfg['type'], (512, norm_cfg['opts'])),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1),
        )

    def forward(self, x, xs):
        """Fuse `x` with the projected, upsampled pooled features `xs`.

        Args:
            x: reference feature map; its spatial size is the upsample target.
            xs: pre-pooled feature maps, one per branch in ``self.ppm``.

        Returns:
            Per-class logits at the spatial resolution of `x`.
        """
        target_hw = (x.size(2), x.size(3))
        fused = [x]
        for branch, pooled in zip(self.ppm, xs):
            upsampled = nn.functional.interpolate(
                branch(pooled), target_hw,
                mode='bilinear', align_corners=False)
            fused.append(upsampled)
        return self.conv_last_(torch.cat(fused, 1))


class Clip_PSP(BaseModel):
    """Clip-based PSPNet head: pyramid pooling draws features from the current
    frame AND its neighbouring clip frames, optionally fused with learned
    per-frame softmax weights, before the PSP classifier.

    Cfg keys read below: 'deep_sup_scale', 'num_classes', 'psp_weight',
    'ppm'->'pool_scales', 'backbone'->'selected_indices'.
    """
    def __init__(self, cfg, **kwargs):
        super(Clip_PSP, self).__init__(cfg, **kwargs)
        align_corners, norm_cfg, act_cfg = self.align_corners, self.norm_cfg, self.act_cfg
        # NLL loss over log-softmax outputs; label value 255 is ignored.
        self.crit = nn.NLLLoss(ignore_index=255)
        self.deep_sup_scale = cfg['deep_sup_scale']
        self.num_classes = cfg['num_classes']
        # When truthy, a 1x1 conv + global avg-pool yields one scalar per
        # frame, softmaxed across frames to weight the pooled clip features.
        self.psp_weight = cfg['psp_weight']
        self.pool_scales = cfg['ppm']['pool_scales']
        # fc_dim = 2048
        fc_dim = 1024
        self.ppm_conv = PPM_conv(pool_scales=self.pool_scales,
                                norm_cfg=norm_cfg,
                                fc_dim=fc_dim,
                                num_class=self.num_classes)
        # Auxiliary (deep-supervision) head, fed with the stage-3 feature x3.
        self.deepsup  = nn.Sequential(
            nn.Conv2d(fc_dim // 2, fc_dim // 4, kernel_size=3,
                      stride=1, padding=1, bias=False),
            BuildNormalization(norm_cfg['type'], (fc_dim // 4, norm_cfg['opts'])),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(fc_dim // 4, self.num_classes, 1, 1, 0),
            )
        # Plain (parameter-free) adaptive pools, one per pyramid scale; the
        # learnable projection convs live inside PPM_conv.
        self.ppm_pool = []
        if self.psp_weight:
            self.pspweight_conv = nn.Sequential(nn.Conv2d(fc_dim,1,kernel_size=1,bias=False),
                                                 nn.AdaptiveAvgPool2d((1,1)))
        for scale in self.pool_scales:
            self.ppm_pool.append(nn.AdaptiveAvgPool2d(scale))
        self.ppm_pool = nn.ModuleList(self.ppm_pool)


    def pixel_acc(self, pred, label):
        """Pixel accuracy of argmax(pred, dim=1) vs label over label >= 0.

        NOTE(review): pixels labelled 255 (the NLLLoss ignore_index set in
        __init__) satisfy label >= 0 and are counted as valid (and therefore
        wrong) here — confirm this is intended.
        """
        _, preds = torch.max(pred, dim=1)
        valid = (label >= 0).long()
        acc_sum = torch.sum(valid * (preds == label).long())
        pixel_sum = torch.sum(valid)
        acc = acc_sum.float() / (pixel_sum.float() + 1e-10)
        return acc
    def get_1x_lr_params(self):
        """Yield trainable non-bias encoder parameters (base learning rate).

        NOTE(review): self.encoder is presumably provided by BaseModel — not
        visible in this file; confirm the attribute exists.
        """
        modules = [self.encoder]
        for i in range(len(modules)):
            for m in modules[i].named_modules():
                for key, p in m[1].named_parameters():
                    if p.requires_grad and (not ('bias' in key)):

                        yield p

    def get_10x_lr_params(self):
        """Yield trainable non-bias head parameters (10x learning rate)."""
        modules = [self.ppm_conv]
        if self.deep_sup_scale is not None:
            modules.append(self.deepsup)
        if self.psp_weight:
            modules.append(self.pspweight_conv)
        for i in range(len(modules)):
            for m in modules[i].named_modules():
                for key, p in m[1].named_parameters():
                    if p.requires_grad and (not ('bias' in key)):
                        yield p

    def get_1x_lr_params_bias(self):
        """Yield trainable encoder bias parameters (base learning rate)."""
        modules = [self.encoder]
        for i in range(len(modules)):
            for m in modules[i].named_modules():
                for key, p in m[1].named_parameters():
                    if p.requires_grad and 'bias' in key:
                        yield p
    def get_10x_lr_params_bias(self):
        """Yield trainable head bias parameters (10x learning rate).

        NOTE(review): unlike get_10x_lr_params, pspweight_conv is omitted
        here. Harmless today — its conv has bias=False so it owns no bias
        parameters — but the asymmetry looks unintentional.
        """
        modules = [self.ppm_conv]
        if self.deep_sup_scale is not None:
            modules.append(self.deepsup)
        for i in range(len(modules)):
            for m in modules[i].named_modules():
                for key, p in m[1].named_parameters():
                    if p.requires_grad and 'bias' in key:
                        yield p
    
    def forward(self, feed_dict,  segSize=None):
        """Run the clip-aware PSP forward pass.

        Args:
            feed_dict: dict with 'image' (current frame), 'clip_imgs' (list of
                neighbour frames; mutated in place — the current frame is
                appended), and, outside TEST mode, 'segmentation' (+
                'clip_segs' in TRAIN mode, also mutated in place).
            segSize: output spatial size for inference-time upsampling.

        Returns:
            TRAIN mode: (loss, pixel accuracy). Otherwise: softmax class
            probabilities upsampled to segSize.
        """
        c_img = feed_dict['image']
        clip_imgs = feed_dict['clip_imgs']
        if self.mode != 'TEST':
            label = feed_dict['segmentation']
            _,h,w = label.size()
        clip_num = len(clip_imgs)
        # Stack clip frames and the current frame (last) into one batch so
        # the backbone runs once for all of them.
        clip_imgs.append(c_img)
        input = torch.cat(clip_imgs,dim=0)
        x1, x2, x3, x4 = self.transforminputs(self.backbone_net(input), selected_indices=self.cfg['backbone'].get('selected_indices'))

        if self.psp_weight:
            # Scalar weight per frame: (B*(clip_num+1),1,1,1) -> softmax over
            # the frame axis so the pooled features form a convex combination.
            psp_w = self.pspweight_conv(x4)
            psp_w = torch.split(psp_w,split_size_or_sections=int(psp_w.size(0)/(clip_num+1)), dim=0)
            psp_w = [psp_ww.unsqueeze(-1) for psp_ww in psp_w]
            psp_w = torch.cat(psp_w,dim=-1)
            psp_w = F.softmax(psp_w,dim=-1)

        # Undo the batching: per-frame stage-4 features; current frame last.
        x4 = torch.split(x4,split_size_or_sections=int(x4.size(0)/(clip_num+1)), dim=0)
        c_tmp = x4[-1]
        others_tmp = x4[:-1]
        # pooled_features[i] collects, for pyramid scale i, the pooled maps of
        # the current frame first, then each neighbour frame.
        pooled_features=[]
        for i in range(len(self.pool_scales)):
            pooled_features.append([])
        for i,pool in enumerate(self.ppm_pool):
            tmp_f = pool(c_tmp)
            pooled_features[i].append(tmp_f.unsqueeze(-1))
        for i,pool in enumerate(self.ppm_pool):
            for j,other in enumerate(others_tmp):
                tmp_f = pool(other)
                pooled_features[i].append(tmp_f.unsqueeze(-1))

        # Fuse across frames: weighted (or plain) mean over the frame axis.
        p_fs=[]
        for feature in pooled_features:
            feature  =torch.cat(feature,dim=-1)
            if self.psp_weight:
#                psp_w = psp_w.expand_as(feature)
                feature = feature * psp_w
            feature = torch.mean(feature,dim=-1)
            p_fs.append(feature)
        pred_ = self.ppm_conv(c_tmp,p_fs)
        if self.mode != 'TRAIN':
            # Inference: upsample to the requested size and return softmax.
            pred_ = nn.functional.interpolate(
                     pred_, size=segSize, mode='bilinear', align_corners=False)
            pred_ = nn.functional.softmax(pred_, dim=1)
            return pred_
        else:
            # Training: main loss on the current frame only; the auxiliary
            # head (if enabled) is supervised on every frame in the batch.
            clip_labels = feed_dict['clip_segs']
            clip_labels.append(label)
            pred_ = nn.functional.log_softmax(pred_, dim=1)
            _,h,w = label.size()
            #label= label.squeeze(1)
            label = label.long()
            pred_ = F.interpolate(pred_,(h,w),mode='bilinear',align_corners=False)
            loss = self.crit(pred_,label)
            alllabel = torch.cat(clip_labels,dim=0)
            if self.deep_sup_scale is not None:

                #alllabel = alllabel.squeeze(1)
                alllabel = alllabel.long()
                
                pred_deepsup_s = self.deepsup(x3)
                pred_deepsup_s = nn.functional.log_softmax(pred_deepsup_s, dim=1)  
                pred_deepsup = F.interpolate(pred_deepsup_s,(h,w),mode='bilinear',align_corners=False)
                loss_deepsup = self.crit(pred_deepsup, alllabel)
                loss = loss + loss_deepsup * self.deep_sup_scale
            acc = self.pixel_acc(pred_, label)
            return loss, acc
    
    def alllayers(self):
        """Group this model's layers by weight-decay policy of the backbone.

        NOTE(review): pspweight_conv (when psp_weight is set) is not included
        in any group — confirm whether downstream consumers of alllayers()
        should see it.
        """
        layers = {
            'ppm_net': self.ppm_conv,
            'deepsup': self.deepsup,
        }
        tmp_layers = []
        for key, value in self.backbone_net.zerowdlayers().items():
            tmp_layers.append(value)
        layers.update({'backbone_net_zerowd': nn.Sequential(*tmp_layers)})
        tmp_layers = []
        for key, value in self.backbone_net.nonzerowdlayers().items():
            tmp_layers.append(value)
        layers.update({'backbone_net_nonzerowd': nn.Sequential(*tmp_layers)})
        return layers
            
        
                    
                     
                
                
             

    
