from torch import nn
import math
import torch
import torch.nn.functional as F
from collections import OrderedDict
from .unet import Unet
from .unet_pcn import Unet_pcn
from .alignment import Alignment,Offset_gen_t
from torchvision import ops
from .myresnet import myresnet
from .attention_net import ChannelGate, SpatialGate

# my attention
class SpatialAttentionModule(nn.Module):
    """Fuse two views (e.g. left/right) of a frame into a single image.

    Each 3-channel input is first aligned by its own modulated deformable
    convolution, whose offsets and masks are predicted from the input
    concatenated with its error map.  A shared spatial descriptor of the two
    aligned feature maps then drives a per-pixel softmax attention that blends
    the ORIGINAL (un-aligned) inputs.

    NOTE(review): the ``in_channels`` parameter is currently unused; kept for
    backward compatibility with existing callers.
    """

    def __init__(self, cfg, in_channels=2, bias=False):
        super(SpatialAttentionModule, self).__init__()
        self.cfg = cfg

        # Squeeze the concatenated avg/max descriptors (2 channels per branch
        # -> 4 total) down to a single shared attention feature map.
        self.conv_du = nn.Sequential(nn.Conv2d(4, 1, 1, padding=0, bias=bias), nn.PReLU())

        # Offset/mask generators: input is 3-channel image + error map
        # (assumed 2-channel — inferred from the 3+2 channel arithmetic below;
        # TODO confirm against the caller).  Output: 18 offsets + 9 modulation
        # masks for a 3x3 deformable kernel.
        self.offset_gens1_1 = Offset_gen_t(3 + 2, 18 + 9)
        self.offset_gens1_2 = Offset_gen_t(3 + 2, 18 + 9)

        self.dconvs1_1 = ops.DeformConv2d(3, 64, kernel_size=3, padding=1, stride=1)
        self.dconvs1_2 = ops.DeformConv2d(3, 64, kernel_size=3, padding=1, stride=1)

        # Per-branch 1x1 convs mapping [shared feature (1ch) + error map (2ch)]
        # to a single attention logit.
        self.fcs = nn.ModuleList([])
        for i in range(2):
            self.fcs.append(nn.Conv2d(3, 1, kernel_size=1, stride=1, bias=bias))
        # Softmax across the two branch logits (channel dim).
        self.softmax = nn.Softmax(dim=1)

    def forward(self, xs, Errors):
        """Blend the two inputs using error-guided spatial attention.

        Args:
            xs: list of two image tensors (3 channels each).
            Errors: list of two error-map tensors, paired with ``xs``.

        Returns:
            The attention-weighted combination of ``xs[0]`` and ``xs[1]``.
        """
        feats = []

        # Predict deformable offsets/masks from each input + its error map.
        offset1, mask1 = self.offset_gens1_1(torch.cat((xs[0], Errors[0]), dim=1), flag='pam')
        offset2, mask2 = self.offset_gens1_2(torch.cat((xs[1], Errors[1]), dim=1), flag='pam')

        feats.append(self.dconvs1_1(xs[0], offset1, mask1))
        feats.append(self.dconvs1_2(xs[1], offset2, mask2))

        # Channel-pooled (mean & max) spatial descriptor of each aligned branch.
        outs = []
        for i in range(0, 2):
            avgout = torch.mean(feats[i], dim=1, keepdim=True)
            maxout, _ = torch.max(feats[i], dim=1, keepdim=True)
            outs.append(torch.cat([avgout, maxout], dim=1))

        feature = self.conv_du(torch.cat(outs, axis=1))

        # One attention logit per branch, conditioned on the shared feature
        # and that branch's error map.
        attention_vectors = []
        for i in range(0, 2):
            attention_vectors.append(self.fcs[i](torch.cat([feature, Errors[i]], axis=1)))

        # Per-pixel softmax over the two branches -> blending weights.
        attention_vectors = torch.cat(attention_vectors, dim=1)
        attention_vectors = self.softmax(attention_vectors)

        out = xs[0] * attention_vectors[:, 0:1, ...] + xs[1] * attention_vectors[:, 1:2, ...]

        return out
    

def normal_init(m, mean, std):
    """Initialize conv / transposed-conv weights from N(mean, std²); zero the bias.

    Fix: the original unconditionally dereferenced ``m.bias`` and crashed with
    an AttributeError on layers built with ``bias=False`` (several such layers
    exist in this file).  Layers of other types are left untouched.
    """
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        if m.bias is not None:
            m.bias.data.zero_()


class Alignment_time(nn.Module):
    """Temporally fuse three consecutive frame features around the middle one.

    The two neighbour frames are warped toward the centre frame with modulated
    deformable convolutions, channel attention reweighs the concatenation, and
    a small residual network produces the fused output on top of a skip
    connection from the centre frame.
    """

    def __init__(self, cfg):
        super(Alignment_time, self).__init__()
        self.cfg = cfg
        self.act = nn.ReLU()

        # Offset/mask predictors, each fed a (neighbour, centre) channel concat;
        # 18 offsets + 9 modulation masks for a 3x3 deformable kernel.
        self.offset_gens0 = Offset_gen_t(cfg.tem_input * 2, 18 + 9)
        self.offset_gens1 = Offset_gen_t(cfg.tem_input * 2, 18 + 9)

        self.dconvs0 = ops.DeformConv2d(cfg.tem_input, cfg.tem_output, kernel_size=3, padding=1, stride=1)
        self.dconvs1 = ops.DeformConv2d(cfg.tem_input, cfg.tem_output, kernel_size=3, padding=1, stride=1)

        # Plain conv for the centre frame (no warping needed).
        self.conv = nn.Conv2d(cfg.tem_input, cfg.tem_output, kernel_size=3, padding=1)
        self.conv_out = myresnet(cfg.tem_output * 3)  # small residual refinement net
        self.channel_att = ChannelGate(cfg.tem_output * 3)
        self.sigmod = nn.Sigmoid()

    def forward(self, outputs1, errors):
        """Fuse ``outputs1`` (list of three frame tensors: prev, mid, next).

        ``errors`` is accepted for interface compatibility but is not used.
        """
        prev_frame, mid_frame, next_frame = outputs1[0], outputs1[1], outputs1[2]

        # Offsets/masks conditioned on each (neighbour, centre) pair.
        off_prev, msk_prev = self.offset_gens0(torch.cat([prev_frame, mid_frame], axis=1), flag='tem')
        off_next, msk_next = self.offset_gens1(torch.cat([next_frame, mid_frame], axis=1), flag='tem')

        aligned_prev = self.act(self.dconvs0(prev_frame, off_prev, msk_prev))
        aligned_mid = self.act(self.conv(mid_frame))
        aligned_next = self.act(self.dconvs1(next_frame, off_next, msk_next))

        fused = self.channel_att(torch.cat([aligned_prev, aligned_mid, aligned_next], axis=1))

        # Residual skip back onto the (unprocessed) centre frame.
        return self.conv_out(fused) + mid_frame


class Generator(nn.Module):
    """End-to-end pipeline: per-frame stereo alignment + attention fusion,
    per-frame reconstruction, then temporal fusion across the three frames."""

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

        self.alignment = Alignment(cfg)
        self.unet_pcn = Unet_pcn(cfg)
        self.attention1 = SpatialAttentionModule(cfg)
        self.alignment_time = Alignment_time(cfg)

        if cfg.init:
            self.init_weights()

    def forward(self, batch):
        """Run the full pipeline on one batch.

        Args:
            batch: dict with 'inputs', 'masks' and 'dpoints' tensors whose
                dim 1 holds three frames; each frame splits into two views.

        Returns:
            The temporally fused output for the middle frame.
        """
        img = batch['inputs']
        mask = batch['masks']
        error = batch['dpoints']

        # Split dim 1 into the three consecutive frames.
        frame_imgs = torch.chunk(img, 3, 1)
        frame_errors = torch.chunk(error, 3, 1)
        frame_masks = torch.chunk(mask, 3, 1)

        per_frame_outputs = []
        for f_img, f_err, f_mask in zip(frame_imgs, frame_errors, frame_masks):
            # Split each frame into its left/right pair.
            imgL, imgR = torch.chunk(f_img.squeeze(1), 2, 1)
            errL, errR = torch.chunk(f_err.squeeze(1), 2, 1)

            featL = self.alignment(torch.cat([imgL, errL], axis=1))
            featR = self.alignment(torch.cat([imgR, errR], axis=1))  # integrate

            fused = self.attention1([featL, featR], [errL, errR])
            per_frame_outputs.append(self.unet_pcn(fused, f_mask.squeeze(1)))

        # Temporal fusion; the errors argument is unused downstream, so an
        # empty list is passed (same as the original code's empty accumulator).
        return self.alignment_time(per_frame_outputs, [])

    def init_weights(self):
        # Kaiming-uniform init for every parameter tensor of rank > 1
        # (weight matrices/kernels; biases are left at their defaults).
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.kaiming_uniform_(param)

def load_checkpoint(model, weights):
    """Load ``checkpoint["state_dict"]`` from the file ``weights`` into ``model``.

    If a direct load fails (RuntimeError from mismatched keys), falls back to
    stripping the ``"module."`` prefix that ``nn.DataParallel`` prepends to
    every key, and retries.

    Fixes vs. the original:
      * bare ``except:`` narrowed to ``RuntimeError`` (the exception
        ``load_state_dict`` raises for key mismatches), so KeyboardInterrupt
        and genuine bugs are no longer swallowed;
      * ``k[7:]`` is now applied only to keys that actually start with
        ``"module."`` — blind slicing corrupted already-clean keys.
    """
    checkpoint = torch.load(weights)
    state_dict = checkpoint["state_dict"]
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        # Keys were saved from a DataParallel-wrapped model; strip the prefix.
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] if k.startswith("module.") else k
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)