import re
from torch import nn
import model2
import torch
from torch.nn import functional as F
from torch.autograd import Variable
from model2 import Encoder, Decoder
import model2
from layers import Patch_ap, Gap
from SFNet import *





class DMPHN_With_MDSF(nn.Module):
    """DMPHN backbone whose per-level blocks are built from MDSF/SFNet
    components (EBlock, DBlock, BasicConv, SCM).

    Four sequential stages ('s1'..'s4') each process a three-level patch
    pyramid of the input: lv3 = four quarter patches, lv2 = two half
    patches, lv1 = the full image.  Each stage adds the previous stage's
    lv1 residual to its inputs, so later stages refine earlier ones.

    forward() returns a 4-tuple with the lv1 residual image of every stage.
    """

    def __init__(self):
        super(DMPHN_With_MDSF, self).__init__()
        base_channel = 32
        num_res = 3
        mode = 'train'
        self.Encoder = nn.ModuleDict()
        self.Decoder = nn.ModuleDict()
        self.feat_extract = nn.ModuleDict()
        self.merge = nn.ModuleDict()
        # One encoder stack per stage; channel width doubles per pyramid
        # level: [0]=lv3 (32 ch), [1]=lv2 (64 ch), [2]=lv1 (128 ch).
        for s in ['s1', 's2', 's3', 's4']:
            self.Encoder[s] = nn.ModuleList([
                EBlock(base_channel, num_res, mode),
                EBlock(base_channel * 2, num_res, mode),
                EBlock(base_channel * 4, num_res, mode)
            ])

        # feat_extract adapts channel counts between pyramid levels
        # (all stride=1, so spatial size is unchanged).
        for s in ['s1', 's2', 's3', 's4']:
            self.feat_extract[s] = nn.ModuleList([
                BasicConv(3, base_channel, kernel_size=3, relu=True, stride=1),                     # [0] lv3 input: RGB -> 32
                BasicConv(base_channel, base_channel*2, kernel_size=3, relu=True, stride=1),        # [1] lv3 residual -> lv2 width (32 -> 64)
                BasicConv(base_channel * 2, base_channel * 4, kernel_size=3, relu=True, stride=1),  # [2] lv2 residual -> lv1 width (64 -> 128)
                BasicConv(base_channel, base_channel * 2, kernel_size=3, relu=True, stride=1),      # [3] lv3 hidden -> lv2 width (32 -> 64)
                BasicConv(base_channel*2, base_channel*4, kernel_size=3, relu=True, stride=1),      # [4] lv2 hidden -> lv1 width (64 -> 128)
                BasicConv(base_channel*4, base_channel * 2, kernel_size=3, relu=False, stride=1),   # [5] output head: 128 -> 64
                BasicConv(base_channel*2, 3, kernel_size=3, relu=False, stride=1),                  # [6] output head: 64 -> RGB residual
            ])

        # One decoder stack per stage, mirroring the encoder widths.
        for s in ['s1', 's2', 's3', 's4']:
            self.Decoder[s] = nn.ModuleList([
                DBlock(base_channel, num_res, mode),
                DBlock(base_channel * 2, num_res, mode),
                DBlock(base_channel * 4, num_res, mode)
            ])

        # merge fuses pairwise-concatenated patch features:
        # [0]/[1] = the two lv3 halves (top/bottom), [2] = the full lv2 map.
        for s in ['s1', 's2', 's3', 's4']:
            self.merge[s] = nn.ModuleList([
                BasicConv(base_channel, base_channel, kernel_size=3, stride=1, relu=False),      # lv3 top
                BasicConv(base_channel, base_channel, kernel_size=3, stride=1, relu=False),      # lv3 bottom
                BasicConv(base_channel*2, base_channel *2, kernel_size=3, stride=1, relu=False), # lv2
            ])

        # Shallow conv modules embedding the RGB image at lv1/lv2 widths
        # (shared across stages).
        self.SCM1 = SCM(base_channel * 4)  # 128 ch, lv1
        self.SCM2 = SCM(base_channel * 2)  # 64 ch, lv2

    def forward(self, x):
        """Run all four stages on `x` (N,3,H,W, values centered by -0.5).

        Returns:
            tuple of 4 tensors: the lv1 residual image of stages s1..s4.
        """
        images = {}
        feature = {}
        residual = {}

        for s in ['s1', 's2', 's3', 's4']:
            feature[s] = {}   # encoder outputs per level
            residual[s] = {}  # decoder outputs per level

        images['lv1'] = Variable(x - 0.5)
        H = images['lv1'].size(2)
        W = images['lv1'].size(3)

        # Patch pyramid: lv2 = top/bottom halves, lv3 = four quarters.
        images['lv2_1'] = images['lv1'][:, :, 0:int(H / 2), :]
        images['lv2_2'] = images['lv1'][:, :, int(H / 2):H, :]
        images['lv3_1'] = images['lv2_1'][:, :, :, 0:int(W / 2)]
        images['lv3_2'] = images['lv2_1'][:, :, :, int(W / 2):W]
        images['lv3_3'] = images['lv2_2'][:, :, :, 0:int(W / 2)]
        images['lv3_4'] = images['lv2_2'][:, :, :, int(W / 2):W]

        for s, ps in zip(['s1', 's2', 's3', 's4'], [None, 's1', 's2', 's3']):
            # ---- lv3: four quarter patches ---------------------------------
            # After s1, each quarter also receives the matching crop of the
            # previous stage's full-resolution residual.
            if ps is None:
                feature[s]['lv3_1'] = self.feat_extract[s][0](images['lv3_1'])
                feature[s]['lv3_2'] = self.feat_extract[s][0](images['lv3_2'])
                feature[s]['lv3_3'] = self.feat_extract[s][0](images['lv3_3'])
                feature[s]['lv3_4'] = self.feat_extract[s][0](images['lv3_4'])
            else:
                feature[s]['lv3_1'] = self.feat_extract[s][0](images['lv3_1'] + residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
                feature[s]['lv3_2'] = self.feat_extract[s][0](images['lv3_2'] + residual[ps]['lv1'][:, :, 0:int(H / 2), int(W / 2):W])
                feature[s]['lv3_3'] = self.feat_extract[s][0](images['lv3_3'] + residual[ps]['lv1'][:, :, int(H / 2):H, 0:int(W / 2)])
                feature[s]['lv3_4'] = self.feat_extract[s][0](images['lv3_4'] + residual[ps]['lv1'][:, :, int(H / 2):H, int(W / 2):W])

            feature[s]['lv3_1'] = self.Encoder[s][0](feature[s]['lv3_1'])
            feature[s]['lv3_2'] = self.Encoder[s][0](feature[s]['lv3_2'])
            feature[s]['lv3_3'] = self.Encoder[s][0](feature[s]['lv3_3'])
            feature[s]['lv3_4'] = self.Encoder[s][0](feature[s]['lv3_4'])

            # Concatenate left|right quarters along width, then fuse.
            feature[s]['lv3_top'] = torch.cat((feature[s]['lv3_1'], feature[s]['lv3_2']), 3)
            feature[s]['lv3_top'] = self.merge[s][0](feature[s]['lv3_top'])
            feature[s]['lv3_bot'] = torch.cat((feature[s]['lv3_3'], feature[s]['lv3_4']), 3)
            feature[s]['lv3_bot'] = self.merge[s][1](feature[s]['lv3_bot'])

            residual[s]['lv3_top'] = self.Decoder[s][0](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = self.Decoder[s][0](feature[s]['lv3_bot'])

            # Lift lv3 residuals to lv2 channel width (32 -> 64).
            residual[s]['lv3_top'] = self.feat_extract[s][1](residual[s]['lv3_top'])
            residual[s]['lv3_bot'] = self.feat_extract[s][1](residual[s]['lv3_bot'])

            # ---- lv2: two half patches -------------------------------------
            # SCM embeds the RGB half patches; lv3 residuals act as priors.
            if ps is None:
                feature[s]['lv2_1'] = self.SCM2(images['lv2_1']) + residual[s]['lv3_top']
                feature[s]['lv2_2'] = self.SCM2(images['lv2_2']) + residual[s]['lv3_bot']
            else:
                feature[s]['lv2_1'] = self.SCM2(images['lv2_1'] + residual[ps]['lv1'][:, :, 0:int(H / 2), :]) + residual[s]['lv3_top']
                feature[s]['lv2_2'] = self.SCM2(images['lv2_2'] + residual[ps]['lv1'][:, :, int(H / 2):H, :]) + residual[s]['lv3_bot']

            feature[s]['lv2_1'] = self.Encoder[s][1](feature[s]['lv2_1'])
            feature[s]['lv2_2'] = self.Encoder[s][1](feature[s]['lv2_2'])

            # Skip connection from the lv3 hidden features (channel-lifted).
            feature[s]['lv2_1'] = self.feat_extract[s][3](feature[s]['lv3_top']) + feature[s]['lv2_1']
            feature[s]['lv2_2'] = self.feat_extract[s][3](feature[s]['lv3_bot']) + feature[s]['lv2_2']

            # Concatenate top|bottom halves along height, then fuse.
            feature[s]['lv2'] = torch.cat((feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            feature[s]['lv2'] = self.merge[s][2](feature[s]['lv2'])

            residual[s]['lv2'] = self.Decoder[s][1](feature[s]['lv2'])
            residual[s]['lv2'] = self.feat_extract[s][2](residual[s]['lv2'])  # lift to lv1 width (64 -> 128)

            # ---- lv1: full image -------------------------------------------
            if ps is None:
                feature[s]['lv1'] = self.SCM1(images['lv1']) + residual[s]['lv2']
            else:
                feature[s]['lv1'] = self.SCM1(images['lv1'] + residual[ps]['lv1']) + residual[s]['lv2']

            feature[s]['lv1'] = self.Encoder[s][2](feature[s]['lv1'])
            feature[s]['lv1'] = self.feat_extract[s][4](feature[s]['lv2']) + feature[s]['lv1']

            residual[s]['lv1'] = self.Decoder[s][2](feature[s]['lv1'])
            residual[s]['lv1'] = self.feat_extract[s][5](residual[s]['lv1'])
            residual[s]['lv1'] = self.feat_extract[s][6](residual[s]['lv1'])  # project back to 3-channel residual

        # BUGFIX: the last element previously repeated s1's residual,
        # dropping stage s4's output (the sibling DMPHN class returns s4).
        return (residual['s1']['lv1'], residual['s2']['lv1'], residual['s3']['lv1'], residual['s4']['lv1'])



class DMPHN(nn.Module):
    """Stacked four-stage Deep Multi-Patch Hierarchical Network.

    Each stage runs a three-level patch pyramid over the input: the image
    is split vertically into two lv2 halves, each split horizontally into
    two lv3 quarters.  Every stage/level pair owns its own Encoder/Decoder
    (from ``model2``); features and residuals flow both up the pyramid
    within a stage and from each stage to the next.

    forward() returns the lv1 residual of every stage, s1..s4.
    """

    _STAGES = ('s1', 's2', 's3', 's4')
    _LEVELS = ('lv1', 'lv2', 'lv3')

    def __init__(self):
        super(DMPHN, self).__init__()
        self.encoder = nn.ModuleDict()
        self.decoder = nn.ModuleDict()
        for stage in self._STAGES:
            self.encoder[stage] = nn.ModuleDict(
                {lv: model2.Encoder() for lv in self._LEVELS})
            self.decoder[stage] = nn.ModuleDict(
                {lv: model2.Decoder() for lv in self._LEVELS})

    def forward(self, inputs):
        """Run all four stages on `inputs` (N,C,H,W); values centered by -0.5."""
        feat = {stage: {} for stage in self._STAGES}   # encoder outputs
        res = {stage: {} for stage in self._STAGES}    # decoder outputs

        img = {'lv1': Variable(inputs - 0.5)}
        h_half = int(img['lv1'].size(2) / 2)
        w_half = int(img['lv1'].size(3) / 2)

        # Patch pyramid: lv2 = top/bottom halves, lv3 = four quarters.
        img['lv2_1'] = img['lv1'][:, :, :h_half, :]
        img['lv2_2'] = img['lv1'][:, :, h_half:, :]
        img['lv3_1'] = img['lv2_1'][:, :, :, :w_half]
        img['lv3_2'] = img['lv2_1'][:, :, :, w_half:]
        img['lv3_3'] = img['lv2_2'][:, :, :, :w_half]
        img['lv3_4'] = img['lv2_2'][:, :, :, w_half:]

        prev = None
        for stage in self._STAGES:
            enc, dec = self.encoder[stage], self.decoder[stage]
            f, r = feat[stage], res[stage]

            # ---- lv3: encode the four quarters; after s1, each quarter
            # also receives the matching crop of the previous stage's
            # full-resolution residual.
            if prev is None:
                quarters = (img['lv3_1'], img['lv3_2'],
                            img['lv3_3'], img['lv3_4'])
            else:
                carry = res[prev]['lv1']
                quarters = (
                    img['lv3_1'] + carry[:, :, :h_half, :w_half],
                    img['lv3_2'] + carry[:, :, :h_half, w_half:],
                    img['lv3_3'] + carry[:, :, h_half:, :w_half],
                    img['lv3_4'] + carry[:, :, h_half:, w_half:],
                )
            for idx, patch in enumerate(quarters, start=1):
                f['lv3_%d' % idx] = enc['lv3'](patch)

            # Stitch left|right quarters along width; add the previous
            # stage's lv3 features as a cross-stage skip.
            f['lv3_top'] = torch.cat((f['lv3_1'], f['lv3_2']), 3)
            f['lv3_bot'] = torch.cat((f['lv3_3'], f['lv3_4']), 3)
            if prev is not None:
                f['lv3_top'] += feat[prev]['lv3_top']
                f['lv3_bot'] += feat[prev]['lv3_bot']

            r['lv3_top'] = dec['lv3'](f['lv3_top'])
            r['lv3_bot'] = dec['lv3'](f['lv3_bot'])

            # ---- lv2: half patches, seeded by the lv3 residuals.
            top_in = img['lv2_1'] + r['lv3_top']
            bot_in = img['lv2_2'] + r['lv3_bot']
            if prev is not None:
                carry = res[prev]['lv1']
                top_in = top_in + carry[:, :, :h_half, :]
                bot_in = bot_in + carry[:, :, h_half:, :]
            f['lv2_1'] = enc['lv2'](top_in)
            f['lv2_2'] = enc['lv2'](bot_in)

            # In-pyramid and cross-stage feature skips.
            f['lv2_1'] += f['lv3_top']
            f['lv2_2'] += f['lv3_bot']
            if prev is not None:
                f['lv2_1'] += feat[prev]['lv2_1']
                f['lv2_2'] += feat[prev]['lv2_2']

            # Stitch top|bottom halves along height.
            f['lv2'] = torch.cat((f['lv2_1'], f['lv2_2']), 2)
            r['lv2'] = dec['lv2'](f['lv2'])

            # ---- lv1: full image, seeded by the lv2 residual.
            full_in = img['lv1'] + r['lv2']
            if prev is not None:
                full_in = full_in + res[prev]['lv1']
            f['lv1'] = enc['lv1'](full_in)

            f['lv1'] += f['lv2']
            if prev is not None:
                f['lv1'] += feat[prev]['lv1']

            r['lv1'] = dec['lv1'](f['lv1'])
            prev = stage

        return res['s1']['lv1'], res['s2']['lv1'], res['s3']['lv1'], res['s4']['lv1']
    
