import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F


class DPDNNBasicBlock(torch.nn.Module):
    """One unrolled iteration (stage) of DPDNN for compressive-sensing recovery.

    Each stage (1) denoises the current estimate with a small U-Net-style CNN
    to produce v, then (2) applies one learned gradient-descent update:

        x_pred = (1 - eta*zeta) * x - zeta * (x @ PhiTPhi)
                 + zeta * PhiTb + eta*zeta * v

    where ``eta`` and ``zeta`` are learned scalar step sizes.

    NOTE(review): the denoiser assumes 33x33 image patches (1089 = 33*33);
    the ``output_padding=1`` in the final up-sampling only reproduces odd
    spatial sizes, so this block is not shape-generic as written.
    """

    def __init__(self):
        super(DPDNNBasicBlock, self).__init__()

        # Learned scalar step sizes of the update rule.
        self.eta = nn.Parameter(torch.Tensor([0.9]))
        self.zeta = nn.Parameter(torch.Tensor([0.1]))

        # Full-resolution feature extraction: 1 -> 64 channels.
        self.block0 = nn.Sequential()
        self.block0.add_module("conv_0", nn.Conv2d(1, 64, 3, padding=1))
        self.block0.add_module("relu_0", nn.ReLU())

        self.block1 = nn.Sequential()
        self.block1.add_module("conv_1", nn.Conv2d(64, 64, 3, padding=1))
        self.block1.add_module("relu_1", nn.ReLU())

        # Encoder: downsample x2, 64 -> 128 channels.
        self.down = nn.Sequential()
        self.down.add_module("maxpool_d", nn.MaxPool2d(kernel_size=2))
        self.down.add_module("conv_d1", nn.Conv2d(64, 128, 3, padding=1))
        self.down.add_module("relu_d1", nn.ReLU())
        self.down.add_module("conv_d2", nn.Conv2d(128, 128, 3, padding=1))
        self.down.add_module("relu_d2", nn.ReLU())

        # Bottleneck: downsample x2, 128 -> 256 -> 128, then upsample x2 back.
        self.middle = nn.Sequential()
        self.middle.add_module("maxpool_m", nn.MaxPool2d(kernel_size=2))
        self.middle.add_module("conv_m1", nn.Conv2d(128, 256, 3, padding=1))
        self.middle.add_module("relu_m1", nn.ReLU())
        self.middle.add_module("conv_m2", nn.Conv2d(256, 256, 3, padding=1))
        self.middle.add_module("relu_m2", nn.ReLU())
        self.middle.add_module("conv_m3", nn.Conv2d(256, 128, 3, padding=1))
        self.middle.add_module("relu_m3", nn.ReLU())
        # Transposed convolution (deconvolution) for 2x upsampling.
        self.middle.add_module("upsampling_m", nn.ConvTranspose2d(128, 128, 2, 2))

        # Decoder: fuse skip connection (128 + 128 = 256 channels), upsample x2.
        self.up = nn.Sequential()
        self.up.add_module("conv_s1", nn.Conv2d(256, 128, 1, padding=0))
        self.up.add_module("relu_s1", nn.ReLU())
        self.up.add_module("conv_s2", nn.Conv2d(128, 64, 1, padding=0))
        self.up.add_module("relu_s2", nn.ReLU())
        # output_padding=1 recovers the odd spatial size (16 -> 33).
        self.up.add_module("upsampling_u", nn.ConvTranspose2d(64, 64, 2, 2, output_padding=1))

        # Head: fuse full-resolution skip (64 + 64 = 128) and map back to 1 channel.
        self.block2 = nn.Sequential()
        self.block2.add_module("conv_s3", nn.Conv2d(128, 64, 1, padding=0))
        self.block2.add_module("relu_s3", nn.ReLU())
        self.block2.add_module("conv_s4", nn.Conv2d(64, 64, 1, padding=0))
        self.block2.add_module("relu_s4", nn.ReLU())
        self.block2.add_module("conv_5", nn.Conv2d(64, 1, 1, padding=0))

    def forward(self, x, PhiTPhi, PhiTb):
        """Run one DPDNN stage.

        Args:
            x: (batch, 1089) current reconstruction estimate (flattened 33x33).
            PhiTPhi: (1089, 1089) precomputed Phi^T Phi Gram matrix.
            PhiTb: (batch, 1089) precomputed Phi^T b (row-vector convention).

        Returns:
            (batch, 1089) updated estimate.
        """
        x_img = x.view(-1, 1, 33, 33)

        # U-Net denoiser with skip connections at both resolutions.
        x1 = self.block0(x_img)
        x2 = self.block1(x1)
        x3 = self.down(x2)
        x4 = self.middle(x3)

        x5 = torch.cat((x3, x4), 1)
        x6 = self.up(x5)
        x7 = torch.cat((x2, x6), 1)
        x8 = self.block2(x7)

        # Residual denoising: the CNN predicts a correction to x_img.
        v_img = x8 + x_img

        # Learned gradient-descent update toward the denoised estimate v.
        v = v_img.view(-1, 1089)
        x_pred = (
            (1 - self.eta * self.zeta) * x
            - self.zeta * torch.mm(x, PhiTPhi)
            + self.zeta * PhiTb
            + self.eta * self.zeta * v
        )

        return x_pred


# Define DPDNN: a deep unrolled network of DPDNNBasicBlock stages
class DPDNN(torch.nn.Module):
    """Unrolled DPDNN: ``LayerNo`` basic blocks applied in sequence.

    The measurement operators (Phi^T Phi and Phi^T b) are computed once
    up front and shared by every stage.
    """

    def __init__(self, LayerNo):
        super(DPDNN, self).__init__()
        self.LayerNo = LayerNo
        # One independent (non-weight-shared) basic block per unrolled stage.
        self.fcs = nn.ModuleList([DPDNNBasicBlock() for _ in range(LayerNo)])

    def forward(self, Phix, Phi, Qinit):
        # Hoist the shared linear-operator products out of the stage loop.
        PhiT = torch.transpose(Phi, 0, 1)
        PhiTPhi = torch.mm(PhiT, Phi)
        PhiTb = torch.mm(Phix, Phi)

        # Linear initial reconstruction from the precomputed Qinit matrix.
        x = torch.mm(Phix, torch.transpose(Qinit, 0, 1))

        # Refine the estimate through each unrolled stage in turn.
        for stage in self.fcs:
            x = stage(x, PhiTPhi, PhiTb)

        return x

