import torch
import torch.nn as nn
import torch.nn.functional as F

from dataset import get_img_shape

'''
Positional encoding for the diffusion timestep t.
max_seq_len: maximum sequence length, e.g. n_steps = 1000 diffusion steps.
d_model: dimension of the encoding vector, e.g. pe_dim = 128.
'''
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for diffusion timesteps.

    Precomputes a [max_seq_len, d_model] table of interleaved sin/cos
    features (as in "Attention Is All You Need") and stores it in a frozen
    nn.Embedding so that forward(t) is a plain table lookup.

    Args:
        max_seq_len: maximum number of timesteps, e.g. n_steps = 1000.
        d_model: encoding dimension, e.g. pe_dim = 128; must be even.
    """

    def __init__(self, max_seq_len: int, d_model: int):
        super().__init__()

        # Even d_model lets sin/cos pairs interleave cleanly.
        assert d_model % 2 == 0

        # Rows: positions 0..max_seq_len-1 (one per timestep t).
        i_seq = torch.linspace(0, max_seq_len - 1, max_seq_len)
        # Columns: even feature indices 0, 2, ..., d_model - 2.
        j_seq = torch.linspace(0, d_model - 2, d_model // 2)
        # Two [max_seq_len, d_model//2] grids. indexing='ij' matches the
        # legacy default and silences the torch.meshgrid warning on
        # PyTorch >= 1.10.
        pos, two_i = torch.meshgrid(i_seq, j_seq, indexing='ij')
        pe_2i = torch.sin(pos / 10000**(two_i / d_model))    # even dims: sin
        pe_2i_1 = torch.cos(pos / 10000**(two_i / d_model))  # odd dims: cos
        # Interleave along a new last axis, then flatten back to
        # [max_seq_len, d_model]. (The old `pe = torch.zeros(...)`
        # pre-allocation was dead code — it was always overwritten here.)
        pe = torch.stack((pe_2i, pe_2i_1), 2).reshape(max_seq_len, d_model)

        # Frozen embedding layer initialized with the precomputed table.
        self.embedding = nn.Embedding(max_seq_len, d_model)
        self.embedding.weight.data = pe
        self.embedding.requires_grad_(False)  # not trainable

    def forward(self, t):
        # t: integer tensor of timesteps; returns their encodings.
        return self.embedding(t)


class ResidualBlock(nn.Module):
    """Two 3x3 conv + BatchNorm layers with a ResNet-style shortcut.

    Spatial size is preserved; a 1x1 conv + BN projects the shortcut only
    when the channel counts differ.
    """

    def __init__(self, in_c: int, out_c: int):
        super().__init__()
        # NOTE: attribute names (including the historical 'actvation'
        # spelling) are kept unchanged so state_dict keys still match.
        self.conv1 = nn.Conv2d(in_c, out_c, 3, 1, 1)
        self.bn1 = nn.BatchNorm2d(out_c)
        self.actvation1 = nn.ReLU()
        self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1)
        self.bn2 = nn.BatchNorm2d(out_c)
        self.actvation2 = nn.ReLU()
        if in_c != out_c:
            self.shortcut = nn.Sequential(nn.Conv2d(in_c, out_c, 1),
                                          nn.BatchNorm2d(out_c))
        else:
            self.shortcut = nn.Identity()

    def forward(self, input):
        out = self.actvation1(self.bn1(self.conv1(input)))
        out = self.bn2(self.conv2(out))
        # Add the (possibly projected) input, then the final activation.
        return self.actvation2(out + self.shortcut(input))


class ConvNet(nn.Module):
    """Plain residual-CNN denoiser.

    Projects the timestep positional encoding with a Linear layer and adds
    it to the feature map before the first residual block, or before every
    block when ``insert_t_to_all_layers`` is True.

    Args:
        n_steps: number of diffusion timesteps (size of the PE table).
        intermediate_channels: output channels of the residual blocks.
        pe_dim: dimension of the timestep positional encoding.
        insert_t_to_all_layers: inject t before every block instead of
            only once at the input.
    """

    def __init__(self,
                 n_steps,
                 intermediate_channels=[10, 20, 40],
                 pe_dim=10,
                 insert_t_to_all_layers=False):
        super().__init__()
        # Image shape from the dataset, e.g. (1, 28, 28).
        C, H, W = get_img_shape()
        # Positional encoder for the timestep t.
        self.pe = PositionalEncoding(n_steps, pe_dim)

        self.pe_linears = nn.ModuleList()
        self.all_t = insert_t_to_all_layers
        if not insert_t_to_all_layers:
            # Single projection, applied to the raw image channels only.
            self.pe_linears.append(nn.Linear(pe_dim, C))

        self.residual_blocks = nn.ModuleList()
        prev = C
        for ch in intermediate_channels:
            self.residual_blocks.append(ResidualBlock(prev, ch))
            # Either a per-block projection or a None placeholder
            # (nn.ModuleList accepts None entries).
            self.pe_linears.append(
                nn.Linear(pe_dim, prev) if insert_t_to_all_layers else None)
            prev = ch
        self.output_layer = nn.Conv2d(prev, C, 3, 1, 1)

    def forward(self, x, t):
        batch = t.shape[0]
        t = self.pe(t)
        for block, proj in zip(self.residual_blocks, self.pe_linears):
            if proj is not None:
                # Broadcast the projected encoding over H and W.
                x = x + proj(t).reshape(batch, -1, 1, 1)
            x = block(x)
        return self.output_layer(x)

'''
Basic horizontal building block of the UNet.
shape: input feature shape (C, H, W), e.g. (1, 28, 28).
in_c: input channel count; must match the first element of shape.
out_c: output channel count.
residual: whether to add a residual shortcut connection.
'''
class UnetBlock(nn.Module):
    """Horizontal UNet block: LayerNorm -> conv -> ReLU -> conv -> ReLU.

    Spatial size is preserved; conv1 changes the channel count, conv2
    keeps it. With ``residual=True`` the input is added back before the
    final activation (via a 1x1 conv when channel counts differ).
    """

    def __init__(self, shape, in_c, out_c, residual=False):
        super().__init__()
        self.ln = nn.LayerNorm(shape)
        self.conv1 = nn.Conv2d(in_c, out_c, 3, 1, 1)   # change channels
        self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1)  # mix, keep channels
        self.activation = nn.ReLU()
        self.residual = residual
        if residual:
            # Identity when shapes already match, else a 1x1 projection.
            self.residual_conv = (nn.Identity() if in_c == out_c
                                  else nn.Conv2d(in_c, out_c, 1))

    def forward(self, x):
        h = self.conv2(self.activation(self.conv1(self.ln(x))))
        if self.residual:
            h = h + self.residual_conv(x)
        return self.activation(h)

'''
Build the UNet network.
'''
class UNet(nn.Module):
    """DDPM UNet: predicts the noise in image batch x at timestep t.

    The timestep is positionally encoded, projected per stage by a small
    Linear (or MLP on the encoder side), and broadcast-added to the
    feature map before each group of UnetBlocks. Encoder outputs are
    concatenated into the decoder via skip connections.
    """

    def __init__(self,
                 n_steps,                    # number of diffusion timesteps
                 channels=[10, 20, 40, 80],  # channel count per resolution level
                 pe_dim=10,                  # timestep positional-encoding dim
                 # (default 10; typically 128 or 256 in practice)
                 residual=False) -> None:
        # Initialize the Module base class.
        super().__init__()  
        # Image shape from the dataset, e.g. (1, 28, 28).
        C, H, W = get_img_shape()
        # Number of resolution levels (4 here).
        layers = len(channels)   # 4 levels
        # Spatial sizes per level, integer-halving each time.
        Hs = [H]   # e.g. [28, 14, 7, 3]
        Ws = [W]
        cH = H
        cW = W
        for _ in range(layers - 1):
            cH //= 2    # halve
            cW //= 2
            Hs.append(cH)
            Ws.append(cW)

        # Positional encoder for the timestep t.
        self.pe = PositionalEncoding(n_steps, pe_dim)

        self.encoders = nn.ModuleList()        # encoder blocks (down path)
        self.decoders = nn.ModuleList()        # decoder blocks (up path)
        self.pe_linears_en = nn.ModuleList()   # per-level PE projections, encoder side
        self.pe_linears_de = nn.ModuleList()   # per-level PE projections, decoder side
        self.downs = nn.ModuleList()           # downsampling convs
        self.ups = nn.ModuleList()             # upsampling transposed convs
        prev_channel = C    # input image channels, e.g. 1
        # Build every level except the bottleneck.
        for channel, cH, cW in zip(channels[0:-1], Hs[0:-1], Ws[0:-1]):
            # --- encoder side
            # 1. Small MLP projecting the PE for this encoder level.
            self.pe_linears_en.append(
                nn.Sequential(nn.Linear(pe_dim, prev_channel), nn.ReLU(),
                              nn.Linear(prev_channel, prev_channel)))
            # 2. Encoder: two horizontal UNet blocks.
            self.encoders.append(
                nn.Sequential(
                    # increases the channel count
                    UnetBlock((prev_channel, cH, cW),
                              prev_channel,
                              channel,
                              residual=residual),
                    # keeps the channel count
                    UnetBlock((channel, cH, cW),
                              channel,
                              channel,
                              residual=residual)))
            # 3. Downsample: stride-2 conv halves H and W, keeps channels.
            self.downs.append(nn.Conv2d(channel, channel, 2, 2))
            # Track channels for the next level.
            prev_channel = channel

        # Bottleneck PE projection.
        self.pe_mid = nn.Linear(pe_dim, prev_channel)
        # Bottleneck channel count.
        channel = channels[-1]
        self.mid = nn.Sequential(
            # increases the channel count
            UnetBlock((prev_channel, Hs[-1], Ws[-1]),
                      prev_channel,
                      channel,
                      residual=residual),
            UnetBlock((channel, Hs[-1], Ws[-1]),
                      channel,
                      channel,
                      residual=residual),
        )
        # Track channels for the up path.
        prev_channel = channel

        # Up path, mirroring the encoder levels in reverse order.
        for channel, cH, cW in zip(channels[-2::-1], Hs[-2::-1], Ws[-2::-1]):
            # 1. PE projection for this decoder level.
            self.pe_linears_de.append(nn.Linear(pe_dim, prev_channel))
            # 2. Upsample: stride-2 transposed conv doubles H and W and
            #    reduces the channel count.
            self.ups.append(nn.ConvTranspose2d(prev_channel, channel, 2, 2))
            # 3. Decoder: input is the skip connection concatenated with
            #    the upsampled features, hence channel * 2 input channels.
            self.decoders.append(
                nn.Sequential(
                    # halves the channel count
                    UnetBlock((channel * 2, cH, cW),
                              channel * 2,
                              channel,
                              residual=residual),
                    # keeps the channel count
                    UnetBlock((channel, cH, cW),
                              channel,
                              channel,
                              residual=residual)))
            # Track channels for the next level.
            prev_channel = channel

        # Map back to the original image channel count (1 or 3).
        self.conv_out = nn.Conv2d(prev_channel, C, 3, 1, 1)

    def forward(self, x, t):
        '''
        x: input image batch, e.g. (n, 1, 28, 28)
        t: one integer timestep per sample, shape (n,)
        '''
        n = t.shape[0]   # batch size
        t = self.pe(t)   # timestep encodings, one row of length pe_dim per sample

        # --- encoder / down path
        encoder_outs = []
        for pe_linear, encoder, down in zip(self.pe_linears_en, self.encoders,
                                            self.downs):
            # Project the encoding and broadcast-add it over H and W.
            pe = pe_linear(t).reshape(n, -1, 1, 1)
            x = encoder(x + pe)
            encoder_outs.append(x)   # saved for the skip connection
            x = down(x)

        # --- bottleneck
        pe = self.pe_mid(t).reshape(n, -1, 1, 1)
        x = self.mid(x + pe)

        # --- decoder / up path
        for pe_linear, decoder, up, encoder_out in zip(self.pe_linears_de,
                                                       self.decoders, self.ups,
                                                       encoder_outs[::-1]):
            pe = pe_linear(t).reshape(n, -1, 1, 1)
            x = up(x)

            # Pad the upsampled map to the skip connection's spatial size
            # (integer halving loses a row/column, e.g. 7 -> 3 -> 6).
            # NOTE(review): pad_x is the height deficit but F.pad's first
            # pair pads the LAST dim (width); correct only because H == W
            # here — would be swapped for non-square inputs. Confirm.
            pad_x = encoder_out.shape[2] - x.shape[2]
            pad_y = encoder_out.shape[3] - x.shape[3]
            x = F.pad(x, (pad_x // 2, pad_x - pad_x // 2, pad_y // 2,
                          pad_y - pad_y // 2))
            x = torch.cat((encoder_out, x), dim=1)
            x = decoder(x + pe)
        x = self.conv_out(x)
        return x


# Preset model configurations consumed by build_network().

convnet_small_cfg = {
    'type': 'ConvNet',
    'intermediate_channels': [10, 20],
    'pe_dim': 128,
}

convnet_medium_cfg = {
    'type': 'ConvNet',
    'intermediate_channels': [10, 10, 20, 20, 40, 40, 80, 80],
    'pe_dim': 256,
    'insert_t_to_all_layers': True,
}

convnet_big_cfg = {
    'type': 'ConvNet',
    'intermediate_channels': [20, 20, 40, 40, 80, 80, 160, 160],
    'pe_dim': 256,
    'insert_t_to_all_layers': True,
}

unet_1_cfg = {
    'type': 'UNet',
    'channels': [10, 20, 40, 80],
    'pe_dim': 128,
}

unet_res_cfg = {
    'type': 'UNet',                # network type
    'channels': [10, 20, 40, 80],  # channel progression per level
    'pe_dim': 128,                 # positional-encoding vector length
    'residual': True,              # use residual UnetBlocks
}

'''
Build a network from a configuration dict.
config: one of the preset network configurations above.
'''
def build_network(config: dict, n_steps):
    """Instantiate a denoising network from a configuration dict.

    Args:
        config: preset config dict containing a 'type' key ('ConvNet' or
            'UNet'); the remaining keys are constructor kwargs. The dict
            is not modified.
        n_steps: number of diffusion timesteps, forwarded to the model.

    Returns:
        The constructed network module.

    Raises:
        ValueError: if config['type'] is not a known network type.
    """
    # Work on a copy: the previous code popped 'type' from the caller's
    # dict, so a second build_network call with the same preset raised
    # KeyError.
    cfg = dict(config)
    network_type = cfg.pop('type')
    # Only two model families are currently supported.
    if network_type == 'ConvNet':
        network_cls = ConvNet
    elif network_type == 'UNet':
        network_cls = UNet
    else:
        # Previously an unknown type crashed later with UnboundLocalError;
        # fail fast with a clear message instead.
        raise ValueError(f'Unknown network type: {network_type}')

    network = network_cls(n_steps, **cfg)
    return network