import torch
import torch.nn as nn
import numpy as np
import math

from torch.nn.utils import weight_norm
from einops import rearrange, repeat


class Config(object):
    """Hyper-parameter container shared by every module in this file."""

    def __init__(self):
        self.vocab_size = 6

        self.d_model = 129
        self.n_heads = 4

        # NOTE(review): 129 is not divisible by 4; the divisibility assert was
        # disabled upstream, so dim_k/dim_v are floor divisions (129 // 4 == 32).
        # assert self.d_model % self.n_heads == 0
        self.dim_k = self.d_model // self.n_heads
        self.dim_v = self.d_model // self.n_heads

        self.padding_size = 30
        self.UNK = 5
        self.PAD = 4

        self.N = 6    # number of stacked transformer layers
        self.p = 0.1  # dropout probability

        # Pick the compute device once, at construction time.
        use_cuda = torch.cuda.is_available()
        if use_cuda:
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
        self.device = "cuda" if use_cuda else "cpu"


config = Config()


class SE_Block(nn.Module):
    """Squeeze-and-Excitation block over the feature (last) dimension.

    Expects input of shape (batch, seq_len, channels) and returns a tensor of
    the same shape in which every channel has been rescaled by a learned,
    input-dependent gate in (0, 1).
    """

    def __init__(self, inchannel, ratio=16):
        super(SE_Block, self).__init__()
        # Squeeze: global average pooling collapses the sequence axis.
        self.gap = nn.AdaptiveAvgPool1d(1)
        # Excitation: bottleneck MLP (c -> c/ratio -> c) ending in a sigmoid gate.
        self.fc = nn.Sequential(
            nn.Linear(inchannel, inchannel // ratio, bias=False),
            nn.ReLU(),
            nn.Linear(inchannel // ratio, inchannel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        # (batch, seq, ch) -> (batch, ch, seq) so the pooling runs over time.
        feats = x.transpose(1, 2)
        batch, channels, _ = feats.size()
        # Squeeze each channel to a scalar, then excite through the MLP.
        gate = self.gap(feats).view(batch, channels)
        gate = self.fc(gate).view(batch, channels, 1)
        # Scale every channel by its gate and restore the original layout.
        return (feats * gate.expand_as(feats)).transpose(1, 2)


# Trims the time dimension of the input tensor to remove the extra causal padding.
class Crop(nn.Module):
    """Remove the last ``crop_size`` steps from the time (last) dimension.

    Applied after a padded causal Conv1d so the output length matches the
    input length.  ``crop_size == 0`` is handled explicitly: the slice
    ``x[:, :, :-0]`` would otherwise return an EMPTY tensor.
    """

    def __init__(self, crop_size):
        super(Crop, self).__init__()
        self.crop_size = crop_size

    def forward(self, x):
        # BUGFIX: with crop_size == 0 the negative slice :-0 == :0 silently
        # drops the whole time dimension; return the input unchanged instead.
        if self.crop_size == 0:
            return x.contiguous()
        # Trim the trailing padded steps.
        return x[:, :, :-self.crop_size].contiguous()


# Dilated causal convolution block.
class DilatedConvolutions(nn.Module):
    """Single dilated causal convolution block with a residual connection.

    Pipeline: weight-normalised Conv1d -> crop -> ReLU -> dropout, plus a 1x1
    convolution shortcut when channel counts differ.  Layout in and out is
    (batch, seq_len, channels).

    NOTE(review): the max-pool runs *after* transposing back, i.e. it pools
    across the feature dimension rather than time — presumably intentional,
    since the attention module relies on the resulting width; confirm before
    changing.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, dropout=0.2):
        super(DilatedConvolutions, self).__init__()
        # Left-pad by (k-1)*d and crop the same amount afterwards -> causal.
        padding = (kernel_size - 1) * dilation

        self.conv1 = weight_norm(nn.Conv1d(
            n_inputs, n_outputs,
            kernel_size=kernel_size, stride=stride,
            padding=padding, dilation=dilation))
        self.crop1 = Crop(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        # Pools over the *last* dim of the transposed output (features).
        self.max_pool = nn.MaxPool1d(4, stride=2, dilation=11)

        self.net = nn.Sequential(self.conv1, self.crop1, self.relu1, self.dropout1)

        # 1x1 convolution aligns channel counts for the residual sum.
        self.bias = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()

    def forward(self, x):
        # (batch, seq, ch) -> (batch, ch, seq) for Conv1d.
        h = x.transpose(1, 2)
        out = self.net(h)
        shortcut = h if self.bias is None else self.bias(h)
        out = self.relu(out + shortcut).transpose(1, 2)
        return self.max_pool(out)


# Implements a dilated convolution layer made of two dilated causal convolution
# blocks, each containing a weight-normalised conv, a crop module, a ReLU and
# dropout; also includes a 1x1 convolution used for the shortcut connection.
class TemporalCasualLayer(nn.Module):
    """One TCN residual layer: two dilated causal convolution blocks
    (weight-normalised Conv1d + crop + ReLU + dropout each) plus a 1x1-conv
    shortcut connection.  Layout in and out is (batch, seq_len, channels).
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, dropout=0.2):
        super(TemporalCasualLayer, self).__init__()
        # Causal padding: pad (k-1)*d on both sides, then crop the tail.
        padding = (kernel_size - 1) * dilation
        conv_params = dict(kernel_size=kernel_size, stride=stride,
                           padding=padding, dilation=dilation)

        # First causal block: n_inputs -> n_outputs channels.
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, **conv_params))
        self.crop1 = Crop(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        # Second causal block keeps n_outputs channels.
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, **conv_params))
        self.crop2 = Crop(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.crop1, self.relu1, self.dropout1,
                                 self.conv2, self.crop2, self.relu2, self.dropout2)
        # Shortcut: 1x1 conv only when the channel counts differ.
        self.bias = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()

    def forward(self, x):
        # (batch, seq, ch) -> (batch, ch, seq) for the convolutions.
        h = x.transpose(1, 2)
        out = self.net(h)
        shortcut = h if self.bias is None else self.bias(h)
        # Residual sum + ReLU, then back to (batch, seq, ch).
        return self.relu(out + shortcut).transpose(1, 2)


# A complete TCN built by stacking several TemporalCasualLayer blocks, each
# configured with its own dilation factor and input/output channel counts.
class TemporalConvolutionNetwork(nn.Module):
    """Stack of TemporalCasualLayer blocks, one per entry of num_channels.

    NOTE(review): the dilation is fixed at 1 for every level — the usual
    ``2 ** i`` schedule is disabled in the original; confirm intentional.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvolutionNetwork, self).__init__()
        layers = []
        for level, out_ch in enumerate(num_channels):
            # First level consumes the raw input width, later levels chain.
            in_ch = num_inputs if level == 0 else num_channels[level - 1]
            layers.append(TemporalCasualLayer(
                in_ch, out_ch,
                kernel_size=kernel_size, stride=1,
                dilation=1,  # original 2 ** level schedule disabled
                dropout=dropout))

        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)


# Wraps TemporalConvolutionNetwork and adds a linear layer for the final
# prediction: the TCN output is passed through the linear layer and a ReLU.
class TCN(nn.Module):
    """TemporalConvolutionNetwork backbone followed by a per-timestep linear
    head and a ReLU non-linearity.
    """

    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvolutionNetwork(input_size, num_channels,
                                              kernel_size=kernel_size,
                                              dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Backbone keeps the sequence length; the head maps channels
        # num_channels[-1] -> output_size, clamped to >= 0 by the ReLU.
        features = self.tcn(x)
        return self.relu(self.linear(features))


# class Embedding(nn.Module):
#     def __init__(self, vocab_size):
#         super(Embedding, self).__init__()
#         # 一个普通的 embedding层，我们可以通过设置padding_idx=config.PAD 来实现论文中的 padding_mask
#         self.embedding = nn.Embedding(vocab_size, config.d_model, padding_idx=config.PAD)
#
#     def forward(self, x):
#         # 根据每个句子的长度，进行padding，短补长截
#         for i in range(len(x)):
#             if len(x[i]) < config.padding_size:
#                 x[i].extend(
#                     [config.UNK] * (config.padding_size - len(x[i])))  # 注意 UNK是你词表中用来表示oov的token索引，这里进行了简化，直接假设为6
#             else:
#                 x[i] = x[i][:config.padding_size]
#         x = self.embedding(torch.tensor(x))  # batch_size * seq_len * d_model
#         return x


class Positional_Encoding(nn.Module):
    """Sinusoidal positional encodings.

    Even feature indices get sin, odd get cos, both of the angle
    ``pos / 10000 ** (2 * i / d_model)`` (same formula for both branches, as
    in the original).  Returns a float64 tensor of shape
    (seq_len, embedding_dim) on ``device``.
    """

    def __init__(self, d_model):
        super(Positional_Encoding, self).__init__()
        self.d_model = d_model

    def forward(self, seq_len, embedding_dim, device):
        # Vectorised replacement of the original O(seq_len * dim) Python
        # double loop; produces the same float64 values.
        pos = np.arange(seq_len, dtype=np.float64)[:, None]
        i = np.arange(embedding_dim, dtype=np.float64)[None, :]
        angle = pos / (10000.0 ** (2.0 * i / self.d_model))
        # sin on even feature indices, cos on odd ones.
        positional_encoding = np.where(np.arange(embedding_dim) % 2 == 0,
                                       np.sin(angle), np.cos(angle))
        return torch.from_numpy(positional_encoding).to(device)


class Mutihead_Attention(nn.Module):
    """Multi-head attention whose Q/K/V projections are dilated causal
    convolutions and whose pre-output path runs through a small TCN followed
    by an SE block.

    NOTE(review): the projections emit ``dim * 3`` features which the
    convolution block's internal max-pool reduces back to ``dim``, so the
    reshape into (n_heads, batch, seq, dim // n_heads) works out — confirm
    before touching any projection width.
    """

    def __init__(self, d_model, dim_k, dim_v, n_heads):
        super(Mutihead_Attention, self).__init__()
        self.dim_v = dim_v
        self.dim_k = dim_k
        self.n_heads = n_heads

        # Convolutional Q/K/V projections.
        self.q = nn.Sequential(
            DilatedConvolutions(d_model, dim_k * 3, kernel_size=5, stride=1, dilation=6, dropout=0.2),
        )
        self.k = nn.Sequential(
            DilatedConvolutions(d_model, dim_k * 3, kernel_size=5, stride=1, dilation=6, dropout=0.2),
        )
        self.v = nn.Sequential(
            DilatedConvolutions(d_model, dim_v * 3, kernel_size=5, stride=1, dilation=6, dropout=0.2),
        )
        # 1/sqrt(d_model) scaling for the dot-product scores.
        self.norm_fact = 1 / math.sqrt(d_model)
        num_channels = dim_v
        # Post-attention temporal refinement + channel gating.
        self.tcn_att = nn.Sequential(
            TemporalConvolutionNetwork(dim_v, [num_channels, num_channels, num_channels], kernel_size=3,
                                       dropout=0.2),
            SE_Block(dim_v)
        )
        self.o = nn.Linear(dim_v, d_model)

    def generate_mask(self, dim):
        # Sequence mask: True marks positions to be blanked out, i.e. the
        # strictly-upper-triangular "future" entries.  (Padding mask is
        # applied before the data reaches the model.)
        # BUGFIX: the original returned the *lower* triangle as True, which
        # would have masked the visible past instead of the future.
        lower = torch.Tensor(np.tril(np.ones((dim, dim))))
        return lower == 0

    def forward(self, x, y, requires_mask=False):
        assert self.dim_k % self.n_heads == 0 and self.dim_v % self.n_heads == 0
        # x, y: (batch, seq_len, d_model); self-attention passes y = x.
        Q = self.q(x).reshape(-1, x.shape[0], x.shape[1],
                              self.dim_k // self.n_heads)  # n_heads * batch * seq * head_dim
        K = self.k(x).reshape(-1, x.shape[0], x.shape[1],
                              self.dim_k // self.n_heads)  # n_heads * batch * seq * head_dim
        V = self.v(y).reshape(-1, y.shape[0], y.shape[1],
                              self.dim_v // self.n_heads)  # n_heads * batch * seq * head_dim
        # Scaled dot-product scores: n_heads * batch * seq * seq.
        attention_score = torch.matmul(Q, K.permute(0, 1, 3, 2)) * self.norm_fact

        if requires_mask:
            mask = self.generate_mask(x.shape[1])
            # BUGFIX: masked_fill is out-of-place and the original discarded
            # its return value, so the mask never took effect.  Only the
            # pre-softmax scores need masking, not Q/K/V individually.
            # NOTE(review): no softmax is ever applied to the scores in this
            # module, so with masking enabled the -inf entries will propagate
            # through the matmul — confirm whether a softmax belongs here.
            attention_score = attention_score.masked_fill(mask, value=float("-inf"))

        # Weighted values, folded back to (batch, seq, dim_v).
        output = torch.matmul(attention_score, V).reshape(y.shape[0], y.shape[1], -1)
        output = self.tcn_att(output)

        output = output.reshape(y.shape[0], y.shape[1], -1)
        output = self.o(output)
        return output


class Feed_Forward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Linear."""

    def __init__(self, input_dim, hidden_dim=2048):
        super(Feed_Forward, self).__init__()
        self.L1 = nn.Linear(input_dim, hidden_dim)
        self.L2 = nn.Linear(hidden_dim, input_dim)

    def forward(self, x):
        # Expand to hidden_dim, clamp at zero, project back to input_dim.
        hidden = torch.relu(self.L1(x))
        return self.L2(hidden)


class Add_Norm(nn.Module):
    """Residual connection + dropout + layer normalisation.

    ``sub_layer`` is called on ``x`` (with any extra kwargs), its output is
    added residually, dropout is applied to the sum, and the result is
    layer-normalised over every non-batch dimension.
    """

    def __init__(self):
        super(Add_Norm, self).__init__()
        self.dropout = nn.Dropout(config.p)

    def forward(self, x, sub_layer, **kwargs):
        sub_output = sub_layer(x, **kwargs)
        x = self.dropout(x + sub_output)

        # BUGFIX(intent/perf): the original built a fresh nn.LayerNorm — with
        # freshly initialised affine parameters — on every forward pass, so
        # its weight/bias could never be learned.  A newly initialised
        # LayerNorm (weight=1, bias=0, eps=1e-5) computes exactly the same
        # values as the functional form below, which also avoids the per-call
        # module allocation.
        # NOTE(review): normalising over x.size()[1:] ties the op to a fixed
        # (seq_len, d_model) shape — confirm that is intended.
        return nn.functional.layer_norm(x, x.size()[1:])


class Encoder(nn.Module):
    """One encoder layer: positional encoding, three-way chunked multi-head
    attention, residual + layer norm, then a feed-forward sub-layer wrapped
    in Add_Norm.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        self.positional_encoding = Positional_Encoding(config.d_model)
        # Attention operates on one chunk of d_model // 3 features at a time.
        self.muti_atten = Mutihead_Attention(config.d_model // 3, config.dim_k, config.dim_v, config.n_heads)
        # Projects the three concatenated chunk outputs back to d_model.
        self.liner = nn.Linear((config.d_model // 3) * 3, config.d_model)
        self.dropout = nn.Dropout(config.p)
        self.feed_forward = Feed_Forward(config.d_model)

        self.add_norm = Add_Norm()

    def forward(self, x):  # x: (batch, seq_len, d_model) tensor; the original comment claiming a plain list predates this version
        # NOTE(review): += mutates the caller's tensor in place, and the
        # encoding is float64 added into x's dtype — confirm both intended.
        x += self.positional_encoding(x.shape[1], config.d_model, config.device)
        # print("After positional_encoding: {}".format(x.size()))
        # Chunked attention: split features into three equal chunks along the
        # feature axis and self-attend each chunk independently.
        x1, x2, x3 = torch.chunk(x, chunks=3, dim=2)
        output1 = self.muti_atten(x1, y=x1)
        output2 = self.muti_atten(x2, y=x2)
        output3 = self.muti_atten(x3, y=x3)
        output = torch.cat((output1, output2, output3), dim=-1)
        output = self.liner(output)
        # Residual + dropout, then layer norm over all non-batch dims.
        # NOTE(review): this LayerNorm is rebuilt on every forward pass, so
        # its affine parameters are never learned.
        x = self.dropout(x + output)
        layer_norm = nn.LayerNorm(x.size()[1:])
        output = layer_norm(x)

        output = self.add_norm(output, self.feed_forward)

        return output


# In the (disabled) Decoder below, the Encoder's output would serve as the
# Query and Key while the Decoder's input serves as V.  This works because the
# padding step equalises the seq_len of inputs and outputs: the QK product is
# batch_size * seq_len * seq_len, so with matching seq_len each output token
# can attend over every input token.

# class Decoder(nn.Module):
#     def __init__(self):
#         super(Decoder, self).__init__()
#         self.positional_encoding = Positional_Encoding(config.d_model)
#         self.muti_atten = Mutihead_Attention(config.d_model,config.dim_k,config.dim_v,config.n_heads)
#         self.feed_forward = Feed_Forward(config.d_model)
#         self.add_norm = Add_Norm()
#
#     def forward(self,x,encoder_output): # batch_size * seq_len 并且 x 的类型不是tensor，是普通list
#         # print(x.size())
#         x += self.positional_encoding(x.shape[1],config.d_model)
#         # print(x.size())
#         # 第一个 sub_layer
#         output = self.add_norm(x,self.muti_atten,y=x,requires_mask=True)
#         # 第二个 sub_layer
#         output = self.add_norm(x,self.muti_atten,y=encoder_output,requires_mask=True)
#         # 第三个 sub_layer
#         output = self.add_norm(output,self.feed_forward)
#         return output

class Regressor(nn.Module):
    """Flattens the encoder output and maps it to ``output_dim`` values.

    NOTE(review): the first linear layer's input width is hard-coded to 5160
    (the flattened seq_len * d_model of the expected input); the
    ``input_dim`` parameter is unused — confirm before relying on it.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(Regressor, self).__init__()
        self.leaky_relu = nn.LeakyReLU(0.01)
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(5160, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # LeakyReLU -> flatten -> two linear layers -> LeakyReLU.
        out = self.flatten(self.leaky_relu(x))
        out = self.linear2(self.linear1(out))
        return self.leaky_relu(out)


class Transformer_layer(nn.Module):
    """Thin wrapper applying a single Encoder layer."""

    def __init__(self):
        super(Transformer_layer, self).__init__()
        self.encoder = Encoder()

    def forward(self, x):
        # Delegates directly to the wrapped encoder.
        return self.encoder(x)


class T_Transformer(nn.Module):
    """N stacked Transformer_layer encoders preceded by an input projection
    (177 -> d_model) and followed by a Regressor head.

    The submodules ``linear``, ``softmax`` and ``flatter`` are not used by
    the current forward pass but are kept so existing checkpoints retain the
    same state_dict keys.
    """

    def __init__(self, N, output_dim):
        super(T_Transformer, self).__init__()
        self.output_dim = output_dim
        self.linear1 = nn.Linear(177, config.d_model)
        self.linear = nn.Linear(config.d_model, output_dim)  # unused in forward
        self.softmax = nn.Softmax(dim=-1)                    # unused in forward
        self.model = nn.Sequential(*[Transformer_layer() for _ in range(N)])
        self.flatter = nn.Flatten()                          # unused in forward
        self.regressor = Regressor(config.d_model, 256, output_dim)

    def forward(self, x):
        # Unpack only to enforce a 3-D (batch, seq, feature) input layout.
        b, n, d = x.size()
        # Project raw features (177) to the model width, run the encoder
        # stack, then regress to output_dim values.
        projected = self.linear1(x)
        encoded = self.model(projected)
        return self.regressor(encoded)
