import torch
from torch import nn
from torch.nn.utils import weight_norm

import matplotlib.pyplot as plt

from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import torch.nn.functional as F
import torch.nn.init as init
from .layer.Conv_Blocks import Inception_Block_V1, Inception2D


def FFT_for_Period(x, k=2):
    """Find the k dominant periods of a batched series via the FFT.

    Args:
        x: tensor of shape [B, T, C].
        k: number of dominant frequencies to keep.

    Returns:
        periods: numpy int array of length k, T // frequency_index for each
            of the k strongest frequencies.
        weights: tensor [B, k], channel-averaged amplitude at those frequencies.
    """
    spectrum = torch.fft.rfft(x, dim=1)
    amplitude = spectrum.abs()
    # per-frequency strength, averaged over batch and channels
    strength = amplitude.mean(dim=0).mean(dim=-1)
    strength[0] = 0  # ignore the DC component
    _, dominant = torch.topk(strength, k)
    dominant = dominant.detach().cpu().numpy()
    periods = x.shape[1] // dominant
    return periods, amplitude.mean(dim=-1)[:, dominant]


class TimesBlock(nn.Module):
    """TimesNet-style block: fold the 1D series into a 2D map per dominant
    period, apply inception convolutions, and aggregate the branches weighted
    by their FFT amplitudes. Input/output shape: [B, T, d_model].
    """

    def __init__(self, seq_len, d_model, d_ff, num_kernels, top_k):
        super(TimesBlock, self).__init__()
        self.seq_len = seq_len
        self.k = top_k
        # parameter-efficient design: d_model -> d_ff -> d_model
        self.conv = nn.Sequential(
            Inception_Block_V1(d_model, d_ff, num_kernels=num_kernels),
            nn.GELU(),
            Inception_Block_V1(d_ff, d_model, num_kernels=num_kernels),
        )

    def forward(self, x):
        B, T, N = x.size()

        # seq_len is refreshed from the batch so variable-length inputs work
        self.seq_len = T

        period_list, period_weight = FFT_for_Period(x, self.k)

        res = []
        for i in range(self.k):
            period = period_list[i]
            # pad so the length is a multiple of the period
            if self.seq_len % period != 0:
                length = ((self.seq_len // period) + 1) * period
                # Fix: match x's dtype as well as device; a default float32
                # padding tensor would make torch.cat fail under AMP/half.
                padding = torch.zeros(
                    B, length - self.seq_len, N, dtype=x.dtype, device=x.device)
                out = torch.cat([x, padding], dim=1)
            else:
                length = self.seq_len
                out = x
            # [B, T, N] -> [B, N, length // period, period]:
            # 1D intra/inter-period variation becomes a 2D map
            out = out.reshape(B, length // period, period,
                              N).permute(0, 3, 1, 2).contiguous()
            # 2D conv: from 1d variation to 2d variation
            out = self.conv(out)
            # reshape back and drop the padded tail
            out = out.permute(0, 2, 3, 1).reshape(B, -1, N)
            res.append(out[:, :self.seq_len, :])
        res = torch.stack(res, dim=-1)
        # adaptive aggregation: weight each period branch by its amplitude
        period_weight = F.softmax(period_weight, dim=1)
        period_weight = period_weight.unsqueeze(
            1).unsqueeze(1).repeat(1, T, N, 1)
        res = torch.sum(res * period_weight, -1)
        # residual connection
        res = res + x
        return res

# Used only inside the inception blocks
class BasicConv1d(nn.Module):
    """Conv1d -> BatchNorm1d -> ReLU building block for the inception module."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv1d, self).__init__()
        # bias disabled because BatchNorm provides the affine shift
        self.conv = nn.Conv1d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm1d(out_channels, eps=0.001)

    def forward(self, x):
        # channel-first input [B, C, L]
        return F.relu(self.bn(self.conv(x)), inplace=True)


'''---InceptionA---'''


class InceptionA(nn.Module):
    """Four-branch 1D inception module.

    Branches: 1x1 conv; 1x1 -> 5x5 conv; 1x1 -> 3x3 conv; max-pool -> 1x1 conv.
    Takes [B, T, C] and returns [B, T, 48 + 48 + 72 + pool_features].
    """

    def __init__(self, in_channels, pool_features, conv_block=None):
        super(InceptionA, self).__init__()
        if conv_block is None:
            conv_block = BasicConv1d

        self.branch1x1 = conv_block(in_channels, 48, kernel_size=1)

        self.branch5x5_1 = conv_block(in_channels, 36, kernel_size=1)
        self.branch5x5_2 = conv_block(36, 48, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = conv_block(in_channels, 48, kernel_size=1)
        self.branch3x3dbl_2 = conv_block(48, 72, kernel_size=3, padding=1)

        self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)

    def _forward(self, x):
        # run the four branches on channel-first input [B, C, T]
        b1 = self.branch1x1(x)
        b2 = self.branch5x5_2(self.branch5x5_1(x))
        b3 = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        b4 = self.branch_pool(
            F.max_pool1d(x, kernel_size=3, stride=1, padding=1))
        return [b1, b2, b3, b4]

    def forward(self, x):
        # NOTE: transpose_ mutates the caller's tensor in place
        x.transpose_(1, 2)
        merged = torch.cat(self._forward(x), 1)
        return merged.transpose_(1, 2)


# Crops the time dimension of the input tensor to remove extra padding.
class Crop(nn.Module):
    """Remove trailing elements along the last (time) dimension.

    Used after a causally padded Conv1d to restore the original length.
    """

    def __init__(self, crop_size):
        super(Crop, self).__init__()
        self.crop_size = crop_size

    def forward(self, x):
        # Fix: with crop_size == 0, x[:, :, :-0] is x[:, :, :0] — an empty
        # tensor. Return the input unchanged in that case.
        if self.crop_size == 0:
            return x.contiguous()
        # drop the extra padding at the end of the sequence
        return x[:, :, :-self.crop_size].contiguous()


# Dilated causal convolution block
class DilatedConvolutions(nn.Module):
    """Single dilated causal conv (weight-normed) with crop, ReLU, dropout,
    and a 1x1-conv residual shortcut when channel counts differ.
    Input/output layout is [B, T, C]; the input tensor is transposed in place.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, dropout=0.2):
        super(DilatedConvolutions, self).__init__()
        # causal padding, trimmed again by Crop after the convolution
        padding = (kernel_size - 1) * dilation

        self.conv1 = weight_norm(
            nn.Conv1d(n_inputs, n_outputs, kernel_size=kernel_size,
                      stride=stride, padding=padding, dilation=dilation))
        self.crop1 = Crop(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        # kept for experiments (currently unused in forward)
        self.max_pool = nn.MaxPool1d(2, stride=2)

        self.net = nn.Sequential(self.conv1, self.crop1, self.relu1, self.dropout1)

        # 1x1 conv to match channels on the shortcut when needed
        self.bias = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()

    def forward(self, x):
        # NOTE: transpose_ mutates the caller's tensor ([B, T, C] -> [B, C, T])
        x.transpose_(1, 2)
        y = self.net(x)
        shortcut = self.bias(x) if self.bias is not None else x
        y = self.relu(y + shortcut)
        return y.transpose_(1, 2)


# A dilated convolution layer made of two dilated causal conv blocks. Each block
# contains a weight-normalized convolution, a crop module, a ReLU activation and
# dropout regularization, plus a 1x1 convolution for the residual shortcut.
class TemporalCasualLayer(nn.Module):
    """Two dilated causal conv blocks (weight-normed conv -> crop -> ReLU ->
    dropout, twice) plus a 1x1-conv shortcut when channel counts differ.
    Input/output layout is [B, T, C]; the input tensor is transposed in place.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, dropout=0.2):
        super(TemporalCasualLayer, self).__init__()
        # causal padding, trimmed again by Crop after each convolution
        padding = (kernel_size - 1) * dilation

        self.conv1 = weight_norm(
            nn.Conv1d(n_inputs, n_outputs, kernel_size=kernel_size,
                      stride=stride, padding=padding, dilation=dilation))
        self.crop1 = Crop(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(
            nn.Conv1d(n_outputs, n_outputs, kernel_size=kernel_size,
                      stride=stride, padding=padding, dilation=dilation))
        self.crop2 = Crop(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.crop1, self.relu1, self.dropout1,
                                 self.conv2, self.crop2, self.relu2, self.dropout2)
        # channel-matching residual shortcut
        self.bias = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()

    def forward(self, x):
        # NOTE: transpose_ mutates the caller's tensor ([B, T, C] -> [B, C, T])
        x.transpose_(1, 2)
        y = self.net(x)
        shortcut = x if self.bias is None else self.bias(x)
        out = self.relu(y + shortcut)
        # out-of-place transpose back to [B, T, C], as in the original
        return out.transpose(1, 2)


# A full TCN built by stacking multiple TemporalCasualLayer blocks. Each layer's
# input/output channel counts come from num_channels (dilation is fixed to 1 here).
class TemporalConvolutionNetwork(nn.Module):
    """Stack of TemporalCasualLayer blocks.

    num_channels gives the output width of each level; inputs of level i are
    the outputs of level i-1 (or num_inputs for the first level).
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvolutionNetwork, self).__init__()
        layers = []
        for i, out_ch in enumerate(num_channels):
            in_ch = num_inputs if i == 0 else num_channels[i - 1]
            # dilation fixed at 1 (exponential 2**i growth was disabled)
            layers.append(TemporalCasualLayer(
                in_ch, out_ch, kernel_size=kernel_size,
                stride=1, dilation=1, dropout=dropout))

        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)


# Wraps TemporalConvolutionNetwork and adds a linear layer for the final
# prediction; the TCN output is passed through the linear layer and a ReLU.
class TCN(nn.Module):
    """TCN backbone followed by a linear head and ReLU.

    The linear layer is applied to every time step of the TCN output
    (shape [N, L, num_channels[-1]] -> [N, L, output_size]).
    """

    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvolutionNetwork(
            input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        features = self.tcn(x)
        return self.relu(self.linear(features))


def pair(t):
    """Return t unchanged if it is already a tuple, otherwise (t, t)."""
    if isinstance(t, tuple):
        return t
    return (t, t)


class PreNorm(nn.Module):
    """Apply LayerNorm before the wrapped module (pre-norm transformer style)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)


class FeedForward(nn.Module):
    """ISTA-style sparse-coding step over a learned dictionary.

    Performs one gradient step x + step_size * (D^T x - D^T D x) shifted by a
    soft-threshold constant, followed by ReLU. hidden_dim and dropout are
    accepted for interface parity but unused by this implementation.
    """

    def __init__(self, dim, hidden_dim, dropout=0., step_size=0.1):
        super().__init__()
        # dictionary D, shape (dim, dim)
        self.weight = nn.Parameter(torch.Tensor(dim, dim))
        with torch.no_grad():
            init.kaiming_uniform_(self.weight)
        self.step_size = step_size
        self.lambd = 0.1

    def forward(self, x):
        Dx = F.linear(x, self.weight, bias=None)
        # D^T * D * x
        DtDx = F.linear(Dx, self.weight.t(), bias=None)
        # D^T * x
        Dtx = F.linear(x, self.weight.t(), bias=None)
        # negative gradient step plus sparsity (lambda) shift
        update = self.step_size * (Dtx - DtDx) - self.step_size * self.lambd
        return F.relu(x + update)


class Attention(nn.Module):
    """Self-attention where queries, keys and values share a single projection
    (CRATE-style weight tying), produced by a dilated causal convolution."""

    def __init__(self, dim, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        # skip the output projection when a single head already matches dim
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

        # shared q/k/v projection via a dilated causal conv block
        self.qkv_1 = DilatedConvolutions(dim, inner_dim, 7, 1, 2, dropout=dropout)

        if project_out:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, dim),
                nn.Dropout(dropout)
            )
        else:
            self.to_out = nn.Identity()

    def forward(self, x):
        projected = self.qkv_1(x)
        w = rearrange(projected, 'b n (h d) -> b h n d', h=self.heads)

        # similarity of the shared projection with itself
        scores = torch.matmul(w, w.transpose(-1, -2)) * self.scale
        attn = self.dropout(self.attend(scores))

        out = torch.matmul(attn, w)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)


import torch.nn.functional as F


class Transformer(nn.Module):
    """Depth-stacked CRATE layers.

    Each layer is attention -> TimesBlock -> ISTA feed-forward, every stage
    wrapped in PreNorm, with residual connections back to the layer input.
    """

    def __init__(self, dim, depth, heads, dim_head, dropout=0., ista=0.1):
        super().__init__()
        self.layers = nn.ModuleList([])
        self.heads = heads
        self.depth = depth
        self.dim = dim

        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)),
                PreNorm(dim, TimesBlock(40, dim, dim*2, 1, 5)),
                PreNorm(dim, FeedForward(dim, dim, dropout=dropout, step_size=ista))
            ]))

    def forward(self, x):
        for attn, inception, ff in self.layers:
            attended = attn(x) + x
            # NOTE: the TimesBlock branch adds the *pre-attention* x as residual
            mixed = inception(attended) + x
            x = ff(mixed)
        return x


class CRATE(nn.Module):
    """CRATE encoder for sequence classification.

    Projects [B, n, feature_size] inputs to the model dimension, prepends a
    class token, adds a learned positional embedding, runs the Transformer,
    pools ('cls' token or 'mean' over tokens) and classifies with an MLP head.
    `channels` is accepted for interface parity but unused here.
    """

    def __init__(self, *, feature_size, num_classes, input_size, dim, depth, heads, pool='cls', channels=3, dim_head=64,
                 dropout=0., emb_dropout=0., ista=0.1):
        super().__init__()
        # +1 position for the prepended class token
        self.pos_embedding = nn.Parameter(torch.randn(1, input_size + 1, dim))
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
        self.dropout = nn.Dropout(emb_dropout)
        self.linear = nn.Linear(feature_size, dim)

        self.transformer = Transformer(dim, depth, heads, dim_head, dropout, ista=ista)

        self.pool = pool
        self.to_latent = nn.Identity()

        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_classes)
        )

    def forward(self, data):
        x = data
        b, n, _ = x.shape
        # project input features to the model dimension
        x = self.linear(x)

        # prepend the class token and add positional information
        cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b=b)
        x = torch.cat((cls_tokens, x), dim=1)
        x += self.pos_embedding[:, :(n + 1)]
        x = self.dropout(x)

        x = self.transformer(x)
        # pool: mean over tokens or take the class token
        x = x.mean(dim=1) if self.pool == 'mean' else x[:, 0]

        x = self.to_latent(x)
        return self.mlp_head(x)


def CRATE_tiny(num_classes=1000, feature_size=177, input_size=100):
    """Build the tiny CRATE configuration (dim 192, depth 3, 4 heads)."""
    dim = 192
    heads = 4
    return CRATE(
        feature_size=feature_size,
        num_classes=num_classes,
        dim=dim,
        depth=3,
        heads=heads,
        dropout=0.,
        emb_dropout=0.25,
        dim_head=dim // heads,
        input_size=input_size,
    )

# def CRATE_small(num_classes = 1000):
#     return CRATE(image_size=224,
#                     patch_size=16,
#                     num_classes=num_classes,
#                     dim=576,
#                     depth=12,
#                     heads=12,
#                     dropout=0.0,
#                     emb_dropout=0.0,
#                     dim_head=576//12)
#
# def CRATE_base(num_classes = 1000):
#     return CRATE(image_size=224,
#                 patch_size=16,
#                 num_classes=num_classes,
#                 dim=768,
#                 depth=12,
#                 heads=12,
#                 dropout=0.0,
#                 emb_dropout=0.0,
#                 dim_head=768//12)
#
# def CRATE_large(num_classes = 1000):
#     return CRATE(image_size=224,
#                 patch_size=16,
#                 num_classes=num_classes,
#                 dim=1024,
#                 depth=24,
#                 heads=16,
#                 dropout=0.0,
#                 emb_dropout=0.0,
#                 dim_head=1024//16)
