import math
import torch.nn as nn
# from torch.nn.utils.parametrizations import weight_norm
from torch.nn.utils import weight_norm  # legacy location; newer torch versions provide torch.nn.utils.parametrizations.weight_norm instead
from ext.layers.tcnmodels.ModernTCN import Block
import torch.nn.functional as F
import torch
from einops import rearrange


class Chomp1d(nn.Module):
    """Crop the trailing ``chomp_size`` time steps from a (Batch, C, T) tensor.

    Conv1d pads on both sides, so a causal stack pads by ``(k - 1) * d`` and
    then removes exactly that many steps from the right end; this module
    performs that removal so no "future" positions leak into the output.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        # Number of rightmost time steps to drop (the surplus causal padding).
        self.chomp_size = chomp_size

    def forward(self, x):
        """
        Crop the rightmost ``chomp_size`` steps of the last dimension.

        contiguous() is called because slicing can leave the tensor
        non-contiguous in memory, and later view() operations require a
        contiguous layout.

        :param x: tensor of shape (Batch, channels, seq_len)
        :return: tensor of shape (Batch, channels, seq_len - chomp_size)
        """
        # Guard: with chomp_size == 0 the slice would be `[..., :-0]`, which
        # is an EMPTY tensor — treat zero chomp as a no-op instead.
        if self.chomp_size == 0:
            return x
        return x[:, :, :-self.chomp_size].contiguous()


class TemporalCnn(nn.Module):
    """One causal convolution stage: weight-normalised Conv1d, followed by a
    Chomp1d crop of the surplus right padding, LeakyReLU, and Dropout.

    :param n_inputs: int, number of input channels
    :param n_outputs: int, number of output channels
    :param kernel_size: int, convolution kernel size
    :param stride: int, convolution stride
    :param dilation: int, convolution dilation
    :param padding: int, symmetric padding added by Conv1d and then cropped
        from the right by Chomp1d to keep the convolution causal
    :param dropout: float, dropout probability
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalCnn, self).__init__()
        self.conv = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                          stride=stride, padding=padding, dilation=dilation))
        self.chomp = Chomp1d(padding)
        self.leakyrelu = nn.LeakyReLU()
        self.dropout = nn.Dropout(dropout)
        # Compose the stage as a single pipeline: conv -> crop -> act -> drop.
        stages = (self.conv, self.chomp, self.leakyrelu, self.dropout)
        self.net = nn.Sequential(*stages)
        self.init_weights()

    def init_weights(self):
        # Draw the conv weights from N(0, 0.01).
        # NOTE(review): under weight_norm the effective weight is recomputed
        # from weight_g/weight_v on each forward, so this in-place init may
        # be overwritten — confirm whether it has the intended effect.
        self.conv.weight.data.normal_(0, 0.01)

    def forward(self, x):
        """
        :param x: tensor of shape (Batch, in_channels, seq_len)
        :return: tensor of shape (Batch, out_channels, seq_len)
        """
        return self.net(x)


class Tcn_Local(nn.Module):
    """Local temporal feature extractor: three stacked causal conv stages,
    all without dilation, so the receptive field stays small and local.

    TCN handles sequences of scalars natively; a vector per time step is
    accommodated by treating the vector entries as input channels. Higher
    dimensional per-step data (matrices, images) is a poor fit.

    :param num_outputs: int, number of channels (kept constant through the stack)
    :param kernel_size: int, convolution kernel size (expected >= 3)
    :param dropout: float, dropout probability
    """

    def __init__(self, num_outputs, kernel_size=3, dropout=0.2):  # k>=3
        super(Tcn_Local, self).__init__()
        depth = 3
        channels = num_outputs
        # Padding of (k - 1) plus the Chomp1d crop inside TemporalCnn makes
        # each stage a causal convolution.
        stages = [
            TemporalCnn(channels, channels, kernel_size, stride=1, dilation=1,
                        padding=(kernel_size - 1), dropout=dropout)
            for _ in range(depth)
        ]
        self.network = nn.Sequential(*stages)

    def forward(self, x):
        """
        Unlike RNN inputs of shape (Batch, seq_len, channels), Conv1d expects
        seq_len last, so all time steps are convolved across in one pass.

        :param x: tensor of shape (Batch, out_channel, seq_len)
        :return: tensor of shape (Batch, out_channel, seq_len)
        """
        return self.network(x)


class Tcn_Global(nn.Module):
    """Global temporal feature extractor: a stack of causal conv stages with
    exponentially growing dilation (1, 2, 4, ...), deep enough for the
    receptive field to span the whole input length.

    TCN handles sequences of scalars natively; a vector per time step is
    accommodated by treating the vector entries as input channels. Higher
    dimensional per-step data (matrices, images) is a poor fit.

    :param num_inputs: int, input sequence length to be covered
    :param num_outputs: int, number of channels (kept constant through the stack)
    :param kernel_size: int, convolution kernel size
    :param dropout: float, dropout probability
    """

    def __init__(self, num_inputs, num_outputs, kernel_size=3, dropout=0.2):  # k>=d
        super(Tcn_Global, self).__init__()
        # Smallest number of levels whose combined dilated receptive field
        # 1 + (k - 1) * (2^levels - 1) reaches num_inputs.
        depth = math.ceil(math.log2((num_inputs - 1) / (kernel_size - 1) + 1))
        channels = num_outputs
        stages = [
            TemporalCnn(channels, channels, kernel_size, stride=1,
                        dilation=2 ** level,  # dilation: 1, 2, 4, 8, ...
                        padding=(kernel_size - 1) * 2 ** level,
                        dropout=dropout)
            for level in range(depth)
        ]
        self.network = nn.Sequential(*stages)

    def forward(self, x):
        """
        Unlike RNN inputs of shape (Batch, seq_len, channels), Conv1d expects
        seq_len last, so all time steps are convolved across in one pass.

        :param x: tensor of shape (Batch, out_channel, seq_len)
        :return: tensor of shape (Batch, out_channel, seq_len)
        """
        return self.network(x)

# class ModernTCN_Global(nn.Module):
#     def __init__(self, input_len, output_dim, num_vars, 
#                  kernel_size=5, patch_size=2, patch_stride=1,
#                  stem_ratio=4, num_blocks=1, dropout=0.1):
#         """
#         参数说明：
#             input_len (int): 输入序列长度 L
#             output_dim (int): 输出特征维度 D
#             num_vars (int): 变量数 M
#             kernel_size (int): 卷积核大小
#             patch_size (int): 分块长度
#             patch_stride (int): 分块步长
#             stem_ratio (int): 特征增强倍数
#             num_blocks (int): Block数量
#             dropout (float): Dropout率
#         """
#         super().__init__()
        
#         # --- 参数校验 ---
#         assert patch_size <= input_len, "分块大小不能超过序列长度"
#         assert kernel_size % 2 == 1, "卷积核大小应为奇数"
        
#         # --- 核心参数计算 ---
#         self.stem_dim = output_dim * stem_ratio  # 特征增强后的维度
#         self.dff = self.stem_dim * 4             # FFN中间层维度（固定4倍）
        
#         # --- 输入分块处理 ---
#         self.patch_size = patch_size
#         self.patch_stride = patch_stride
#         self.num_patches = (input_len - patch_size) // patch_stride + 1
        
#         # 特征增强层（每个变量独立）
#         self.stem = nn.ModuleList([
#             nn.Sequential(
#                 nn.Linear(patch_size, self.stem_dim),
#                 nn.LayerNorm(self.stem_dim),
#                 nn.GELU()
#             ) for _ in range(num_vars)
#         ])
        
#         # --- 构建Block链 ---
#         self.blocks = nn.ModuleList([
#             Block(
#                 large_size=kernel_size,
#                 small_size=max(3, kernel_size//2),  # 小核最小为3
#                 dmodel=self.stem_dim,      # 必须与stem_dim一致
#                 dff=self.dff,              # 固定4倍关系
#                 nvars=num_vars,            # 变量数直接传递
#                 small_kernel_merged=False, # 保持与原始实现一致
#                 drop=dropout               # 统一dropout率
#             ) for _ in range(num_blocks)
#         ])
        
#         # --- 输出适配层 ---
#         self.feature_compressor = nn.Conv2d(
#             in_channels=self.stem_dim,
#             out_channels=1,
#             kernel_size=1
#         )

#     def forward(self, x):
#         """
#         输入: (B, M, L)
#         输出: (B, M, L)
#         """
#         B, M, L = x.shape
        
#         # === 1. 分块处理 ===
#         # 动态填充保证可分块
#         pad_len = (self.patch_stride - (L - self.patch_size) % self.patch_stride) % self.patch_stride
#         x = F.pad(x, (0, pad_len), mode='constant', value=0)  # (B, M, L+pad)
        
#         # 分块展开 → (B, M, P, S)
#         x = x.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)
        
#         # === 2. 特征增强 ===
#         # 按变量独立处理 → (B, M, P, D')
#         x = torch.stack([self.stem[m](x[:, m]) for m in range(M)], dim=1)
        
#         # === 3. 维度调整为Block所需格式 ===
#         # (B, M, P, D') → (B, M, D', P) → (B, M*D', P)
#         x = x.permute(0, 1, 3, 2)
        
#         # === 4. 通过所有Block ===
#         for block in self.blocks:
#             x = block(x)  # 每个Block需保持 (B, M*D', P) 格式
        
#         # === 5. 输出重构 ===
# # === 处理Block输出 ===
#         #print(x.shape)
#         x = x.permute(0, 2, 3, 1)  # (B, D', P, M)
#         x = self.feature_compressor(x)  # (B, M, P)
#         #print(x.shape)
#         x = x.squeeze(1)
#         #print(x.shape)
#         # === 反池化重建序列 ===
#         # 如果需要原始序列长度
#         x = x.permute(0, 2, 1)  # [128, P, 38]
#         x = F.interpolate(x, size=10, mode='linear')  # [128, 10, 38]
#         #print(x.shape)
#         x=x.permute(0, 2, 1)
#         return x  # [128, 38, 10]


# class ModernTCN_Global(nn.Module):
#     def __init__(self, input_len, output_dim, num_vars,
#                  kernel_size=5, patch_size=4, patch_stride=2,
#                  stem_ratio=1, num_blocks=1, dropout=0.1):
#         super().__init__()
#         # --- 参数校验 ---
#         assert patch_size <= input_len, "分块大小不能超过序列长度"
#         assert kernel_size % 2 == 1, "卷积核大小应为奇数"
        
#         # --- 核心参数 ---
#         self.stem_dim = output_dim * stem_ratio  # 特征增强后的维度
#         self.patch_size = patch_size
#         self.patch_stride = patch_stride
#         self.num_patches = (input_len - patch_size) // patch_stride + 1

#         # --- 替换为 forward_feature 的特征生成逻辑 ---
#         # 1. 分块后的降采样层（替代原来的独立线性层）
#         self.downsample = nn.Conv1d(
#             in_channels=patch_size,
#             out_channels=self.stem_dim,
#             kernel_size=1,
#             stride=1
#         )
        
#         # --- 构建 Block 链 ---
#         self.blocks = nn.ModuleList([
#             Block(
#                 large_size=kernel_size,
#                 small_size=max(3, kernel_size//2),
#                 dmodel=self.stem_dim,
#                 dff=self.stem_dim * 2,
#                 nvars=num_vars,
#                 small_kernel_merged=False,
#                 drop=dropout
#             ) for _ in range(num_blocks)
#         ])
        
#         # --- 输出适配层 ---
#         self.feature_compressor = nn.Conv2d(
#             in_channels=self.stem_dim,
#             out_channels=1,
#             kernel_size=1
#         )

#     def forward(self, x):
#         B, M, L = x.shape
        
#         # === 1. 分块处理（类似 forward_feature 的第一个 stage） ===
#         # 动态填充保证可分块
#         pad_len = (self.patch_stride - (L - self.patch_size) % self.patch_stride) % self.patch_stride
#         x = F.pad(x, (0, pad_len), mode='constant', value=0)  # (B, M, L+pad)
        
#         # 分块展开 → (B, M, P, patch_size)
#         x = x.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)
        
#         # === 2. 特征增强（替换为卷积降采样） ===
#         # 合并批次和变量维度 → (B*M, P, patch_size)
#         x = x.reshape(B * M, self.num_patches, self.patch_size).permute(0, 2, 1)
        
#         # 通过降采样层 → (B*M, stem_dim, P)
#         x = self.downsample(x)
        
#         # 恢复维度 → (B, M, stem_dim, P)
#         x = x.reshape(B, M, self.stem_dim, -1)
        
#         # === 3. 通过所有 Block ===
#         for block in self.blocks:
#             x = block(x)  # 输入输出保持 (B, M, stem_dim, P)
        
#         # === 4. 输出重构 ===
#         x = x.permute(0, 2, 3, 1)  # (B, D', P, M)
#         x = self.feature_compressor(x)  # (B, P, M, 1)
#         x = x.squeeze(1).permute(0, 2, 1)  # (B, M, P)
        
#         # 插值到固定长度（例如 10）
#         x = F.interpolate(x, size=L, mode='linear')  # (B, M, 10)
#         return x


class ModernTCN_Global(nn.Module):
    """Global feature extractor built on ModernTCN blocks.

    Pipeline: strided Conv1d patch embedding applied per variable ->
    stack of ModernTCN ``Block``s -> 1x1 Conv2d channel compression ->
    linear interpolation of the patch axis back to the input length.

    :param input_len: int, nominal input sequence length L
    :param output_dim: int, base embedding width (scaled by stem_ratio)
    :param num_vars: int, number of variables M
    :param kernel_size: int, large kernel size inside each Block (must be odd)
    :param patch_size: int, patch length (embedding conv kernel size)
    :param patch_stride: int, patch stride
    :param stem_ratio: int, multiplier applied to output_dim for the stem width
    :param num_blocks: int, number of stacked Blocks
    :param dropout: float, dropout rate passed to each Block
    """

    def __init__(self, input_len, output_dim, num_vars,
                 kernel_size=5, patch_size=4, patch_stride=2,
                 stem_ratio=1, num_blocks=1, dropout=0.1):
        super().__init__()
        assert patch_size <= input_len, "分块大小不能超过序列长度"
        assert kernel_size % 2 == 1, "卷积核大小应为奇数"

        self.stem_dim = output_dim * stem_ratio
        self.patch_size = patch_size
        self.patch_stride = patch_stride

        # Static patch count for the nominal input length. forward() no
        # longer depends on it, so inputs of a different length also work.
        self.num_patches = (input_len - patch_size) // patch_stride + 1

        # Strided Conv1d replaces unfold-then-project: it extracts patches
        # and embeds them in one op. Input (B*M, 1, L) -> (B*M, stem_dim, P).
        self.patch_conv = nn.Conv1d(
            in_channels=1,
            out_channels=self.stem_dim,
            kernel_size=patch_size,
            stride=patch_stride
        )

        self.blocks = nn.ModuleList([
            Block(
                large_size=kernel_size,
                small_size=max(3, kernel_size // 2),  # small kernel, floor of 3
                dmodel=self.stem_dim,
                dff=self.stem_dim * 2,
                nvars=num_vars,
                small_kernel_merged=False,
                drop=dropout
            ) for _ in range(num_blocks)
        ])

        # 1x1 conv that collapses the stem_dim channel axis to one channel.
        self.feature_compressor = nn.Conv2d(
            in_channels=self.stem_dim,
            out_channels=1,
            kernel_size=1
        )

    def forward(self, x):
        """
        :param x: tensor of shape (B, M, L)
        :return: tensor of shape (B, M, L)
        """
        B, M, L = x.shape

        # Merge batch and variable dims, add a singleton channel dim.
        x = x.reshape(B * M, 1, L)  # (B*M, 1, L)

        # Patch extraction + embedding via the strided convolution.
        x = self.patch_conv(x)  # (B*M, stem_dim, P)

        # Recover (B, M, stem_dim, P). P is inferred with -1 rather than the
        # precomputed self.num_patches, so L may differ from input_len
        # without making the reshape fail.
        x = x.reshape(B, M, self.stem_dim, -1)

        for block in self.blocks:
            # Block is expected to preserve the (B, M, stem_dim, P) layout
            # — TODO confirm against the ModernTCN implementation.
            x = block(x)

        x = x.permute(0, 2, 3, 1)  # (B, stem_dim, P, M)
        x = self.feature_compressor(x)  # (B, 1, P, M)
        x = x.squeeze(1).permute(0, 2, 1)  # (B, M, P)

        # Linearly upsample the patch axis back to the original length.
        x = F.interpolate(x, size=L, mode='linear')  # (B, M, L)
        return x
