import torch
from torch import nn
from typing import Tuple


class TemporalBlock(nn.Module):
    """One TCN residual block: two dilated Conv1d layers with a ReLU between
    them, plus a residual (skip) connection.

    The convolutions are applied with whatever ``padding`` the caller passes;
    with ``padding=0`` each conv shortens the sequence, so the residual is
    trimmed to the most recent time steps before the addition (keeps the skip
    path causally aligned with the conv output).
    """

    def __init__(self, input_channels, output_channels, kernel_size, stride, dilation, padding):
        super().__init__()
        self.conv1 = nn.Conv1d(input_channels, output_channels, kernel_size, stride, padding, dilation)
        self.conv2 = nn.Conv1d(output_channels, output_channels, kernel_size, stride, padding, dilation)
        self.relu = nn.ReLU()
        # 1x1 conv matches channel counts on the skip path when they differ;
        # identity (None) otherwise.
        self.downsample = nn.Conv1d(input_channels, output_channels, 1) if input_channels != output_channels else None

    def forward(self, x):
        """x: (B, C_in, T) -> (B, C_out, T'); T' <= T when padding shrinks output."""
        out = self.conv1(x)
        out = self.relu(out)
        out = self.conv2(out)
        residual = x if self.downsample is None else self.downsample(x)
        # Unpadded convs shorten `out`; keep only the last T' steps of the
        # skip path so the two tensors line up on the time axis.
        if out.shape[-1] < residual.shape[-1]:
            residual = residual[..., -out.shape[-1]:]
        return self.relu(out + residual)


class Tcn(nn.Module):
    """Gated TCN: two parallel stacks of TemporalBlocks — a tanh branch and a
    sigmoid branch — multiplied elementwise (gating), then dropout and a
    linear projection over the channel dimension.

    Args:
        dims: tuple ``(input_channels, channels, output_channels)`` where
            ``channels`` is an indexable of per-layer channel counts with at
            least ``num_layer`` entries.
        num_layer: number of TemporalBlocks per branch.
        pred_len: prediction length.
        dropout: dropout probability applied after the gating product.
        kernel_size: Conv1d kernel size used in every block.
    """

    def __init__(self, dims: Tuple, num_layer, pred_len, dropout=0.2, kernel_size=3):
        super().__init__()
        # The convolutions treat the feature dimension as "channels" rather
        # than operating on it directly, hence input_channels, not input_size.
        self.input_channels, self.channels, self.output_channels = dims
        self.num_layer = num_layer
        # NOTE(review): pred_len is stored but never read in this class —
        # confirm whether callers rely on the attribute.
        self.pred_len = pred_len
        self.kernel_size = kernel_size

        # Both gating branches share the same architecture (but not weights).
        self.tcn_a = self._build_branch()
        self.tcn_b = self._build_branch()

        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(self.channels[-1], self.output_channels)

    def _build_branch(self):
        """Stack num_layer TemporalBlocks with dilations 2, 4, 8, ..."""
        layers = []
        dilation_size = 2
        for i in range(self.num_layer):
            input_channels = self.input_channels if i == 0 else self.channels[i - 1]
            layers.append(
                TemporalBlock(input_channels, self.channels[i], self.kernel_size,
                              stride=1, dilation=dilation_size, padding=0)
            )
            dilation_size *= 2
        return nn.Sequential(*layers)

    def forward(self, x):
        """x: (B, T, C_in) -> (B, T_out, C_out).

        T_out < T because the blocks use padding=0. Conv1d expects (B, C, T),
        so we transpose on the way in and back on the way out.
        """
        x = x.transpose(1, 2).contiguous()
        gated = torch.tanh(self.tcn_a(x)) * torch.sigmoid(self.tcn_b(x))
        gated = gated.transpose(1, 2).contiguous()
        return self.linear(self.dropout(gated))