import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import copy

class Chomp1d(nn.Module):
    """Trim trailing padding from a causal 1-D convolution output.

    The convolutions in this file pad both ends with
    ``(kernel_size - 1) * dilation`` zeros; chopping that many elements
    off the end restores the input length and keeps the conv causal.
    """

    def __init__(self, chompSize):
        super(Chomp1d, self).__init__()
        # Number of trailing time steps to remove.
        self.chompSize = chompSize

    def forward(self, x):
        # Bug fix: with chompSize == 0 the original slice ``[:-0]``
        # produced an EMPTY tensor; pass through unchanged instead.
        if self.chompSize == 0:
            return x
        return x[:, :, :-self.chompSize].contiguous()

class SENet(nn.Module):
    """Squeeze-and-Excitation channel attention for 1-D feature maps.

    Learns a per-channel gate in [0, 1] from globally pooled features
    and rescales the input channels by it.
    """

    def __init__(self, num_channel, r=2):
        super(SENet, self).__init__()
        # r is the bottleneck reduction ratio of the excitation MLP.
        self.global_pooling = nn.AdaptiveAvgPool1d(1)
        self.linear1 = nn.Linear(num_channel, num_channel // r)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(num_channel // r, num_channel)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: (B, C, L) -> (B, C)
        squeezed = self.global_pooling(x).squeeze(-1)
        # Excite: bottleneck MLP followed by a sigmoid gate.
        gate = self.relu(self.linear1(squeezed))
        gate = self.sigmoid(self.linear2(gate))
        # Broadcast the per-channel gate back over the time dimension.
        return x * gate.unsqueeze(-1)

class SelfAttention(nn.Module):
    """Additive self-attention that pools a sequence into one vector.

    Scores each timestep with a small MLP, softmax-normalises the
    scores over time, and returns the weighted sum of the inputs
    together with the attention weights.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.projection = nn.Sequential(
            nn.Linear(hidden_dim, 64),
            nn.ReLU(True),
            nn.Linear(64, 1)
        )

    def forward(self, encoder_outputs):
        # Per-timestep scores: (B, L, H) -> (B, L)
        scores = self.projection(encoder_outputs).squeeze(-1)
        # Normalise over the sequence dimension.
        weights = F.softmax(scores, dim=1)
        # Weighted sum over time: (B, L, H) -> (B, H)
        pooled = torch.sum(encoder_outputs * weights.unsqueeze(-1), dim=1)
        return pooled, weights

class TcnBlock(nn.Module):
    """One causal TCN block: dilated conv -> chomp -> BN -> PReLU -> dropout.

    Optional extras, selected by constructor flags:
      * senet     -- squeeze-and-excitation channel attention on the output.
      * transform -- 1x1 conv residual shortcut from the input.
      * pro_conv  -- parallel sigmoid-gated conv branch (multiplicative gate).
      * seperabel -- depthwise conv variant. NOTE(review): this path builds
        the conv with ``out_channel`` as its INPUT channel count, so it only
        works when in_channel == out_channel — confirm before enabling.
    """

    def __init__(self, in_channel, out_channel, kernel_size, dilation, transform, senet, seperabel, pro_conv):
        super(TcnBlock, self).__init__()

        if seperabel:
            # Depthwise convolution (groups == channels).
            self.conv1 = nn.Sequential(
                nn.Conv1d(out_channel, out_channels=out_channel, kernel_size=kernel_size, dilation=dilation,
                          padding=(kernel_size - 1) * dilation, stride=1, groups=out_channel)
            )
        else:
            self.conv1 = nn.Conv1d(in_channel, out_channels=out_channel, kernel_size=kernel_size, dilation=dilation,
                                   padding=(kernel_size - 1) * dilation, stride=1)
        self.batchnorm1 = nn.BatchNorm1d(out_channel)
        # Remove the trailing padding so the convolution stays causal.
        self.chomp1 = Chomp1d((kernel_size - 1) * dilation)
        self.drop1 = nn.Dropout(0.5)
        self.prelu = nn.PReLU()

        self.senet = SENet(out_channel) if senet else None

        if pro_conv:
            self.pro_conv = nn.Sequential(
                nn.Conv1d(in_channel, out_channels=out_channel, kernel_size=kernel_size, dilation=dilation,
                          padding=(kernel_size - 1) * dilation, stride=1),
                nn.BatchNorm1d(out_channel),
                Chomp1d((kernel_size - 1) * dilation),
                nn.Sigmoid()
            )
        else:
            # Bug fix: the attribute must always exist — the original only
            # assigned it when pro_conv was True, so forward() raised
            # AttributeError when transform/senet/pro_conv were all False.
            self.pro_conv = None

        if transform:
            # 1x1 convolution to match channels for the residual connection.
            self.transform = nn.Conv1d(in_channel, out_channel, kernel_size=1, stride=1)
        else:
            self.transform = None

    def forward(self, x):
        # Main branch: conv -> chomp -> BN -> PReLU -> dropout.
        fea = self.drop1(self.prelu(self.batchnorm1(self.chomp1(self.conv1(x)))))
        if self.transform is not None:
            # Residual path; optionally gate the main branch with SENet first.
            res = self.transform(x)
            if self.senet is not None:
                return self.senet(fea) + res
            else:
                return fea + res
        elif self.senet is not None:
            return self.senet(fea)
        elif self.pro_conv is not None:
            # Multiplicative sigmoid gate computed from the raw input.
            return fea * self.pro_conv(x)
        else:
            return fea

class TCN(nn.Module):
    """Four-block causal TCN classifier.

    Blocks 1-2 use dilation 1 with SENet attention; blocks 3-4 use
    dilation 2 without it. The time dimension is sum-pooled before the
    final linear classifier.
    """

    def __init__(self, in_channel, out_channels, kernel_size, transform, num_class):
        super(TCN, self).__init__()

        self.batch = nn.BatchNorm1d(in_channel)
        self.tcn_block1 = TcnBlock(in_channel, out_channels[0], kernel_size=kernel_size, dilation=1,
                                   transform=transform, senet=True, seperabel=False, pro_conv=False)
        self.tcn_block2 = TcnBlock(out_channels[0], out_channels[1], kernel_size=kernel_size, dilation=1,
                                   transform=transform, senet=True, seperabel=False, pro_conv=False)
        self.tcn_block3 = TcnBlock(out_channels[1], out_channels[2], kernel_size=kernel_size, dilation=2,
                                   transform=transform, senet=False, seperabel=False, pro_conv=False)
        self.tcn_block4 = TcnBlock(out_channels[2], out_channels[3], kernel_size=kernel_size, dilation=2,
                                   transform=transform, senet=False, seperabel=False, pro_conv=False)

        self.forth_classifier = nn.Linear(out_channels[-1], num_class)
        self.initialize_weights()

    def initialize_weights(self):
        # Xavier-init every conv/linear layer and zero its bias.
        for module in self.modules():
            if isinstance(module, (nn.Conv1d, nn.Linear)):
                torch.nn.init.xavier_normal_(module.weight)
                module.bias.data.zero_()

    def get_n_params(self):
        # Count trainable parameters only.
        return sum(np.prod(p.size()) for p in self.parameters() if p.requires_grad)

    def forward(self, x, feature=False):
        out = self.batch(x)
        for block in (self.tcn_block1, self.tcn_block2, self.tcn_block3, self.tcn_block4):
            out = block(out)
        # Global sum-pool over time: (B, C, L) -> (B, C)
        pooled = torch.sum(out, dim=2)
        if feature:
            # Return the pooled embedding instead of class logits.
            return pooled
        return self.forth_classifier(pooled)

class ReverseTCN(nn.Module):
    """TCN branch that reads the input sequence back-to-front.

    Identical architecture to ``TCN`` except the time axis is flipped
    before processing. (Original author's note, translated: the best
    results so far came from adding SENet only to the first layers.)
    """

    def __init__(self, in_channel, out_channels, kernel_size, transform, num_class):
        super(ReverseTCN, self).__init__()

        self.batch = nn.BatchNorm1d(in_channel)
        self.tcn_block1 = TcnBlock(in_channel, out_channels[0], kernel_size=kernel_size, dilation=1,
                                   transform=transform, senet=True, seperabel=False, pro_conv=False)
        self.tcn_block2 = TcnBlock(out_channels[0], out_channels[1], kernel_size=kernel_size, dilation=1,
                                   transform=transform, senet=True, seperabel=False, pro_conv=False)
        self.tcn_block3 = TcnBlock(out_channels[1], out_channels[2], kernel_size=kernel_size, dilation=2,
                                   transform=transform, senet=False, seperabel=False, pro_conv=False)
        self.tcn_block4 = TcnBlock(out_channels[2], out_channels[3], kernel_size=kernel_size, dilation=2,
                                   transform=transform, senet=False, seperabel=False, pro_conv=False)

        self.forth_classifier = nn.Linear(out_channels[-1], num_class)
        self.initialize_weights()

    def initialize_weights(self):
        # Xavier-init every conv/linear layer and zero its bias.
        for module in self.modules():
            if isinstance(module, (nn.Conv1d, nn.Linear)):
                torch.nn.init.xavier_normal_(module.weight)
                module.bias.data.zero_()

    def get_n_params(self):
        # Count trainable parameters only.
        return sum(np.prod(p.size()) for p in self.parameters() if p.requires_grad)

    def forward(self, x, feature=False):
        # Reverse the time axis so the causal convs see the sequence backwards.
        out = self.batch(torch.flip(x, dims=[2]))
        for block in (self.tcn_block1, self.tcn_block2, self.tcn_block3, self.tcn_block4):
            out = block(out)
        # Global sum-pool over time: (B, C, L) -> (B, C)
        pooled = torch.sum(out, dim=2)
        if feature:
            # Return the pooled embedding instead of class logits.
            return pooled
        return self.forth_classifier(pooled)


class BiTCN(nn.Module):
    """Bidirectional TCN: forward and reversed branches, fused by a linear head.

    Each branch produces a 64-d embedding; the concatenated 128-d vector
    is batch-normalised and classified into 52 classes.
    """

    def __init__(self):
        super().__init__()

        self.reversetcn = ReverseTCN(10, [16, 32, 64, 64], kernel_size=3, transform=True, num_class=52)
        self.tcn = TCN(10, [16, 32, 64, 64], kernel_size=3, transform=True, num_class=52)

        self.classifier = nn.Sequential(
            nn.BatchNorm1d(128),
            nn.Linear(128, 52),
        )
        print('number of parameters:', self.get_n_params())
        self.initial_linear()

    def get_n_params(self):
        # Count trainable parameters only.
        return sum(np.prod(p.size()) for p in self.parameters() if p.requires_grad)

    def load_tcn(self, path1, path2):
        """Load pretrained weights for both branches and freeze them."""
        self.tcn.load_state_dict(torch.load(path1))
        self.reversetcn.load_state_dict(torch.load(path2))
        for param in list(self.tcn.parameters()) + list(self.reversetcn.parameters()):
            param.requires_grad = False
        print('training number of parameters:', self.get_n_params())

    def initial_linear(self):
        # Kaiming-init only the fusion head's linear layer.
        for layer in self.classifier.modules():
            if isinstance(layer, nn.Linear):
                torch.nn.init.kaiming_normal_(layer.weight)
                layer.bias.data.zero_()

    def forward(self, x):
        forward_feat = self.tcn(x, True)
        backward_feat = self.reversetcn(x, True)
        # Concatenate branch embeddings along the feature dimension.
        fused = torch.cat((forward_feat, backward_feat), dim=1)
        return self.classifier(fused)

if __name__ == '__main__':
    # Smoke check: constructing the model prints its parameter count.
    model = BiTCN()