import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN(nn.Module):
    """1-D convolutional classifier.

    Five Conv1d/ReLU/MaxPool stages (each pool halves the sequence length)
    feed a fully connected head that ends in a softmax over 6 classes.
    Expects input of shape (batch, length); a channel dim is added inside.
    """

    def __init__(self):
        super().__init__()
        self.save_path = os.path.join("params", "conv.pth")

        # Convolutional feature extractor, built stage by stage.
        conv_layers = []
        channels = [1, 16, 32, 64, 128, 256]
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            conv_layers.append(nn.Conv1d(in_channels=c_in, out_channels=c_out,
                                         kernel_size=5, stride=1, padding=2))
            conv_layers.append(nn.ReLU())
            conv_layers.append(nn.MaxPool1d(kernel_size=2))
        self.conv = nn.Sequential(*conv_layers)

        # Classifier head. The 768 input assumes the flattened conv output
        # is 256 channels x 3 remaining time steps — TODO confirm against
        # the actual input length used by callers.
        fc_layers = []
        widths = [768, 400, 100, 25]
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            fc_layers.append(nn.Linear(in_features=w_in, out_features=w_out))
            fc_layers.append(nn.BatchNorm1d(w_out))
            fc_layers.append(nn.ReLU())
        fc_layers.append(nn.Linear(in_features=25, out_features=6))
        fc_layers.append(nn.Softmax(dim=1))
        self.fc = nn.Sequential(*fc_layers)

    def forward(self, x):
        """Run a (batch, length) batch through conv stack and FC head."""
        features = self.conv(x.unsqueeze(1))   # (batch, 1, L) -> (batch, 256, L')
        flat = features.flatten(start_dim=1)   # (batch, 256 * L')
        return self.fc(flat)

    def initialize_weights(self):
        """Re-initialize: Xavier for conv, identity for batch-norm,
        N(0, 0.1) for linear layers."""
        for module in self.modules():
            if isinstance(module, nn.Conv1d):
                nn.init.xavier_normal_(module.weight.data)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm1d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight.data, 0, 0.1)
                module.bias.data.zero_()


class DNN(nn.Module):
    """Fully connected classifier.

    A pyramid of Linear/BatchNorm1d/ReLU blocks (dropout on two of them)
    narrowing from 4000 units down to ``out_features``, ending in a softmax.

    :param in_features: number of input features per sample
    :param out_features: number of output classes
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.save_path = os.path.join("params", "fc.pth")
        self.classifier = nn.Sequential(
            nn.Linear(in_features, 4000), nn.BatchNorm1d(4000), nn.ReLU(),
            nn.Linear(4000, 2000), nn.Dropout(
                0.2), nn.BatchNorm1d(2000), nn.ReLU(),
            nn.Linear(2000, 1000), nn.BatchNorm1d(1000), nn.ReLU(),
            nn.Linear(1000, 500), nn.Dropout(
                0.2), nn.BatchNorm1d(500), nn.ReLU(),
            nn.Linear(500, 250), nn.BatchNorm1d(250), nn.ReLU(),
            nn.Linear(250, 120), nn.BatchNorm1d(120), nn.ReLU(),
            nn.Linear(120, 60), nn.BatchNorm1d(60), nn.ReLU(),
            nn.Linear(60, 30), nn.BatchNorm1d(30), nn.ReLU(),
            nn.Linear(30, 15), nn.BatchNorm1d(15), nn.ReLU(),
            nn.Linear(15, out_features),
            nn.Softmax(dim=1)
        )

    def initialize_weights(self):
        """Kaiming-init linear layers; reset batch-norm to identity.

        Fix: the second branch previously checked ``nn.LayerNorm``, which
        this network never contains (it uses ``nn.BatchNorm1d``), so
        batch-norm parameters were never reset. Now consistent with
        ``CNN.initialize_weights``.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight.data)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Return class probabilities for a (batch, in_features) input."""
        return self.classifier(x)


class Transformer(nn.Module):
    """Transformer-based classifier.

    A linear embedding feeds a small ``nn.TransformerEncoder``
    (d_model=32, 2 heads, 2 layers), followed by a linear head
    and a softmax over the class dimension.
    """

    def __init__(self, in_features, out_features):
        """Build the network.

        :param in_features: number of input features
        :param out_features: number of output classes
        """
        super().__init__()
        self.save_path = os.path.join("params", "transformer")
        self.embedding = nn.Linear(in_features, 32)
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(32, 2), 2)
        self.fc = nn.Linear(32, out_features)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Embed, encode, then classify the input."""
        encoded = self.encoder(self.embedding(x))
        return self.softmax(self.fc(encoded))

    def initialize_weights(self):
        """Weight initialization — intentionally a no-op (module defaults)."""
        pass


class TransformerEncoder(nn.Module):
    """Stack of custom encoder layers over an embedded, position-encoded
    input. Returns the mean over dim 1 as a fixed-size representation."""

    def __init__(self, input_dim, hidden_dim, num_layers, num_heads):
        super().__init__()
        self.embedding = nn.Linear(input_dim, hidden_dim)
        self.positional_encoding = PositionalEncoding(hidden_dim)
        self.layers = nn.ModuleList(
            TransformerEncoderLayer(hidden_dim, num_heads)
            for _ in range(num_layers)
        )

    def forward(self, x):
        """Encode ``x`` and average the per-position representations."""
        hidden = self.positional_encoding(self.embedding(x))
        for encoder_layer in self.layers:
            hidden = encoder_layer(hidden)
        return hidden.mean(dim=1)

    def initialize_weights(self):
        """No custom initialization; module defaults are kept."""
        pass


class TransformerEncoderLayer(nn.Module):
    """Pre-LN transformer encoder layer.

    Self-attention and a two-layer feed-forward network, each applied to a
    layer-normed input and wrapped in a residual connection.
    """

    def __init__(self, hidden_dim, num_heads):
        super().__init__()
        self.multi_head_attention = nn.MultiheadAttention(
            hidden_dim, num_heads)
        self.feed_forward_network = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
        )
        self.layer_norm1 = nn.LayerNorm(hidden_dim)
        self.layer_norm2 = nn.LayerNorm(hidden_dim)

    def forward(self, x):
        """Apply residual self-attention, then the residual FFN block."""
        normed = self.layer_norm1(x)
        attended, _ = self.multi_head_attention(normed, normed, normed)
        x = x + attended
        return x + self.feed_forward_network(self.layer_norm2(x))

    def initialize_weights(self):
        """No custom initialization; module defaults are kept."""
        pass


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al.) with dropout.

    Adds a fixed sin/cos position signal to a sequence-first input of
    shape (seq_len, batch, hidden_dim) and applies dropout.

    :param hidden_dim: embedding dimension (odd values now supported)
    :param max_len: maximum sequence length to precompute
    :param dropout: dropout probability (new, defaults to the previous
        hard-coded 0.1, so existing callers are unaffected)
    """

    def __init__(self, hidden_dim, max_len=5000, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, hidden_dim)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency schedule: 10000^(-2i / hidden_dim).
        div_term = torch.exp(torch.arange(
            0, hidden_dim, 2).float() * (-math.log(10000.0) / hidden_dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        # Fix: slice div_term so odd hidden_dim no longer raises a shape
        # error (the cosine half has one fewer column in that case; for
        # even dims the slice is a no-op).
        pe[:, 1::2] = torch.cos(position * div_term[:hidden_dim // 2])
        # (max_len, d) -> (max_len, 1, d): broadcasts over the batch dim
        # of a sequence-first input.
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add the encoding for the first x.size(0) positions, then dropout."""
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)

    def initialize_weights(self):
        """No learnable parameters; nothing to initialize."""
        pass


class TransformerDecoder(nn.Module):
    """Stack of custom decoder layers plus a linear output head.

    Embeds the target sequence, runs it through ``num_layers`` decoder
    layers attending to ``encoder_output``, and returns log-softmax scores.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, num_heads):
        super().__init__()
        self.embedding = nn.Linear(input_dim, hidden_dim)
        # self.positional_encoding = PositionalEncoding(hidden_dim)
        self.layers = nn.ModuleList(
            TransformerDecoderLayer(hidden_dim, num_heads)
            for _ in range(num_layers)
        )
        self.output_layer = nn.Linear(hidden_dim, output_dim)

    def forward(self, x, encoder_output):
        """Decode ``x`` while attending to ``encoder_output``."""
        hidden = self.embedding(x)
        # hidden = self.positional_encoding(hidden)
        for decoder_layer in self.layers:
            hidden = decoder_layer(hidden, encoder_output)
        return F.log_softmax(self.output_layer(hidden), dim=-1)

    def initialize_weights(self):
        """No custom initialization; module defaults are kept."""
        pass


class TransformerDecoderLayer(nn.Module):
    """Pre-LN transformer decoder layer.

    Causal (masked) self-attention, cross-attention over the encoder
    output, and a position-wise feed-forward block, each wrapped in a
    residual connection. Inputs are sequence-first, i.e.
    (seq_len, batch, hidden_dim) — the ``nn.MultiheadAttention`` default.
    """

    def __init__(self, hidden_dim, num_heads):
        super(TransformerDecoderLayer, self).__init__()
        self.masked_multi_head_attention = nn.MultiheadAttention(
            hidden_dim, num_heads)
        self.multi_head_attention = nn.MultiheadAttention(
            hidden_dim, num_heads)
        self.feed_forward_network = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)
        )
        self.layer_norm1 = nn.LayerNorm(hidden_dim)
        self.layer_norm2 = nn.LayerNorm(hidden_dim)
        self.layer_norm3 = nn.LayerNorm(hidden_dim)

    def forward(self, x, encoder_output):
        """Run one decoder step: self-attn, cross-attn, FFN (all residual)."""
        # Causal self-attention over the target sequence.
        residual = x
        x = self.layer_norm1(x)
        x, _ = self.masked_multi_head_attention(
            x, x, x, attn_mask=self._generate_mask(x))
        x = residual + x
        # Cross-attention: queries from the decoder, keys/values from the
        # encoder output.
        residual = x
        x = self.layer_norm2(x)
        x, _ = self.multi_head_attention(x, encoder_output, encoder_output)
        x = residual + x
        # Position-wise feed-forward block.
        residual = x
        x = self.layer_norm3(x)
        x = self.feed_forward_network(x)
        x = residual + x
        return x

    def initialize_weights(self):
        """No custom initialization; module defaults are kept."""
        pass

    def _generate_mask(self, x):
        """Build a (seq_len, seq_len) boolean causal mask.

        True above the diagonal blocks attention to future positions.
        Fix: the previous version built a (seq_len, batch, batch) tensor,
        but ``nn.MultiheadAttention`` only accepts a 2-D (L, L) mask or a
        3-D (batch * num_heads, L, L) mask, so the old shape was rejected
        (or misinterpreted) for normal inputs. The 2-D form is broadcast
        over the batch and is correct for every batch size.
        """
        seq_len = x.size(0)
        return torch.triu(
            torch.ones(seq_len, seq_len, dtype=torch.bool, device=x.device),
            diagonal=1)
