import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Embed import DataEmbedding

from models.TimesNet import Model as Super_Model
# from ext.models.TimesNet_DFT import Model as Super_Model  # variant kept for ONNX-export compatibility



class Neck_VAE(nn.Module):
    """Variational bottleneck between the embedding and the TimesNet backbone.

    Compresses each d_model-dimensional feature vector to an 8-d latent,
    samples via the reparameterization trick, and projects back to d_model.
    Returns both the reconstruction and the latent mean.
    """

    def __init__(self, configs):
        super().__init__()
        d_model = configs.d_model
        hidden_dims = 8
        # Two heads over the same input: latent mean and log-variance.
        self.fc11 = nn.Linear(d_model, hidden_dims, bias=True)
        self.fc12 = nn.Linear(d_model, hidden_dims, bias=True)

        # Decoder back up to the model dimension.
        self.fc2 = nn.Linear(hidden_dims, d_model, bias=True)

    def forward(self, input):
        batch, seq_len = input.shape[0], input.shape[1]
        flat = input.reshape(-1, input.shape[2])  # [B*T, C]

        mu = self.fc11(flat)
        # NOTE: a loss term steering logvar into [0.5, 1] was tried with no effect.
        logvar = self.fc12(flat)

        # Reparameterization trick: z = mu + eps * std, eps ~ N(0, I).
        std = torch.exp(0.5 * logvar)
        z = mu + torch.randn_like(std) * std

        decoded = self.fc2(z)
        enc_out = decoded.reshape(batch, seq_len, -1)
        # Accuracy : 0.9884, Precision : 0.8832, Recall : 0.8312, F-score : 0.8564, AUC: 0.7487
        return enc_out, mu

class Neck_V2(nn.Module):
    """Linear bottleneck (V2): compress to 8 dims and project back.

    Identical computation to Neck_V1 except that no latent code is exposed:
    the second return value is always None.
    """

    def __init__(self, configs):
        super().__init__()
        d_model = configs.d_model
        hidden_dims = 8
        self.fc11 = nn.Linear(d_model, hidden_dims, bias=True)

        self.fc2 = nn.Linear(hidden_dims, configs.d_model, bias=True)
        # A down_conv/up_conv pair over the time axis was tried here and
        # dropped (see benchmark comments below).

    def forward(self, input):
        batch, seq_len = input.shape[0], input.shape[1]
        flat = input.reshape(-1, input.shape[2])  # [B*T, C]

        latent = self.fc11(flat)
        recon = self.fc2(latent)

        enc_out = recon.reshape(batch, seq_len, -1)
        # Accuracy : 0.9871, Precision : 0.8692, Recall : 0.8128, F-score : 0.8400, AUC: 0.7394  with down_conv/up_conv
        # Accuracy : 0.9884, Precision : 0.8840, Recall : 0.8302, F-score : 0.8562, AUC: 0.7514  without them, i.e. V1
        return enc_out, None

class Neck_V1(nn.Module):
    """Linear bottleneck (V1): compress each d_model feature vector to 8 dims
    and project back. Returns the reconstruction and the latent code."""

    def __init__(self, configs):
        super().__init__()
        d_model = configs.d_model
        hidden_dims = 8
        self.fc11 = nn.Linear(d_model, hidden_dims, bias=True)

        self.fc2 = nn.Linear(hidden_dims, configs.d_model, bias=True)

    def forward(self, input):
        batch, seq_len = input.shape[0], input.shape[1]
        flat = input.reshape(-1, input.shape[2])  # [B*T, C]

        latent = self.fc11(flat)
        recon = self.fc2(latent)

        enc_out = recon.reshape(batch, seq_len, -1)
        # Accuracy : 0.9885, Precision : 0.8843, Recall : 0.8311, F-score : 0.8569, AUC: 0.7556
        return enc_out, latent

class Model(Super_Model):
    """TimesNet-based anomaly-detection model with a bottleneck "neck"
    inserted between the data embedding and the TimesNet blocks.

    Inherits the backbone and task configuration (task_name, post_process,
    layer, model, layer_norm, projection, seq_len, pred_len) from Super_Model.
    """

    def __init__(self, configs):
        super().__init__(configs)
        self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,
                                           configs.dropout)
        # Neck_V1 scored best among the neck variants tried (see the
        # per-variant benchmark comments on the Neck_* classes above).
        self.neck = Neck_V1(configs)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        """Dispatch on task / post-process mode.

        Returns:
            anomaly_v1: training -> {"dec_out": ..., "val": ...};
                        inference -> dec_out only, [B, L, D].
            anomaly_v2: training -> dec_out, [B, L, D];
                        inference -> (dec_out, val).
            Any other task/mode combination -> None.
        """
        if self.task_name == 'anomaly_detection':
            # NOTE(review): both post-process modes run the same
            # anomaly_detection_v2 computation; they differ only in what is
            # returned during training vs. inference.
            if self.post_process == 'anomaly_v1':
                dec_out, val = self.anomaly_detection_v2(x_enc)
                if self.training:
                    return {"dec_out": dec_out, "val": val}
                else:  # Inference: single output, the decoded reconstruction.
                    return dec_out  # [B, L, D]
            elif self.post_process == 'anomaly_v2':
                dec_out, val = self.anomaly_detection_v2(x_enc)
                if self.training:
                    return dec_out  # [B, L, D]
                else:  # Inference: reconstruction plus the front-end latent code.
                    return dec_out, val  # [B, L, D]

        return None

    def anomaly_detection_v2(self, x_enc):
        """Reconstruct x_enc via embedding -> neck -> TimesNet -> projection.

        Returns (dec_out, latent): the de-normalized reconstruction and the
        neck's second output (the bottleneck code, or None for Neck_V2).
        """
        # Normalization from Non-stationary Transformer.
        means = x_enc.mean(1, keepdim=True).detach()
        x_enc = x_enc - means
        stdev = torch.sqrt(
            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x_enc /= stdev

        # Embedding followed by the bottleneck.
        v = self.enc_embedding(x_enc, None)  # [B, T, C]
        enc_out, mu = self.neck(v)

        # TimesNet backbone.
        for i in range(self.layer):
            enc_out = self.layer_norm(self.model[i](enc_out))
        # Project back to the input feature dimension.
        dec_out = self.projection(enc_out)

        # De-normalization from Non-stationary Transformer.
        dec_out = dec_out * \
                  (stdev[:, 0, :].unsqueeze(1).repeat(
                      1, self.pred_len + self.seq_len, 1))
        dec_out = dec_out + \
                  (means[:, 0, :].unsqueeze(1).repeat(
                      1, self.pred_len + self.seq_len, 1))
        return dec_out, mu

    # Accuracy : 0.9885, Precision : 0.8840, Recall : 0.8312, F-score : 0.8568, AUC: 0.7507  anomaly_detection
    # Accuracy : 0.9912, Precision : 0.8816, Recall : 0.9112, F-score : 0.8961, AUC: 0.7556  anomaly_detection_v2

    @staticmethod
    def custom_loss(input, target):
        """MSE reconstruction loss.

        Accepts either a raw tensor or the training dict produced by the
        'anomaly_v1' branch; only "dec_out" contributes to the loss (a latent
        regularization term on "val" was tried and dropped).
        """
        if isinstance(input, torch.Tensor):
            loss = F.mse_loss(input, target)
        else:
            loss = F.mse_loss(input["dec_out"], target)
        return loss

    
    # @staticmethod
    # def custom_loss(input, target):
    #     if isinstance(input, torch.Tensor):
    #         loss = F.mse_loss(input, target)
    #     else:
    #         w = 1e-1
    #         x1 = input["dec_out"]
    #         # v = input["v"]
    #         loss = F.mse_loss(x1, target) #+ w * torch.mean(torch.abs(v))

    #     return loss
# Accuracy : 0.9885, Precision : 0.8843, Recall : 0.8311, F-score : 0.8569, AUC: 0.7556 


# class Model(Model):
#     def __init__(self, configs):
#         super().__init__(configs)
#         self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq,
#                                            configs.dropout)
#         self.hidden_dims = configs.d_model // 8
#         self.fc11 = nn.Linear(configs.d_model, 3 * self.hidden_dims, bias=True)
#         self.fc2 = nn.Linear(self.hidden_dims, configs.d_model, bias=True)
#         self.self_attn = nn.MultiheadAttention(self.hidden_dims, 4, configs.dropout)


#     def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
#         if self.task_name == 'anomaly_detection':
#             dec_out, v = self.anomaly_detection_v2(x_enc)
#             if self.training:
#                 return {"v": v, "dec_out": dec_out}
#             else: # inference: single output, the decoded reconstruction
#                 return dec_out  # [B, L, D]
#         if self.task_name == 'anomaly_detection_v2':
#             dec_out, v = self.anomaly_detection_v2(x_enc)
#             if self.training:
#                 return dec_out  # [B, L, D]
#             else: # inference: two outputs, decoded reconstruction plus front-end encoding
#                 return dec_out, v  # [B, L, D]
#         return None

#     def anomaly_detection_v2(self, x_enc):
#         # Normalization from Non-stationary Transformer
#         means = x_enc.mean(1, keepdim=True).detach()
#         x_enc = x_enc - means
#         stdev = torch.sqrt(
#             torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
#         x_enc /= stdev

#         # embedding
#         enc = self.enc_embedding(x_enc, None)  # [B,T,C]
#         # m = F.adaptive_avg_pool1d(v.transpose(1, 2), 1).transpose(1, 2)
        
#         x = enc.reshape(-1, enc.shape[2]) # [BxT, C]
#         qkv = self.fc11(x)
#         M = self.hidden_dims
#         q,k,v = torch.split(qkv, [M, M, M], dim=-1)
#         att, _ = self.self_attn(q,k,v)
#         x = self.fc2(v + att)
#         enc_out = x.reshape(enc.shape[0], enc.shape[1], -1)
        
#         # TimesNet
#         for i in range(self.layer):
#             enc_out = self.layer_norm(self.model[i](enc_out))
#         # porject back
#         dec_out = self.projection(enc_out)

#         # De-Normalization from Non-stationary Transformer
#         dec_out = dec_out * \
#                   (stdev[:, 0, :].unsqueeze(1).repeat(
#                       1, self.pred_len + self.seq_len, 1))
#         dec_out = dec_out + \
#                   (means[:, 0, :].unsqueeze(1).repeat(
#                       1, self.pred_len + self.seq_len, 1))
#         return dec_out, enc_out
    
#     @staticmethod
#     def custom_loss(input, target):
#         if isinstance(input, torch.Tensor):
#             loss = F.mse_loss(input, target)
#         else:
#             w = 1e-1
#             x1 = input["dec_out"]
#             # v = input["v"]
#             loss = F.mse_loss(x1, target) #+ w * torch.mean(torch.abs(v))

#         return loss
#     # Accuracy : 0.9877, Precision : 0.8803, Recall : 0.8138, F-score : 0.8457, AUC: 0.7451