import os

from sympy import im
from ext.layers.senn import SENNGC
import torch.nn as nn
import torch.nn.functional as F
import torch
import yaml




def compute_kl_divergence(us):
    """
    Compute the KL divergence between a Gaussian fitted to the input samples
    and an isotropic standard Gaussian N(0, I) using PyTorch.

    Uses the closed form
        KL(N(mu, Sigma) || N(0, I)) = 0.5 * (tr(Sigma) + mu^T mu - d - ln det Sigma)
    (the previous version omitted the 0.5 factor and therefore returned
    twice the divergence).

    Parameters:
    us (Tensor): A 2D tensor with rows as samples and columns as features.

    Returns:
    Tensor: Scalar KL divergence between the fitted Gaussian and N(0, I).

    Raises:
    ValueError: If the computed divergence is NaN.
    """
    # Empirical mean and covariance of the samples.
    mean_p = torch.mean(us, dim=0)
    cov_p = torch.cov(us.t())

    # Dimensionality of the distribution.
    d = mean_p.shape[0]
    # torch.cov returns a 0-dim tensor for a single feature; make it a matrix
    # so eigvalsh/trace/cholesky below work for d == 1 as well.
    cov_p = cov_p.reshape(d, d)

    # Regularize proportionally to the condition number so nearly singular
    # covariance estimates stay positive definite for the log-determinant.
    eigenvalues = torch.linalg.eigvalsh(cov_p)
    condition_number = eigenvalues.max() / eigenvalues.clamp(min=1e-9).min()
    regularization_term = condition_number * 1e-6
    cov_p = cov_p + torch.eye(d, device=us.device) * regularization_term

    # tr(Sigma) term of the closed form.
    trace_term = torch.trace(cov_p)

    # Since the reference mean is zero, the means term is just ||mean_p||^2.
    means_term = torch.dot(mean_p, mean_p)

    # Cholesky-based log-determinant is cheaper and numerically more stable;
    # fall back to logdet if the factorization fails despite regularization.
    try:
        chol = torch.linalg.cholesky(cov_p)
        log_det_cov_p = 2 * torch.log(torch.diagonal(chol)).sum()
    except RuntimeError:
        log_det_cov_p = torch.logdet(cov_p)

    # Closed-form KL divergence (note the 0.5 factor).
    kl_div = 0.5 * (means_term + trace_term - d - log_det_cov_p)
    if torch.isnan(kl_div).any():
        print('nan')
        print(f'mean_p: {mean_p}')
        print(f'cov_p: {cov_p}')
        print(f'trace_term: {trace_term}')
        print(f'means_term: {means_term}')
        print(f'log_det_cov_p: {log_det_cov_p}')
        print(f'kl_div: {kl_div}')
        raise ValueError('KL divergence is NaN')

    return kl_div

def sliding_window_view_torch(x, window_size: int):
    """
    Build a sliding-window view over the first dimension of a 2D tensor.

    Args:
    x (torch.Tensor): The input 2D tensor of shape (T, F).
    window_size (int): Number of consecutive rows per window.

    Returns:
    torch.Tensor: View of shape (T - window_size + 1, window_size, F);
        no data is copied.
    """
    # unfold places the window dimension last, i.e. (T - w + 1, F, w);
    # permute moves it to the middle so each window reads as (w, F).
    return x.unfold(0, window_size, 1).permute(0, 2, 1)


class Model(nn.Module):
    """
    AERCA-style autoencoder for multivariate time series.

    An encoder (SENNGC) predicts the next observation from a window and its
    prediction residuals are treated as exogenous variables ``us``; two
    decoders reconstruct the next observation from windows of ``us`` and of
    the raw series respectively. During training, intermediates are returned
    so that ``custom_loss`` can combine reconstruction, sparsity, smoothness
    and KL regularization terms.
    """

    # Loss hyperparameters shared across instances; populated from the YAML
    # config in __init__ and read by the static custom_loss.
    encoder_alpha = None
    decoder_alpha = None
    encoder_gamma = None
    decoder_gamma = None
    encoder_lambda = None
    decoder_lambda = None
    beta = None

    def __init__(self, configs):
        """
        Build the encoder/decoder networks and load hyperparameters.

        Parameters:
        configs: Runtime configuration object; must expose ``model_id`` and
            may provide ``enc_in``, ``d_model`` and ``e_layers`` which
            override the YAML defaults.
        """
        super(Model, self).__init__()

        self.configs = configs
        self.name = 'AERCA'
        # Load the model-specific YAML config. Use a context manager so the
        # file handle is always closed (the previous version leaked it).
        yml_file_path = os.path.join('./ext/models/configs', f'{configs.model_id.lower()}.yaml')
        with open(yml_file_path, 'r') as yml_file:
            self.yml_conf = yaml.load(yml_file, Loader=yaml.FullLoader)

        # Prefer runtime config values when present; otherwise fall back to
        # the YAML defaults.
        num_vars = configs.enc_in if 'enc_in' in configs else self.yml_conf['num_vars']
        hidden_layer_size = configs.d_model if 'd_model' in configs else self.yml_conf['hidden_layer_size']
        num_hidden_layers = configs.e_layers if 'e_layers' in configs else self.yml_conf['num_hidden_layers']

        window_size = self.yml_conf['window_size']
        stride = self.yml_conf['stride']

        # Encoder infers residuals; `decoder` consumes residual windows and
        # `decoder_prev` consumes raw windows (see decoding()).
        self.encoder = SENNGC(num_vars, window_size, hidden_layer_size, num_hidden_layers)
        self.decoder = SENNGC(num_vars, window_size, hidden_layer_size, num_hidden_layers)
        self.decoder_prev = SENNGC(num_vars, window_size, hidden_layer_size, num_hidden_layers)
        self.num_vars = num_vars
        self.hidden_layer_size = hidden_layer_size
        self.num_hidden_layers = num_hidden_layers
        self.window_size = window_size
        self.stride = stride

        # Class-level loss weights consumed by the static custom_loss.
        Model.encoder_alpha = self.yml_conf['encoder_alpha']
        Model.decoder_alpha = self.yml_conf['decoder_alpha']
        Model.encoder_gamma = self.yml_conf['encoder_gamma']
        Model.decoder_gamma = self.yml_conf['decoder_gamma']
        Model.encoder_lambda = self.yml_conf['encoder_lambda']
        Model.decoder_lambda = self.yml_conf['decoder_lambda']
        Model.beta = self.yml_conf['beta']

        # Detection / root-cause thresholds from the YAML config.
        self.recon_threshold = self.yml_conf['recon_threshold']
        self.root_cause_threshold_encoder = self.yml_conf['root_cause_threshold_encoder']
        self.root_cause_threshold_decoder = self.yml_conf['root_cause_threshold_decoder']
        self.initial_z_score = self.yml_conf['initial_z_score']
        self.causal_quantile = self.yml_conf['causal_quantile']
        self.risk = self.yml_conf['risk']
        self.initial_level = self.yml_conf['initial_level']
        self.num_candidates = self.yml_conf['num_candidates']
        # Whether decoding() adds the residual u_next back into the
        # reconstruction; defaults to True when absent from the YAML.
        self.add_u = self.yml_conf.get('add_u', True)

    def encoding(self, xs):
        """
        Slide a (window_size + 1)-step window over ``xs`` and compute encoder
        residuals.

        Parameters:
        xs (Tensor): 2D tensor of shape (T, num_vars).

        Returns:
        tuple: (us, coeffs, nexts, winds) where ``us`` are the prediction
        residuals, ``coeffs`` the encoder coefficients, and ``nexts``/``winds``
        are aligned so decoding() can pair raw windows with residual windows.
        """
        # (T - window_size, window_size + 1, num_vars) view of the series.
        windows = xs.unfold(0, self.window_size + 1, 1).permute(0, 2, 1).contiguous()
        winds = windows[:, :-1, :]     # history part of each window
        nexts = windows[:, -1, :]      # observation to predict
        preds, coeffs = self.encoder(winds)
        us = preds - nexts             # encoder residuals ("exogenous" signal)
        return us, coeffs, nexts[self.window_size:], winds[:-self.window_size]

    def decoding(self, us, winds, add_u=True):
        """
        Reconstruct the next observations from residual windows and raw windows.

        Parameters:
        us (Tensor): Residuals from encoding(), shape (N, num_vars).
        winds (Tensor): Raw history windows aligned with ``us``.
        add_u (bool): If True, add the current residual back into the output.

        Returns:
        tuple: (nexts_hat, coeffs, prev_coeffs).
        """
        u_windows = sliding_window_view_torch(us, self.window_size + 1)
        u_winds = u_windows[:, :-1, :]
        u_next = u_windows[:, -1, :]

        preds, coeffs = self.decoder(u_winds)
        prev_preds, prev_coeffs = self.decoder_prev(winds)

        if add_u:
            nexts_hat = preds + u_next + prev_preds
        else:
            nexts_hat = preds + prev_preds
        return nexts_hat, coeffs, prev_coeffs

    def forward(self, x, x_mark_enc, x_dec, x_mark_dec, mask=None):
        """
        Run encode/decode on a single series.

        Only batch size 1 is supported; the batch dimension is squeezed away
        before windowing. In training mode, returns the intermediates needed
        by custom_loss; otherwise returns the reconstruction with the batch
        dimension restored.
        """
        assert x.shape[0] == 1, "Batch size must be 1"
        x = x.squeeze(0)

        us, encoder_coeffs, nexts, winds = self.encoding(x)
        nexts_hat, decoder_coeffs, prev_coeffs = self.decoding(us, winds, add_u=self.add_u)
        # In training mode, return everything custom_loss needs.
        if self.training:
            return {"nexts_hat": nexts_hat, "nexts": nexts,
                    "encoder_coeffs": encoder_coeffs, "decoder_coeffs": decoder_coeffs,
                    "prev_coeffs": prev_coeffs, "us": us}
        else:
            return nexts_hat.unsqueeze(0)

    @staticmethod
    def _sparsity_loss(coeffs, alpha):
        # Elastic-net style penalty: blend of mean L2 and L1 norms over dim 1.
        norm2 = torch.mean(torch.norm(coeffs, dim=1, p=2))
        norm1 = torch.mean(torch.norm(coeffs, dim=1, p=1))
        return (1 - alpha) * norm2 + alpha * norm1

    @staticmethod
    def _smoothness_loss(coeffs):
        # Penalize change of coefficients between adjacent time steps.
        return torch.norm(coeffs[:, 1:, :, :] - coeffs[:, :-1, :, :], dim=1).mean()

    @staticmethod
    def custom_loss(input, target):
        """
        Combined training loss.

        If ``input`` is a plain tensor, a windowed MSE against the last T
        steps of ``target`` is returned (inference-style evaluation).
        Otherwise ``input`` is the dict produced by forward() in training
        mode, and the full AERCA objective is assembled from the class-level
        weights set in __init__.
        """
        if isinstance(input, torch.Tensor):
            T = input.shape[1]
            loss = F.mse_loss(input, target[:, -T:, :])

        else:
            nexts_hat = input["nexts_hat"]
            nexts = input["nexts"]
            encoder_coeffs = input["encoder_coeffs"]
            decoder_coeffs = input["decoder_coeffs"]
            prev_coeffs = input["prev_coeffs"]
            us = input["us"]

            loss_recon = F.mse_loss(nexts_hat, nexts)
            loss_encoder_coeffs = Model._sparsity_loss(encoder_coeffs, Model.encoder_alpha)
            loss_decoder_coeffs = Model._sparsity_loss(decoder_coeffs, Model.decoder_alpha)
            loss_prev_coeffs = Model._sparsity_loss(prev_coeffs, Model.decoder_alpha)
            loss_encoder_smooth = Model._smoothness_loss(encoder_coeffs)
            loss_decoder_smooth = Model._smoothness_loss(decoder_coeffs)
            loss_prev_smooth = Model._smoothness_loss(prev_coeffs)

            # Residuals are softmax-normalized per row before the KL term.
            us = F.softmax(us, dim=1)
            loss_kl = compute_kl_divergence(us)

            loss = (loss_recon +
                    Model.encoder_lambda * loss_encoder_coeffs +
                    Model.decoder_lambda * (loss_decoder_coeffs + loss_prev_coeffs) +
                    Model.encoder_gamma * loss_encoder_smooth +
                    Model.decoder_gamma * (loss_decoder_smooth + loss_prev_smooth) +
                    Model.beta * loss_kl)

        return loss
# Accuracy : 0.9855, Precision : 0.8587, Recall : 0.7801, F-score : 0.8175, AUC: 0.7371 