# -*- coding: utf-8 -*-
import torch
from torch import nn
import numpy as np
import yaml
import os
import logging

def sequence_mask(X, valid_len, value=0):
    """Overwrite out-of-range entries along dim 1 of `X` with `value`, in place.

    For each row i, positions j >= valid_len[i] are set to `value`.
    When `valid_len` is None, `X` is returned unchanged.

    Args:
        X (Tensor): at least 2-D; dim 1 is the sequence/step dimension.
        valid_len (Tensor or None): per-row valid lengths, shape (batch,).
        value (scalar): fill value for masked positions. Default: 0.

    Returns:
        Tensor: `X` itself (modified in place when masking is applied).
    """
    if valid_len is None:
        return X
    num_steps = X.size(1)
    # [None, :] adds a leading batch axis so the comparison broadcasts
    # against valid_len's trailing axis.
    positions = torch.arange(num_steps, dtype=torch.float32, device=X.device)
    keep = positions[None, :] < valid_len[:, None]
    X[~keep] = value
    return X
    
def masked_softmax(X, valid_len):
    """Softmax over the last axis that ignores positions beyond `valid_len`.

    Args:
        X (Tensor): 3-D tensor of scores.
        valid_len (Tensor or None): 1-D per-batch lengths (broadcast over
            dim 1 of X) or 2-D per-row lengths; None disables masking.

    Returns:
        Tensor: softmax of `X` along the last axis with masked positions ~0.
    """
    if valid_len is None:
        return nn.functional.softmax(X, dim=-1)
    shape = X.shape
    if valid_len.dim() > 1:
        flat_len = valid_len.reshape(-1)
    else:
        # One length per batch element: repeat it for every row in dim 1.
        flat_len = torch.repeat_interleave(valid_len, repeats=shape[1], dim=0)
    # A large negative fill makes exp() of masked entries effectively zero.
    flat_scores = sequence_mask(X.reshape(-1, shape[-1]), flat_len, value=-1e6)
    return nn.functional.softmax(flat_scores.reshape(shape), dim=-1)

class MaskedMSELoss(nn.MSELoss):
    """Mean-squared-error loss with sequence-length and channel masks.

    Shapes (inferred from the masking logic — confirm against callers):
        pred:  (batch_size, num_steps, channel_size)
        label: (batch_size, num_steps, channel_size)
        valid_len: (batch_size,) — steps beyond the valid length get zero weight
        valid_label: (batch_size, channel_size) or None — per-channel weights
    """
    def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
        super().__init__(size_average, reduce, reduction)

    def forward(self, pred, label, valid_len, valid_label=None):
        assert label.device == pred.device, (f"`label` on {label.device},"
                            f"`pred` on {pred.device}")
        # Zero out the weights of time steps past each sequence's valid length.
        weights_len = torch.ones_like(label)
        weights_len = sequence_mask(weights_len, valid_len)
        if valid_label is None:
            weights_label = torch.tensor(1, device=pred.device)
        else:
            # Broadcast per-channel weights over the step dimension.
            weights_label = valid_label[:, None, :]
        # Element-wise loss; avoids mutating self.reduction on every call
        # (the original set self.reduction='none' before super().forward).
        unweighted_loss = nn.functional.mse_loss(pred, label, reduction='none')
        # Weight, sum over steps, then normalize by each valid length.
        weighted_loss = (unweighted_loss * weights_len * weights_label).sum(dim=1) / valid_len[:, None]
        return weighted_loss

# def schedule_sampling(cur_steps, total_steps,
#                       mode=["Inverse_Sigmoid", 800]) -> float:
#     r"""Schedule sampling to mitigate the teacher-forcing exposure bias,
#     based on Google's "Scheduled Sampling for Sequence Prediction
#     with Recurrent Neural Networks", doi:10.5555/2969239.
    
#     This is a method of gradually introducing teaching force.
    
#     Args:
#         cur_step (int): current step of the model training
#         total_step (int): total step use schedule sampling.
#     Returns:
#         (float): the teaching force ratio.
#     """
#     if mode is str or len(mode) == 1:
#         mode = mode
#     else:
#         mode, h_para = mode
#     if mode == 'Naive':
#         return 0.5  # 0.5 for naive use.
#     elif mode == 'Linear':
#         decrement = 1/total_steps
#         return 1 - decrement * cur_steps
#     elif mode == 'Exponential':  # 0.999
#         return h_para**cur_steps
#     elif mode == 'Inverse_Sigmoid':
#         return h_para / (h_para + np.exp(cur_steps/h_para))  # 800
#     else:
#         # no schedule sampling.
#         return 0  

def sigmoid_schedule_sampling(cur_steps, limiter_steps) -> float:
    r"""Inverse-sigmoid decay of the teacher-forcing ratio.

    Implements k / (k + exp(step / k)) with k = `limiter_steps`, following
    "Scheduled Sampling for Sequence Prediction with Recurrent Neural
    Networks" (Bengio et al., 2015).

    Args:
        cur_steps (int): current training step.
        limiter_steps (int): decay scale k; larger keeps the ratio high longer.

    Returns:
        float: teacher-forcing ratio in (0, 1).
    """
    growth = np.exp(cur_steps / limiter_steps)
    return limiter_steps / (limiter_steps + growth)

def _get_prediction(model, enc_inputs, dec_inputs, state, now_tf):
    prediction, state = model(enc_inputs, dec_inputs, state, now_tf)
    return prediction

def _calculate_error(prediction, label, valid_len, valid_channel):
    """Total masked-MSE error for a batch.

    NOTE: per-sample losses are summed (not averaged) — callers are expected
    to normalize by batch size themselves.
    """
    criterion = MaskedMSELoss()
    per_sample = criterion(prediction, label, valid_len, valid_channel)
    return per_sample.sum()

def calculate_l2_error(model, 
                       enc_inputs,
                       dec_inputs, 
                       current_steps,
                       label, 
                       valid_len,
                       valid_channel,
                       lossfn_config: dict,):
    """Compute the batch-averaged masked L2 (MSE) error of the model.

    While `model.training` is True the teacher-forcing ratio decays with an
    inverse-sigmoid schedule of `current_steps`; during evaluation it is 0.

    Args:
        model: seq2seq model called as
            model(enc_inputs, dec_inputs, state, teacher_forcing_ratio),
            returning a (prediction, state) pair.
        enc_inputs (Tensor): encoder input batch.
        dec_inputs (Tensor): decoder input batch.
        current_steps (int): current training step; drives schedule sampling.
        label (Tensor): ground-truth outputs.
        valid_len (Tensor): valid sequence lengths, shape (batch_size,).
        valid_channel (Tensor): valid output channels,
            shape (batch_size, channel_size).
        lossfn_config (dict): must contain key 'limiter_steps', the scale
            of the inverse-sigmoid schedule.

    Returns:
        Tensor: scalar error — masked losses summed over the batch, then
        divided by the batch size.
    """
    limiter_steps = lossfn_config['limiter_steps']
    state = None
    if model.training:
        now_tf = sigmoid_schedule_sampling(current_steps, limiter_steps)
    else:
        now_tf = 0
    prediction = _get_prediction(model, enc_inputs, dec_inputs, state, now_tf)
    batch_size = prediction.shape[0]
    l2_error = _calculate_error(prediction, label, valid_len, valid_channel) / batch_size
    print("    l2_error: ", l2_error)
    print("==================================================================================================")
    return l2_error

def _make_paths_absolute(dir_, config):
    """
    Make all values for keys ending with `_path` absolute to dir_.

    Args:
        dir_ (str): The path of yaml configuration file.
        config (dict): The yaml for configuration file.

    Returns:
        Dict. The configuration information in dict format.
    """
    for key in config.keys():
        if key.endswith("_path"):
            config[key] = os.path.join(dir_, config[key])
            config[key] = os.path.abspath(config[key])
        if isinstance(config[key], dict):
            config[key] = _make_paths_absolute(dir_, config[key])
    return config

def load_yaml_config(file_path):
    """
    Load a YAML configuration file.

    Args:
        file_path (str): The path of yaml configuration file.

    Returns:
        Dict. The configuration information in dict format, with every
        `*_path` value resolved to an absolute path relative to the
        current working directory.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``
    """
    # Read YAML experiment definition file. Explicit encoding avoids
    # locale-dependent decoding of the config file.
    with open(file_path, 'r', encoding='utf-8') as stream:
        config = yaml.safe_load(stream)
    config = _make_paths_absolute(os.path.abspath('.'), config)
    return config

def log_config(log_dir='./logs', model_name="model", permission=0o644):
    """
    Log configuration.
    Args:
        log_dir (str): Directory to save log.
        model_name (str): Project name as prefix of log. Default: "model".
    """
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    log_path = os.path.join(log_dir, f"{model_name}.log")
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        filename=log_path,
                        filemode='w')
    os.chmod(log_path, permission)