#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
# File       : module_calibration.py
# Time       ：2024/11/29 19:29
# Author     ：Zeqing Huang
# contact    : huangzq6@mail2.sysu.edu.cn
# version    ：python 3.8+ (with Scipy 1.8.0+)
# Description：
    This module is used to calibrate the raw forecasts by
    (1) linear scaling; (2) quantile mapping; (3) Bernoulli-Gamma-Gaussian model;
    New version for DLUT for operational forecast calibration

References:
    Huang Z, Zhao T, Zhang Y, et al. A five-parameter Gamma-Gaussian model to calibrate monthly and seasonal
    GCM precipitation forecasts[J]. Journal of Hydrology, 2021, 603: 126893.
    Huang Z, Zhao T, Xu W, et al. A seven-parameter Bernoulli-Gamma-Gaussian model to calibrate subseasonal to seasonal
    precipitation forecasts[J]. Journal of Hydrology, 2022, 610: 127896.
    Huang Z, Zhao T. pyNMME: A python toolkit to retrieve, calibrate and verify seasonal precipitation
    forecasts[J]. Environmental Modelling & Software, 2023, 166: 105732.


"""
#%%
import numpy as np
from scipy import stats, optimize
from scipy.stats import norm, multivariate_normal
import warnings
from typing import Union, Optional
import copy
from . import model_config

#%% common setting
# Module-level configuration pulled from model_config so all calibration
# classes below share one consistent set of defaults.
censor_fill_value = model_config.censor_fill_value  # value assigned to censored (below-threshold) amounts
ens_dim = model_config.ens_dim  # axis index of the ensemble dimension in 2D forecast arrays
dist_type = model_config.dist_type # first for forecasts, second for observations
cens_thres = model_config.cens_thres # default censoring threshold
# set the number of ensemble for the Bernoulli-Gamma-Gaussian model
n_number = model_config.n_number
fix_rnd_seed = model_config.fix_rnd_seed  # default seed used when no random generator is supplied

param_nms = model_config.param_nms  # distribution name -> list of scipy parameter names
least_cens_coeff = model_config.least_cens_coeff  # lower-bound coefficient for censored CDF values
sample_size_tol = model_config.sample_size_tol  # minimum number of uncensored samples required to fit
invalid_para = model_config.invalid_para  # sentinel value marking unfitted/invalid parameters

#%% linear scaling
    
class LinearScaling:
    """
    Calibrate raw forecasts by linear scaling.

    The scaling coefficient rescales forecasts so that, over the training
    samples, the total forecast amount matches the total observed amount.
    """

    def __init__(self, c: float):
        """
        Initialize the LinearScaling instance.

        Args:
            c: scaling coefficient applied multiplicatively to raw forecasts
        """
        self.c = c

    @classmethod
    def fit(cls, fcst: np.ndarray, obs: np.ndarray):
        """
        Fit the linear scaling model and return an instance with the fitted
        coefficient.

        Args:
            fcst: forecasts in training samples, shape of [n_sample, n_ens] or [n_sample,]
            obs: observations in training samples, shape of [n_sample,]

        Returns:
            LinearScaling: an instance with the fitted scaling coefficient
        """
        # reduce the ensemble dimension (if any) to the ensemble mean
        temp_fcst = cls._check_fcst_dimension(fcst)
        # ratio of total observed amount to total forecast amount
        c = np.nansum(obs) / np.nansum(temp_fcst)
        return cls(c)

    def calibrate(self, tst_sample: np.ndarray):
        """
        Apply linear scaling to new forecasts.

        Args:
            tst_sample: raw forecasts to be calibrated

        Returns:
            np.ndarray: calibrated forecasts (a new array; input is untouched)
        """
        # multiplication already returns a new array, so no explicit copy is needed
        return tst_sample * self.c

    @staticmethod
    def _check_fcst_dimension(fcst: np.ndarray) -> np.ndarray:
        """
        Check the dimension of fcst and return the appropriate array.
        If fcst is 1D, return it directly. If fcst is 2D, return the mean
        along the ensemble dimension.

        Args:
            fcst: forecasts in training samples, shape of [n_sample, n_ens] or [n_sample,]

        Returns:
            np.ndarray: processed forecast array

        Raises:
            ValueError: if fcst has more than two dimensions
        """
        if fcst.ndim == 1:
            return fcst
        elif fcst.ndim == 2:
            return np.nanmean(fcst, axis=ens_dim)
        else:
            raise ValueError('The dimension of fcst is wrong.')


#%% quantile mapping

class QuantileMapping:
    """
    The class to calibrate the raw forecasts by quantile mapping.
    It has been updated to handle different distributions.
    References:
        Statistical bias correction for daily precipitation in regional climate models over Europe (2010)
    """

    def __init__(self,
                 para: Optional[dict] = None,
                 dist: Optional[tuple] = dist_type,):
        """
        Initialize the QM instance from fitted parameters.

        Args:
            para: fitted parameters for the distributions; a dict with keys
                'fcst' and 'obs', each holding the marginal distribution
                parameters plus the censoring probability under key 'p0'
            dist: distribution names for (forecasts, observations)

        Raises:
            ValueError: if para is not provided
        """
        if para is None:
            raise ValueError('The fitted parameter dict "para" must be provided.')
        self.dist = dist
        # deep-copy so that popping 'p0' below does not mutate the caller's dict
        self.dist_para = copy.deepcopy(para)
        self.censor_cdf = {}
        for key in self.dist_para.keys():
            # separate the censoring probability from the distribution parameters
            self.censor_cdf[key] = self.dist_para[key].pop('p0', None)

    @classmethod
    def init_from_array(cls, para_fcst: np.ndarray, para_obs: np.ndarray, dist: tuple = dist_type):
        """
        Initialize the QM instance from parameter arrays laid out as
        [p0, a, loc, scale].

        Args:
            para_fcst: parameters for forecasts, 1D array
            para_obs: parameters for observations, 1D array
            dist: distribution names for (forecasts, observations)

        Returns:
            QuantileMapping: an instance with the given parameters
        """
        para = dict(
            fcst = dict(zip(param_nms[dist[0]], para_fcst[1:])),
            obs = dict(zip(param_nms[dist[1]], para_obs[1:]))
        )
        para['fcst']['p0'] = para_fcst[0]
        para['obs']['p0'] = para_obs[0]

        return cls(para=para, dist=dist)

    @classmethod
    def fit(cls,
            fcst: np.ndarray,
            obs: np.ndarray,
            threshold: Union[None, np.ndarray] = None,
            dist: tuple = dist_type):
        """
        Fit the quantile mapping model and return an instance with fitted parameters.

        Args:
            fcst: forecasts in training samples, 1D or 2D array, [n_sample, n_ens]
            obs: observations in training samples, 1D array
            threshold: threshold to mask the data (censoring), 1D or 2D, e.g., (0.1, 0.1) or thresholds changing with time [2, n_sample]
            dist: distribution to fit the data

        Returns:
            QuantileMapping: an instance with fitted parameters
        """
        flatten_data = {
            'fcst': fcst.flatten(order='C'),
            'obs': obs.flatten(order='C')
        }
        cens_mask = {}
        # case of no censoring: every sample is treated as uncensored
        if threshold is None:
            cens_mask['fcst'] = np.ones(len(flatten_data['fcst']), dtype=bool)
            cens_mask['obs'] = np.ones(len(flatten_data['obs']), dtype=bool)
        else:
            threshold = np.array(threshold)
            cens_mask['obs'] = np.greater(flatten_data['obs'], threshold[1])
            # threshold can be constant or time-varying
            if threshold.ndim > 1: # time-varying threshold
                assert (
                    len(fcst) == threshold.shape[1]
                ), 'The length of threshold should be equal to the number of samples.'
                # broadcast the per-sample threshold across the ensemble dimension
                cens_mask['fcst'] = np.greater(fcst, threshold[0][:, np.newaxis]).flatten(order='C')
            else: # constant threshold
                cens_mask['fcst'] = np.greater(flatten_data['fcst'], threshold[0])

        # dict for parameters; each variable gets its distribution parameters plus p0
        para = {}
        for i, key in enumerate(flatten_data.keys()):
            # remove nan values (keep masks aligned with the data)
            tmp_nan_mask = np.isnan(flatten_data[key])
            flatten_data[key] = flatten_data[key][~tmp_nan_mask]
            cens_mask[key] = cens_mask[key][~tmp_nan_mask]
            # fit the marginal distribution on the uncensored samples only
            tmp_para = getattr(cls, f'{dist[i]}_fit')(flatten_data[key][cens_mask[key]])

            para_dict = dict(zip(param_nms[dist[i]], tmp_para))
            # probability of censoring, using the i / (N + 1) plotting position
            para_dict['p0'] = np.sum(~cens_mask[key]) / (len(flatten_data[key]) + 1.0)
            para[key] = para_dict

        return cls(para=para, dist=dist)

    def calibrate(self,
                  tst_sample: np.ndarray,
                  pred_censor: Union[float, None] = None,
                  rnd_seed: Union[np.random.Generator, int, None] = None):
        """
        Generate quantile mapped forecasts.

        Args:
            tst_sample: raw forecasts to be calibrated
            pred_censor: threshold to mask the data (censoring)
            rnd_seed: int seed or numpy Generator used to randomize the CDF of
                censored values

        Returns:
            np.ndarray: calibrated forecasts with the same shape as tst_sample
        """
        # NOTE(review): the default seed here (42) differs from fix_rnd_seed
        # used by the BGG class — confirm whether this is intentional.
        if rnd_seed is None:
            rnd_seed = np.random.default_rng(seed=42)
        elif isinstance(rnd_seed, (int, np.integer)):
            rnd_seed = np.random.default_rng(seed=rnd_seed)
        else:
            assert isinstance(rnd_seed, np.random.Generator), 'The random seed is wrong.'

        # flatten() already returns a copy, so no explicit copy is needed
        flat_tst_sample = tst_sample.flatten(order='C')
        flat_tst_sample_nan_mask = np.isnan(flat_tst_sample)
        flat_tst_sample_clean = flat_tst_sample[~flat_tst_sample_nan_mask]

        if pred_censor is None:
            censored_mask = np.zeros_like(flat_tst_sample_clean, dtype=bool)
        else:
            censored_mask = flat_tst_sample_clean <= pred_censor

        tst_sample_cdf = np.zeros_like(flat_tst_sample_clean, dtype=float)
        tst_sample_cdf[~censored_mask] = getattr(stats, self.dist[0]).cdf(flat_tst_sample_clean[~censored_mask], **self.dist_para['fcst'])

        # note: there can be outliers of which CDF is one
        tst_sample_cdf[tst_sample_cdf == 1.] = 0.999

        # Considering the existence of 0, the calculated CDF should be converted.
        # To ensure that the maximum of CDF is less or equal to 1.
        tst_sample_cdf2 = tst_sample_cdf * (1.0 - self.censor_cdf['fcst']) + self.censor_cdf['fcst']

        # when x_cdf0 is greater than y_cdf0
        # random uniform samples facilitate the mapping of censored values
        tst_sample_cdf2[censored_mask] = rnd_seed.uniform(0, self.censor_cdf['fcst'], size=np.sum(censored_mask))

        # Quantile mapping, the CDF should be modified firstly.
        tst_sample_cdf2y = (tst_sample_cdf2 - self.censor_cdf['obs']) / (1.0 - self.censor_cdf['obs'])
        cdf_less0_mask = tst_sample_cdf2y <= 0.0

        # quantile mapped non-NaN values
        tst_sample_qm = np.full_like(flat_tst_sample_clean, np.nan)
        # the precipitation is set to censor_fill_value when the CDF falls below
        # the observed censoring probability
        tst_sample_qm[cdf_less0_mask] = censor_fill_value
        # for the rest of the data, the quantile mapping is applied
        tst_sample_qm[~cdf_less0_mask] = getattr(stats, self.dist[1]).ppf(tst_sample_cdf2y[~cdf_less0_mask], **self.dist_para['obs'])

        # reassemble results, re-inserting NaNs at their original positions
        # Note: should not use qm_fcst[prdor_nan_mask][cdf_less0_mask] to assign values
        qm_fcst = np.full(flat_tst_sample.shape, np.nan)
        qm_fcst[~flat_tst_sample_nan_mask] = tst_sample_qm
        qm_fcst_reshape = qm_fcst.reshape(tst_sample.shape, order='C')

        return qm_fcst_reshape

    @staticmethod
    def gamma_fit(data: np.ndarray):
        """Fit a two-parameter gamma (loc fixed at 0); scale seeded by var/mean."""
        temp_scale = (data - data.min()).var() / (data - data.min()).mean()
        temp_para = stats.gamma.fit(data, scale=temp_scale, floc=0)
        return temp_para

    @staticmethod
    def norm_fit(data: np.ndarray):
        """Fit a normal distribution by maximum likelihood."""
        return stats.norm.fit(data)


#%% Bernoulli-Gamma-Gaussian

class BGG:
    """
    The class to calibrate raw forecasts by the Bernoulli-Gamma-Gaussian model.

    Forecasts and observations are each described by a censored marginal
    distribution, mapped to standard normal space (NQT) and linked by a
    bivariate normal correlation from which calibrated ensembles are drawn.
    """
    # set the model to tolerate CDF values close to 1 or 0
    # default is True for applications, but False for investigations
    MODE_CDF_TOL = True

    def __init__(
        self,
        para_fcst: dict,
        para_obs: dict,
        corr: float,
        dist_type: tuple = dist_type,
        model_state: bool = True,
    ):
        """
        Initialize BGG with fitted parameters.

        Args:
            para_fcst: fitted parameters for forecasts
            para_obs: fitted parameters for observations
            corr: correlation coefficient in transformed (normal) space
            dist_type: distribution types for forecasts and observations
            model_state: True when fitting succeeded; when False, calibrate()
                falls back to replicating the raw forecast
        """
        self.para_fcst = para_fcst
        self.para_obs = para_obs
        self.corr = corr
        self.model_state = model_state

        if self.model_state:
            # NQT transformers: index 0 for forecasts, index 1 for observations
            self.fitted_NQT = [
                DistTrans(para_fcst),
                DistTrans(para_obs)
            ]
            self.dist_type = dist_type
        else:
            self.fitted_NQT = None
            self.dist_type = None
            warnings.warn('The model is not fitted. Raw forecasts will be returned.')

    @classmethod
    def init_from_array(
        cls,
        para_fcst_array: np.ndarray,
        para_obs_array: np.ndarray,
        corr: float,
        dist_type: tuple = dist_type,
    ):
        """
        Initialize the BGG instance from arrays.
        [p0, a, loc, scale] for para_fcst_array and para_obs_array

        Args:
            para_fcst_array: parameters for forecasts, 1D array
            para_obs_array: parameters for observations, 1D array
            corr: correlation coefficient
            dist_type: distribution types for forecasts and observations

        Returns:
            BGG: an instance of BGG with fitted parameters
        """
        para_fcst = {
            dist_type[0] : dict(zip(['p0'] + param_nms[dist_type[0]], para_fcst_array))
        }
        para_obs = {
            dist_type[1] : dict(zip(['p0'] + param_nms[dist_type[1]], para_obs_array))
        }

        # any NaN parameter marks the model as unfitted
        if (
            np.isnan(para_fcst_array).any()
            or np.isnan(para_obs_array).any()
            or np.isnan(corr)
        ):
            model_state = False
        else:
            model_state = True

        return cls(para_fcst, para_obs, corr, dist_type, model_state)

    @classmethod
    def fit(
        cls,
        fcst: np.ndarray,
        obs: np.ndarray,
        threshold: Union[tuple, np.ndarray] = (None, None),
        dist_type: tuple = dist_type,
        censor_corr: bool = True,
    ):
        """
        Fit the BGG model parameters.

        Args:
            fcst: forecasts in training samples, 1D or 2D [n_sample, n_ens]
            obs: observations in training samples, 1D
            threshold: censoring thresholds for forecasts and observations
            dist_type: distribution types for forecasts and observations
            censor_corr: whether to use censored correlation estimation

        Returns:
            BGG instance with fitted parameters; an unfitted instance when
            there are too few uncensored samples
        """
        # reduce ensemble forecasts (if 2D) to the ensemble mean
        if fcst.ndim == 1:
            tmp_fcst = fcst
        elif fcst.ndim == 2:
            tmp_fcst = np.nanmean(fcst, axis=ens_dim)
        else:
            raise ValueError('The dimension of fcst is wrong.')

        if obs.ndim != 1:
            raise ValueError('The dimension of obs is wrong.')

        assert len(tmp_fcst) == len(obs), 'The length of forecasts and observations should be equal.'

        fitted_params = []
        trans_data = []
        censor_masks = []

        for data, thresh, dist in zip([tmp_fcst, obs], threshold, dist_type):
            # Fit distribution
            if thresh is None:
                tmp_cens_mask = np.ones_like(data, dtype=bool)
            else:
                tmp_cens_mask = data > thresh
            # return the unfitted model when the number of uncensored values is less than sample_size_tol
            if tmp_cens_mask.sum() < sample_size_tol:
                warnings.warn(
                    f"The number of uncensored value is {tmp_cens_mask.sum()}, which should be more than {sample_size_tol}.",
                )
                para_fcst = {
                    dist_type[0]: dict(
                        zip(
                            ["p0"] + param_nms[dist_type[0]],
                            np.full(len(param_nms[dist_type[0]]) + 1, invalid_para),
                        )
                    )
                }
                para_obs = {
                    dist_type[1]: dict(
                        zip(
                            ["p0"] + param_nms[dist_type[1]],
                            # fix: size by the observation distribution's parameter
                            # list (was dist_type[0], wrong when the two
                            # distributions have different parameter counts)
                            np.full(len(param_nms[dist_type[1]]) + 1, invalid_para),
                        )
                    )
                }

                return cls(para_fcst, para_obs, invalid_para, dist_type, False)
            else:
                dist_model = DistTrans.dist_fit(data, censored_threshold=thresh, dist_type=dist)
                # Transform to CDFs then to normal
                tmp_cdf_vals = dist_model.trans_cdf(data, tmp_cens_mask, dist)

                if cls.MODE_CDF_TOL:
                    # clamp exact 0/1 CDFs so the normal quantile stays finite
                    tmp_cdf_vals[tmp_cdf_vals == 0.] = 0.001
                    tmp_cdf_vals[tmp_cdf_vals == 1.] = 0.999

                judge_CDF(tmp_cdf_vals)
                # save NQT normalized values and censoring masks for estimating correlation
                trans_data.append(CDF2norm(tmp_cdf_vals))
                censor_masks.append(tmp_cens_mask)

                fitted_params.append(dist_model.full_param_set)

        # Estimate correlation in normal space
        if censor_corr:
            corr_mle = CorrMLE(trans_data[0], trans_data[1], censor_masks)
            corr = corr_mle.estimate_corr_brent().x
        else:
            corr = stats.pearsonr(trans_data[0], trans_data[1])[0]

        return cls(fitted_params[0], fitted_params[1], corr, dist_type, True)

    def calibrate(
        self,
        tst_sample: float,
        cens_value: Union[float, None] = None,
        rnd_seed: Union[int, np.random.Generator, None] = None,
    ):
        """
        Generate an ensemble of calibrated forecasts for one raw forecast.

        Args:
            tst_sample: raw forecast to be calibrated
            cens_value: threshold for censoring the test sample
            rnd_seed: random seed or generator for sampling

        Returns:
            np.ndarray of length n_number; when the model is unfitted the
            raw forecast is replicated instead
        """
        if not self.model_state:
            # unfitted model: fall back to replicating the raw forecast
            return np.full(n_number, tst_sample)
        else:
            return self._calibrate(tst_sample, cens_value, rnd_seed)

    def _calibrate(
        self,
        tst_sample: float,
        cens_value: Union[float, None] = None,
        rnd_seed: Union[int, np.random.Generator, None] = None,
    ):
        """
        Generate calibrated forecasts.

        Args:
            tst_sample: raw forecast to be calibrated
            cens_value: threshold for censoring the test sample
            rnd_seed: random seed for generating samples

        Returns:
            array of calibrated forecasts
        """
        tst_sample_array = np.array([tst_sample])

        # set the censoring mask for the test sample
        if cens_value is None:
            tst_sample_mask = np.ones_like(tst_sample_array, dtype = bool)
        else:
            tst_sample_mask = np.greater(tst_sample_array, cens_value)

        # Handle the situation when there is no censored value in the training samples, but there is in the test samples
        if ~tst_sample_mask[0] and self.fitted_NQT[0].censor_p == 0.:
            censor_px = getattr(stats, self.dist_type[0]).cdf(cens_value, **self.fitted_NQT[0].para_set[self.dist_type[0]])
            tst_sample_cdf = np.array([censor_px])
        else:
            tst_sample_cdf = self.fitted_NQT[0].trans_cdf(
                data2btrans = tst_sample_array,
                data_mask = tst_sample_mask,
                dist_type = self.dist_type[0],
            )
            censor_px = self.fitted_NQT[0].censor_p

        if self.MODE_CDF_TOL:
            # clamp exact 0/1 CDFs so the normal quantile stays finite
            tst_sample_cdf[tst_sample_cdf == 0.] = 0.001
            tst_sample_cdf[tst_sample_cdf == 1.] = 0.999

        # judge if the test sample is out of the bound of distribution
        judge_CDF(tst_sample_cdf)

        if isinstance(rnd_seed, (int, np.integer)):
            rnd_seed = np.random.default_rng(seed=rnd_seed)
        elif rnd_seed is None:
            rnd_seed = np.random.default_rng(seed=fix_rnd_seed)
        else:
            assert isinstance(rnd_seed, np.random.Generator)

        tst_x = CDF2norm(tst_sample_cdf)
        # get the calibrated value (y) through the conditional distribution
        prdt_y = conditional_distribution(
            corr = self.corr,
            new_x = tst_x[0],
            censor_p = censor_px,
            new_x_mask = tst_sample_mask[0],
            rnd_seed=rnd_seed
        )

        # back transform the obtained y variables to data space
        prdt_y2 = self.fitted_NQT[1].trans_ppf(
            array = prdt_y,
            dist_type = self.dist_type[1],
        )

        return prdt_y2

    def clim_y(self, rnd_seed: Union[int, np.random.Generator, None] = None,):
        """
        Generate climatology forecasts by sampling the fitted observation
        distribution directly (no conditioning on the forecast).

        Args:
            rnd_seed: random seed or generator for sampling

        Returns:
            np.ndarray of length n_number with climatological samples
        """
        if isinstance(rnd_seed, (int, np.integer)):
            rnd_seed = np.random.default_rng(seed=rnd_seed)
        elif rnd_seed is None:
            rnd_seed = np.random.default_rng(seed=fix_rnd_seed)
        else:
            assert isinstance(rnd_seed, np.random.Generator)

        # standard normal draws, back-transformed through the observation NQT
        norm_array = rnd_seed.normal(size=n_number)

        clim_array = self.fitted_NQT[1].trans_ppf(
            array = norm_array,
            dist_type = self.dist_type[1],
        )

        return clim_array

#%%

class DistTrans:
    """
    Class to fit censored marginal distributions, transform data to CDF
    values (and on to normal space), and back-transform normal values.
    """
    # distributions supported by dist_fit / trans_cdf / trans_ppf
    dist_list = ['gamma', 'pearson3', 'kappa4', 'lognorm', 'genextreme', 'genlogistic', 'norm']

    def __init__(self, para_set: dict):
        """
        Args:
            para_set: mapping of distribution name -> parameter dict; each
                parameter dict carries the censoring probability under 'p0'
        """
        self.full_param_set = para_set  # just used to save the full parameter set
        self.para_set = copy.deepcopy(para_set)

        # delete p0 for all parameter sets; censor_p keeps the last popped
        # value (all entries are expected to share one censoring probability)
        for dist in self.para_set.keys():
            self.censor_p = self.para_set[dist].pop('p0')

    @classmethod
    def dist_fit(
        cls,
        data: np.ndarray,
        dist_type: Union[str, list],
        censored_threshold: Optional[Union[np.ndarray, float]] = None,
        min_length: int = 10,
        tol_sample: bool = True,
    ):
        """
        Fit the parameters of statistical distributions.

        Args:
            data: data to be fitted
            dist_type: type of distribution, a single name or a list of names
            censored_threshold: threshold used for censoring, float or array (1D, with length of n)
            min_length: minimum number of distinct uncensored values required
            tol_sample: if True, only warn when samples are too few; otherwise raise

        Returns:
            DistTrans: an instance holding the fitted parameter set(s)
        """
        nan_mask = np.isnan(data)

        if censored_threshold is None:
            cens_mask = np.ones_like(data, dtype = bool)
            censor_p = 0.
        else:
            cens_mask = np.greater(data, censored_threshold)
            # probability of censoring with the i / (N + 1) plotting position,
            # accounting for nan values in the denominator.
            # References:
            #     Notes and correspondence Plotting positions in extreme value analysis
            #     https://journals.ametsoc.org/doi/10.1175/JAM2349.1
            censor_p = np.sum(data <= censored_threshold) / (np.sum(~nan_mask) + 1)

        # exclude all the censored data points and nan values
        fitting_data = data[np.logical_and(~nan_mask, cens_mask)]

        if len(np.unique(fitting_data)) < min_length:
            # CensoredWarning / SampleConstantError are defined elsewhere in this module
            if tol_sample:
                warnings.warn('The number of non-censored value is less than {0}.'.format(min_length), CensoredWarning)
            else:
                raise SampleConstantError('The number of non-censored value is less than {0}.'.format(min_length))

        # estimated parameters for each requested distribution;
        # normalize to a list so one loop handles both input forms
        para_set = {}
        dist_names = [dist_type] if isinstance(dist_type, str) else dist_type
        for dist in dist_names:
            cls.dist_type_check(dist)
            tmp_para = dict(
                zip(param_nms[dist], getattr(cls, '_{0}_fit'.format(dist))(fitting_data))
            )
            tmp_para['p0'] = censor_p
            para_set[dist] = tmp_para

        return cls(para_set)

    def trans_cdf(self, data2btrans: np.ndarray, data_mask: np.ndarray, dist_type: str,):
        """
        Transform the variable to standard uniform distribution [0,1].
        The censored data is considered, so the CDF = p + (1 - p) * CDF(x).
        CDF(x) represents the CDF value calculated by the statistical distribution.

        Args:
            data2btrans: data to be transformed
            data_mask: censoring mask of data, True for data larger than threshold
            dist_type: the type of fitted distribution

        Returns:
            np.ndarray: the CDF of all samples according to the statistical distributions
        """
        self.dist_type_check(dist_type)

        # set array for CDF
        array_cdf = np.full_like(data2btrans, np.nan, dtype = np.float64)

        # censored data is assigned the censoring probability directly
        array_cdf[~data_mask] = self.censor_p
        array_cdf[data_mask] = self.censor_p + (1 - self.censor_p) *\
                               getattr(stats, dist_type).cdf(data2btrans[data_mask], **self.para_set[dist_type])
        # handle the existence of nan values
        array_cdf[np.isnan(data2btrans)] = np.nan

        # sanity check: the CDF must preserve the ordering of the data
        sort_a = np.argsort(data2btrans[data_mask])
        sort_b = np.argsort(array_cdf[data_mask])

        if not np.all(sort_a == sort_b):
            warnings.warn('The sorted index of data and CDF are not equal.')

        return array_cdf

    def trans_ppf(self, array: np.ndarray, dist_type: str,):
        """
        Back-transform standard-normal values to data space through the
        fitted (censored) distribution.

        Args:
            array: the array to be ppf transformed (standard normal values)
            dist_type: the type of distribution

        Returns:
            np.ndarray: array after ppf; values whose CDF falls below the
            censoring probability are set to censor_fill_value
        """
        self.dist_type_check(dist_type)
        # back transform the value based on the standard normal distribution
        array_cdf = stats.norm.cdf(array, loc = 0, scale = 1)

        # back transform the data with consideration of censored data
        array_cdf2 = (array_cdf - self.censor_p) / (1 - self.censor_p)

        cdf_mask = array_cdf2 > 0.0

        array_ppf = np.full_like(array, np.nan)

        # the data with CDF lower than p is set to be censored
        array_ppf[~cdf_mask] = censor_fill_value

        array_ppf[cdf_mask] = getattr(self, '_{0}_ppf'.format(dist_type))(
            array_cdf2[cdf_mask], self.para_set[dist_type]
        )

        # sanity check: the ppf must preserve the ordering of the CDF
        sort_a = np.argsort(array_cdf2[cdf_mask])
        sort_b = np.argsort(array_ppf[cdf_mask])

        if not np.all(sort_a == sort_b):
            warnings.warn('The sorted index of CDF and PPF are not equal.')

        return array_ppf

    #%% gamma distribution
    @staticmethod
    def _gamma_fit(data: np.ndarray):
        """
        Fit a two-parameter gamma distribution (loc fixed at 0).

        refer to
            https://zhuanlan.zhihu.com/p/37976562
            https://blog.csdn.net/weixin_41875052/article/details/79843374
            The β is the inverse scale parameter, and the initial values of the
            parameters are calculated from the mean and variance of the variate
            (method of moments: shape * scale = mean, after subtracting loc).

        Returns:
            tuple: the fitted (shape, loc, scale) parameters
        """
        # initial guess of scale from the method of moments
        temp_scale = (data - data.min()).var() / (data - data.min()).mean()

        # floc fixes the location parameter at 0 so only the
        # two-parameter gamma distribution is fitted
        temp_para = stats.gamma.fit(data, scale = temp_scale, floc = 0)

        return temp_para

    @staticmethod
    def _gamma_ppf(array_cdf, para):
        return stats.gamma.ppf(array_cdf, **para)

    #%% normal distribution
    @staticmethod
    def _norm_fit(data):
        return stats.norm.fit(data)

    @staticmethod
    def _norm_ppf(array_cdf, para):
        return stats.norm.ppf(array_cdf, **para)

    #%% pearson3 distribution
    @staticmethod
    def _pearson3_fit(data: np.ndarray):
        return stats.pearson3.fit(data)

    @staticmethod
    def _pearson3_ppf(array_cdf, para):
        return stats.pearson3.ppf(array_cdf, **para)

    #%% Kappa 4 distribution
    @staticmethod
    def _kappa4_fit(data):
        return stats.kappa4.fit(data)

    @staticmethod
    def _kappa4_ppf(array_cdf, para):
        return stats.kappa4.ppf(array_cdf, **para)

    #%% Log-normal distribution
    @staticmethod
    def _lognorm_fit(data):
        # loc is held at 0 to make it a two-parameter distribution:
        # with the standard form the variable must be larger than 0 when loc = 0,
        # and precipitation cannot be negative
        return stats.lognorm.fit(data, floc = 0.0)

    @staticmethod
    def _lognorm_ppf(array_cdf, para):
        return stats.lognorm.ppf(array_cdf, **para)

    #%% generalized extreme distribution
    @staticmethod
    def _genextreme_fit(data):
        # initial loc and scale parameters are the mean and standard deviation
        return stats.genextreme.fit(data, loc = data.mean(), scale = data.std())

    @staticmethod
    def _genextreme_ppf(array_cdf, para):
        return stats.genextreme.ppf(array_cdf, **para)

    #%% generalized logistic distribution
    @staticmethod
    def _genlogistic_fit(data):
        return stats.genlogistic.fit(data)

    @staticmethod
    def _genlogistic_ppf(array_cdf, para):
        return stats.genlogistic.ppf(array_cdf, **para)

    #%%
    @classmethod
    def dist_type_check(cls, dist_type: str):
        """
        Validate that dist_type names a supported distribution.

        Args:
            dist_type: distribution name to validate

        Raises:
            AssertionError: if the name is not in dist_list
        """
        assert dist_type in cls.dist_list, 'Please check the name of distribution, {0}'.format(dist_type)

    #%%


def CDF2norm(array_cdf: np.ndarray):
    """
    Map CDF values onto the standard normal distribution — the core step
    of the normal quantile transform (NQT).

    Args:
        array_cdf: array of CDF values in (0, 1).

    Returns:
        Standard normal quantiles corresponding to ``array_cdf``.
    """
    standard_normal = norm(loc = 0, scale = 1)
    return standard_normal.ppf(array_cdf)

# %%


def conditional_distribution(
    corr: float,
    new_x: float,
    censor_p: float,
    new_x_mask: bool,
    rnd_seed: Optional[np.random.Generator] = None,
):
    """
    Draw random samples from the conditional distribution of the transformed
    observation given the transformed raw forecast, under a standard
    bivariate normal dependence model.

    Args:
        corr: the estimated correlation coefficient (expected |corr| <= 1,
            otherwise the conditional scale below is NaN)
        new_x: the transformed raw forecast to be calibrated
        censor_p: CDF value (probability) of the censored threshold
        new_x_mask: True when the raw forecast exceeds the censoring
            threshold, i.e. it is NOT censored
        rnd_seed: random generator; when None, a generator seeded with the
            module-level ``fix_rnd_seed`` is created

    Returns: array of ``n_number`` (module-level config) random numbers from
        the conditional distribution

    """

    # Conditional standard deviation of a bivariate standard normal:
    # sd(Y | X = x) = sqrt(1 - rho^2)
    dis_sigma = np.sqrt(1 - corr ** 2)

    if rnd_seed is None:
        rnd_seed = np.random.default_rng(fix_rnd_seed)

    if new_x_mask:
        # if the raw forecast is larger than censored threshold,
        # the conditional distribution N(corr * new_x, 1 - corr^2)
        # can be sampled directly
        dis_mean = corr * new_x

        return norm.rvs(loc = dis_mean, scale = dis_sigma, size = n_number, random_state = rnd_seed)

    else :
        # if the raw forecast is censored, its exact value is unknown —
        # only that its CDF lies in [0, censor_p); the conditional
        # distribution is therefore obtained by Monte Carlo:
        # draw a plausible forecast CDF, transform it to normal space,
        # then draw from the conditional normal given that value.

        # Samples are uniformly distributed over the half-open interval [low, high)
        # includes low, but excludes high bounds.
        # https://numpy.org/doc/stable/reference/random/generated/numpy.random.uniform.html
        rand_cdf = rnd_seed.uniform(0, censor_p, size = n_number)
        # exact zeros would map to -inf under norm.ppf;
        # if 0 is obtained, it is regarded as the threshold probability censor_p
        rand_cdf[rand_cdf == 0.0] = censor_p

        # NQT: map the sampled CDF values to standard normal space
        rand_cdf_normal = norm.ppf(rand_cdf, loc = 0, scale = 1)

        rand_conditional = np.full(n_number, np.nan)

        # one conditional draw per sampled (censored) forecast value;
        # NOTE(review): drawn one at a time to preserve the exact random
        # stream — do not batch without checking reproducibility
        for i in range(n_number):
            rand_conditional[i] = norm.rvs(
                loc = corr * rand_cdf_normal[i],
                scale = dis_sigma,
                size = 1,
                random_state = rnd_seed
            )

        return rand_conditional


# %%
# This part is for the correlation estimation by censored MLE.

class CorrMLE:

    """
    Estimate the correlation coefficient of a censored bivariate normal
    sample by maximum likelihood.

    Each (x, y) pair falls into one of four censoring categories:
    '11' both uncensored, '10' only y censored, '01' only x censored,
    '00' both censored.  The total log-likelihood is the sum of the
    per-category contributions (``llf_11`` .. ``llf_00``).
    """
    def __init__(self, data_x: np.ndarray, data_y: np.ndarray, mask: list, mu: tuple = (0, 0), sigma: tuple = (1, 1)):
        """

        Args:
            data_x: the normalized forecasts, Noted: data below censoring threshold should be set to censoring threshold
            data_y: the normalized observations, Noted: data below censoring threshold should be set to censoring threshold
            mask: the mask of censoring, shape of (2, n_samples); truthy
                entries mark uncensored values (row 0 for x, row 1 for y)
            mu: specifies mu of multivariate normal distribution
            sigma: specifies sigma of multivariate normal distribution
        """

        self.x = data_x
        self.y = data_y
        # paired samples, dimension: (n, 2)
        self.xy = np.array([data_x, data_y]).T
        self.mu = mu
        self.sigma = sigma

        array_mask = np.array(mask).astype(int)

        # sample indices belonging to each censoring category
        self.ind = {
            # both x and y uncensored (column sum == 2)
            '11': np.where(array_mask.sum(axis=0) > 1)[0],
            # both x and y censored (column sum == 0)
            '00': np.where(array_mask.sum(axis=0) < 1)[0],
            # x uncensored, y censored
            '10': np.where(
                np.logical_and(array_mask[0] == 1, array_mask[1] == 0)
            )[0],
            # x censored, y uncensored
            '01': np.where(
                np.logical_and(array_mask[0] == 0, array_mask[1] == 1)
            )[0],
        }

        # keep only the categories that actually occur in the sample
        self.censor_type = [
            ct for ct in ('11', '00', '10', '01') if len(self.ind[ct]) > 0
        ]

        if len(self.censor_type) < 1:
            # the four categories are exhaustive, so an empty list means
            # the mask itself is malformed (e.g. zero samples)
            print(array_mask)
            likelihood_error()

    def _bivariate(self, corr: float):
        """Frozen bivariate normal for the given correlation coefficient."""
        cov_xy = np.prod([corr, *self.sigma])
        return multivariate_normal(
            mean = self.mu,
            cov = [
                [self.sigma[0] ** 2, cov_xy],
                [cov_xy, self.sigma[1] ** 2],
            ],
        )

    def estimate_corr_brent(self, tol: float = 1e-5, bound: tuple = (-1, 1)):
        """
        Estimate the correlation coefficient by scipy.optimizer, Brent algorithm
        (bounded minimization of the negative log-likelihood).

        Args:
            tol: absolute tolerance for convergence
            bound: bound of the correlation coefficient

        Returns:
            scipy.optimize.OptimizeResult; the estimate is in ``.x``

        """
        return optimize.minimize_scalar(
            self.compute_llf,
            bounds = bound,
            method = 'Bounded',
            options = dict(xatol = tol),
        )

    def compute_llf(self, corr: float) -> float:
        """
        Negative total log-likelihood for the given correlation, summed
        over the censoring categories present in the sample.
        """
        llf = 0.
        for ci in self.censor_type:
            # dispatch to the matching llf_11/llf_10/llf_01/llf_00 term
            llf += getattr(self, f'llf_{ci}')(corr)

        # negated so that minimization maximizes the likelihood
        return -llf

    def llf_11(self, corr: float) -> float:
        """
        define the likelihood function at the first condition
        in which both x and y are higher than censored threshold:
        the bivariate normal log-PDF.

        Args:
            corr (float): correlation coefficient

        Returns:
            float: log value of the likelihood function
        """
        return np.sum(self._bivariate(corr).logpdf(self.xy[self.ind['11'], :]))

    def llf_10(self, corr: float) -> float:
        """
        define the likelihood function at the second condition
        in which x is higher than censored threshold while y is lower:
        marginal log-PDF of x plus the conditional log-CDF of y given x.

        Args:
            corr (float): correlation coefficient

        Returns:
            float: log value of the likelihood function
        """
        tmp_xy = self.xy[self.ind['10'], :]

        logpdf_x = norm.logpdf(
            tmp_xy[:, 0],
            loc = self.mu[0],
            scale = self.sigma[0]
        )

        # conditional distribution of y | x for a bivariate normal,
        # evaluated for all samples at once
        cond_loc = self.mu[1] + corr * self.sigma[1] / self.sigma[0] * (tmp_xy[:, 0] - self.mu[0])
        cond_scale = np.sqrt(1 - corr ** 2) * self.sigma[1]
        logcdf_y = norm.logcdf(tmp_xy[:, 1], loc = cond_loc, scale = cond_scale)

        return np.sum(logpdf_x) + np.sum(logcdf_y)

    def llf_01(self, corr: float) -> float:
        """
        define the likelihood function at the third condition
        in which y is higher than censored threshold while x is lower:
        marginal log-PDF of y plus the conditional log-CDF of x given y.

        Args:
            corr (float): correlation coefficient

        Returns:
            float: log value of the likelihood function
        """
        tmp_xy = self.xy[self.ind['01'], :]

        logpdf_y = norm.logpdf(
            tmp_xy[:, 1],
            loc = self.mu[1],
            scale = self.sigma[1]
        )

        # conditional distribution of x | y for a bivariate normal,
        # evaluated for all samples at once
        cond_loc = self.mu[0] + corr * self.sigma[0] / self.sigma[1] * (tmp_xy[:, 1] - self.mu[1])
        cond_scale = np.sqrt(1 - corr ** 2) * self.sigma[0]
        logcdf_x = norm.logcdf(tmp_xy[:, 0], loc = cond_loc, scale = cond_scale)

        return np.sum(logpdf_y) + np.sum(logcdf_x)

    def llf_00(self, corr: float) -> float:
        """
        define the likelihood function at the fourth condition
        in which both x and y are lower than censored threshold:
        the bivariate normal log-CDF.

        Args:
            corr (float): correlation coefficient

        Returns:
            float: log value of the likelihood function
        """
        return np.sum(self._bivariate(corr).logcdf(self.xy[self.ind['00'], :]))


# %%

def judge_CDF(array_CDF) :
    """
    Validate CDF values produced by a fitted distribution.

    Raises (via ``raiseCDFError``) when any value is NaN, exactly 1.0,
    or exactly 0.0 — all of which break the subsequent normal quantile
    transform (norm.ppf maps 0/1 to +-inf).

    Args:
        array_CDF: array-like of CDF values (any shape, including scalar
            0-d arrays and empty arrays)
    """
    # np.any handles every input shape uniformly; the original
    # len()-based branching failed for 0-d and empty arrays
    cond1 = bool(np.any(np.isnan(array_CDF)))
    cond2 = bool(np.any(array_CDF == 1.0))
    cond3 = bool(np.any(array_CDF == 0.0))

    if cond1 or cond2 or cond3 :
        raiseCDFError(cond1, cond2, cond3)


class UnknowDistType(Exception) :
    """Raised when an unsupported distribution type is requested."""


class DistCDFError0(Exception) :
    """Raised when a fitted-distribution CDF evaluates to exactly 0."""


class DistCDFError1(Exception) :
    """Raised when a fitted-distribution CDF evaluates to exactly 1."""


class DistCDFErrorNan(Exception) :
    """Raised when a fitted-distribution CDF evaluates to NaN."""


class SampleConstantError(Exception) :
    """Exception named for constant-sample conditions (raised elsewhere in the module)."""


class BeyondDistBound(Warning) :
    """Warning category for values beyond the fitted distribution bounds."""


class ImperfectDist(Warning) :
    """Warning category for an imperfectly fitted distribution."""


class MaskError(Exception) :
    """Raised when the censoring mask is inconsistent."""


class CensoredWarning(Warning) :
    """Warning category related to censoring."""


def raiseCDFError(cond1, cond2, cond3) :
    """
    Raise the exception matching the first true CDF condition.

    Args:
        cond1: True when the CDF contains NaN
        cond2: True when the CDF contains exactly 1
        cond3: True when the CDF contains exactly 0
    """
    checks = (
        (cond1, DistCDFErrorNan),
        (cond2, DistCDFError1),
        (cond3, DistCDFError0),
    )
    for flag, error in checks :
        if flag :
            raise error()


def dist_error() :
    """Signal an unsupported distribution type."""
    raise UnknowDistType('The type of distribution is not available.')


def likelihood_error() :
    """Signal an inconsistent censoring mask."""
    raise MaskError('There is mistake in the mask.')