from pathlib import Path
import warnings
from typing import Union

import numpy as np
from statsmodels.distributions.empirical_distribution import ECDF
from matplotlib import colorbar, gridspec, colors
from matplotlib import pyplot as plt
# from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
from PIL import Image

#%% verification metrics
class Verify:
    """
    Verify ensemble forecasts against observations.

    On construction the paired data are cleaned: any pair where the
    observation is NaN, or where the whole forecast (all members for
    2-D input) is NaN, is dropped before metrics are computed.
    """

    def __init__(self, ens: np.ndarray, obs: np.ndarray):
        """
        Validate shapes and drop invalid forecast/observation pairs.
        Args:
            ens: ensemble forecasts (n, m) or deterministic forecasts (n,)
            obs: observations (n,)
        """
        assert obs.ndim == 1 and np.isin(ens.ndim, [1, 2]), \
            'Check the dimension of ens {0} and obs {1}'.format(ens.ndim, obs.ndim)
        assert len(ens) == len(obs), 'Check the length of ens {0} and obs {1}'.format(len(ens), len(obs))

        # a pair is invalid when the observation is NaN, or when the
        # forecast is entirely NaN (all members for 2-D input)
        if ens.ndim == 2:
            invalid_fcst = np.isnan(ens).all(axis=1)
        else:
            invalid_fcst = np.isnan(ens)

        self.mask = invalid_fcst | np.isnan(obs)
        if self.mask.any():
            warnings.warn(
                'There are invalid pairs of forecasts and observations {0}'.format(
                    self.mask
                ),
                NaNWarning
            )

        self.ens = ens[~self.mask]
        self.obs = obs[~self.mask]

    def easy_metrics(self, metrics: list, **kwargs) -> list:
        """
        Evaluate several metrics by name.

        Each name is looked up among the module-level metric functions;
        an unknown name or incompatible arguments yields np.nan for that
        entry instead of raising.
        Args:
            metrics: list of metric function names
            **kwargs: forwarded to every metric function
        Returns:
            list of metric values (np.nan where evaluation failed)
        """
        results = []
        for name in metrics:
            try:
                value = globals()[name](ens=self.ens, obs=self.obs, **kwargs)
            except KeyError as ke:
                # no metric function with that name at module level
                print(ke, 'There is no metrics named {0}'.format(name))
                value = np.nan
            except TypeError as te:
                # metric exists but rejected the given arguments
                print(te)
                value = np.nan
            results.append(value)

        return results


# %% Metrics for ensemble forecasts
def crps(ens: np.ndarray, obs: np.ndarray, **kwargs) -> np.ndarray:
    """
    Continuous ranked probability score (CRPS).

    For 2-D input each row is an ensemble and the score is obtained by
    numerical integration (crps_i); for 1-D (deterministic) input the
    CRPS reduces to the absolute error (MAE per pair).
    Args:
        ens: ensemble forecasts (n, m) or deterministic forecasts (n,)
        obs: observation (n)

    Returns:
        array of CRPS values, one per forecast/observation pair
    """
    assert len(ens) == len(obs)

    if ens.ndim == 2:
        # ensemble forecasts: integrate row by row
        scores = [crps_i(member_row, obs_value) for member_row, obs_value in zip(ens, obs)]
    elif ens.ndim == 1:
        # deterministic forecasts: CRPS degenerates to the absolute error
        scores = np.abs(ens - obs)
    else:
        raise InputDimensionError(
            'Check the dimension of inputs, {0} for ens and {1} for obs'.format(ens.ndim, obs.ndim)
        )

    return np.array(scores)


def crps_i(ens_i, obs_i):
    """
    CRPS of one ensemble forecast by numerical integration of
    pow(F - 1(y-x), 2) * delta over the sorted members.
    Args:
        ens_i: ensemble forecast array, (m)
        obs_i: observation, float

    Returns: the CRPS value (np.nan when it cannot be computed)

    """
    # drop missing members before integrating
    ens_i = ens_i[~np.isnan(ens_i)]
    members = np.sort(ens_i)
    n = len(members)

    if np.isnan(obs_i) or n < 1:  # negative observations are allowed
        warnings.warn(
            'The CRPS is NaN due to nan obs {0}, negative obs {1}, no forecasts {2}'.format(
                np.isnan(obs_i), obs_i < 0.0, n < 1
            ),
            NaNWarning
        )
        return np.nan

    total = 0.0
    # integrate between consecutive ensemble members
    for k in range(n - 1):
        lo, hi = members[k], members[k + 1]
        p = (k + 1) / n
        if obs_i < lo:
            # interval entirely above the observation
            total += pow(p - 1.0, 2) * (hi - lo)
        elif obs_i > hi:
            # interval entirely below the observation
            total += pow(p - 0.0, 2) * (hi - lo)
        else:
            # observation falls inside this interval: split at obs_i
            total += pow(p - 1.0, 2) * (hi - obs_i) + pow(p - 0.0, 2) * (obs_i - lo)

    # contribution below the smallest member
    if obs_i < members[0]:
        total += pow(0.0 - 1.0, 2) * (members[0] - obs_i)
    # contribution above the largest member
    if obs_i > members[-1]:
        total += pow(1.0 - 0.0, 2) * (obs_i - members[-1])

    return total

def crps_vectorized(
    forecasts,
    obs,
    dim=0,
):
    """
    Calculates the CRPS probabilistic metric given forecast ensembles.

    It is computed as:

    .. math::
        \\text{CRPS}(F, y) = \\int_{-\\infty}^{\\infty} \\left[F(x) - H(x - y)\\right]^2 \\, dF(x)

    where:
    - :math:`F` is the cumulative distribution function (CDF) of the forecast.
    - :math:`y` is the observed value.
    - :math:`H` is the Heaviside step function.

    Args:
        forecasts : Forecast or simulated variables; the ensemble-member
            axis is the one given by ``dim`` (default 0), the remaining
            axis indexes the events
        obs : Observed variables (scalar or (num_events,))
        dim: dimension along which the empirical CDF is computed

    Returns:
       Vector of crps values

    Notes
    Stats are calculated only from values for which both observations and
    simulations are not null values.

    References:
    Hersbach, H. (2000). Decomposition of the continuous ranked probability score for
    ensemble prediction systems. Weather and Forecasting, 15(5), 559-570.
    #https://docs.nvidia.com/deeplearning/modulus/modulus-core/_modules/modulus/metrics/general/crps.html # pylint: disable=line-too-long
    """
    # move the ensemble-member axis to the front so members index axis 0
    forecasts = np.moveaxis(forecasts, dim, 0)
    n = forecasts.shape[0]
    forecasts = np.sort(forecasts, axis=0)
    # fix: accumulate in float -- np.zeros_like(obs) inherits the dtype of
    # obs, so integer observations made every `ans += <float>` below fail
    # with a casting error (and would truncate if it did not)
    ans = np.zeros(np.shape(obs), dtype=float)

    # dx [F(x) - H(x-y)]^2 = dx [0 - 1]^2 = dx
    # contribution below the smallest member
    val = forecasts[0, :] - obs
    ans += np.maximum(val, 0.0)

    # integrate between consecutive sorted members
    for i in range(n - 1):
        x0 = forecasts[i, :]
        x1 = forecasts[i + 1, :]

        cdf = (i + 1) / n

        # a. case y < x0: interval entirely above the observation
        val = (x1 - x0) * (cdf - 1) ** 2
        mask = obs < x0
        ans += val * mask

        # b. case x0 <= y <= x1: split the interval at the observation
        val = (obs - x0) * cdf**2 + (x1 - obs) * (cdf - 1) ** 2
        mask = (obs >= x0) & (obs <= x1)
        ans += val * mask

        # c. case x1 < y: interval entirely below the observation
        mask = obs > x1
        val = (x1 - x0) * cdf**2
        ans += val * mask

    # dx [F(x) - H(x-y)]^2 = dx [1 - 0]^2 = dx
    # contribution above the largest member
    val = obs - forecasts[-1, :]
    ans += np.maximum(val, 0.0)
    return ans

# Continuous ranked probability score (CRPS) and its decomposition
def fCRPS(d_obs, d_fcst):
    """
    Compute the CRPS and decompose it into reliability, potential CRPS,
    uncertainty and resolution, following Hersbach (2000),
    "Decomposition of the continuous ranked probability score for
    ensemble prediction systems".
    Args:
        d_obs: the observation data, (n,)
        d_fcst: the ensemble forecast data, (n, m)
    Returns:
        dict with keys CRPS, CRPS_reli, CRPS_pot, CRPS_u, CRPS_res
    """
    d_obs = np.array(d_obs)
    d_fcst = np.array(d_fcst)
    # the observations themselves serve as climatology for the
    # uncertainty term
    clim_obs = d_obs

    # pooled bin widths and observed frequencies per bin
    g_bar, o_bar = fCRPS_abgo(d_obs, d_fcst)

    n_bins = len(g_bar)
    reli = 0.0
    pot = 0.0
    for k in range(n_bins):
        reli += g_bar[k] * pow(o_bar[k] - k / (n_bins - 1), 2)
        pot += g_bar[k] * o_bar[k] * (1 - o_bar[k])

    unc = fCRPS_u(clim_obs)

    return {
        "CRPS": reli + pot,
        "CRPS_reli": reli,
        "CRPS_pot": pot,
        "CRPS_u": unc,
        "CRPS_res": unc - pot,
    }

def fCRPS_u(clim_obs):
    """
    Uncertainty component of the CRPS (Hersbach 2000), derived from the
    climatological distribution of the observations.
    Args:
        clim_obs: historical observations used as climatology
    Returns:
        the uncertainty term
    """
    n = len(clim_obs)
    ranked = np.sort(clim_obs)
    # plotting levels i/n for i = 1 .. n-1
    levels = np.arange(1, n) / n
    # sum of p * (1 - p) weighted by the gaps between sorted observations
    return np.sum(levels * (1 - levels) * np.diff(ranked))

def fCRPS_clim(clim_obs, nyr):
    """
    CRPS of the climatology forecast under leave-nyr-years-out cross
    validation: each year is "forecast" by the other observations.
    Args:
        clim_obs: historical observations, (n,)
        nyr: number of years left out for each pseudo-forecast
    Returns:
        the climatology CRPS
    """
    n = len(clim_obs)

    # pseudo ensemble: for year i, use the n - nyr observations that are
    # not held out (circular indexing over the record)
    fcst = np.empty((n, n - nyr))
    base = np.arange(nyr, n)
    for i in range(n):
        fcst[i, :] = clim_obs[(i + base) % n]

    return fCRPS(clim_obs, fcst)["CRPS"]

def fCRPS_abgo(d_obs, d_fcst):
    """
    Derive g_bar and o_bar for the Hersbach (2000) CRPS decomposition.

    For each observation/ensemble pair, every inter-member bin is split
    into the width below the observation (a) and the width above it (b).
    Averaging over all pairs gives a_bar and b_bar, from which the pooled
    bin width g_bar and the observed frequency o_bar follow.
    Args:
        d_obs: the observation data, (n,)
        d_fcst: the ensemble forecast data, (n, m)
    Returns:
        (g_bar, o_bar): tuple of arrays of length m + 1; indexes 0 and -1
        are the outlier bins below / above the whole ensemble
    """
    
    # NOTE(review): exit() in library code terminates the whole process;
    # raising an exception would let callers recover — consider changing.
    if(len(d_obs) != d_fcst.shape[0]):
        print('The numbers of observations and ensemble forecast are not equal')
        print('Please check')
        exit()
    else:
        print('There are', len(d_obs), 'observations and', 
              d_fcst.shape[0], 'ensemble forecasts')

    # per-pair bin widths below (a) and above (b) the observation;
    # columns 0 and m are the outlier bins outside the ensemble range
    a = np.zeros((d_fcst.shape[0], d_fcst.shape[1] + 1))
    b = np.zeros((d_fcst.shape[0], d_fcst.shape[1] + 1))
    # indicators: observation below the smallest / below the largest member
    h0 = np.zeros(d_fcst.shape[0])
    hN = np.zeros(d_fcst.shape[0])
    #g = np.zeros((d_fcst.shape[0], d_fcst.shape[1] + 1))
    #o = np.zeros((d_fcst.shape[0], d_fcst.shape[1] + 1))
    
    # To determine a and b
    for i in np.arange(d_fcst.shape[0]):
        temp_obs = d_obs[i]
        temp_fcst = d_fcst[i, :]
        temp_fcst = np.sort(temp_fcst)
        
        if(temp_obs < temp_fcst[0]):
            # observation below the whole ensemble: every bin lies above it
            # a[i,:] = 0
            b[i, 0] = temp_fcst[0] - temp_obs
            for j in np.arange(1, d_fcst.shape[1]):
                b[i, j] = temp_fcst[j] - temp_fcst[j-1]
        elif(temp_obs > temp_fcst[-1]):
            # observation above the whole ensemble: every bin lies below it
            # b[i,:] = 0
            a[i, d_fcst.shape[1]] = temp_obs - temp_fcst[-1]
            for j in np.arange(1, d_fcst.shape[1]):
                a[i, j] = temp_fcst[j] - temp_fcst[j-1]
        else:
            # observation inside the ensemble range: bins below it are
            # all-a, bins above it are all-b, the containing bin is split
            for j in np.arange(1, d_fcst.shape[1]):
                if(temp_obs > temp_fcst[j]):
                    a[i, j] = temp_fcst[j] - temp_fcst[j-1]
                    b[i, j] = 0
                elif(temp_obs < temp_fcst[j-1]):
                    a[i, j] = 0
                    b[i, j] = temp_fcst[j] - temp_fcst[j-1]
                else:
                    a[i, j] = temp_obs - temp_fcst[j-1]
                    b[i, j] = temp_fcst[j] - temp_obs
        
        # Heaviside function
        if(temp_obs < temp_fcst[0]):
            h0[i] = 1
        if(temp_obs < temp_fcst[-1]):
            hN[i] = 1
    
    # To determine a_bar and b_bar (widths averaged over all pairs)
    a_bar = np.mean(a, 
                    axis = 0)
    b_bar = np.mean(b, 
                    axis = 0)
    
    # To determine g_bar and o_bar for the interior bins
    g_bar = np.zeros(d_fcst.shape[1] + 1)
    o_bar = np.zeros(d_fcst.shape[1] + 1)
    
    for i in np.arange(1, d_fcst.shape[1]):
        g_bar[i] = a_bar[i] + b_bar[i]
        if(g_bar[i] == 0):
            o_bar[i] = 0
        else:
            o_bar[i] = b_bar[i] / g_bar[i]
    
    # outlier bin below the ensemble: o_bar is the frequency of
    # observations falling below the smallest member
    o_bar[0] = np.sum(h0) / len(h0)
    if(o_bar[0] == 0):
        g_bar[0] = 0
    else:
        g_bar[0] = b_bar[0] / o_bar[0]
    
    # outlier bin above the ensemble
    o_bar[-1] = np.sum(hN) / len(hN)
    if(o_bar[-1] == 1):
        g_bar[-1] = 0
    else:
        g_bar[-1] = a_bar[-1] / (1 - o_bar[-1])
        
    #if(np.sum(b[:,0] > 0) > 0):
    #    o_bar[0] = np.sum(b[:,0] > 0) / len(d_obs)
    #    g_bar[0] = b_bar[0] / o_bar[0]
    #
    #if(np.sum(a[:,-1] > 0) > 0):
    #    o_bar[-1] = 1 - (np.sum(a[:,-1] > 0) / len(d_obs))
    #    g_bar[-1] = a_bar[-1] / (1 - o_bar[-1])
        
    return (g_bar, o_bar)


def skill(ens: np.ndarray, obs: np.ndarray, clim: np.ndarray, skill_type: str = 'crps', **kwargs) -> np.ndarray:
    """
    Skill score of the forecasts relative to climatology, in percent.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n,)
        clim: climatology forecasts, (n, m)
        skill_type: name of the module-level metric used for the score
        **kwargs: forwarded to the metric function
    Returns:
        percentage improvement of the forecast metric over climatology
    """
    # look up the metric function by name among the module-level metrics
    metric_fn = globals()[skill_type]
    fcst_score = metric_fn(ens=ens, obs=obs, **kwargs)
    clim_score = metric_fn(ens=clim, obs=obs, **kwargs)

    return (clim_score.mean() - fcst_score.mean()) / clim_score.mean() * 100.


def interp_CDF(pdataset: np.ndarray, pdatapoint: float) -> float:
    """
    Calculate the ECDF value of the observation point within the
    ensemble forecasts.

    The sorted ensemble is given Weibull plotting positions
    i / (m + 1); the observation's cumulative probability is linearly
    interpolated between the bracketing members. Points below the
    smallest member return 0.0 and points at or above the largest
    member return 1.0.
    Args:
        pdataset: ensemble forecasts for one point, (m)
        pdatapoint: observation, (1)

    Returns:
        cumulative probability in [0, 1], or np.nan when there is no
        valid (non-NaN) forecast
    """

    # delete the np.nan in the array
    pdataset = pdataset[~np.isnan(pdataset)]
    # no valid members left: the ECDF is undefined
    if len(pdataset) < 1:
        return np.nan

    if not np.isscalar(pdataset) :
        # To sort the data
        seq1 = np.sort(pdataset)
        # The cumulative probability (Weibull plotting positions)
        seq1p = np.arange(1, 1 + len(pdataset)) / (1.0 + float(len(pdataset)))
        # Interpolation: walk up the sorted members until the
        # observation is bracketed
        i = 0
        tick = True
        if (pdatapoint < seq1[0]) :
            # below the ensemble range
            prob = 0.0
            tick = False
        elif (pdatapoint >= seq1[-1]) : # Note: add equal sign in 2023-07-24
            # at or above the ensemble range
            prob = 1.0
            tick = False

        while tick :
            if ((i + 1) < (len(pdataset) - 1)) :
                if (pdatapoint == seq1[i]) :
                    if ((pdatapoint == seq1[i + 1])) :
                        # tied members: advance to the last duplicate so
                        # the highest plotting position is used
                        i = i + 1
                        tick = True
                    else :
                        # exact tie: take the plotting position directly
                        prob = seq1p[i]
                        tick = False
                elif (pdatapoint < seq1[i + 1]) :
                    # bracketed by members i and i+1: linear interpolation
                    prob = seq1p[i] + (seq1p[i + 1] - seq1p[i]) * (pdatapoint - seq1[i]) / (seq1[i + 1] - seq1[i])
                    tick = False
                else :
                    i = i + 1
                    tick = True
            else :
                # last interior interval: interpolate between the final
                # two members
                prob = seq1p[i] + (seq1p[i + 1] - seq1p[i]) * (pdatapoint - seq1[i]) / (seq1[i + 1] - seq1[i])
                tick = False
    else :
        # NOTE(review): this branch looks unreachable — pdataset has been
        # boolean-indexed above, which requires an array input; kept as a
        # safeguard. Verify against callers before removing.
        # Empirical CDF (degenerate single-value case)
        if (pdataset >= pdatapoint) :
            prob = 0.0
        else :
            prob = 1.0

    # To return the result
    return prob


def pseudo_pit(censor_p: np.ndarray, seed: int) -> np.ndarray:
    """
    Generate pseudo PIT values for censored data: one uniform variate
    on [0, censor_p) per element.
    Args:
        censor_p: upper limit (cumulative probability) of p for censored data
        seed: random seed, for reproducibility

    Returns:
        array of pseudo PIT values, same length as censor_p
    """
    rng = np.random.default_rng(seed=int(seed))
    # scaling a U[0, 1) sample by censor_p yields U[0, censor_p)
    return rng.random(size=len(censor_p)) * censor_p



def cal_pit(ens: np.ndarray, obs: np.ndarray, censor_value=None, seed: int = 5, **kwargs) -> tuple:
    """
    Calculate the PIT for each pair of forecasts and observations.
    Args:
        ens: ensemble forecast, (n, m)
        obs: observation, (n)
        censor_value: censoring threshold, None or array (2,), for forecasts and observations
        seed: random seed for the pseudo PIT of censored data

    Returns: tuple
        (pit values, uniform variate, boolean array marking censored observations)

    """
    # ensemble forecasts only
    if ens.ndim != 2 or obs.ndim != 1:
        raise InputDimensionError(
            'Check the dimension of inputs, {0} for ens and {1} for obs'.format(ens.ndim, obs.ndim)
        )

    n = len(obs)
    # uniform variate (Weibull plotting positions) as the reference
    uniform = np.arange(1, n + 1) / (1.0 + n)

    # by default, data censoring is not considered
    if censor_value is None:
        censored = np.zeros(obs.shape, dtype=bool)
    else:
        censored = obs <= censor_value[1]

    pit_set = np.full(n, np.nan)

    # censored observations receive a pseudo PIT drawn below the CDF of
    # the censoring threshold
    if censored.any():
        below_threshold = ens <= censor_value[0]
        # CDF of the censored value in each year by Weibull plotting position
        censor_cdf = np.sum(below_threshold, axis=1) / (1. + np.sum(~np.isnan(ens), axis=1))
        pit_set[censored] = pseudo_pit(censor_cdf[censored], seed=seed)

    # uncensored pairs: interpolate the empirical CDF at the observation
    for idx in np.nonzero(~censored)[0]:
        pit_set[idx] = interp_CDF(ens[idx, :], obs[idx])

    if np.isnan(pit_set).any():
        print('Note: there are nan in pit_set')

    # the censored mask is returned so pseudo PIT values can be marked
    # independently in PIT plots (usable together with np.argsort)
    return pit_set, uniform, censored


def alpha_index(ens: np.ndarray, obs: np.ndarray, censor_value=None, seed: int = 5, **kwargs) -> float:
    """
    Calculate the alpha index of reliability from the PIT values.
    Args (the same as pit):
        ens: ensemble forecast, (n, m)
        obs: observation, (n)
        censor_value: censoring threshold, None or array (2,), for forecasts and observations
        seed: random seed
        **kwargs:

    Returns: alpha index (clipped at 0)
    """
    pit_set, uniform, _ = cal_pit(ens, obs, censor_value, seed)

    # mean absolute deviation of the sorted PIT from the uniform variate
    deviation = np.mean(np.abs(np.sort(pit_set) - np.sort(uniform)))
    alpha = 1.0 - 2.0 * deviation

    if alpha < 0:
        warnings.warn('Negative alpha index, please check.')
        alpha = 0.

    return alpha


def pit(ens: np.ndarray, obs: np.ndarray, censor_value=None, seed: int = 5, **kwargs) -> np.ndarray:
    """
    Probability integral transform (PIT) of each forecast/observation pair.
    Args:
        ens: ensemble forecast, (n, m)
        obs: observation, (n)
        censor_value: censoring threshold, None or array (2,), for forecasts and observations
        seed: random seed for the pseudo PIT of censored data
    Returns:
        array of PIT values, (n,)
    """
    # fix: the return annotation previously said float, but cal_pit
    # returns the full array of PIT values
    pit_set, _, _ = cal_pit(ens, obs, censor_value, seed)

    return pit_set


def iqr(ens, quantile=(5, 95), **kwargs) :
    """
    Mean inter-quantile range (IQR) of the ensemble forecasts.
    Args:
        ens: ensemble forecast (n, m)
        quantile: pair of percentiles (in [0, 100]) bounding the range

    Returns:
        the quantile range averaged over all forecasts
    """
    assert len(quantile) == 2, 'There must be two quantiles, but {0} given.'.format(len(quantile))

    # sort the two percentiles so lower/upper unpack in order,
    # NaN members are ignored
    lower, upper = np.nanpercentile(ens, q=np.sort(quantile), axis=1)

    return np.mean(upper - lower)


#%% Metrics for deterministic forecasts

def check_deterministic(ens: np.ndarray, deterministic_method='mean') -> np.ndarray:
    """
    Reduce ensemble forecasts to deterministic forecasts.

    2-D input is collapsed along the member axis with the NaN-aware
    numpy reduction ('mean' -> np.nanmean, 'median' -> np.nanmedian, ...);
    1-D input is assumed deterministic already and returned as a copy.
    Args:
        ens: ensemble forecasts (n, m) or deterministic forecasts (n,)
        deterministic_method: name of the numpy reduction, without the 'nan' prefix

    Returns:
        deterministic forecasts, (n,)

    Raises:
        AttributeError: if numpy has no 'nan<deterministic_method>' reduction
        InputDimensionError: if ens is neither 1-D nor 2-D
    """
    if ens.ndim == 2:

        try:
            return getattr(np, 'nan' + deterministic_method)(ens, axis = 1)
        except AttributeError as ae:
            # fix: chain the original lookup failure so the cause is
            # preserved in the traceback
            raise AttributeError('The method {0} is not supported.'.format(deterministic_method)) from ae

    elif ens.ndim == 1:
        return ens.copy()
    else:
        raise InputDimensionError(
            'Check the dimension of inputs, {0} for ens'.format(ens.ndim)
        )

def rb(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Relative bias of the forecasts, in percent of the observed total.
    Args:
        ens: ensemble forecast (n, m) or can be deterministic forecasts (n, )
        obs: observations (n)

    Returns:
        relative bias in percent, or np.nan when undefined
    """
    assert len(ens) == len(obs)
    assert obs.ndim == 1

    # the ratio is undefined for a zero observed total
    if np.sum(obs) == 0.0:
        warnings.warn(
            'Observation mean is zero so relative bias has no definition.',
            CalculatationWarning
        )
        return np.nan

    deterministic = check_deterministic(ens, deterministic_method)
    obs_total = obs.sum()

    return (deterministic.sum() - obs_total) / obs_total * 100.


def bias(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> np.ndarray:
    """
    Mean (absolute-amount) bias of the forecasts.

    Useful when the amounts are relatively small, for example hourly or
    daily precipitation, where a relative bias would be unstable.
    Args:
        ens: ensemble forecasts, 2D, (n, m)
        obs: observations, 1D, (n)

    Returns:
        mean of the forecast-minus-observation differences
    """
    assert len(ens) == len(obs)
    assert obs.ndim == 1

    deterministic = check_deterministic(ens, deterministic_method)

    return (deterministic - obs).mean()


def pcc(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Pearson correlation coefficient between forecasts and observations.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the Pearson correlation coefficient
    """
    from scipy.stats import pearsonr

    assert len(ens) == len(obs)
    assert obs.ndim == 1

    deterministic = check_deterministic(ens, deterministic_method)

    # pearsonr returns (statistic, p-value); keep only the statistic
    corr, _ = pearsonr(deterministic, obs)
    return corr


def scc(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Spearman (rank) correlation coefficient between forecasts and observations.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the Spearman correlation coefficient
    """
    from scipy.stats import spearmanr

    assert len(ens) == len(obs)
    assert obs.ndim == 1

    deterministic = check_deterministic(ens, deterministic_method)

    # NaNs are omitted pairwise; keep only the statistic
    rho, _ = spearmanr(deterministic, obs, nan_policy='omit')
    return rho

def ktau(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Kendall tau (rank) correlation coefficient between forecasts and observations.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the Kendall tau correlation coefficient
    """
    from scipy.stats import kendalltau

    assert len(ens) == len(obs)
    assert obs.ndim == 1

    deterministic = check_deterministic(ens, deterministic_method)

    # NaNs are omitted; keep only the statistic
    tau, _ = kendalltau(deterministic, obs, nan_policy='omit')
    return tau


def rmse(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Root mean square error of the (deterministic) forecasts.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the RMSE value
    """
    # square root of the mean square error
    return mse(ens, obs, deterministic_method) ** 0.5


def nrmse(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    RMSE normalized by the observation mean.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        normalized RMSE, or np.nan when the observation mean is zero
    """
    # normalization is undefined for a zero observation mean
    if np.sum(obs) == 0.0:
        warnings.warn(
            'Observation mean is zero so nrmse has no definition.',
            CalculatationWarning
        )
        return np.nan

    return rmse(ens, obs, deterministic_method) / np.mean(obs)


def mae(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> np.ndarray:
    """
    Calculate mean absolute error (MAE)
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the MAE value
    """
    assert len(ens) == len(obs)
    # fix: validate the observation dimension, consistent with the other
    # deterministic metrics (mse, bias, pcc, ...)
    assert obs.ndim == 1

    _ens = check_deterministic(ens, deterministic_method)

    return np.mean(np.abs(_ens - obs))


def mse(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> np.ndarray:
    """
    Mean square error of the (deterministic) forecasts.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the MSE value
    """
    assert len(ens) == len(obs)
    assert obs.ndim == 1

    deterministic = check_deterministic(ens, deterministic_method)
    residual = deterministic - obs

    return np.mean(residual * residual)


def nmse(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Mean square error skill score (MSE normalized by observation variance).
    References:
        Evaluation of the skill of North-American Multi-Model Ensemble (NMME) Global Climate Models in predicting
        average and extreme precipitation and temperature over the continental USA
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the skill score, or np.nan when the observation variance is zero
    """
    obs_var = np.var(obs)
    # normalization is undefined for constant observations
    if obs_var == 0:
        warnings.warn(
            'Observation variance is zero so nmse has no definition.',
            CalculatationWarning
        )
        return np.nan

    return 1 - mse(ens, obs, deterministic_method) / obs_var


def nse(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Calculate the NSE (Nash-Sutcliffe efficiency) for the forecast.

    References:
        Decomposition of the mean squared error and NSE performance criteria: Implications for
        improving hydrological modelling, 2009

    Args:
        ens: ensemble or deterministic forecasts
        obs: observations

    Returns:
        the NSE value, or np.nan when the observation variance is zero
    """
    obs_var = np.var(obs)
    # fix: guard against zero observation variance (division by zero),
    # consistent with the guard in nmse
    if obs_var == 0:
        warnings.warn(
            'Observation variance is zero so nse has no definition.',
            CalculatationWarning
        )
        return np.nan

    return 1.0 - mse(ens, obs, deterministic_method) / obs_var


def kge(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Kling-Gupta efficiency; the coefficient of variation is used for the
    non-dimensional variability ratio (gamma).
    Reference:
        (1) Global evaluation of seasonal precipitation and temperature forecasts from NMME, 2020
        (2) Runoff conditions in the upper Danube basin under an ensemble of climate change scenarios
    -------------------------------------------------------------------------------------------------
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the KGE value
    """
    assert len(ens) == len(obs)

    det = check_deterministic(ens, deterministic_method)

    # three components: correlation, bias ratio and CV ratio
    r = pcc(det, obs)
    beta = det.mean() / obs.mean()
    gamma = (det.std() / det.mean()) / (obs.std() / obs.mean())

    return 1 - np.sqrt((r - 1) ** 2 + (beta - 1) ** 2 + (gamma - 1) ** 2)


def npe(ens: np.ndarray, obs: np.ndarray, deterministic_method='mean', **kwargs) -> float:
    """
    Nonparametric Kling-Gupta efficiency.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)

    Returns:
        the nonparametric efficiency value
    """
    assert len(ens) == len(obs)

    det = check_deterministic(ens, deterministic_method)

    beta = np.mean(det) / np.mean(obs)
    # variability term from the overlap of the normalized, sorted series
    fdc_gap = np.abs(np.sort(det) / det.mean() - np.sort(obs) / obs.mean())
    epsilon = 1 - 0.5 * np.sum(fdc_gap / len(obs))
    # rank correlation component
    r = scc(det, obs)

    return 1.0 - np.sqrt((beta - 1) ** 2 + (epsilon - 1) ** 2 + (r - 1) ** 2)


#%% Categorical metrics

def bs(ens: np.ndarray, obs: np.ndarray, threshold=None, event: str = 'right', **kwargs) -> np.ndarray:
    """
    Brier score (BS).
    Note:
        the function only for ensemble forecasts or deterministic forecasts;
        if probabilistic forecasts are given, bs is equal to mse of probability
    -------------------------------------------------------------------------------
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n)
        threshold: the threshold to define the event (default: observation median)
        event: string, right (left) mean greater (smaller) than threshold

    Returns:
        the Brier score
    """
    if threshold is None:
        threshold = np.median(obs)

    assert len(ens) == len(obs)

    # forecast probabilities of the event and binary observed outcomes
    fcst_prob = ens2p(ens, threshold, event)
    obs_event = value2bool(obs, threshold, event=event)

    # the Brier score is the MSE of the event probability
    return mse(fcst_prob, obs_event)


def bs_decomposition(ens: np.ndarray, obs: np.ndarray, threshold: float = None,
                     event: str = 'right', bins: list = None, **kwargs):
    """
    Decompose the Brier score into Reliability, Resolution and Uncertainty.
    URL:
        https://timvangelder.com/2015/05/18/brier-score-composition-a-mini-tutorial/
    Reference:
        Two Extra Components in the Brier Score Decomposition, 2008

    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n,)
        threshold: the threshold to define the event
        event: string, right (left) mean greater (smaller) than threshold
        bins: list of bin edges, forecast probabilities can be stratified into a range of bins.
            Default is [0, 0.25, 0.5, 0.75, 1]
            i.e., four bins, [0.00, 0.25), [0.25, 0.50), [0.50, 0.75) and [0.75, 1.00].

    Returns: REL, RES, UNC, and proportion in each bin

    """

    if threshold is None:
        threshold = np.median(obs)
    # NOTE(review): `bins or default` also replaces an explicitly passed
    # empty list with the default — confirm that is intended
    bins = bins or [0, 0.25, 0.5, 0.75, 1]

    assert len(ens) == len(obs)
    assert len(bins) > 2

    rel_list, res_list, prop_list = [], [], []

    # forecast probabilities of the event and binary observed outcomes
    ens_p = ens2p(ens, threshold, event)
    obs_p = value2bool(obs, threshold, event = event)
    # climatological relative frequency of the events
    obs_clim = np.mean(obs_p)
    # uncertainty term: variance of the binary climatology
    unc = obs_clim * (1 - obs_clim)

    # bin data by forecast probabilities
    inds = np.digitize(ens_p, bins = bins, right = False)
    # Correct the bin when the value is equal to the upper bound
    # (np.digitize maps values >= bins[-1] to len(bins))
    inds[inds == len(bins)] = len(bins) - 1

    # to calculate REL and RES in each bin
    for ind in np.unique(inds):

        temp_mask = inds == ind
        # reliability contribution: squared gap between the bin's mean
        # forecast probability and its observed frequency
        rel_list.append(
            np.power(ens_p[temp_mask].mean() - obs_p[temp_mask].mean(), 2)
        )
        # resolution contribution: squared gap between the bin's observed
        # frequency and the climatological frequency
        res_list.append(
            np.power(obs_p[temp_mask].mean() - obs_clim, 2)
        )
        prop_list.append(np.sum(temp_mask) / len(ens))

    # calculate the weighted sum
    rel = np.sum(np.array(rel_list) * np.array(prop_list))
    res = np.sum(np.array(res_list) * np.array(prop_list))

    # create array for proportion of each bin
    bins_prop = np.zeros(len(bins) - 1)
    # by default, the unique returns sorted array
    # (assumes every probability falls at or above bins[0], so each
    # digitize index is >= 1 — TODO confirm for custom bins)
    bins_prop[np.unique(inds) - 1] = np.array(prop_list)

    return rel, res, unc, bins_prop


def rps(ens: np.ndarray, obs: np.ndarray, threshold_list: list = None, **kwargs) -> float:
    """
    Calculate the RPS, ranked probability score, averaged over all years.
    Reference:
    (1) The discrete brier and ranked probability skill scores, 2006
    (2) Toward a seasonal precipitation prediction system for West Africa: Performance of CFSv2 and high‐resolution
        dynamical downscaling, 2015
    (3) Statistical methods in the atmospheric sciences, 2011

    Args:
        ens: Ensemble forecasts, 2D array
        obs: Observations, 1D array
        threshold_list: list to bin forecasts, such as [0.33, 0.66] to separate probability into three parts

    Returns:
        the mean RPS over all forecast/observation pairs
    """
    assert ens.ndim == 2
    assert len(ens) == len(obs)
    # set default thresholds (terciles)
    if threshold_list is None:
        threshold_list = [33, 66]

    # category boundaries from the observed climatology (percentiles)
    thresholds = np.percentile(obs, q=threshold_list)

    # fix: the return annotation previously said np.ndarray, but the
    # scores are averaged into a single scalar
    rps_all = [rps_i(ens[yr_i, :], obs[yr_i], thresholds) for yr_i in range(len(ens))]

    return np.mean(rps_all)


def rps_i(ens_i: np.ndarray, obs_i: float, thresholds: np.ndarray) -> float:
    """
    Calculate the RPS for a single sample (e.g. one year).

    Args:
        ens_i: Ensemble forecast for the target sample, 1D array; NaN members are ignored
        obs_i: Observation for the target sample
        thresholds: category boundaries (in data units) used to bin the forecasts

    Returns:
        Sum of squared differences between the cumulative forecast probabilities
        and the cumulative observed indicator over all categories.
    """

    # cumulative probabilities; the last entry is 1 by construction for both,
    # so the final category never contributes to the score
    ens_p = np.ones(len(thresholds) + 1, dtype = float)
    obs_p = np.ones_like(ens_p)

    # number of valid (non-NaN) ensemble members
    ens_length = np.sum(~np.isnan(ens_i))

    for i, threshold in enumerate(thresholds):
        # cumulative probability of falling below each threshold
        ens_p[i] = np.sum(ens_i < threshold) / ens_length
        obs_p[i] = int(obs_i < threshold)

    return np.sum((ens_p - obs_p) ** 2)


def rocss(ens: np.ndarray, obs: np.ndarray, threshold=None, event='right', **kwargs) -> float:
    """
    Calculate the ROC skill score, 2 * (AUC - 0.5), based on sklearn.
    Reference:
    Conditional probabilities, relative operating characteristics, and relative operating levels, 1999
    CSDN: https://blog.csdn.net/hesongzefairy/article/details/104302499
    sklearn: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html
    ---------------------------------------------------------------------------------------------------------
    Args:
        ens: Ensemble forecasts, (n, m)
        obs: Observations, (n,)
        threshold: Threshold used to define the event; defaults to the median of obs
        event: 'right' (greater than) or 'left' (less than)

    Returns:
        ROC skill score in [-1, 1]; NaN when the AUC is undefined
        (e.g. only one observed class).
    """

    from sklearn.metrics import roc_auc_score

    if threshold is None:
        threshold = np.median(obs)

    assert ens.ndim == 2
    assert len(ens) == len(obs)

    # event probabilities from the ensemble, and 0/1 observed events
    forecast_prob = ens2p(ens, threshold, event)
    observed_event = value2bool(obs, threshold, event = event)

    try:
        area = roc_auc_score(observed_event, forecast_prob)
    except ValueError as er:
        # AUC undefined (e.g. a single observed class): report and return NaN
        print(er)
        return np.nan

    # rescale AUC from [0, 1] to a skill score in [-1, 1]
    return 2 * (area - 0.5)


def variance(obs: np.ndarray) -> np.ndarray:
    """
    Calculate the variance of the observations (population variance, ddof=0).
    Args:
        obs: Observations

    Returns:
        The variance as a NumPy scalar.
    """

    return np.asarray(obs).var()


def ens2p(ens: np.ndarray, threshold: float, event: str) -> np.ndarray:
    """
    Transform forecasts into event probabilities.

    For a 2D ensemble, the probability is the fraction of (valid, non-NaN)
    members for which the event occurs; for a 1D deterministic forecast the
    result is the 0/1 event indicator itself.

    Args:
        ens: forecasts, (n, m) or (n,)
        threshold: threshold defining the event
        event: 'right' (greater than) or 'left' (less than)

    Returns:
        Event probabilities, (n,)
    """

    # deterministic forecast: the indicator is already the "probability"
    if ens.ndim == 1:
        return value2bool(ens, threshold, event = event)

    if ens.ndim == 2:
        # count members for which the event occurs ...
        event_counts = np.sum(value2bool(ens, threshold, event = event), axis = 1)
        # ... and normalize by the number of valid (non-NaN) members
        member_counts = np.sum(~np.isnan(ens), axis = 1)
        return event_counts / member_counts

    raise InputDimensionError(
        'Check the dimension of inputs, {0} for ens.'.format(ens.ndim)
    )


def value2bool(array: np.ndarray, threshold: float, event: str) -> np.ndarray:
    """
    Transform values to a 0/1 event indicator using a threshold.
    Args:
        array: forecast or observation values
        threshold: the threshold used to define the event
        event: 'right' (greater than threshold) or 'left' (less than threshold)

    Returns:
        Integer array of 0/1 indicators, same shape as array.

    Raises:
        ParameterError: when event is neither 'left' nor 'right'.
    """

    if event == 'right' :

        # astype(int) converts the boolean mask to 0/1
        return (array > threshold).astype(int)

    elif event == 'left' :

        return (array < threshold).astype(int)

    # bug fix: this branch used to block on input() and then implicitly
    # return None; fail fast with the module's ParameterError instead
    raise ParameterError('Please check the parameter event. It should be left or right.')


#%%

def mask_data(data_list: list, thresholds: tuple, value: tuple) -> list:
    """
    Set entries at or below a per-dataset threshold to an assigned value,
    in place (used to adjust forecasts and observations).
    Args:
        data_list: list of arrays; each array is modified in place
        thresholds: one threshold per dataset
        value: one replacement value per dataset

    Returns:
        The same list, with arrays modified in place.
    """

    assert (len(thresholds) == len(data_list)) & (len(value) == len(data_list))

    for threshold_i, value_i, dataset in zip(thresholds, value, data_list):
        dataset[dataset <= threshold_i] = value_i

    return data_list

#%% Declarations of Warning and Error


class NaNWarning(Warning):
    """Warning issued when forecast/observation pairs contain NaN and are masked out."""
    pass


class CalculatationWarning(Warning):
    """Warning issued when a metric calculation may be unreliable.

    NOTE(review): the name keeps the historical misspelling ('Calculatation')
    for backward compatibility with existing callers.
    """
    pass


class InputDimensionError(Exception):
    """Raised when an input array has an unsupported number of dimensions."""
    pass


class ParameterError(Exception):
    """Raised when a string/choice parameter takes an unsupported value."""
    pass


#%% diagnostic plots

#common settings
# default tick-label / annotation font size shared by the diagnostic plots below
fontsize = 10
# default text-alignment kwargs expanded into plt.Axes.text (used by add_text)
alignment = {
    'horizontalalignment' : 'left',
    'verticalalignment' : 'bottom',
}

def quantile_range_plot(ens: np.ndarray, obs: np.ndarray, ax: plt.Axes = None, upper: float = None,
                        lower: float = None, linewidth: float = 2., quantile: tuple = (10, 25, 50, 75, 90),
                        clim : bool = True, dig_line: bool = True,
                        color: tuple = ('lightskyblue', 'blue', 'red')) -> tuple:
    """
    Quantile range plot: observation against the ensemble median, with vertical
    bars at each point showing the ensemble spread.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n, )
        ax: plt.Axes; created on the fly when None
        upper: upper bound of plot; defaults to the data maximum
        lower: lower bound of plot; defaults to the data minimum
        linewidth: line width of ensemble-spread bars
        quantile: quantiles to display; must have exactly five entries —
            indices 0..4 are hard-coded below as (outer low, inner low,
            median, inner high, outer high)
        clim: whether to shade the observed 10-90 % climatological band
        dig_line: whether to draw the 1:1 diagonal line
        color: colors for (outer bar, inner bar, observation markers)

    Returns:
        (ax, lower, upper)
    """

    assert ens.ndim == 2 and obs.ndim == 1 and len(ens) == len(obs)

    # (5, n) array of ensemble quantiles, ignoring NaN members
    ens_quantile = np.nanpercentile(ens, axis = 1, q = quantile)

    if ax is None:
        fig, ax = plt.subplots()

    if upper is None:
        upper = np.max([np.nanmax(ens_quantile), np.max(obs)])

    if lower is None:
        lower = np.min([np.nanmin(ens_quantile), np.min(obs)])

    for yr_i in range(len(obs)) :
        # outer quantile band (e.g. 10-90 %) as a vertical bar at x = median
        ax.plot(
            [
                ens_quantile[2, yr_i],
                ens_quantile[2, yr_i]
            ],
            [
                ens_quantile[0, yr_i],
                ens_quantile[4, yr_i]
            ],
            '-',
            color = color[0],
            linewidth = linewidth,
            zorder = 0
        )
        # inner quantile band (e.g. 25-75 %) drawn on top of the outer bar
        ax.plot(
            [
                ens_quantile[2, yr_i],
                ens_quantile[2, yr_i]
            ],
            [
                ens_quantile[1, yr_i],
                ens_quantile[3, yr_i]
            ],
            '-',
            color = color[1],
            linewidth = linewidth,
            zorder = 0
        )

    # observations plotted against the ensemble median
    ax.scatter(
        ens_quantile[2, :],
        obs,
        color = color[2],
        marker = 'o',
        s = 1.5 * linewidth,
        zorder = 5
    )

    if clim:
        # shade the observed climatological 10-90 % band behind everything
        ax.axhspan(
            *np.nanpercentile(obs, q = [10, 90]),
            alpha = 0.8,
            color = 'lightgrey',
            zorder = -1
        )

    if dig_line:
        # 1:1 diagonal reference line
        ax.plot([lower, upper], [lower, upper], linestyle = '--', color = 'grey', linewidth = 1., zorder = 1)

    ax.tick_params(labelsize = fontsize)

    return ax, lower, upper


def time_series_plot(ens: np.ndarray, obs: np.ndarray, ax: plt.Axes = None, quantile: tuple = (10, 25, 50, 75, 90),
                     timestamp: np.ndarray = None, linewidth: float = 2.,
                     color: tuple = ('lightskyblue', 'blue', 'red')) -> plt.Axes:
    """
    Plot the time series of forecast quantile bars and observations.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n, )
        ax: plt.Axes; created on the fly when None
        quantile: quantiles for ensemble forecasts; must have exactly five
            entries — indices 0..4 are hard-coded below
        timestamp: time index to show on the x-axis; defaults to 0..n-1.
            NOTE(review): samples are addressed via `yr_i - timestamp[0]`, which
            assumes consecutive integer timestamps — confirm with callers
        linewidth: linewidth for the ensemble-spread bars
        color: colors for (outer bar, inner bar, observation markers)

    Returns:
        ax
    """

    assert ens.ndim == 2 and obs.ndim == 1 and len(ens) == len(obs)

    # (5, n) array of ensemble quantiles, ignoring NaN members
    ens_quantile = np.nanpercentile(ens, axis = 1, q = quantile)

    if ax is None:
        fig, ax = plt.subplots()

    if timestamp is None:
        timestamp = np.arange(len(obs))

    for yr_i in timestamp :
        # outer quantile band (e.g. 10-90 %) as a vertical bar at each time step
        ax.plot(
            [
                yr_i,
                yr_i
            ],
            [
                ens_quantile[0, yr_i - timestamp[0]],
                ens_quantile[4, yr_i - timestamp[0]]
            ],
            '-',
            color = color[0],
            linewidth = linewidth,
            zorder = 0
        )

        # inner quantile band (e.g. 25-75 %) drawn on top
        ax.plot(
            [
                yr_i,
                yr_i
            ],
            [
                ens_quantile[1, yr_i - timestamp[0]],
                ens_quantile[3, yr_i - timestamp[0]]
            ],
            '-',
            color = color[1],
            linewidth = linewidth,
            zorder = 0
        )
        # ensemble median marked with a horizontal tick
        ax.plot(
            [yr_i],
            [ens_quantile[2, yr_i - timestamp[0]]],
            marker = '_',
            markersize = 1.5 * linewidth,
            markeredgecolor = 'k',
            zorder = 1
        )

    # observations drawn on top of the forecast bars
    ax.scatter(
        timestamp,
        obs,
        color = color[2],
        marker = 'o',
        s = 2 * linewidth,
        zorder = 5
    )

    ax.tick_params(labelsize = fontsize)

    return ax


def pit_uniform_plot(uniform: np.ndarray, pit: np.ndarray, ax: plt.Axes = None, mask = None, **kwargs):
    """
    PIT (probability integral transform) uniform probability plot.
    Args:
        uniform: standard uniform variate (plotting positions), (n,)
        pit: PIT values, (n,)
        ax: plt.Axes; created on the fly when None (bug fix: a None ax
            previously raised AttributeError)
        mask: boolean validity flags, (n,); invalid points are drawn with an
            'x' marker; defaults to all-valid
        **kwargs: parameters forwarded to plt.scatter

    Returns:
        ax
    """

    # bug fix: create the axes when not supplied (matches the other plot helpers)
    if ax is None:
        fig, ax = plt.subplots()

    if mask is None:
        mask = np.ones_like(uniform, dtype = bool)

    # 1:1 diagonal = perfectly calibrated forecasts
    ax.plot(
        [0, 1],
        [0, 1],
        linestyle = '--',
        color = 'grey',
        linewidth = 1
    )

    # sort PIT values; reorder the mask the same way
    arg_sort = np.argsort(pit)
    sort_mask = mask[arg_sort]

    # NOTE(review): `uniform` is indexed with the sorted mask but is not itself
    # reordered — this assumes uniform is already sorted plotting positions; confirm
    ax.scatter(
        uniform[sort_mask],
        pit[arg_sort][sort_mask],
        **kwargs
    )

    # masked-out points are shown with an 'x' marker
    mask_kwargs = kwargs.copy()
    mask_kwargs['marker'] = 'x'

    ax.scatter(
        uniform[~sort_mask],
        pit[arg_sort][~sort_mask],
        **mask_kwargs
    )

    ax.set_xticks(np.arange(0.0, 1.0 + 0.1, 1 / 4))
    ax.set_yticks(np.arange(0.0, 1.0 + 0.1, 1 / 4))

    ax.tick_params(labelsize = fontsize)

    return ax


def corr_scatter_plot(ens: np.ndarray, obs: np.ndarray, ax: plt.Axes = None, upper: float = None,
                      lower : float = 0, regress: bool = False, **kwargs):
    """
    Correlation scatter plot of forecasts against observations.
    Args:
        ens: forecasts, one value per sample (e.g. ensemble mean), (n,)
        obs: observations, (n,)
        ax: plt.Axes; created on the fly when None (bug fix: a None ax
            previously raised AttributeError)
        upper: upper bound of the 1:1 line; defaults to the data maximum
            (bug fix: a None upper was previously passed straight to plot)
        lower: lower bound of the 1:1 line
        regress: whether to add a least-squares regression line
        **kwargs: forwarded to ax.scatter

    Returns:
        ax
    """
    from scipy.stats import linregress

    if ax is None:
        fig, ax = plt.subplots()

    if upper is None:
        upper = np.max([np.max(ens), np.max(obs)])

    # 1:1 reference line
    ax.plot([lower, upper], [lower, upper], 'grey', linestyle = '--', linewidth = 1)
    ax.scatter(ens, obs, **kwargs)

    if regress :
        # least-squares fit of obs on ens, drawn over the data range
        lr = linregress(ens, obs)
        slope = lr.slope
        intercept = lr.intercept
        x_range = np.array([np.min(ens), np.max(ens)])
        y_range = slope * x_range + intercept
        ax.plot(x_range, y_range, color = 'r', linewidth = 2)

    return ax


def reliability_diagram(ens: np.ndarray = None, obs: np.ndarray = None, ens_p: np.ndarray = None,
                        obs_event: np.ndarray = None, ax: plt.Axes = None, threshold: float = None,
                        event: str = 'right', bins: tuple = None, clim_line: float = None,
                        inset_axes: tuple = (0.65, 0.15, 0.3, 0.3), **kwargs):
    """
    Plot a reliability diagram with an inset bar chart of forecast-bin proportions.

    Either raw inputs (ens, obs) or pre-computed (ens_p, obs_event) must be given;
    raw inputs take precedence when supplied.

    Args:
        ens: ensemble forecasts, (n, m); if given, converted to probabilities
        obs: observations, (n,); if given, converted to 0/1 events
        ens_p: pre-computed forecast probabilities, (n,); ignored when ens is given
        obs_event: pre-computed 0/1 observed events, (n,); ignored when obs is given
        ax: plt.Axes; created on the fly when None (bug fix: a None ax
            previously raised AttributeError)
        threshold: threshold defining the event; defaults to the median of obs
            (the default is only computable when obs is given)
        event: 'right' (greater than) or 'left' (less than)
        bins: bin edges for the forecast probabilities; defaults to quartiles
        clim_line: climatological probability; when given, shades the no-skill area
        inset_axes: location of the proportion bar plot, in axes fraction
        **kwargs: forwarded to ax.plot for the reliability curve

    Returns:
        (ax, ax2): the main axes and the inset bar-plot axes
    """

    if obs is not None:
        assert obs.ndim == 1
        # only when obs is given can the threshold default be derived from it
        if threshold is None:
            threshold = np.median(obs)

        obs_event = value2bool(obs, threshold, event = event)

    if ens is not None:
        assert ens.ndim == 2
        ens_p = ens2p(ens, threshold, event)

    if bins is None:
        bins = (0, 0.25, 0.5, 0.75, 1)

    # bug fix: create the axes when not supplied (matches the other plot helpers)
    if ax is None:
        fig, ax = plt.subplots()

    assert all([len(ens_p) == len(obs_event), len(bins) > 2, ens_p.ndim == 1, obs_event.ndim == 1])

    # bin data by forecast probabilities
    inds = np.digitize(ens_p, bins = bins, right = False)

    # clamp values equal to the outer edges into the first/last bin
    inds[inds == len(bins)] = len(bins) - 1
    inds[inds == 0] = 1

    ens_prob, obs_freq, prop_list = [], [], []
    # per bin: mean forecast probability, observed frequency, sample share
    for ind in np.unique(inds):

        temp_mask = inds == ind
        ens_prob.append(ens_p[temp_mask].mean())
        obs_freq.append(obs_event[temp_mask].mean())
        prop_list.append(np.sum(temp_mask) / len(ens_p))

    # proportions aligned to the full set of bins (empty bins stay 0)
    bins_prop = np.zeros(len(bins) - 1)
    # np.unique returns a sorted array, so positions line up with bin numbers
    bins_prop[np.unique(inds) - 1] = np.array(prop_list)

    # 1:1 diagonal = perfect reliability
    ax.plot(
        [0, 1],
        [0, 1],
        linestyle = '--',
        color = 'grey',
        linewidth = 1
    )

    ax.plot(
        ens_prob,
        obs_freq,
        **kwargs
    )

    upper, lower = 1.02, -0.02

    ax.set_xlim(lower, upper)
    ax.set_ylim(lower, upper)

    if clim_line:
        assert clim_line < 1

        # shade the no-skill region below/above the line halfway between the
        # diagonal and the climatology (point-slope form)
        x1 = np.arange(lower, clim_line + 0.0001, 0.01)
        y1 = 0.5 * (x1 + clim_line)  # point and slope
        y2 = lower
        ax.fill_between(x1, y1, y2, facecolor = 'grey', alpha = 0.2)

        x2 = np.arange(clim_line, upper + 0.0001, 0.01)
        y3 = 0.5 * (x2 + clim_line)
        y4 = upper
        ax.fill_between(x2, y3, y4, facecolor = 'grey', alpha = 0.2)

        ax.axvline(clim_line, color = 'grey', lw = 0.3)
        ax.axhline(clim_line, color = 'grey', lw = 0.3)
        ax.plot(
            [lower, upper],
            [y1[0], y3[-1]],
            color = 'grey',
            lw = 0.3
        )

    # inset bar plot showing the share of forecasts in each bin
    ax2 = ax.inset_axes(inset_axes)

    # bar centers: left edge plus half of the first bin width
    x_ticks = np.array(bins[:-1]) + np.mean(bins[:2])

    ax2.bar(
        x_ticks,
        bins_prop,
        bins[1] - bins[0],
        alpha = 0.3,
        color = 'lightskyblue'
    )
    ax2.patch.set_alpha(0.5)

    return ax, ax2


def ROC_curve(ens: np.ndarray, obs: np.ndarray, ax: plt.Axes = None,
              threshold: float = None, event: str = 'right', **kwargs):
    """
    Plot the ROC curve for a binary event derived from ensemble forecasts.
    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n,)
        ax: plt.Axes; created on the fly when None (bug fix: a None ax
            previously raised AttributeError)
        threshold: threshold defining the event; defaults to the median of obs
        event: 'right' (greater than) or 'left' (less than)
        **kwargs: forwarded to ax.plot for the curve

    Returns:
        ax
    """

    assert all([ens.ndim == 2, obs.ndim == 1, len(ens) == len(obs)])

    from sklearn.metrics import roc_curve

    if threshold is None:
        threshold = np.median(obs)

    if ax is None:
        fig, ax = plt.subplots()

    # ensemble members -> event probabilities; observations -> 0/1 events
    ens_p = ens2p(ens, threshold, event)
    obs_p = value2bool(obs, threshold, event)

    fpr, tpr, threshold_p = roc_curve(obs_p, ens_p)

    # anchor the curve at the origin
    fpr2 = np.append([0], fpr)
    tpr2 = np.append([0], tpr)

    # 1:1 diagonal = no-skill reference
    ax.plot(
        [0, 1],
        [0, 1],
        linestyle = '--',
        color = 'grey',
        linewidth = 1
    )

    ax.plot(
        fpr2,
        tpr2,
        **kwargs
    )

    return ax

def linked_time_series(ens: np.ndarray, obs: np.ndarray, ax: plt.Axes = None, quantile: tuple = (10, 25, 50, 75, 90),
                       xticks: np.ndarray = None, clim: np.ndarray = None, quantile_colors = ('#87bfe8', '#2887cc', '#dd7b1d'),
                       color_median: str = 'k', ):
    """
    Plot forecast quantile bands, the ensemble median and observations as
    continuous (linked) time series.

    Args:
        ens: ensemble forecasts, (n, m)
        obs: observations, (n,)
        ax: plt.Axes; created on the fly when None (bug fix: a None ax
            previously raised AttributeError)
        quantile: quantiles to display; must have exactly five entries —
            indices 0..4 are hard-coded below (outer band, inner band, median)
        xticks: x positions / time labels; defaults to 0..n-1
        clim: climatology forecasts, (n, k); when given, its 10th/90th
            percentiles are drawn as dashed grey lines
        quantile_colors: colors for (outer band, inner band, observations)
        color_median: color for the ensemble median line

    Returns:
        ax
    """

    assert all([len(ens) == len(obs), ens.ndim == 2, obs.ndim == 1])

    # (5, n) array of forecast quantiles, ignoring NaN members
    _fore_percentile = np.nanpercentile(ens, q = quantile, axis = 1)

    # bug fix: create the axes when not supplied (matches the other plot helpers)
    if ax is None:
        fig, ax = plt.subplots()

    if xticks is None:
        xticks = np.arange(len(obs))

    # outer quantile band (e.g. 10-90 %)
    ax.fill_between(
        x = xticks,
        y1 = _fore_percentile[0, :],
        y2 = _fore_percentile[4, :],
        color = quantile_colors[0],
    )
    # inner quantile band (e.g. 25-75 %)
    ax.fill_between(
        x = xticks,
        y1 = _fore_percentile[1, :],
        y2 = _fore_percentile[3, :],
        color = quantile_colors[1],
    )
    # ensemble median
    ax.plot(
        xticks,
        _fore_percentile[2, :],
        color = color_median
    )
    # observations
    ax.plot(
        xticks,
        obs,
        color = quantile_colors[2],
    )

    if clim is not None:
        # dashed grey envelope of the climatological 10-90 % range
        _clim_percentile = np.percentile(
            clim,
            q = [10, 90],
            axis = 1
        )

        ax.plot(
            xticks,
            _clim_percentile[0, :],
            color = 'lightgrey',
            linestyle = '--',
            linewidth = 0.8
        )
        ax.plot(
            xticks,
            _clim_percentile[1, :],
            color = 'lightgrey',
            linestyle = '--',
            linewidth = 0.8
        )

    return ax

def scatter_with_density(
        tmp_x: np.ndarray,
        tmp_y: np.ndarray,
        ax: plt.Axes = None,
        loess: bool = False,
        loess_frac: float = 1/3.,
        cmap=None,
        norm=None,
        **kwargs):
    """
    Scatter plot colored by Gaussian-KDE point density, optionally with a
    LOWESS smoothing curve.

    Args:
        tmp_x: x values, (n,)
        tmp_y: y values, (n,)
        ax: plt.Axes; created on the fly when None (bug fix: a None ax
            previously raised AttributeError)
        loess: whether to overlay a LOWESS fit of y on x
        loess_frac: fraction of the data used for each LOWESS estimate
        cmap: colormap for the density coloring; defaults to 'jet'
        norm: color normalization; defaults to the min/max of the densities
        **kwargs: forwarded to ax.scatter

    Returns:
        ax
    """

    from scipy.stats import gaussian_kde
    import statsmodels.api as sm

    # bug fix: create the axes when not supplied (matches the other plot helpers)
    if ax is None:
        fig, ax = plt.subplots()

    # density of each (x, y) point, evaluated at the points themselves
    tmp_xyb = np.vstack([tmp_x, tmp_y])
    tmp_kde = gaussian_kde(tmp_xyb)(tmp_xyb)
    # draw low-density points first so dense points end up on top
    tmp_kde_sort = np.argsort(tmp_kde)

    if cmap is None:
        cmap = getattr(plt.cm, 'jet')

    if norm is None:
        norm = colors.Normalize(
            vmin = np.min(tmp_kde),
            vmax = np.max(tmp_kde)
        )

    ax.scatter(
        tmp_x[tmp_kde_sort],
        tmp_y[tmp_kde_sort],
        s = 1,
        c = tmp_kde[tmp_kde_sort],
        edgecolors = 'none',
        cmap = cmap,
        norm = norm,
        **kwargs
    )

    if loess:
        # lowess returns sorted (x, fitted y) pairs
        temp_loess = sm.nonparametric.lowess(tmp_y, tmp_x, frac = loess_frac)

        ax.plot(
            temp_loess[:, 0],
            temp_loess[:, 1],
            c = 'r'
        )

    return ax

def add_text(ax: plt.Axes, data: Union[list, np.ndarray], text: list, sign_percent: list = None, format_type = None,
             loc_left: float = 0.55, loc_bottom: float = 0.05, loc_margin: float = 0.1,  fontsize = fontsize, **kwargs):
    """
    Annotate an axes with 'name: value' lines (e.g. verification metrics).
    Args:
        ax: plt.Axes
        data: metric values to display
        text: metric names, same length as data
        sign_percent: boolean/index selector of entries that get a '%' suffix
        format_type: when truthy, use scientific notation ('{:.1e}') instead
            of fixed-point ('{:.2f}')
        loc_left: left location, in axes fraction
        loc_bottom: bottom location of the first line, in axes fraction
        loc_margin: vertical spacing between lines, in axes fraction
        fontsize: font size; defaults to the module-level setting
        **kwargs: forwarded to ax.text

    Returns:
        ax
    """
    assert len(data) == len(text)

    # bug fix: test None explicitly so a valid-but-falsy selector (empty
    # list/array) is not silently replaced by the all-False default
    if sign_percent is None:
        sign_percent = np.zeros(len(data), dtype = bool)
    sign_percent_b = np.array([''] * len(data))
    sign_percent_b[sign_percent] = '%'

    # single formatting path instead of two duplicated ax.text branches
    value_fmt = '{0}: {1:.1e}{2}' if format_type else '{0}: {1:.2f}{2}'

    for i in range(len(data)):
        ax.text(
            loc_left,
            loc_bottom + i * loc_margin,
            value_fmt.format(text[i], data[i], sign_percent_b[i]),
            **alignment,
            fontsize = fontsize,
            transform = ax.transAxes,
            **kwargs
        )

    return ax


#%% spatial plotting of verification metrics


def crs_pcolormesh(lats: np.ndarray, lons: np.ndarray, data: np.ndarray, ax : plt.Axes = None,
                   linewidth: float = 0.5, proj: str = 'PlateCarree', **kwargs):
    """
    Draw a 2D field on a map with coastlines using cartopy.
    Args:
        lats: 1D array of latitudes
        lons: 1D array of longitudes
        data: field to plot, shaped (len(lats), len(lons))
        ax: plt.Axes (GeoAxes); created on the fly when None
        linewidth: coastline line width
        proj: name of the cartopy CRS class the lon/lat grid is defined in
        **kwargs: forwarded to pcolormesh

    Returns:
        the QuadMesh returned by pcolormesh
    """

    import cartopy.crs as ccrs
    import cartopy.feature as cf

    if ax is None:
        fig, ax = plt.subplots()

    # CRS of the supplied coordinates
    data_crs = getattr(ccrs, proj)()

    # build the 2D coordinate grid
    grid_lon, grid_lat = np.meshgrid(lons, lats)

    # coastlines drawn on top of the field
    ax.add_feature(
        cf.COASTLINE,
        linewidth = linewidth,
        zorder = 5
    )

    # the transform tells cartopy which CRS the grid coordinates live in
    return ax.pcolormesh(
        grid_lon, grid_lat,
        data,
        transform = data_crs,
        **kwargs
    )


def crs_latlon_ticks(ax: plt.Axes, crs, xticks: Union[np.ndarray, None] = None, draw_labels: tuple = (True, False, True, False),
                     yticks: Union[np.ndarray, None] = None, **kwargs):
    """
    Add latitude/longitude gridline ticks to a cartopy GeoAxes (global map).
    Args:
        ax: plt.Axes (GeoAxes)
        crs: coordinate reference system of the tick values
        xticks: longitude tick positions; defaults to every 60 degrees
        draw_labels: which label sides to show, as (left, right, bottom, top)
        yticks: latitude tick positions; defaults to every 30 degrees
        **kwargs: forwarded to ax.gridlines

    Returns:
        ax
    """
    from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
    import matplotlib.ticker as mticker
    import cartopy.crs as ccrs

    gridlines = ax.gridlines(
        crs=crs,
        draw_labels=True,
        **kwargs
    )

    if xticks is None:
        xticks = np.arange(-180, 181, 60)

    if yticks is None:
        yticks = np.arange(-90, 91, 30)

    # longitude locator/formatter (degree signs plus E/W suffixes)
    gridlines.xlocator = mticker.FixedLocator(xticks)
    gridlines.xformatter = LONGITUDE_FORMATTER

    # latitude locator/formatter (degree signs plus N/S suffixes)
    gridlines.ylocator = mticker.FixedLocator(yticks)
    gridlines.yformatter = LATITUDE_FORMATTER

    # enable/disable labels per side: (left, right, bottom, top)
    (gridlines.left_labels,
     gridlines.right_labels,
     gridlines.bottom_labels,
     gridlines.top_labels) = draw_labels

    return ax

def crs_create_shp_feature(geometry = None,
                           shp_path : Union[str, Path] = None,
                           crs_nm : str = None,
                           read_engine : str = 'crs',
                           **kwargs):
    """
    Create a cartopy ShapelyFeature from in-memory geometries or a shapefile.

    Args:
        geometry: iterable of shapely geometries; when None, read from shp_path
        shp_path: path of the shapefile
        crs_nm: name of the cartopy projection class; defaults to 'PlateCarree'
        read_engine: 'crs' (cartopy Reader) or 'gpd' (geopandas, reprojected
            to EPSG:4326); note that 'gpd' always reads shp_path
        **kwargs: forwarded to ShapelyFeature

    Attention:
        cartopy's Reader() returns a one-shot iterator, so the geometries can
        disappear after a single use. Create the feature for each shapefile
        once, then only adjust style when calling add_feature.

    Raises:
        IOError: when neither geometry nor shp_path is supplied, or
            read_engine is unknown (module-local IOError class).

    Returns:
        cartopy.feature.ShapelyFeature
    """
    from cartopy.io.shapereader import Reader
    from cartopy.feature import ShapelyFeature
    import cartopy.crs as ccrs

    if crs_nm is None:
        crs_nm = 'PlateCarree'
    crs = getattr(ccrs, crs_nm)()

    # bug fix: compare against None instead of truthiness so valid-but-falsy
    # inputs (e.g. an empty geometry list) are not silently rejected
    if geometry is None and shp_path is None:
        raise IOError('There must be geometry or shp_path. Please Check.')

    if read_engine == 'crs':
        geom = geometry if geometry is not None else Reader(shp_path).geometries()

    elif read_engine == 'gpd':
        import geopandas as gpd
        gpd_file = gpd.read_file(shp_path)
        geom = gpd_file.to_crs('EPSG:4326').geometry

    else:
        raise IOError('Please Check the engine of reading file.')

    shape_feature = ShapelyFeature(
        geom,
        crs = crs,
        **kwargs
    )

    return shape_feature


def add_sub_axes(main_ax: plt.Axes, sub_ax: plt.Axes, position: list = None):
    """
    Pin sub_ax inside main_ax (e.g. a South China Sea inset on a cartopy map).
    References:
        https://stackoverflow.com/questions/45527584/how-to-easily-add-a-sub-axes-with-proper-position-and-size-in-matplotlib-and-car
    Args:
        main_ax: host axes
        sub_ax: axes to be placed inside main_ax
        position: [left, bottom, width, height] in main_ax fraction;
            defaults to the lower-right corner

    Returns:
        None (sub_ax is repositioned in place)
    """

    # bug fix: the module-level InsetPosition import is commented out, which
    # made this function raise NameError; import it locally instead
    from mpl_toolkits.axes_grid1.inset_locator import InsetPosition

    if position is None:
        position = [0.85, 0.00, 0.15, 0.25]

    ip = InsetPosition(
        main_ax,
        position,
    )

    sub_ax.set_axes_locator(ip)


def add_cbar(ax, **kwargs):
    """
    Draw a standalone colorbar into the given axes.
    Args:
        ax: plt.Axes that will host the colorbar
        **kwargs: forwarded to matplotlib.colorbar.ColorbarBase
            (e.g. cmap, norm, orientation)

    Returns:
        the ColorbarBase instance
    """
    return colorbar.ColorbarBase(ax, **kwargs)


def normalize_colormap(data : Union[float, np.ndarray], cmap_nm : str, vmin : float, vmax : float, ) -> tuple:
    """
    Map data values to RGBA colors through a linearly normalized colormap.
    References:
    https://www.geeksforgeeks.org/matplotlib-colors-normalize-class-in-python/
    Args:
        data: value(s) to be colored
        cmap_nm: name of the matplotlib colormap
        vmin: lower bound of the normalization
        vmax: upper bound of the normalization

    Returns:
        RGBA color(s) corresponding to data
    """
    from matplotlib import colors

    # linear scaling of data into [0, 1]
    scaler = colors.Normalize(vmin = vmin, vmax = vmax)
    colormap = getattr(plt.cm, cmap_nm)

    return colormap(scaler(data))



#%% add lon and lat for cartopy

def find_x_intersections(ax, xticks):
    """Find where the meridians for xticks cross the bottom x-axis, in data
    coordinates, together with formatted tick labels.

    Args:
        ax: GeoAxes whose projection may bend meridians (e.g. Lambert)
        xticks: candidate longitudes in degrees

    Returns:
        (xlocs, xticklabels): x positions on the bottom axis and label strings
    """
    # map rectangle in projected coordinates, plus its lon/lat extent
    from shapely import geometry as sgeom
    import cartopy.crs as ccrs
    from cartopy.mpl.gridliner import LongitudeFormatter
    x0, x1, y0, y1 = ax.get_extent()
    lon0, lon1, lat0, lat1 = ax.get_extent(ccrs.PlateCarree())
    xaxis = sgeom.LineString([(x0, y0), (x1, y0)])
    # keep only ticks that fall inside the map's longitude range
    lon_ticks = [tick for tick in xticks if tick >= lon0 and tick <= lon1]

    # each meridian is sampled with nstep points
    nstep = 50
    xlocs = []
    xticklabels = []
    for tick in lon_ticks:
        lon_line = sgeom.LineString(
            ax.projection.transform_points(
                ccrs.Geodetic(),
                np.full(nstep, tick),
                np.linspace(lat0, lat1, nstep)
            )[:, :]
        )
        # if the meridian crosses the bottom axis, record the crossing position
        # NOTE(review): assumes a single intersection point — a MultiPoint
        # result would break `.x`; confirm for unusual projections/extents
        if xaxis.intersects(lon_line):
            point = xaxis.intersection(lon_line)
            xlocs.append(point.x)
            xticklabels.append(tick)
        else:
            continue

    # add degree signs and E/W suffixes to the labels
    formatter = LongitudeFormatter()
    xticklabels = [formatter(label) for label in xticklabels]

    return xlocs, xticklabels

def find_y_intersections(ax, yticks):
    """Find where the parallels for yticks cross the left y-axis, in data
    coordinates, together with formatted tick labels.

    Args:
        ax: GeoAxes whose projection may bend parallels (e.g. Lambert)
        yticks: candidate latitudes in degrees

    Returns:
        (ylocs, yticklabels): y positions on the left axis and label strings
    """
    from cartopy.mpl.gridliner import LatitudeFormatter
    from shapely import geometry as sgeom
    import cartopy.crs as ccrs
    x0, x1, y0, y1 = ax.get_extent()
    lon0, lon1, lat0, lat1 = ax.get_extent(ccrs.PlateCarree())
    yaxis = sgeom.LineString([(x0, y0), (x0, y1)])
    # keep only ticks that fall inside the map's latitude range
    lat_ticks = [tick for tick in yticks if tick >= lat0 and tick <= lat1]

    nstep = 50
    ylocs = []
    yticklabels = []
    for tick in lat_ticks:
        # note the argument order differs from find_x_intersections:
        # a parallel varies in longitude at a fixed latitude
        lat_line = sgeom.LineString(
            ax.projection.transform_points(
                ccrs.Geodetic(),
                np.linspace(lon0, lon1, nstep),
                np.full(nstep, tick)
            )[:, :]
        )
        # NOTE(review): assumes a single intersection point — a MultiPoint
        # result would break `.y`; confirm for unusual projections/extents
        if yaxis.intersects(lat_line):
            point = yaxis.intersection(lat_line)
            ylocs.append(point.y)
            yticklabels.append(tick)
        else:
            continue

    # add degree signs and N/S suffixes to the labels
    formatter = LatitudeFormatter()
    yticklabels = [formatter(label) for label in yticklabels]

    return ylocs, yticklabels


def set_lambert_ticks(ax, xticks, yticks):
    '''
    Add ticks on the bottom x-axis and left y-axis of a GeoAxes with a
    LambertConformal projection.

    The map boundary must be rectangular, i.e. ax must have been clipped to a
    rectangle beforehand via set_extent; otherwise errors may occur.

    Parameters
    ----------
    ax : GeoAxes
        Axes with a LambertConformal projection.

    xticks : list of floats
        Tick positions (longitudes) for the x-axis.

    yticks : list of floats
        Tick positions (latitudes) for the y-axis.

    Returns
    -------
    None
    '''
    # x-axis: where the requested meridians cross the bottom axis
    xlocs, xticklabels = find_x_intersections(ax, xticks)
    ax.set_xticks(xlocs)
    ax.set_xticklabels(xticklabels)
    # y-axis: where the requested parallels cross the left axis
    ylocs, yticklabels = find_y_intersections(ax, yticks)
    ax.set_yticks(ylocs)
    ax.set_yticklabels(yticklabels)

#%% some other functions for convenience
def grid_plot(figsize: tuple = None, **kwargs) -> tuple:
    """
    Create a figure with a gap-free GridSpec layout.
    Args:
        figsize: size of the figure
        **kwargs: forwarded to gridspec.GridSpec (e.g. nrows, ncols)

    Returns:
        (figure, gridspec) tuple
    """

    figure = plt.figure(figsize = figsize)
    grid = gridspec.GridSpec(**kwargs)

    # remove all spacing between the subplots
    grid.update(wspace = 0., hspace = 0.)

    return figure, grid

def trans_latlon(lat, lon, use_math=False):
    """
    Format a (lat, lon) pair as a degree string with N/S and E/W suffixes.

    Args:
        lat: latitude in degrees, negative for south
        lon: longitude in degrees; values above 180 are wrapped to (-180, 180]
        use_math: when True, render the degree sign and hemisphere letter
            fully in math mode (mathrm), otherwise only the degree sign

    Returns:
        Formatted string, e.g. '30$^\\circ$N, 160$^\\circ$W'
    """
    # wrap longitudes from [0, 360) into (-180, 180]
    if lon > 180:
        lon = lon - 360

    if use_math:
        sa = r'$^\circ \mathrm{N}$' if lat >= 0 else r'$^\circ \mathrm{S}$'
        sb = r'$^\circ \mathrm{E}$' if lon >= 0 else r'$^\circ \mathrm{W}$'
        return f'{abs(lat)}{sa}, {abs(lon)}{sb}'
    else:
        sa = 'N' if lat >= 0 else 'S'
        sb = 'E' if lon >= 0 else 'W'
        # bug fix: raw string avoids the invalid '\c' escape-sequence warning;
        # the produced bytes are unchanged
        return r'{0}$^\circ${1}, {2}$^\circ${3}'.format(abs(lat), sa, abs(lon), sb)
#%%

class IOError(Exception):
    """Raised on invalid input combinations when reading geometries/shapefiles.

    NOTE(review): this shadows the builtin IOError (alias of OSError) within
    this module; renaming would be cleaner but would break existing callers.
    """
    pass


# %%

def concatenate_images(image_paths, output_path, direction='horizontal'):
    """
    Concatenate images side by side (or stacked) and save the result.

    Args:
        image_paths: iterable of input image paths
        output_path: path of the combined output image
        direction: 'horizontal' (left to right) or 'vertical' (top to bottom)

    Raises:
        ValueError: when direction is neither 'horizontal' nor 'vertical'
    """
    # open all images and get their sizes
    images = [Image.open(image_path) for image_path in image_paths]
    try:
        widths, heights = zip(*(i.size for i in images))

        if direction == 'horizontal':
            # concatenate images horizontally
            total_width = sum(widths)
            max_height = max(heights)
            new_image = Image.new("RGB", (total_width, max_height))
            x_offset = 0
            for image in images:
                new_image.paste(image, (x_offset, 0))
                x_offset += image.width
        elif direction == 'vertical':
            # concatenate images vertically
            max_width = max(widths)
            total_height = sum(heights)
            new_image = Image.new("RGB", (max_width, total_height))
            y_offset = 0
            for image in images:
                new_image.paste(image, (0, y_offset))
                y_offset += image.height
        else:
            raise ValueError("direction must be 'horizontal' or 'vertical'")

        # save the new image
        new_image.save(output_path)
    finally:
        # bug fix: close the opened image files to avoid leaking file handles
        for image in images:
            image.close()
