from typing import Callable, Dict
import numpy as np
from scipy.interpolate import interp1d
from scipy.stats import binom, iqr
import scipy.optimize as opt

def PDA_likelihood_fun(obs_data: np.ndarray, sim_data: np.ndarray, response_code=(1, -1), bandwidth=1, include_NA=False):
    """Probability Density Approximation likelihood via histogram density.

    For each response code, estimates the defective density of the
    simulated RTs (histogram density scaled by that response's simulated
    proportion) and evaluates it at the observed RTs by linear
    interpolation.

    Parameters
    ----------
    obs_data, sim_data : np.ndarray
        Two-column arrays: column 0 = RT, column 1 = response code.
    response_code : sequence
        Response codes to evaluate separately (default (1, -1)).
        NOTE: was a mutable default list; changed to a tuple.
    bandwidth : float
        Half-window padding: the density support is the observed RT
        range extended by 3 * bandwidth on each side.
    include_NA : bool
        If True, append the count of simulated NaN RTs (non-responses).

    Returns
    -------
    list
        One entry per response code: an array of density values at the
        observed RTs; zeros when the model produced no such responses;
        None when no such responses were observed.  If include_NA is
        True, a final integer non-response count is appended.
    """
    Nsample = len(sim_data)
    PDF = []
    # calculate the likelihood for each response separately
    for use_resp in response_code:

        # extract RTs for this response from observed and simulated data
        sampvec = sim_data[sim_data[:, 1] == use_resp][:, 0]
        data = obs_data[obs_data[:, 1] == use_resp][:, 0]

        if len(data) == 0:
            # response never observed: nothing to evaluate
            PDF.append(None)
        elif len(sampvec) == 0:
            # response never simulated: zero likelihood at every trial
            PDF.append(np.zeros_like(data))
        else:
            # density support: observed range padded by 3 bandwidths
            m = np.min(data) - 3 * bandwidth
            M = np.max(data) + 3 * bandwidth

            if np.min(sampvec) > M or np.max(sampvec) < m:
                # simulated RTs fall entirely outside the window
                PDF.append(np.zeros_like(data))
            else:
                # single histogram call yields both densities and edges
                # (the original computed the same histogram twice)
                d, edges = np.histogram(sampvec, bins='auto', range=(m, M), density=True)
                d[d < 0] = 0
                # defective density: weight by the simulated proportion
                # of this response
                d = d * len(sampvec) / Nsample
                f = interp1d(edges[1:], d, fill_value=0, bounds_error=False)
                out = f(data)
                out[out < 0] = 0
                PDF.append(out)

    if include_NA:
        # BUG FIX: `sim_data[:,0] == np.nan` is always False (NaN never
        # compares equal), so the count was always 0; use np.isnan.
        PDF.append(int(np.isnan(sim_data[:, 0]).sum()))

    return PDF


def RMSE_object_func(res_th: Dict, res_ob: Dict) -> float:
    """Weighted root-mean-square error between simulated and observed data.

    Combines an RMSE over the delta-plot mean RTs with an RMSE over the
    conditional accuracy functions (CAF); the CAF term is up-weighted by
    a fixed factor of 1500 (accuracies live on a much smaller numeric
    scale than RTs).  Ported from pydmc.

    NOTE(review): annotated as Dict but accessed via ``.delta`` /
    ``.caf`` attributes holding pandas DataFrames — presumably a pydmc
    results object; confirm against callers.
    """
    n_rt = 2 * len(res_th.delta)
    n_err = 2 * len(res_th.caf)

    caf_cols = ["comp", "incomp"]
    rt_cols = ["mean_comp", "mean_incomp"]

    # squared deviations, summed over rows then columns
    caf_sq_sum = ((res_th.caf[caf_cols] - res_ob.caf[caf_cols]) ** 2).sum().sum()
    rt_sq_sum = ((res_th.delta[rt_cols] - res_ob.delta[rt_cols]) ** 2).sum().sum()

    cost_caf = np.sqrt((1 / n_err) * caf_sq_sum)
    cost_rt = np.sqrt((1 / n_rt) * rt_sq_sum)

    # weights sum to 1 before the fixed 1500x CAF scaling
    weight_rt = n_rt / (n_rt + n_err)
    weight_caf = (1 - weight_rt) * 1500

    return weight_caf * cost_caf + weight_rt * cost_rt

def calculate_cost_value_spe(res_th: Dict, res_ob: Dict) -> float:
    """Squared percentage error between simulated and observed data.

    Sums ((observed - simulated) / observed) ** 2 over the CAF columns
    (positions 1:3) and the delta columns (positions 1:4); column 0 of
    each table is skipped.  Ported from pydmc.

    NOTE(review): annotated as Dict but used as an object with ``.caf``
    and ``.delta`` DataFrame attributes — confirm the actual type.
    """
    # relative errors for accuracy (CAF) and RT (delta) tables
    rel_caf = (res_ob.caf.iloc[:, 1:3] - res_th.caf.iloc[:, 1:3]) / res_ob.caf.iloc[:, 1:3]
    rel_rt = (res_ob.delta.iloc[:, 1:4] - res_th.delta.iloc[:, 1:4]) / res_ob.delta.iloc[:, 1:4]

    cost_caf = float((rel_caf ** 2).sum().sum())
    cost_rt = float((rel_rt ** 2).sum().sum())

    return cost_rt + cost_caf

class PDA():
    """Probability Density Approximation (PDA) parameter estimation.

    Pairs observed choice/RT data with a stochastic simulator and exposes
    a synthetic log-likelihood (computed through ``PDA_likelihood_fun``)
    plus an optimizer front-end.

    Parameters
    ----------
    data : np.ndarray
        Observed data of shape (n_trials, 2); column 0 holds RTs and
        column 1 holds response codes (1 / -1).  NaN in column 1 is
        treated as a non-response.
    simulator : Callable
        ``simulator(params, Nsamples)`` returning an array with the same
        two-column layout as ``data``.
    Nsamples : int
        Number of trials simulated per likelihood evaluation.
    include_NA : bool
        If True (and non-responses are present), add a binomial term for
        the observed non-response count.
    """

    def __init__(self, data: np.ndarray, simulator: Callable, Nsamples=2500, include_NA=False):

        self.data = data
        self.simulator = simulator
        self.Nsamples = int(Nsamples)
        self.include_NA = include_NA

    def log_likelihood_func(self, params: np.ndarray):
        """Approximate log-likelihood of ``params`` given ``self.data``.

        Simulates ``Nsamples`` trials, builds a histogram-based density
        per response, and sums the log densities at the observed RTs
        (floored at 1e-10 to avoid -inf).
        """
        Nsamples = self.Nsamples
        data = self.data
        include_NA = self.include_NA
        simulator = self.simulator

        # simulate data from the model at the candidate parameters
        sim_data = simulator(params, Nsamples)
        # BUG FIX: `data[:,1] == np.nan` is always False (NaN never
        # compares equal), so non-responses were never detected and the
        # include_NA branch could never activate; use np.isnan instead.
        n_na = int(np.isnan(data[:, 1]).sum())

        # NOTE(review): Silverman's rule of thumb is
        # 0.9 * min(std, IQR / 1.34) * n ** (-1/5) with n = len(data);
        # here min(IQR, std) and Nsamples are used - confirm intent.
        I_tmp = iqr(data[:, 0])
        S_tmp = np.std(data[:, 0])
        bandwidth = 0.9 * min(I_tmp, S_tmp) * (Nsamples ** -0.2)

        # PDA_likelihood_fun returns one likelihood vector per response
        # and, optionally, the simulated non-response count.
        tmp1 = PDA_likelihood_fun(
            data,
            sim_data,
            include_NA=include_NA and (n_na > 0),
            bandwidth=bandwidth
        )

        # pool density values from whichever responses were observed
        out = 0
        if any(x is None for x in tmp1):
            if tmp1[0] is not None:
                tmp2 = tmp1[0]
            elif tmp1[1] is not None:
                tmp2 = tmp1[1]
            else:
                tmp2 = 0
        else:
            # both responses observed: concatenate their PDF values
            tmp2 = np.concatenate([tmp1[0], tmp1[1]])

        # floor at 1e-10 so zero densities do not produce -inf
        out += np.sum(np.log(np.maximum(tmp2, 1e-10)))
        if (len(tmp1) == 3) and include_NA and (n_na > 0):
            # model the observed non-response count as binomial, with
            # the simulated non-response proportion as the probability
            out += np.maximum(binom.logpmf(n_na, data.shape[0], tmp1[2] / Nsamples), np.log(1e-10))

        return out


    def run_optmize(self, init_params, method='L-BFGS-B', **kwargs):
        """Maximize the PDA log-likelihood starting from ``init_params``.

        method='differential_evolution' runs global optimization (bounds
        required; the ``x0`` argument needs scipy >= 1.7); any other
        method string is forwarded to ``scipy.optimize.minimize``.
        Returns the scipy optimization result object.
        """
        log_likelihood_func = self.log_likelihood_func

        def negative_log_likelihood(params):
            # scipy minimizes, so negate the log-likelihood
            return -log_likelihood_func(params)

        if method == "differential_evolution":
            # NOTE(review): assert is stripped under `python -O`
            assert kwargs.get("bounds", None) is not None, "bounds must be provided for differential_evolution"
            result = opt.differential_evolution(
                negative_log_likelihood,
                x0=init_params,
                **kwargs
            )
        else:
            result = opt.minimize(
                negative_log_likelihood,
                init_params,
                method=method,  # e.g. Nelder-Mead
                **kwargs
            )

        # report the outcome
        if result.success:
            optimized_params = result.x
            print("Optimized parameters:", optimized_params.round(3))
            print("Maximum log-likelihood:", -result.fun)
        else:
            print("Optimization failed:", result.message)

        return result
