# -*- coding: utf-8 -*-
import numpy as np
from dataclasses import dataclass
from typing import Literal
from XlementFitting import FittingOptions
from .ModelandLoss import INF_value  # reuse the shared INF sentinel

Model12Type = Literal["independent_two_sites", "sequential_two_step"]

@dataclass
class Model12Config:
    """Configuration for the two-site (1:2) binding models.

    Selects which two-site model is evaluated and how the readout
    signal is composed from the individual bound states.
    """
    # Which 1:2 model to evaluate: two independent sites in parallel,
    # or a sequential two-step binding mechanism.
    model_type: Model12Type = "independent_two_sites"
    # Sequential model readout: R = R1 + 2*R2 when True, R = R1 + R2 when False.
    sequential_use_R1_plus_2R2: bool = True
    # Constant background signal passed through to the model functions.
    background: float = 0.0

def _exp_safe(x):
    # 避免溢出
    x = np.clip(x, -700, 700)
    return np.exp(x)

@np.errstate(invalid="raise", over="raise")
def model12_independent_all_in_one(
    radioligands: np.ndarray,   # A_data (T×M): analyte concentration per point
    T_array: np.ndarray,        # T_data (T×M): time per point
    Rmax1_array: np.ndarray,    # per-column Rmax of site 1, shape (M,) or (1, M)
    Rmax2_array: np.ndarray,    # per-column Rmax of site 2, same shape rules
    kon1_log: float,
    koff1_log: float,
    kon2_log: float,
    koff2_log: float,
    Time0: float,
    BackGround: float = 0.0
):
    """
    Two independent binding sites in parallel: R = R1 + R2.

    Reuses the closed-form 1:1 solution (association and dissociation
    segments) separately for (kon1, koff1, Rmax1) and (kon2, koff2, Rmax2),
    then sums the two contributions.

    Rate constants are given in log10 space; `Time0` is the break time
    between segments. Points with T <= Time0 use the association solution;
    points with T > Time0 decay exponentially from the value at Time0.
    Returns an array shaped like `radioligands`.
    """
    # Convert rate constants out of log10 space.
    kon1, koff1 = np.power(10, kon1_log), np.power(10, koff1_log)
    kon2, koff2 = np.power(10, kon2_log), np.power(10, koff2_log)

    # Normalize both Rmax vectors to (1, M) rows in extended precision.
    # Fix: the original cast to longdouble only on the already-2-D branch,
    # so 1-D inputs silently kept their incoming dtype.
    R1 = np.atleast_2d(np.asarray(Rmax1_array, dtype=np.longdouble))
    R2 = np.atleast_2d(np.asarray(Rmax2_array, dtype=np.longdouble))

    A = np.longdouble(radioligands)
    T = np.longdouble(T_array)

    # Observed rate constant of each site (A, T already longdouble, so
    # these stay in extended precision without a redundant re-cast).
    Kob1 = A * kon1 + koff1
    Kob2 = A * kon2 + koff2

    # Association segment: closed-form 1:1 solution per site.
    Y1_ass = (A*R1*kon1 + _exp_safe(-Kob1*T)*(BackGround*koff1 + A*(-R1+BackGround)*kon1))/Kob1
    Y2_ass = (A*R2*kon2 + _exp_safe(-Kob2*T)*(BackGround*koff2 + A*(-R2+BackGround)*kon2))/Kob2

    # Value at Time0 — initial condition for the dissociation segment.
    Y1_t0 = (A*R1*kon1 + _exp_safe(-Kob1*Time0)*(BackGround*koff1 + A*(-R1+BackGround)*kon1))/Kob1
    Y2_t0 = (A*R2*kon2 + _exp_safe(-Kob2*Time0)*(BackGround*koff2 + A*(-R2+BackGround)*kon2))/Kob2

    # Dissociation segment: pure exponential decay from the Time0 value.
    Y1_diss = Y1_t0 * _exp_safe(-koff1 * (T - Time0))
    Y2_diss = Y2_t0 * _exp_safe(-koff2 * (T - Time0))

    # Blend the two segments with complementary 0/1 masks on the break time.
    T_flag_diss = (T > Time0).astype(np.float64)
    T_flag_ass = 1.0 - T_flag_diss

    return (Y1_ass + Y2_ass) * T_flag_ass + (Y1_diss + Y2_diss) * T_flag_diss

def model12_sequential_all_in_one(
    radioligands: np.ndarray,   # (M×T): rows = concentrations, columns = time points
    T_array: np.ndarray,        # (M×T): each row holds that trace's time series
    Rmax_array: np.ndarray,     # (M,): one Rmax per concentration row
    kon1_log: float,
    koff1_log: float,
    kon2_log: float,
    koff2_log: float,
    Time0: float,
    use_R1_plus_2R2: bool = True,
    BackGround: float = 0.0
):
    """
    Sequential two-step binding model, integrated numerically per trace.

    Mechanism (see `rhs`): free sites convert to R1 at rate kon1*C*free,
    R1 converts to R2 at rate kon2*C*R1, with back-rates koff1 and koff2.
    Association (C = row concentration) covers t <= Time0; dissociation
    (C = 0) continues from the last association state for t > Time0.

    Returns Y = R1 + 2*R2 (if `use_R1_plus_2R2`) or R1 + R2, shaped like
    `radioligands`. Segments with fewer than two time points, or where the
    solver fails, are left as zeros.

    NOTE(review): `BackGround` is accepted but never applied to Y here,
    unlike the independent-sites model — confirm this is intentional.
    """
    from scipy.integrate import solve_ivp

    # Rate constants are supplied in log10 space.
    kon1, koff1 = 10**kon1_log, 10**koff1_log
    kon2, koff2 = 10**kon2_log, 10**koff2_log

    M = radioligands.shape[0]
    Y = np.zeros_like(radioligands, dtype=float)

    def rhs(t, y, C, Rmax):
        # ODE right-hand side for one trace at analyte concentration C.
        R1, R2 = y
        free = Rmax - R1 - R2
        v1_fwd = kon1 * C * free    # free site -> R1
        v1_rev = koff1 * R1         # R1 -> free site
        v2_fwd = kon2 * C * R1      # R1 -> R2
        v2_rev = koff2 * R2         # R2 -> R1
        return [v1_fwd - v1_rev - v2_fwd + v2_rev, v2_fwd - v2_rev]

    # Each row is one concentration trace.
    for j in range(M):
        Cj = float(radioligands[j, 0])    # concentration is constant per row
        Rmaxj = float(Rmax_array[j])
        T = T_array[j, :]

        mask_ass = T <= Time0
        mask_diss = T > Time0

        y_t0 = [0.0, 0.0]  # start with nothing bound
        if mask_ass.sum() >= 2:
            t_ass = T[mask_ass]
            sol1 = solve_ivp(lambda t, y: rhs(t, y, Cj, Rmaxj),
                              (t_ass[0], t_ass[-1]), y_t0,
                              method="LSODA", t_eval=t_ass)
            if sol1.success and sol1.y.shape[0] == 2:
                R1_ass, R2_ass = sol1.y
                Y_ass = R1_ass + (2.0*R2_ass if use_R1_plus_2R2 else R2_ass)
                Y[j, mask_ass] = Y_ass
                # Final association state seeds the dissociation segment.
                y_t0 = [float(R1_ass[-1]), float(R2_ass[-1])]

        if mask_diss.sum() >= 2:
            t_diss = T[mask_diss]
            # Dissociation: same ODE with C = 0 (no free analyte).
            sol2 = solve_ivp(lambda t, y: rhs(t, y, 0.0, Rmaxj),
                              (t_diss[0], t_diss[-1]), y_t0,
                              method="LSODA", t_eval=t_diss)
            if sol2.success and sol2.y.shape[0] == 2:
                R1_diss, R2_diss = sol2.y
                Y_diss = R1_diss + (2.0*R2_diss if use_R1_plus_2R2 else R2_diss)
                Y[j, mask_diss] = Y_diss

    return Y

# ---------- Shared loss (aligned with the 1:1 loss_all_in_one) ----------
@np.errstate(invalid="raise", over="raise")
def loss12_all_in_one(
    params,
    A_data: np.ndarray,
    T_data: np.ndarray,
    Y_data: np.ndarray,
    T_break: float,
    options: FittingOptions,
    cfg: Model12Config,
    split_flag: bool = False
):
    """
    Sum-of-squares loss shared by both 1:2 models (mirrors the 1:1
    loss_all_in_one).

    Parameter packing depends on the model type:
    - independent_two_sites:
        params = [Rmax1 per column..., Rmax2 per column...,
                  kon1_log, koff1_log, kon2_log, koff2_log]
    - sequential_two_step:
        params = [Rmax per column..., kon1_log, koff1_log, kon2_log, koff2_log]

    When `split_flag` is True, returns per-row sums of clipped squared
    residuals; otherwise a scalar total. Returns INF_value when either
    KD_log = koff_log - kon_log is below -25, or on overflow.
    """
    M = A_data.shape[1]
    kon1_log, koff1_log, kon2_log, koff2_log = params[-4], params[-3], params[-2], params[-1]

    # Guard against extreme KD values (identical in both model branches of
    # the original, so it is hoisted here).
    if (koff1_log - kon1_log < -25.0) or (koff2_log - kon2_log < -25.0):
        return INF_value if not split_flag else np.ones(A_data.shape[0])*INF_value

    if cfg.model_type == "independent_two_sites":
        Rmax1 = np.asarray(params[:M])
        Rmax2 = np.asarray(params[M:2*M])
        Y_pred = model12_independent_all_in_one(
            A_data, T_data, Rmax1, Rmax2,
            kon1_log, koff1_log, kon2_log, koff2_log,
            T_break, BackGround=cfg.background
        )
    else:
        Rmax = np.asarray(params[:M])
        Y_pred = model12_sequential_all_in_one(
            A_data, T_data, Rmax,
            kon1_log, koff1_log, kon2_log, koff2_log,
            T_break, use_R1_plus_2R2=cfg.sequential_use_R1_plus_2R2,
            BackGround=cfg.background
        )

    residuals = Y_pred - Y_data
    residuals[np.isnan(Y_data)] = 0.0  # missing observations contribute nothing

    if split_flag:
        # Clip the residual MAGNITUDE so squaring cannot overflow.
        # Fix: the original clipped only the positive side, so a large
        # negative residual could still overflow when squared.
        INF_root = np.power(10, np.log10(INF_value)/3.0)
        res_limited = np.clip(residuals, -INF_root, INF_root)
        return np.sum(np.square(res_limited), axis=1)

    try:
        return np.sum(np.square(residuals))
    except FloatingPointError:
        return INF_value

def punish12(
    params,
    A_data: np.ndarray,
    T_data: np.ndarray,
    Y_data: np.ndarray,
    T_break: float,
    options: FittingOptions,
    cfg: Model12Config
):
    """
    Penalized loss, mirroring the 1:1 loss_punished: the plain
    loss12_all_in_one value plus an S-shaped (double-sigmoid) penalty on
    both KD_log = koff_log - kon_log values.

    The original's if/else on cfg.model_type contained two byte-identical
    branches, so the dispatch is dropped here — loss12_all_in_one already
    dispatches on cfg.model_type internally.
    """
    kon1_log, koff1_log, kon2_log, koff2_log = params[-4], params[-3], params[-2], params[-1]
    kd1_log = koff1_log - kon1_log
    kd2_log = koff2_log - kon2_log

    punish_k = options.get_punish_k()
    lower = options.get_punish_lower()
    upper = options.get_punish_upper()

    def _pf(p):
        # Double-sigmoid penalty; |p| > 20 short-circuits to the saturated
        # value 1.0 so the exp calls cannot overflow.
        if np.abs(p) > 20:
            return 1.0
        return 2.0 - 1/(1+np.exp(-punish_k*(p-lower))) - 1/(1+np.exp(-punish_k*(-p+upper)))

    real = loss12_all_in_one(params, A_data, T_data, Y_data, T_break, options, cfg, split_flag=False)
    penalty = (_pf(kd1_log) + _pf(kd2_log)) * Y_data.size * options.get_punish_lam()
    return real + penalty
