import numpy as np
from XlementFitting import FittingOptions

# Sentinel used for overflow / invalid results: just under the float64 maximum
# (numpy.finfo(np.float64).max == 1.7976931348623157e+308).
INF_value = 1.797e+308 # 1.7976931348623157e+308

# One-site association/dissociation kinetics model.
@np.errstate(invalid="raise", over="raise")
def model_all_in_one(
    radioligands: np.ndarray,
    T_array: np.ndarray,
    Bmax_value: float,
    kon_log: float,
    koff_log: float,
    Time0: float,
    BackGround: float = 0.0):
    """Predict the binding signal over time.

    Association kinetics are used for t <= Time0, exponential dissociation
    from the switch-point signal for t > Time0.

    Parameters
    ----------
    radioligands : analyte concentrations (broadcast against T_array)
    T_array : time points
    Bmax_value : maximal response; scalar float or per-curve array
    kon_log, koff_log : log10 of the association / dissociation rate constants
    Time0 : time at which dissociation starts
    BackGround : constant background signal (default 0.0)

    Returns
    -------
    np.ndarray of predicted signals (time excluded); an array filled with
    INF_value when parameters are implausible or the computation overflows.
    """
    R = Bmax_value
    KD_log = koff_log - kon_log

    # Reject implausibly tight affinities outright (log10(KD) < -25).
    if KD_log < -25.0:
        return np.ones_like(T_array) * INF_value

    try:
        # Convert log10 parameters back to linear rate constants.
        kon = np.power(10, kon_log)
        koff = np.power(10, koff_log)

        # Observed (pseudo-first-order) rate constant; longdouble delays overflow.
        Kob = np.longdouble(radioligands * kon + koff)

        # Per-curve Bmax: reshape to a column so it broadcasts over time points.
        if not isinstance(R, float):
            R = R.reshape(-1, 1)  # type: ignore

        # Signal at the association/dissociation switch point.
        YatTime0 = (radioligands * R * kon +
                    np.exp(-Kob * Time0) * (BackGround * koff + radioligands * (-R + BackGround) * kon)) / Kob

        # Phase masks: dissociation (t > Time0) vs association (t <= Time0).
        T_flag_diss = np.float32(T_array > Time0)
        T_flag_ass = np.float32(T_array <= Time0)

        # Association curve while t <= Time0, exponential decay from YatTime0 after.
        Y_pred = (radioligands * R * kon + np.exp(-Kob * T_array) *
                  (BackGround * koff + radioligands * (-R + BackGround) * kon)) / Kob * T_flag_ass + \
                  YatTime0 * np.exp(-1 * koff * (T_array - Time0)) * T_flag_diss
    except FloatingPointError:
        # errstate raised on overflow/invalid: report a uniformly huge signal.
        Y_pred = np.ones_like(T_array) * INF_value

    return Y_pred

# Loss for the all-in-one kinetics model.
@np.errstate(invalid="raise", over="raise")
def loss_all_in_one(
    params,
    # A_data / T_data / Y_data must share the same shape
    A_data: np.ndarray,
    T_data: np.ndarray,
    Y_data: np.ndarray,
    T_break: float,
    bg: float = 0.0,
    split_flag: bool = False):
    """Sum-of-squared-residuals loss for model_all_in_one.

    Parameters
    ----------
    params : flat array; params[:-2] are the per-curve R_max values,
        params[-2] is log10(kon) and params[-1] is log10(koff)
    A_data : concentration data (a vector, not a single value)
    T_data : time data
    Y_data : measured signal data (NaN entries are ignored)
    T_break : association/dissociation split time
    bg : constant background forwarded to the model
    split_flag : when True, return one loss value per row instead of a total

    Returns
    -------
    Per-row losses (split_flag=True) or the scalar total loss; INF_value when
    the total overflows.
    """
    R_max_array = params[:-2]
    ka = params[-2]
    kd = params[-1]
    Y_predictions = model_all_in_one(A_data, T_data, R_max_array, ka, kd, T_break, BackGround=bg)
    residuals = Y_predictions - Y_data

    # Missing observations (NaN in Y_data) contribute nothing to the loss.
    residuals[np.isnan(Y_data)] = 0.0

    # Per-row losses if requested.
    if split_flag:
        # Cap residual magnitude so squaring + summing cannot overflow float64.
        INF_root = np.power(10, np.log10(INF_value) / 3.0)
        # BUGFIX: clip on BOTH sides — the previous code only capped positive
        # residuals, so a large negative residual could still overflow (and
        # raise under over="raise") when squared.
        residuals_limited = np.clip(residuals, -INF_root, INF_root)
        Loss = np.sum(np.square(residuals_limited), axis=1)
        return Loss

    # Total residual sum of squares.
    try:
        Loss = np.sum(np.square(residuals))
    except FloatingPointError:
        # Overflow during accumulation: report the sentinel maximum.
        Loss = INF_value

    return Loss

# Smooth penalty that is ~0 inside [lower_bound, upper_bound] and ~1 outside.
def punish_function(p, lower_bound = -10.0, upper_bound = 0.0, k = 10):
    """Two-sided logistic penalty on p.

    Approximately 0 for lower_bound < p < upper_bound and rises smoothly to
    ~1 outside that window; k controls the steepness of the transition.
    Inputs with |p| > 20 short-circuit to 1.0 before np.exp can overflow.
    """
    if np.abs(p) > 20:
        return 1.0
    below = 1 / (1 + np.exp(-k * (p - lower_bound)))
    above = 1 / (1 + np.exp(-k * (upper_bound - p)))
    return 2.0 - below - above

# Kinetic loss with an added penalty/regularization term on log10(KD).
def loss_punished(
    params,
    A_data: np.ndarray,
    T_data: np.ndarray,
    Y_data: np.ndarray,
    T_break: float,
    options: "FittingOptions | None" = None,
    bg: float = 0.0,):
    """Data-fit loss plus a smooth penalty keeping log10(KD) in a plausible window.

    BUGFIX: the previous signature used a FittingOptions instance as the
    default value; it was constructed once at import time and shared by every
    call (the classic mutable-default-argument pitfall). A fresh, equivalent
    instance is now created per call when the caller does not supply one.
    """
    if options is None:
        options = FittingOptions({'eps': 1e-3, 'init_params': [1.5, 4, -4]})

    # The last two parameters are log10(kon) and log10(koff); their difference
    # is log10(KD), the quantity being penalized.
    ka_log = params[-2]
    kd_log = params[-1]
    kD_log = kd_log - ka_log

    # Plain data-fit loss.
    real_loss = loss_all_in_one(params, A_data, T_data, Y_data, T_break, bg=bg)

    # Penalty term, scaled by the number of data points and the lambda factor.
    punishment = punish_function(
        kD_log,
        lower_bound=options.get_punish_lower(),
        upper_bound=options.get_punish_upper(),
        k=options.get_punish_k() # type: ignore
        ) * Y_data.size * options.get_punish_lam()
    return real_loss + punishment

# Steady-state (equilibrium) binding model.
def balance_model(
    concentrations,
    Rmax,
    Affinity
):
    """Return the equilibrium response Rmax*C / (C + Affinity) per concentration."""
    numerator = Rmax * concentrations
    denominator = concentrations + Affinity
    return numerator / denominator

# Loss for steady-state fitting.
def balance_loss(
    params,
    y_real,
    concentrations,
    L1_regularized: bool = False # when enabled, the affinity itself is added to the loss
):
    """Mean per-column sum-of-squares loss for the steady-state model.

    Parameters
    ----------
    params : (Rmax, log10(affinity)) pair
    y_real : observed signals; columns are averaged over via y_real.shape[1]
    concentrations : analyte concentrations matching y_real
    L1_regularized : when True, add the linear affinity to the loss so very
        large affinity values are discouraged

    Returns
    -------
    Scalar loss; INF_value when the affinity parameter is out of range.
    """
    Rmax, Affinity_log = params
    # 10**100 is already absurd — bail out before np.power can overflow.
    if Affinity_log > 100:
        return INF_value
    try:
        affinity = np.power(10, float(Affinity_log))
    except FloatingPointError:
        return INF_value

    predictions = balance_model(
        concentrations=concentrations,
        Rmax=Rmax,
        Affinity=affinity
    )
    # BUGFIX: removed the unused `variance = np.var(y_real, axis=0)` line —
    # it was computed on every call and never read (dead code).
    residuals = predictions - y_real
    Loss = np.sum(np.square(residuals), axis=0)
    mean_loss = np.sum(Loss) / y_real.shape[1]
    if not L1_regularized:
        return mean_loss
    else:
        return mean_loss + affinity
    
# Steady-state affinity-curve loss.
def affinity_equation_loss(Rmax, Req, A_concentration, KD):
    """Weighted SSE between measured and predicted steady-state responses.

    Residuals are weighted by normalized concentration (higher concentrations
    count more, pushing the fit toward larger Rmax). Returns INF_value when
    Rmax does not exceed the largest observed response, and adds a penalty
    when Rmax sits too close to it.
    """
    predicted_Req = (A_concentration * Rmax) / (A_concentration + KD)

    # Rmax must exceed every observed response, otherwise the model is invalid.
    if Rmax <= np.max(Req):
        return INF_value

    # Normalized concentrations act as per-point weights.
    weights = A_concentration / np.max(A_concentration)
    weighted_residuals = (predicted_Req - Req) * weights
    affinity_loss = np.sum(weighted_residuals ** 2)

    # Penalize an Rmax that nearly coincides with the largest observed signal.
    try:
        affinity_loss += 10.0 / ((Rmax - np.max(Req)))
    except FloatingPointError as e:
        return INF_value
    return affinity_loss