# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from XlementFitting import FittingOptions
from XlementFitting.FileProcess.Json2Data import transform_dataframe
from XlementFitting.FileProcess.ExcelandImage import excel_output, save_output_img
from .ModelandLossOneTwo import (
    Model12Config, loss12_all_in_one, punish12
)
from .ModelandLoss import affinity_equation_loss, INF_value

__all__ = ["GlobalBivariate12"]

def _pack_x0_bounds(M, options: FittingOptions, cfg: Model12Config):
    """
    把初值与边界打包为向量形式，兼容两种 1:2 子模型。
    默认把每个浓度的 Rmax 初值设为同一值（例如 1.0 或 max(Y)），边界 [0, +inf)。
    ka/kd 初值与边界来自 options.init_params_list() 的最后一组（或第一组）。
    """
    init_list = options.get_init_params_list()
    if len(init_list) == 0:
        # 1:2 独立位点： [Rmax1, Rmax2, ka1_log, kd1_log, ka2_log, kd2_log]
        # 顺序两步：     [Rmax, ka1_log, kd1_log, ka2_log, kd2_log]
        default = [1.0, 5.0, -3.0, 4.5, -2.5]  # 备选
        init_list = [default]

    ip = np.asarray(init_list[-1], dtype=float)

    if cfg.model_type == "independent_two_sites":
        # ip: [ka1_log, kd1_log, ka2_log, kd2_log] 或 [Rhint?, ka1, kd1, ka2, kd2]
        # 兼容你的 1:1 习惯：init_params 里前面常放 Rhint，但 1:2 我们只读取后 4 个
        if ip.size >= 5:
            ka1_log, kd1_log, ka2_log, kd2_log = ip[-4], ip[-3], ip[-2], ip[-1]
        else:
            ka1_log, kd1_log, ka2_log, kd2_log = 5.0, -3.0, 4.5, -2.5
        # 向量化参数：先放 M 个 Rmax1，再放 M 个 Rmax2，最后 4 个 log
        x0 = np.r_[np.ones(M)*1.0, np.ones(M)*0.6, [ka1_log, kd1_log, ka2_log, kd2_log]]
        lb = np.r_[np.zeros(M), np.zeros(M), [3.0, -6.0, 3.0, -6.0]]
        ub = np.r_[np.ones(M)*np.inf, np.ones(M)*np.inf, [7.5, 0.0, 7.5, 0.0]]
        return x0, lb, ub

    else:  # sequential
        if ip.size >= 5:
            ka1_log, kd1_log, ka2_log, kd2_log = ip[-4], ip[-3], ip[-2], ip[-1]
        else:
            ka1_log, kd1_log, ka2_log, kd2_log = 5.0, -3.0, 4.5, -2.5
        x0 = np.r_[np.ones(M)*1.2, [ka1_log, kd1_log, ka2_log, kd2_log]]
        lb = np.r_[np.zeros(M), [3.0, -6.0, 3.0, -6.0]]
        ub = np.r_[np.ones(M)*np.inf, [7.5, 0.0, 7.5, 0.0]]
        return x0, lb, ub

def GlobalBivariate12(
    data_frame: pd.DataFrame,
    time0: float = -1,
    options: FittingOptions = None,
    write_file: bool = True,
    save_png: bool = False,
    excel_path: str = 'Result',
    png_path: str = 'Output',
    model12_type: str = "independent_two_sites",   # or "sequential_two_step"
    seq_use_R1_plus_2R2: bool = True
):
    """
    Global 1:2 fitting entry point (mirrors the 1:1 GlobalBivariate API).

    - Every column's Rmax (or Rmax1/Rmax2) is a free parameter;
    - kon/koff are optimized as log10 values (two sets);
    - penalty / bounds follow the 1:1 style;
    - output fields are aligned with the 1:1 results, plus both KDs.

    Parameters
    ----------
    data_frame : sensorgram table, decoded by transform_dataframe().
    time0 : association/dissociation split time (same convention as 1:1).
    options : fitting options; a fresh FittingOptions() is created when None.
        (The original signature used `= FittingOptions()`, a shared mutable
        default reused across every call.)
    write_file, save_png, excel_path, png_path : export controls
        (export is currently disabled — see TODO at the end).
    model12_type : "independent_two_sites" or anything else for
        "sequential_two_step".
    seq_use_R1_plus_2R2 : readout convention for the sequential model.

    Returns
    -------
    (Results, r_path, i_path) : result dict, transposed prediction matrix,
    and image path ('' while export is disabled).
    """
    if options is None:
        options = FittingOptions()

    # Decode input matrices (rows = concentration curves, columns = time,
    # judging by the Y_data[:, time0_rank] / A_data[:, 0] usage below).
    Y_data, A_data, T_data = transform_dataframe(data_frame)
    R_guess = np.nanmax(Y_data)
    if not np.isfinite(R_guess) or R_guess <= 0:
        R_guess = 1.0  # degenerate/empty signal: avoid divide-by-zero below
    M = A_data.shape[1]

    # Model configuration.
    cfg = Model12Config(
        model_type="independent_two_sites" if model12_type == "independent_two_sites"
                   else "sequential_two_step",
        sequential_use_R1_plus_2R2=seq_use_R1_plus_2R2,
        background=0.0
    )

    x0, lb, ub = _pack_x0_bounds(M, options, cfg)
    n_params = x0.size  # 2*M + 4 (independent) or M + 4 (sequential)

    # Constraint: keep both log10-KD values (kd_log - ka_log) within
    # KD_bound on average.  Both model types place the 4 log-rate params at
    # the vector tail, so one implementation covers both (the original had
    # two byte-identical branches).  SLSQP 'ineq' means fun(p) >= 0.
    KD_bound = options.get_KD_bound()

    def _cons_fun(p):
        ka1, kd1, ka2, kd2 = p[-4], p[-3], p[-2], p[-1]
        return (kd1 - ka1 - KD_bound) + (kd2 - ka2 - KD_bound)

    cons = ({'type': 'ineq', 'fun': _cons_fun},)

    # Optimize on normalized data (Y / R_guess) so Rmax parameters stay O(1).
    res = minimize(
        lambda p: punish12(p, A_data, T_data, Y_data/R_guess, time0, options, cfg),
        x0,
        method="SLSQP",
        bounds=list(zip(lb, ub)),
        constraints=cons,
        options={'eps': options.get_eps()}
    )

    # De-normalize the Rmax block (first M or 2*M entries) back to signal
    # units; the trailing 4 log-rate entries are scale-free.
    n_rmax = 2*M if cfg.model_type == "independent_two_sites" else M
    p_opt = res.x.copy()
    p_opt[:n_rmax] *= R_guess
    ka1_log, kd1_log, ka2_log, kd2_log = p_opt[-4], p_opt[-3], p_opt[-2], p_opt[-1]
    kon1, koff1 = np.power(10, ka1_log), np.power(10, kd1_log)
    kon2, koff2 = np.power(10, ka2_log), np.power(10, kd2_log)
    KD1, KD2 = koff1/kon1, koff2/kon2

    # Per-curve losses on the ORIGINAL (un-normalized) data, using the
    # de-normalized parameter vector.
    Loss_split = loss12_all_in_one(p_opt, A_data, T_data, Y_data, time0,
                                   options, cfg, split_flag=True)

    # Recompute the prediction matrix for export/plotting (direct model
    # evaluation; does not go through the loss).
    from .ModelandLossOneTwo import model12_independent_all_in_one, model12_sequential_all_in_one
    if cfg.model_type == "independent_two_sites":
        Rmax1_vec = p_opt[:M]
        Rmax2_vec = p_opt[M:2*M]
        Y_pred = model12_independent_all_in_one(
            A_data, T_data,
            Rmax1_vec, Rmax2_vec,
            ka1_log, kd1_log, ka2_log, kd2_log,
            time0, BackGround=0.0
        )
    else:
        Rmax_vec = p_opt[:M]
        Y_pred = model12_sequential_all_in_one(
            A_data, T_data,
            Rmax_vec,
            ka1_log, kd1_log, ka2_log, kd2_log,
            time0, use_R1_plus_2R2=cfg.sequential_use_R1_plus_2R2,
            BackGround=0.0
        )

    # R2 / Chi2 statistics (aligned with the 1:1 pipeline).
    # Per-curve TSS must use each curve's OWN mean (axis=1, keepdims); the
    # original subtracted the cross-curve mean (axis=0) before summing along
    # axis=1, which is not the total sum of squares that R2 is defined
    # against.  NOTE(review): fix assumes rows are curves — confirm against
    # transform_dataframe.
    TSS_array = np.nansum((Y_data - np.nanmean(Y_data, axis=1, keepdims=True))**2.0, axis=1)
    Loss_total = np.nansum(Loss_split)
    R2 = 1.0 - Loss_total/np.nansum((Y_data - np.nanmean(Y_data))**2.0)
    R2_array = 1.0 - Loss_split/TSS_array

    # Pseudo-Rmax: reuse the 1:1 steady-state affinity equation with the
    # geometric-mean KD of the two sites as an auxiliary statistic.
    time0_rank = np.abs(T_data[0, :] - time0).argmin()
    Req = Y_data[:, time0_rank]
    A_concentration = A_data[:, time0_rank]
    KD_geo = np.sqrt(KD1*KD2)
    result_pseudo = minimize(
        affinity_equation_loss,
        R_guess*(np.nanmax(A_concentration)+KD_geo)/np.nanmax(A_concentration),
        args=(Req, A_concentration, KD_geo)
    )
    pseudoRmax = result_pseudo.x[0]

    # Result dict (field names follow the 1:1 output as closely as possible).
    Results = {
        "Conc": A_data[:, 0].tolist(),
        "Loss": Loss_split.tolist(),
        "Global R2": [R2],
        "R2": R2_array.tolist(),
        # Reduced chi-square: degrees of freedom now use the ACTUAL parameter
        # count (2*M+4 or M+4); the original hard-coded 2*M+4 for both models.
        "Global Chi2": [Loss_total/(Y_data.size - n_params)],
        "pseudoRmax": [pseudoRmax],
        "kon1": [kon1], "koff1": [koff1], "KD1": [KD1],
        "kon2": [kon2], "koff2": [koff2], "KD2": [KD2],
    }

    if cfg.model_type == "independent_two_sites":
        Results["Rmax1"] = [Rmax1_vec.tolist()]
        Results["Rmax2"] = [Rmax2_vec.tolist()]
    else:
        Results["Rmax"] = [Rmax_vec.tolist()]

    # Export is disabled for now: the original template below needs a
    # file_path that this function does not receive.
    r_path = Y_pred.T
    i_path = ''
    # TODO: wire up once a file_path is available:
    # if write_file:
    #     r_path = excel_output(
    #         file_path, [Y_data, A_data, T_data, R_guess], time0=time0,
    #         results=Results, Y_pred=Y_pred,
    #         target_dir=excel_path, global_flag='G12'
    #     )
    # if save_png:
    #     i_path = save_output_img(
    #         file_path, T_data=T_data, Y_data=Y_data, Y_pred=Y_pred,
    #         Concs=A_data, res=Results, target_dir=png_path, global_flag='G12'
    #     )
    return Results, r_path, i_path
