import numpy as np
import pandas as pd
from pathlib import Path
import scipy
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from XlementFitting import FittingOptions
from XlementFitting.FileProcess.Json2Data import transform_dataframe
from .FunctionalBivariate11 import local_data_generator, Bivariate11_for_local, get_geometric_mean
from XlementFitting.ModelandLoss import model_all_in_one, loss_all_in_one, loss_punished, affinity_equation_loss, INF_value

__all__ = ["SingelCycleFitting3"]

def process_dataframe(df):
    """Left-align each signal column so its data begins right after row 0.

    Row 0 is assumed to hold per-column metadata (e.g. the concentration) and
    is always preserved.  For every column after 'XValue', if the values from
    row 1 onward form one contiguous non-NaN run, that run is shifted up to
    start at row 1 and the shift amount is recorded.  Rows whose non-'XValue'
    cells are all NaN afterwards are dropped.

    Returns:
        tuple: (aligned DataFrame with a fresh RangeIndex,
                dict mapping column name -> number of rows shifted,
                number of all-NaN rows removed)
    """
    aligned = df.copy()
    shifts = {}

    # Every column except the leading 'XValue' time axis.
    for col in aligned.columns[1:]:
        header_value = aligned.loc[0, col]  # keep the row-0 metadata intact
        valid_idx = aligned.loc[1:, col].dropna().index

        # Only shift when the non-NaN values form a single contiguous block.
        if len(valid_idx) > 0 and np.all(np.diff(valid_idx) == 1):
            shifts[col] = valid_idx[0] - 1
            # Move the data block up so it starts at row 1 ...
            aligned.loc[1:, col] = aligned[col].shift(-shifts[col]).iloc[1:]
            # ... then restore the metadata row clobbered by the shift.
            aligned.loc[0, col] = header_value
        else:
            shifts[col] = 0

    # Drop rows where every signal column is NaN, remembering how many went.
    n_before = len(aligned)
    aligned = aligned.dropna(subset=aligned.columns[1:], how='all')
    dropped = n_before - len(aligned)

    # offsets['last_nan_row_num'] = rows_removed

    return aligned.reset_index(drop=True), shifts, dropped

# 主要接口 计算都在这里
# 加入Background的拟合
def single_cycle_init(
    Data: np.ndarray,
    time0: float = -1,
    init_params: list = None,
    options: FittingOptions = None):
    """Run one SLSQP fit of the single-cycle binding model (with background).

    Parameters
    ----------
    Data : sequence
        (Y_data, A_data, T_data, R_guess): signal matrix, concentration
        matrix, time matrix and the maximal signal used to normalise Y_data.
    time0 : float
        Dissociation start time forwarded to the loss functions.
    init_params : list, optional
        Initial guess [R, log10(ka), log10(kd)]; defaults to [1.5, 4, -4].
    options : FittingOptions, optional
        Fitting options (step size `eps`, KD bound, ...); a fresh default
        instance is created when omitted.

    Returns
    -------
    dict
        Keys "Rmax", "kon", "koff", "KD" and "Loss" (per-column losses).
    """
    # The previous defaults were a shared mutable list and a single
    # FittingOptions instance created at import time; build them per call
    # instead so one call can never leak state into the next.
    if init_params is None:
        init_params = [1.5, 4, -4]
    if options is None:
        options = FittingOptions({'eps': 1e-3, 'init_params': [1.5, 4, -4]})

    # Unpack the data bundle.
    Y_data, A_data, T_data, R_guess = Data
    initial_guess = [init_params[0], init_params[1], init_params[2]]
    # print(f"FB11, Bivariate_init: Y_data:{Y_data.shape}, A_data:{A_data.shape}, T_data:{T_data.shape}, R_guess:{R_guess}")

    # Constraint: log10(kd) - log10(ka) >= KD_bound, i.e. a lower bound on log10(KD).
    KD_bound = options.get_KD_bound()
    cons = ({'type': 'ineq', 'fun': lambda p: p[2] - p[1] - KD_bound})

    eps = options.get_eps()
    result = minimize(loss_punished,
                      initial_guess,
                      args=(A_data,
                            T_data,
                            Y_data/R_guess,  # Y_data normalised by R_guess
                            time0,
                            options,
                            Y_data[0, 0]),
                      method='SLSQP',
                      constraints=cons,
                      options={'eps': eps})

    R_opt, ka_opt_log, kd_opt_log = result.x
    result.x[0] *= R_guess  # undo the normalisation on the fitted Rmax
    ka_opt, kd_opt = np.power(10, (ka_opt_log, kd_opt_log))
    # Re-evaluate the loss on the de-normalised parameters, split per column.
    Loss = loss_all_in_one(
        result.x,
        A_data,
        T_data,
        Y_data,
        time0,
        split_flag=True)

    Results = {"Rmax": R_opt*R_guess,
               "kon": ka_opt,
               "koff": kd_opt,
               "KD": kd_opt/ka_opt,
               "Loss": Loss}
    return Results

def SingleCycle_for_local(
    Data: pd.DataFrame,
    time0: float,
    options: FittingOptions):
    """Fit one concentration group and return (Results, Y_pred).

    Handles traces whose starting signal is not zero: the first signal value
    is subtracted before fitting and added back onto the model prediction
    afterwards.  (Fitting the background inside the model itself is not yet
    supported, 241109.)  Several initial guesses are tried and the fit with
    the smallest nan-aware total loss wins.
    """
    Y_data, A_data, T_data = transform_dataframe(Data)
    R_guess = np.nanmax(Y_data)
    signal_start = Y_data[0, 0]  # baseline removed before fitting
    # print(f"Single Cycle Local: Y_data first row:{signal_start}")

    bundle = [Y_data - signal_start, A_data, T_data, R_guess]

    # Try the fixed seed first, then every configured initial guess; a stable
    # min keeps the earliest candidate on ties, matching a strict-improvement
    # scan over the same evaluation order.
    seeds = [[1.0, 4, 0]] + list(options.get_init_params_list())
    candidates = [single_cycle_init(bundle, time0, seed, options) for seed in seeds]
    Results = min(candidates, key=lambda fit: np.nansum(fit["Loss"]))

    # Rebuild the model curve and restore the baseline offset.
    Y_pred = model_all_in_one(
        A_data,
        T_data,
        Results["Rmax"],
        np.log10(Results["kon"]),
        np.log10(Results["koff"]),
        time0) + signal_start

    Results['Loss'] = Results['Loss'][0]
    # R^2 against the total sum of squares of the raw (un-shifted) signal.
    TSS_array = np.nansum((Y_data - np.nanmean(Y_data))**2.0, axis=0)
    Results["R2"] = 1.0 - np.nansum(Results['Loss'])/np.nansum(TSS_array)
    # Reduced chi-square with 3 fitted parameters.
    Results["Chi2"] = Results['Loss']/(Y_data.size - 3)

    return Results, Y_pred

def SingleCycleFitting3(
    data_frame : pd.DataFrame,
    time0_dict : dict,
    options : FittingOptions
):
    """Main entry point: fit every concentration trace of a single-cycle run.

    The raw frame is aligned (process_dataframe), each concentration group is
    fitted on its own (SingleCycle_for_local), and the per-group results are
    aggregated into global Rmax / kon / koff / R2 / Chi2 figures.

    Parameters
    ----------
    data_frame : pd.DataFrame
        Column 0 is 'XValue' (the time axis); every other column is one
        trace, with row 0 holding the trace's concentration.
        NOTE(review): this frame is mutated in place below (its 'XValue'
        column is re-zeroed) — confirm callers expect that.
    time0_dict : dict
        Column name -> dissociation start time of that trace.
    options : FittingOptions
        Fitting configuration forwarded to the per-trace fits.

    Returns
    -------
    tuple
        (Results: dict of per-trace lists plus global metrics,
         r_path: stacked prediction matrix,
         i_path: per-column row offsets from process_dataframe)
    """
    processed_data_frame, offsets, removed_nan_row_num = process_dataframe(data_frame)
    processed_time0_dict = time0_dict # {key: (time0_dict[key] - offsets[key]) for key in offsets.keys()}
    # print(f"SC3: SingleCycleF3:\n"
    #       f"processed data:\n{processed_data_frame}\n"
    #       f"offsets:\n{offsets}\n"
    #       f"processed time0:\n{processed_time0_dict}"
    #       )
    # Extract the 'XValue' column, skipping the first row (assumed to hold concentrations)
    x_values = data_frame['XValue'].iloc[1:].to_numpy()

    # Minimum time value
    min_x = np.nanmin(x_values)

    # Zero-base the 'XValue' column, leaving the first row unchanged
    data_frame.loc[1:, 'XValue'] = data_frame.loc[1:, 'XValue'] - min_x
    
    Data = processed_data_frame
    
    # Split into separate concentration groups
    local_datas = local_data_generator(Data)
    
    current_results_list = []
    y_predictions_list = []
    
    # Fit every concentration group on its own
    # NOTE(review): num_params is accumulated but never used afterwards.
    num_params = 0
    for local_data in local_datas:
        # print(f"LocalBivariate:local_data: {local_data}")
        num_params += 3.0
        column_name = local_data.columns[1]
        time0 = processed_time0_dict[column_name]
        current_results, y_prediction = SingleCycle_for_local(local_data,time0,options)
        current_results["Conc"] = local_data.iat[0,1]
        y_predictions_list.append(np.squeeze(y_prediction))
        if np.array(local_data.columns)[1] != 0.0: # zero concentration is fitted but excluded from the final ka/kd aggregation
            current_results_list.append(current_results)
        
    # Stack all predictions into one big matrix
    y_predictions = np.column_stack(y_predictions_list)
    r_path = y_predictions # when not written to a file, the computed result becomes the second return value
        
    # Arithmetic mean of Rmax
    results_global_from_local_Rmax = sum(d['Rmax'] for d in current_results_list)/len(current_results_list)
    
    # Sum of the losses
    results_global_from_local_Loss = sum(d['Loss'] for d in current_results_list)
    
    # Geometric means of kon and koff
    results_global_from_local_kon = get_geometric_mean(current_results_list,'kon')
    results_global_from_local_koff = get_geometric_mean(current_results_list,'koff')
    
    # Compute R2
    Y_data, A_data, T_data = transform_dataframe(Data)
    R_guess = np.nanmax(Y_data)
    Conc_num = A_data.shape[1]
    TSS = np.nansum((Y_data - np.nanmean(Y_data))**2.0)
    R2 = 1 - results_global_from_local_Loss/TSS
    
    # Merge the per-trace dicts into Results: each key maps to a list (one entry per trace)
    Results = {key: [d[key] for d in current_results_list] for key in current_results_list[0]}
    # print(f"LocalResult output:{Results}")
    # Compute Chi2 and tidy the output;
    # ensure values are equal-length plain Python lists
    Results["Global R2"] = [R2]
    # NOTE(review): only the first trace's loss feeds Global Chi2 — confirm intended.
    Results["Global Chi2"] = [np.nansum(Results['Loss'][0])/(Y_data.size - 2 - Conc_num)]
    Results["Rmax"] = Results["Rmax"]
    
    # Estimate a pseudo-Rmax via the steady-state affinity equation
    # Gather the known data
    Req = []
    columns = processed_data_frame.columns.tolist()

    for sample_id, time0 in processed_time0_dict.items():
        # NOTE(review): x_values is the pre-zeroed time axis extracted above —
        # confirm time0 values are on the same scale.
        time0_rank = np.abs(x_values - time0).argmin()
        value = processed_data_frame.loc[time0_rank, sample_id]
        
        # Index of the current column
        current_col_index = columns.index(sample_id)
        
        # If not the first column (XValue), subtract the last non-NaN value of the column to its left
        if current_col_index > 0:
            left_col = columns[current_col_index - 1]
            if left_col != 'XValue':
                left_col_values = processed_data_frame[left_col].dropna()
                if not left_col_values.empty:
                    last_non_nan = left_col_values.iloc[-1]
                    value -= last_non_nan
        
        Req.append(value)
    # NOTE(review): time0_rank here carries the value from the LAST loop
    # iteration — confirm that slicing A_data at that single index is intended.
    A_concentration = A_data[:, time0_rank]
    KD = results_global_from_local_koff/results_global_from_local_kon # zero concentration excluded

    # Fit the pseudo-Rmax
    # NOTE(review): this minimize result is computed but never used —
    # pseudoRmax below is reported as R_guess instead.
    result = minimize(
        affinity_equation_loss,
        R_guess*(np.nanmax(A_concentration)+KD)/np.nanmax(A_concentration),  # initial value from the highest concentration
        args=(Req, A_concentration, KD)
        )
    Results['pseudoRmax'] = [R_guess] # single-cycle mode reports the signal maximum directly as Rmax
    # print(f"SingleCycle3: binding endpoint signal:{Req}, concentration:{A_concentration}, KD:{KD}")
    # print(f"pseudo Rmax: {Results['pseudoRmax']}, signal maximum: {R_guess}")
    # Results["Conc"] = Results["Conc"][0]
    # Results["Loss"] = Results["Loss"][0].tolist()
    
    Results["Conc"] = Results["Conc"]
    Results["Loss"] = Results["Loss"]

    Data = [Y_data, A_data, T_data, R_guess]
    i_path = offsets
    # i_path['last_nan_row_num'] = removed_nan_row_num
    
    return Results , r_path , i_path
