from sklearn.model_selection import train_test_split

from utils.utils import *
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
import joblib
class DataSplitUtil:
    """Create, persist, and reuse stratified patient-level train/test splits.

    A split is keyed by ``split_random_state``: the patient IDs of each
    partition are written to ``<save_dir>/<seed>/train.csv`` and
    ``<save_dir>/<seed>/test.csv`` so every downstream step can reload
    exactly the same partition.
    """

    def __init__(self, split_random_state, df=None, save_dir=opj(base_path, 'data', 'split_patient_ids')):
        # df: DataFrame with a 'Patient_ID' column and the global target column.
        #     Only required for test_split(); the reload methods work without it.
        # save_dir: root directory under which per-seed ID files are stored.
        self.df = df
        self.save_dir = save_dir
        self.split_random_state = split_random_state

    def _split_dir(self):
        """Return the directory holding this seed's train/test ID files."""
        return opj(self.save_dir, str(self.split_random_state))

    def _load_split_ids(self):
        """Load the previously saved train/test patient IDs as two Series."""
        train_ids = pd.read_csv(opj(self._split_dir(), 'train.csv'), header=None)[0]
        test_ids = pd.read_csv(opj(self._split_dir(), 'test.csv'), header=None)[0]
        return train_ids, test_ids

    def test_split(self, test_size=0.3):
        """Create a stratified train/test split and persist the patient IDs.

        Stratifies on the target column so the label distribution is preserved
        in both partitions.

        Raises:
            ValueError: if the instance was constructed without a DataFrame.
        """
        if self.df is None:
            raise ValueError("DataSplitUtil requires a DataFrame to create a split")
        patient_ids = self.df['Patient_ID']
        # Copy so re-indexing cannot affect the Series held inside self.df.
        y = self.df[target_column].copy()
        y.index = patient_ids
        # Stratified split on patient IDs.
        train_ids, test_ids = train_test_split(
            patient_ids,
            stratify=y,
            test_size=test_size,
            random_state=self.split_random_state,
        )
        # Persist the partition under a per-seed directory.
        save_path = self._split_dir()
        md(save_path)  # create the directory if missing
        train_ids.to_csv(opj(save_path, 'train.csv'), index=False, header=False)
        test_ids.to_csv(opj(save_path, 'test.csv'), index=False, header=False)

    def split_train_and_test(self, X, data_type, flag=False, model_name="signature_model"):
        """Partition features and labels according to the saved patient IDs.

        When ``flag`` is True, additionally median-imputes and standard-scales
        the features — fitting on the training fold only to avoid leakage —
        and persists the fitted transformers for inference-time reuse.

        Returns:
            (X_train, X_test, y_train, y_test, train_ids, test_ids)
        """
        patient_ids = X['Patient_ID']
        X_selected = X.drop(columns=[target_column])
        # Copy so re-indexing cannot affect the Series held inside X.
        y = X[target_column].copy()
        y.index = patient_ids

        train_ids, test_ids = self._load_split_ids()

        # Index the features by patient ID once, then select each partition.
        X_indexed = X_selected.set_index(patient_ids).drop(columns=['Patient_ID'])
        X_train_raw = X_indexed.loc[train_ids]
        X_test_raw = X_indexed.loc[test_ids]

        y_train = y.loc[train_ids]
        y_test = y.loc[test_ids]

        if not flag:
            return X_train_raw, X_test_raw, y_train, y_test, train_ids, test_ids

        # Impute + standardize, fitting on the training fold only.
        imp = SimpleImputer(strategy="median")
        X_train_imputed = imp.fit_transform(X_train_raw)
        X_test_imputed = imp.transform(X_test_raw)

        scaler = StandardScaler()
        X_train_scaled = scaler.fit_transform(X_train_imputed)
        X_test_scaled = scaler.transform(X_test_imputed)

        # Persist the fitted transformers so inference can reproduce the
        # exact same preprocessing.
        save_path = opj(weight_result_path, model_name, data_type.replace('Roi', 'Nuclei'))
        md(save_path)
        joblib.dump(imp, opj(save_path, 'imputer_median.pkl'))
        joblib.dump(scaler, opj(save_path, 'scaler_standard.pkl'))

        # Rebuild DataFrames (transform returns plain arrays) keyed by ID.
        feature_columns = X_train_raw.columns.tolist()
        X_train_df = pd.DataFrame(X_train_scaled, index=train_ids, columns=feature_columns)
        X_test_df = pd.DataFrame(X_test_scaled, index=test_ids, columns=feature_columns)

        return X_train_df, X_test_df, y_train, y_test, train_ids, test_ids

    def get_train_test_df(self, X):
        """Split a DataFrame into the saved train/test rows, keyed by Patient_ID.

        Returns:
            (train_df, test_df) indexed by patient ID.
        """
        patient_ids = X['Patient_ID']
        train_ids, test_ids = self._load_split_ids()
        X_indexed = X.set_index(patient_ids)
        return X_indexed.loc[train_ids], X_indexed.loc[test_ids]