import pandas as pd
import numpy as np
from sklearn.impute import  SimpleImputer
import warnings
warnings.filterwarnings("ignore")
from utils.utils import *
class WSIDataProcessor:
    """Clean and merge WSI (whole-slide image) feature-summary CSVs by patient.

    For each feature type the mean/std CSVs are cleaned, inner-merged on
    ``Patient_ID`` and written to ``merge_data_path``, together with a
    combined ``Total.csv``.  A second pass (:meth:`get_same_patient`)
    restricts every output file to the patients present in all of them
    and attaches the clinical target column.
    """

    def __init__(self, data_path=opj(base_path, 'data/original_data/WSI')):
        """Remember input/output directories and create the output directory.

        Args:
            data_path: Directory containing the original WSI feature CSVs.
        """
        md(merge_data_path)  # ensure the merged-output directory exists (project helper)
        self.data_path = data_path
        self.save_merge_path = merge_data_path

    def get_merge_data(self):
        """Merge the mean/std CSV of every feature type and build ``Total.csv``.

        For each entry in ``data_type_list``, the two metric files
        (``metric_type_list[0]`` = means, ``metric_type_list[1]`` = stds)
        are cleaned via :meth:`process_data`, columns are renamed to
        ``<type>_M(col)`` / ``<type>_S(col)``, the two frames are
        inner-merged on ``Patient_ID`` and saved per type; all per-type
        frames are then inner-merged into ``Total.csv``.
        """
        total_df = None
        for data_type in data_type_list:
            # Shared column/file prefix for this feature type.
            prefix = data_type.replace('FeatureSummary', '')

            df_means = self.process_data(
                file_path=opj(self.data_path, data_type + metric_type_list[0] + '.csv'),
            )
            df_means.columns = ['Patient_ID'] + [prefix + '_M(' + col + ')'
                                                 for col in df_means.columns[1:]]
            df_stds = self.process_data(
                file_path=opj(self.data_path, data_type + metric_type_list[1] + '.csv'),
            )
            df_stds.columns = ['Patient_ID'] + [prefix + '_S(' + col + ')'
                                                for col in df_stds.columns[1:]]

            merged_df = df_means.merge(df_stds, on='Patient_ID', how='inner')
            merged_df.to_csv(opj(self.save_merge_path, prefix + '.csv'),
                             index=False)

            # Accumulate an inner join across all feature types.
            if total_df is None:
                total_df = merged_df
            else:
                total_df = total_df.merge(merged_df, on='Patient_ID', how='inner')

        total_df.to_csv(opj(self.save_merge_path, 'Total.csv'), index=False)

    def get_same_patient(self):
        """Filter every merged file to the shared patients and add the label.

        Each CSV under ``save_merge_path`` is reduced to the intersection
        of ``Patient_ID`` values across all files, left-joined with the
        clinical ``target_column`` and written back in place (overwrite).
        """
        from functools import reduce

        file_list = ol(self.save_merge_path)
        id_sets = [set(pd.read_csv(opj(self.save_merge_path, f))['Patient_ID'])
                   for f in file_list]

        # Per-patient label (e.g. ALN status) from the clinical table.
        aln_df = pd.read_csv(clinic_data_path)[[target_column, 'Patient_ID']]

        # Patient_IDs common to every merged file.
        common_ids = reduce(set.intersection, id_sets)

        # Keep only the common patients in each file (overwrites the original).
        for f in file_list:
            file_path = opj(self.save_merge_path, f)
            df = pd.read_csv(file_path)
            df['Patient_ID'] = df['Patient_ID'].astype(int)
            df_filtered = df[df['Patient_ID'].isin(common_ids)]
            df_filtered = pd.merge(df_filtered, aln_df, on='Patient_ID', how='left')
            df_filtered.to_csv(file_path, index=False)

    def process_data(self, file_path):
        """Load one feature CSV, drop degenerate rows/columns and impute.

        Args:
            file_path: Path of the CSV; its first column must be ``Patient_ID``.

        Returns:
            A DataFrame with ±inf replaced by NaN, all-zero/all-NaN rows
            and columns removed, and remaining NaNs median-imputed.
            NOTE(review): imputation converts every column (including
            ``Patient_ID``) to float; downstream code casts it back to int.
        """
        df = pd.read_csv(file_path)
        print('初始形状' + str(df.shape))
        # Treat infinities as missing values.
        df_cleaned = df.replace([np.inf, -np.inf], np.nan)
        # Drop samples (rows) whose features are all 0 or all NaN.
        # BUG FIX: the masks previously tested the raw ``df``, so a row that
        # became all-NaN only after the ±inf replacement was wrongly kept.
        features = df_cleaned.drop(columns=['Patient_ID'])
        df_cleaned = df_cleaned[
            ~(features == 0).all(axis=1) &   # not an all-zero row
            ~features.isna().all(axis=1)     # not an all-NaN row
            ]
        # Drop variables (columns) that are all 0 or all NaN across samples.
        df_cleaned = df_cleaned.loc[:, ~((df_cleaned == 0).all(axis=0) | df_cleaned.isna().all(axis=0))]
        # Median-impute the remaining missing values.
        imp = SimpleImputer(strategy='median')
        df_imputed = pd.DataFrame(imp.fit_transform(df_cleaned), columns=df_cleaned.columns)
        print('结束形状' + str(df_imputed.shape))
        return df_imputed

    # 执行示例
if __name__ == "__main__":
    obj = WSIDataProcessor()
    obj.get_merge_data()
    obj.get_same_patient()