import pickle
import yaml
import os
import re
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader, Dataset


def process_func(path: str, aug_rate=2, missing_ratio=0.1):
    """Load tabular data from an Excel file, augment it with noisy row
    copies, and build the observation / ground-truth masks used for
    imputation training.

    Parameters
    ----------
    path : str
        Path of the Excel file; the first row is treated as the header.
    aug_rate : int, default 2
        Total size multiplier: ``aug_rate - 1`` jittered copies of every
        row are appended to the original rows.
    missing_ratio : float, default 0.1
        Fraction of the observed entries per column that is artificially
        hidden to form the ground-truth mask.

    Returns
    -------
    tuple of np.ndarray
        ``(observed_values, observed_masks, gt_masks)`` where
        ``observed_values`` is float32 with NaNs replaced by 0,
        ``observed_masks`` is 1 where a value was present in the
        augmented data, and ``gt_masks`` is ``observed_masks`` with an
        extra ``missing_ratio`` of entries per column zeroed out.
    """
    print(f"Loading data from {path}...")
    data = pd.read_excel(path, header=0).iloc[:, 0:]
    print(f"原始数据形状：{data.shape}")
    print("前3行数据样例：\n", data.head(3))
    # "?" is this file's missing-value marker.
    data.replace("?", np.nan, inplace=True)

    def generate_synthetic_data(data, aug_rate):
        """Return a DataFrame holding (aug_rate - 1) noisy copies of every row."""
        synthetic_rows = []
        num_samples = len(data)
        features = data.columns

        # Hoisted out of the per-sample loop: the original recomputed
        # data[col].std() for every sample, i.e. O(rows * cols) std calls.
        numeric_cols = [
            col for col in features if pd.api.types.is_numeric_dtype(data[col])
        ]
        col_std = {col: data[col].std() for col in numeric_cols}
        col_pos = {col: i for i, col in enumerate(features)}

        for _ in range(aug_rate - 1):
            for idx in range(num_samples):
                sample = data.iloc[idx].copy()
                # Bounded Gaussian jitter; one draw per sample, matching
                # the original RNG call sequence exactly.
                noise = np.clip(
                    np.random.normal(0, 0.1, size=len(features)), -0.2, 0.2
                )
                # Only numeric, non-missing cells receive noise.
                for col in numeric_cols:
                    if not pd.isna(sample[col]):
                        std = col_std[col]
                        # Constant columns (std == 0) get a vanishing
                        # scale so the noise is effectively zero.
                        scale = 1e-6 if std == 0 else std
                        sample[col] += noise[col_pos[col]] * scale
                synthetic_rows.append(sample)

        return pd.DataFrame(synthetic_rows)

    synthetic_data = generate_synthetic_data(data, aug_rate)
    data_aug = pd.concat([data, synthetic_data], ignore_index=True)

    observed_values = data_aug.values.astype("float32")
    observed_masks = ~np.isnan(observed_values)

    # gt mask: start from the observed mask and additionally hide
    # missing_ratio of the observed entries in every column.
    masks = observed_masks.copy()
    for col in range(observed_values.shape[1]):
        obs_indices = np.where(masks[:, col])[0]
        miss_indices = np.random.choice(
            obs_indices, int(len(obs_indices) * missing_ratio), replace=False
        )
        masks[miss_indices, col] = False
    # 0 marks both truly-missing and artificially hidden cells.
    # (The original reshaped `masks` to its own shape — a no-op.)
    gt_masks = masks

    observed_values = np.nan_to_num(observed_values)  # replace NaN with 0
    observed_masks = observed_masks.astype(int)
    gt_masks = gt_masks.astype(int)

    return observed_values, observed_masks, gt_masks


class tabular_dataset(Dataset):
    """Tabular imputation dataset with on-disk caching.

    Builds (or loads) the processed arrays via ``process_func``, then
    min-max normalizes every column to [0, 1] using only the observed
    entries. Both stages are cached as pickle files keyed by
    ``missing_ratio`` and ``seed``; the per-column max/min are stored so
    model outputs can be denormalized later.
    """

    def __init__(
        self, eval_length=15, use_index_list=None, aug_rate=2, missing_ratio=0.1, seed=0
    ):
        self.eval_length = eval_length
        np.random.seed(seed)

        dataset_path = "./data_breast/material_dataset55.xlsx"
        processed_data_path = (
            f"./data_breast/material55-missing_ratio-{missing_ratio}_seed-{seed}.pk"
        )
        processed_data_path_norm = (
            f"./data_breast/material55-missing_ratio-{missing_ratio}_seed-{seed}_max-min_norm.pk"
        )

        if not os.path.isfile(processed_data_path):
            self.observed_values, self.observed_masks, self.gt_masks = process_func(
                dataset_path, aug_rate=aug_rate, missing_ratio=missing_ratio
            )

            with open(processed_data_path, "wb") as f:
                pickle.dump(
                    [self.observed_values, self.observed_masks, self.gt_masks], f
                )
            print("--------Dataset created--------")

        if not os.path.isfile(processed_data_path_norm):
            # BUG FIX: if the raw pickle already existed, the attributes
            # above were never assigned and normalization crashed with
            # AttributeError. Reload the raw arrays in that case.
            # NOTE: the pickle caches are local artifacts written by this
            # class itself, so unpickling them is trusted.
            if not hasattr(self, "observed_values"):
                with open(processed_data_path, "rb") as f:
                    self.observed_values, self.observed_masks, self.gt_masks = (
                        pickle.load(f)
                    )

            print(
                "--------------Dataset has not been normalized yet. Perform data normalization and store the mean value of each column.--------------"
            )
            # Per-column max/min computed over the *observed* entries only.
            col_num = self.observed_values.shape[1]
            max_arr = np.zeros(col_num)
            min_arr = np.zeros(col_num)
            for k in range(col_num):
                obs_ind = self.observed_masks[:, k].astype(bool)
                temp = self.observed_values[obs_ind, k]
                max_arr[k] = np.max(temp)
                min_arr[k] = np.min(temp)

            print(f"--------------Max-value for each column {max_arr}--------------")
            print(f"--------------Min-value for each column {min_arr}--------------")

            for k in range(col_num):
                temp = self.observed_values[:, k]
                range_val = max_arr[k] - min_arr[k]
                if range_val == 0:
                    # Constant column: map all observed entries to 0.5.
                    self.observed_values[:, k] = 0.5 * self.observed_masks[:, k]
                else:
                    # Scale observed entries to [0, 1]; unobserved stay 0.
                    self.observed_values[:, k] = (
                        (temp - min_arr[k]) / range_val * self.observed_masks[:, k]
                    )

            with open(processed_data_path_norm, "wb") as f:
                pickle.dump(
                    [self.observed_values, self.observed_masks, self.gt_masks, max_arr, min_arr],
                    f,
                )

        with open(processed_data_path_norm, "rb") as f:
            (
                self.observed_values,
                self.observed_masks,
                self.gt_masks,
                self.max_values,
                self.min_values,
            ) = pickle.load(f)

        if use_index_list is None:
            self.use_index_list = np.arange(len(self.observed_values))
        else:
            self.use_index_list = use_index_list

    def __getitem__(self, org_index):
        """Map a position in use_index_list to one sample dict."""
        index = self.use_index_list[org_index]
        return {
            "observed_data": self.observed_values[index],
            "observed_mask": self.observed_masks[index],
            "gt_mask": self.gt_masks[index],
            "timepoints": np.arange(self.eval_length),
        }

    def __len__(self):
        return len(self.use_index_list)


def get_dataloader(seed=1, nfold=5, batch_size=16, missing_ratio=0.1):
    """Build train/valid/test DataLoaders over the tabular dataset.

    Parameters
    ----------
    seed : int
        Seed for the dataset cache and the index shuffles.
    nfold : int
        Controls the test-split size (1/nfold of the data). NOTE(review):
        as written, ``start``/``end`` use folds ``nfold-1`` and ``nfold``,
        so the test split is always the LAST 1/nfold slice of the
        shuffled indices — ``nfold`` does not select which fold is held
        out. Kept as-is to preserve behavior.
    batch_size : int
        Batch size for all three loaders.
    missing_ratio : float
        Fraction of observed entries hidden when building gt masks.

    Returns
    -------
    tuple
        ``(train_loader, valid_loader, test_loader, max_values,
        min_values)`` — the per-column max/min arrays allow
        denormalizing model outputs.

    Raises
    ------
    ValueError
        If the validation split comes out empty.
    """
    dataset = tabular_dataset(missing_ratio=missing_ratio, seed=seed)
    print(f"Dataset size:{len(dataset)} entries")

    indlist = np.arange(len(dataset))

    np.random.seed(seed + 1)
    np.random.shuffle(indlist)

    # Test split: the last 1/nfold of the shuffled indices.
    fold_ratio = 1 / nfold
    start = int((nfold - 1) * len(dataset) * fold_ratio)
    end = int(nfold * len(dataset) * fold_ratio)

    test_index = indlist[start:end]
    remain_index = np.delete(indlist, np.arange(start, end))

    # Remaining indices: 80% train / 20% validation.
    np.random.shuffle(remain_index)
    num_train = int(len(remain_index) * 0.8)
    train_index = remain_index[:num_train]
    valid_index = remain_index[num_train:]

    if len(valid_index) == 0:
        raise ValueError("Validation set is empty. Please check the data split logic.")

    train_dataset = tabular_dataset(
        use_index_list=train_index, missing_ratio=missing_ratio, seed=seed
    )
    # DataLoader's shuffle parameter expects a bool; the original passed 1/0.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    valid_dataset = tabular_dataset(
        use_index_list=valid_index, missing_ratio=missing_ratio, seed=seed
    )
    valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)

    test_dataset = tabular_dataset(
        use_index_list=test_index, missing_ratio=missing_ratio, seed=seed
    )
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    return train_loader, valid_loader, test_loader, dataset.max_values, dataset.min_values