import os
import torch
import numpy as np
import pandas as pd
import h5py
from dataset_modules.dataset_generic import Generic_WSI_Classification_Dataset


class Extended_MIL_Dataset(Generic_WSI_Classification_Dataset):
	"""WSI classification dataset reading pre-extracted patch features from H5.

	Each item is ``(features, label, coords)`` where ``features`` and
	``coords`` are torch tensors loaded from
	``<data_dir>/h5_files/<slide_id>.h5``. When ``enable_augment`` is True,
	a "shake" augmentation (blend a random subset of feature vectors toward
	random noise) is applied on the fly.
	"""

	def __init__(self,
		data_dir, enable_augment,
		**kwargs):
		"""
		Args:
			data_dir: directory containing the ``h5_files`` subfolder, or a
				dict mapping a slide's ``source`` to such a directory.
			enable_augment: if True, apply the shake augmentation in
				``__getitem__``.
			**kwargs: forwarded to ``Generic_WSI_Classification_Dataset``.
		"""
		super(Extended_MIL_Dataset, self).__init__(**kwargs)

		self.data_dir = data_dir
		self.use_h5 = True  # this class only reads H5 feature files

		self.enable_augment = enable_augment

	def load_from_h5(self, toggle):
		# Kept for API compatibility with the generic dataset interface.
		self.use_h5 = toggle

	def __getitem__(
		self,
		idx,
	):
		slide_id = self.slide_data["slide_id"][idx]
		label = self.slide_data["label"][idx]
		# data_dir may be a mapping keyed by the slide's source cohort.
		if isinstance(self.data_dir, dict):
			source = self.slide_data["source"][idx]
			data_dir = self.data_dir[source]
		else:
			data_dir = self.data_dir

		full_path = os.path.join(data_dir, "h5_files", "{}.h5".format(slide_id))
		with h5py.File(full_path, "r") as hdf5_file:
			features = hdf5_file["features"][:]
			coords = hdf5_file["coords"][:]

		features = torch.from_numpy(features)
		coords = torch.from_numpy(coords)
		if self.enable_augment:
			features, coords = self.__shake(features, coords)
		return features, label, coords

	def return_splits(self, from_id=True, csv_path=None):
		"""Build (train, val, test) Extended_Split objects from a split CSV.

		``from_id`` is unused and kept for interface compatibility.
		Raises ValueError when no csv_path is given.
		"""
		if not csv_path:
			raise ValueError("return_splits requires a csv_path")
		# dtype must match slide_id's dtype: otherwise read_csv converts
		# all-numeric columns to numbers and zero-padding is lost, which
		# breaks the isin() comparison in get_split_from_df(). See
		# https://github.com/andrew-weisman/clam_analysis/tree/main/datatype_comparison_bug-2021-12-01
		all_splits = pd.read_csv(
			csv_path, dtype=self.slide_data["slide_id"].dtype
		)
		train_split = self.get_split_from_df(all_splits, "train")
		val_split = self.get_split_from_df(all_splits, "val")
		test_split = self.get_split_from_df(all_splits, "test")

		return train_split, val_split, test_split

	def get_split_from_df(self, all_splits, split_key="train"):
		"""Return an Extended_Split for one split column, or None if empty.

		Augmentation is only enabled for the training split.
		"""
		split = all_splits[split_key]
		split = split.dropna().reset_index(drop=True)

		if len(split) > 0:
			mask = self.slide_data["slide_id"].isin(split.tolist())
			df_slice = self.slide_data[mask].reset_index(drop=True)
			if split_key == "train":
				split = Extended_Split(
					df_slice,
					data_dir=self.data_dir,
					num_classes=self.num_classes,
					enable_augment=True,
				)
			else:
				split = Extended_Split(
					df_slice, data_dir=self.data_dir, num_classes=self.num_classes
				)
		else:
			split = None

		return split

	def __sampling(self, features, coords):
		"""Augment by duplicating a random 5%-25% subset of patches.

		Returns features (and coords, when given) with the sampled subset
		concatenated at the end. Bags too small to sample are returned
		unchanged (torch.randint requires high > low).
		"""
		count = features.size(0)
		if count == 0:
			# Empty bag: nothing to sample from.
			return features if coords is None else (features, coords)
		index = torch.randperm(count)
		low_limit = int(0.05 * count)
		# Guard: for tiny bags both limits truncate to the same value.
		high_limit = max(int(0.25 * count), low_limit + 1)
		new_count = torch.randint(low_limit, high_limit, (1,)).item()
		select_tensor = index[:new_count]

		new_feat = torch.index_select(features, 0, select_tensor)
		if coords is None:
			return torch.cat((features, new_feat), 0)
		new_coord = torch.index_select(coords, 0, select_tensor)
		return torch.cat((features, new_feat), 0), torch.cat((coords, new_coord), 0)

	def __mask(self, features, coords):
		"""Mask a random subset of feature vectors with random noise.

		Truncates the bag to a multiple of M, replaces ~5%-25% of the
		vectors with uniform noise, then shuffles. NOTE: writes through a
		view of ``features`` (in-place); the input tensor is modified.
		"""
		M = 4
		count = features.size(0)
		new_count = (count // M) * M  # truncate to a multiple of M
		if new_count == 0:
			# Bag smaller than M: nothing left after truncation.
			return features if coords is None else (features, coords)

		low_limit = int(0.05 * count)
		# Guard: torch.randint requires high > low for tiny bags.
		high_limit = max(int(0.25 * count), low_limit + 1)
		rnd_count = torch.randint(low_limit, high_limit, (1,)).item()
		rnd_index = torch.randperm(new_count)[:rnd_count]
		rnd_feat = torch.rand(rnd_count, features.size(1), device=features.device)

		index = torch.randperm(new_count)  # final shuffle order
		new_feat = features[:new_count]
		new_feat[rnd_index, :] = rnd_feat
		if coords is None:
			return torch.index_select(new_feat, 0, index)
		new_coord = coords[:new_count]
		return torch.index_select(new_feat, 0, index), torch.index_select(
			new_coord, 0, index
		)

	def __shake(self, features, coords):
		"""Shake a random subset of feature vectors toward random noise.

		Truncates the bag to a multiple of M, then blends ~25%-75% of the
		vectors: f <- (1 - alpha) * f + alpha * noise. NOTE: writes through
		a view of ``features`` (in-place); the input tensor is modified.
		"""
		M = 4
		count = features.size(0)
		new_count = (count // M) * M  # truncate to a multiple of M
		if new_count == 0:
			# Bag smaller than M: skip augmentation instead of crashing.
			return features if coords is None else (features, coords)

		low_limit = int(0.25 * new_count)
		# Guard: torch.randint requires high > low for tiny bags.
		high_limit = max(int(0.75 * new_count), low_limit + 1)
		rnd_count = torch.randint(low_limit, high_limit, (1,)).item()
		rnd_index = torch.randperm(new_count)[:rnd_count]
		rnd_feat = torch.rand(rnd_count, features.size(1), device=features.device)

		alpha = 0.05  # blend strength toward the random noise
		new_feat = features[:new_count]
		new_feat[rnd_index, :] = (1 - alpha) * new_feat[
			rnd_index, :
		] + alpha * rnd_feat
		if coords is None:
			return new_feat
		new_coord = coords[:new_count]
		return new_feat, new_coord


class Extended_Split(Extended_MIL_Dataset):
	"""A train/val/test split view over an already-prepared slide table.

	Deliberately does NOT call the parent constructor: the DataFrame has
	already been sliced and labeled by the parent dataset, so no CSV
	parsing or label mapping is re-run here.
	"""

	def __init__(self, slide_data, data_dir=None, enable_augment=False, num_classes=2):

		self.slide_data = slide_data
		self.num_classes = num_classes
		# Row indices of slide_data grouped per class label, used for
		# class-balanced sampling by the surrounding framework.
		self.slide_cls_ids = [
			np.where(slide_data["label"] == cls)[0] for cls in range(num_classes)
		]

		self.data_dir = data_dir
		self.use_h5 = True  # this dataset family reads H5 feature files only

		self.enable_augment = enable_augment

	def __len__(self):
		return len(self.slide_data)
