import os
import h5py
import numpy as np
import csv, copy
from utils.file_utils import save_hdf5
from tqdm import tqdm
import pandas as pd


def randomly_sampling_idx(amount, sample_num):
    """Draw ``sample_num`` indices from ``range(amount)`` uniformly at random.

    When the pool is smaller than the number of draws, indices are allowed to
    repeat, each appearing at most ``ceil(sample_num / amount)`` times, so the
    repetitions stay as evenly spread as possible.

    Args:
        amount (int): size of the index pool; must be > 0.
        sample_num (int): number of indices to draw.

    Returns:
        np.ndarray: array of ``sample_num`` indices, each in ``[0, amount)``.
    """
    # Smallest repetition factor that makes the pool cover sample_num
    # (ceil division, replacing the original step-by-step while loop);
    # clamp to >= 1 so small requests still shuffle the full pool.
    rate = max(1, -(-sample_num // amount))
    ori_idx = np.repeat(np.arange(amount), rate)
    np.random.shuffle(ori_idx)
    return ori_idx[:sample_num]


def check_dir(dirname):
    """Create ``dirname`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` to avoid the check-then-create race of a separate
    ``os.path.exists`` / ``os.makedirs`` pair.
    """
    os.makedirs(dirname, exist_ok=True)

class RandomCombinedSampler:
    """Synthesizes new slide-level samples by combining patch features drawn
    at random from several source slides of the same class.

    For each fold / split / class, patches from ``origin_sample_num`` randomly
    chosen slides are pooled, then fixed- or variable-length feature sequences
    are resampled from the pool and saved as synthetic slides (h5 files),
    together with updated split csv files.
    """

    def __init__(self,
                 feat_dir,
                 source_path,
                 split_dir,
                 origin_sample_num,
                 seq_len,
                 output_num,
                 output_dir,
                 repeat_times=1,
                 seed=1,):
        """
        Args:
            feat_dir (str): directory containing the per-slide feature h5 files.
            source_path (str): csv listing slide provenance
                (case_id, slide_id, label).
            split_dir (str): directory holding the train/val/test split csvs.
            origin_sample_num (int): number of source slides pooled per draw.
            seq_len (int or tuple): fixed sequence length, or a (low, high)
                range sampled uniformly for each synthetic slide.
            output_num (int or dict): number of synthetic samples to emit;
                an int applies the same count to every split and class, a
                nested dict {split: {class: count}} gives explicit counts.
            output_dir (str): output directory.
            repeat_times (int): how many synthetic sequences are drawn from a
                single pooled sample before new source slides are drawn.
            seed (int): random seed.
        """
        self.feat_dir = feat_dir
        self.source_path = source_path
        self.split_dir = split_dir
        self.origin_sample_num = origin_sample_num
        self.seq_len = seq_len
        self.output_num = output_num
        self.output_dir = output_dir
        self.output_split_dir = os.path.join(self.output_dir, 'splits')
        self.repeat_times = repeat_times
        self.seed = seed

        self.labels = set()  # filled by label_info()
        self.output_feat_dir = ''
        np.random.seed(seed)

        self.label_dict = self.label_info()
        self.init_dirs()

    def label_info(self):
        """Read the source csv and build the slide_id -> label mapping.

        Also populates ``self.labels`` with the set of observed labels.

        Returns:
            dict: slide_id -> label.
        """
        label_dict = {}
        with open(self.source_path, 'r', encoding='utf-8') as f:
            reader = csv.reader(f)
            next(reader)  # skip header row
            for row in reader:
                _patient_id, slide_id, label = row
                label_dict[slide_id] = label
                self.labels.add(label)

        return label_dict

    def split_info(self, fold_id):
        """Load the split csv for one fold, grouping slide ids by split/label.

        Args:
            fold_id (int): fold index; reads ``splits_{fold_id}.csv``.

        Returns:
            tuple: ``(splits, splits_num)`` where ``splits`` is a nested dict
            {split_name: {label: [slide_id, ...]}} and ``splits_num`` maps
            each split name to its total slide count.
        """
        split_names = ['train', 'val', 'test']
        label_dict = self.label_dict
        csv_filename = os.path.join(self.split_dir, f'splits_{fold_id}.csv')

        # Two-level dict: split name -> label -> list of slide ids.
        splits = {name: {label: [] for label in self.labels}
                  for name in split_names}

        with open(csv_filename, 'r', encoding='utf-8') as f:
            reader = csv.reader(f)
            headers = next(reader)
            for row in reader:
                # Column 0 is the row index; columns 1..n hold one slide id
                # per split column.
                for j in range(1, len(row)):
                    slide_id = row[j]
                    if slide_id == '':
                        continue
                    splits[headers[j]][label_dict[slide_id]].append(slide_id)

        splits_num = {name: sum(len(ids) for ids in splits[name].values())
                      for name in split_names}

        return splits, splits_num

    def slide_feats(self, slide_id):
        """Load one slide's patch features and coordinates from its h5 file.

        Args:
            slide_id (str): slide id (h5 filename stem).

        Returns:
            tuple: (features, coords) numpy arrays.
        """
        with h5py.File(os.path.join(self.feat_dir, 'h5_files', slide_id + '.h5'), 'r') as h5f:
            features = h5f['features'][:]
            coords = h5f['coords'][:]

        return features, coords

    def sampling_feats(self, slides):
        """Pool patches from randomly drawn source slides.

        Args:
            slides (sequence): slide ids eligible for drawing.

        Returns:
            dict: pooled 'feats', 'coords', per-patch 'slide_ids' (index of
            the source slide within this draw) and ascii-encoded 'slide_name'
            list.
        """
        slide_num = len(slides)
        # Indices of origin_sample_num source slides (repeats allowed when
        # the class has fewer slides than requested).
        sample_idx = randomly_sampling_idx(slide_num, self.origin_sample_num)
        sample_slides = np.array(slides)[sample_idx]
        samples_pool = dict.fromkeys(['feats', 'coords', 'slide_ids', 'slide_name'])
        samples_pool['slide_name'] = []
        # Concatenate feature/coordinate data across the drawn slides.
        for idx, slide_id in enumerate(sample_slides):
            features, coords = self.slide_feats(slide_id)
            patch_num = features.shape[0]

            samples_pool['slide_name'].append(str.encode(slide_id, 'ascii'))

            if samples_pool['feats'] is None:
                samples_pool['feats'] = features
                samples_pool['coords'] = coords
                samples_pool['slide_ids'] = np.repeat(idx, patch_num)
            else:
                samples_pool['feats'] = np.vstack((samples_pool['feats'], features))
                samples_pool['coords'] = np.vstack((samples_pool['coords'], coords))
                samples_pool['slide_ids'] = np.hstack((samples_pool['slide_ids'],
                                                       np.repeat(idx, patch_num)))

        return samples_pool

    def combine_samples(self, samples):
        """Resample one synthetic feature sequence from the pooled patches.

        Args:
            samples (dict): pooled data produced by ``sampling_feats``.

        Returns:
            dict: synthetic slide with 'features', 'coords', 'slide_ids'
            and 'slide_name' arrays.
        """
        sample_feats = samples['feats']
        sample_coords = samples['coords']
        slide_ids = samples['slide_ids']
        slide_name = samples['slide_name']

        patch_num = len(slide_ids)

        seq_len = self.seq_len
        if isinstance(seq_len, (tuple, list)):
            # A (low, high) range: pick a length per synthetic slide.
            low, high = seq_len
            seq_len = np.random.randint(low, high)
        sample_idx = randomly_sampling_idx(patch_num, seq_len)
        combined_slide = {
            'features': sample_feats[sample_idx],
            'coords': sample_coords[sample_idx],
            'slide_ids': slide_ids[sample_idx],
            'slide_name': np.array(slide_name),
        }
        return combined_slide

    def init_dirs(self,):
        """Resolve the output feature directory and create output dirs."""
        self.output_feat_dir = os.path.join(self.output_dir, 'features')
        check_dir(self.output_feat_dir)
        check_dir(self.output_split_dir)

    def save_feat(self, slide, fold, cls, dataset_name, slide_num):
        """Persist one synthetic slide to an h5 file.

        Args:
            slide (dict): synthetic slide data from ``combine_samples``.
            fold (int): fold index.
            cls (str): class label.
            dataset_name (str): split name ('train', 'val' or 'test').
            slide_num (int): running index of the sample within its group.

        Returns:
            str: the generated synthetic slide id.
        """
        slide_id = f'fold{fold}_{dataset_name}_{cls}_{str(slide_num).zfill(4)}'
        feat_path = os.path.join(self.output_feat_dir, 'h5_files', f'{slide_id}.h5')
        save_hdf5(feat_path, slide, mode='w')
        return slide_id

    def __call__(self, *args, **kwargs):
        """Generate all synthetic slides and split csvs for one fold.

        Keyword Args:
            fold_id (int): fold index to process.
        """
        k = kwargs['fold_id']
        cur_split, cur_split_num = self.split_info(k)  # load fold-k splits
        print(cur_split_num)

        if isinstance(self.output_num, int):
            # A plain int means the same count for every split and class;
            # expand it into the nested {split: {class: count}} form that the
            # lookup below expects (the old flat dict keyed by label raised
            # KeyError when indexed by split name).
            output_nums = {name: dict.fromkeys(self.labels, self.output_num)
                           for name in cur_split}
        else:
            output_nums = self.output_num

        check_dir(self.output_dir)
        check_dir(os.path.join(self.output_dir, 'features', 'h5_files'))
        new_split = copy.deepcopy(cur_split)

        print(f"\n正在生成第{k}折的组合样本")
        for cls in self.labels:
            for dataset_name in cur_split.keys():
                output_num = output_nums[dataset_name][cls]
                wsi_ids = cur_split[dataset_name][cls]

                count = 0
                print(f"正在生成{dataset_name}集中{cls}类的的组合样本")
                for i in tqdm(range(int(np.ceil(output_num / self.repeat_times))), delay=0.2):
                    # Pool patches from freshly drawn source slides ...
                    samples = self.sampling_feats(wsi_ids)
                    # ... and reuse the pool repeat_times times.
                    for j in range(self.repeat_times):
                        if count == output_num:
                            break
                        combined_slide = self.combine_samples(samples)
                        new_id = self.save_feat(combined_slide, k, cls, dataset_name, count)
                        new_split[dataset_name][cls].append(new_id)
                        count += 1

        # Write the provenance csv for the synthetic slides of this fold.
        sample_split = {'train': [], 'val': [], 'test': []}
        patient_id = 0
        csv_path = os.path.join(self.output_split_dir, f'synthetic_slides_fold{k}.csv')
        # Mode 'w' (was 'a'): re-running a fold must overwrite, not append
        # duplicate header and data rows.
        with open(csv_path, 'w', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['case_id','slide_id','label'])
            for dataset_name in sample_split:
                for wsi_label, wsi_ids in new_split[dataset_name].items():
                    sample_split[dataset_name].extend(wsi_ids)

                    for slide_id in wsi_ids:
                        # Real slides already exist in the source csv; only
                        # synthetic ones get a virtual patient record.
                        if slide_id not in self.label_dict:
                            writer.writerow([f'virtual_patient_{patient_id}', slide_id, wsi_label])
                            patient_id += 1

        # Write the combined (real + synthetic) split table for this fold.
        csv_path = os.path.join(self.output_split_dir, f'splits_{k}.csv')
        df = pd.concat([pd.Series(sample_split['train']),
                        pd.Series(sample_split['val']),
                        pd.Series(sample_split['test'])],
                       ignore_index=True, axis=1)
        df.columns = ['train', 'val', 'test']
        df.to_csv(csv_path)

if __name__ == '__main__':
    # Local Windows paths; alternative locations used on other hosts:
    #   /home/whut/D/HCH_Result/{features,synthetic_features}
    #   /public/home/xxxy_ruanjun/data/HCH_Result/{features,synthetic_features}
    feat_dir = r'E:/Medical_AI/HCH_Result/features'
    output_dir = r'E:/Medical_AI/HCH_Result/synthetic_features'

    # Per-split, per-class number of synthetic slides to generate.
    synthetic_counts = {
        'train': {'MSS': 300, 'MSI-H': 300, 'Normal': 300},
        'val': {'MSS': 150, 'MSI-H': 150, 'Normal': 150},
        'test': {'MSS': 150, 'MSI-H': 150, 'Normal': 150},
    }

    sampler = RandomCombinedSampler(feat_dir=feat_dir,
                                    source_path='./dataset_csv/MSI_classification_HCH20221202.csv',
                                    origin_sample_num=3,
                                    split_dir='./splits/MSI_classification_95',
                                    seq_len=(200, 500),
                                    output_num=synthetic_counts,
                                    output_dir=output_dir,
                                    repeat_times=3)

    # Generate synthetic samples for all five folds.
    for fold in range(5):
        sampler(fold_id=fold)

