from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
import utils as u
import pdb

class splitter():
    '''
    Creates the three dataset splits:
    train
    dev
    test

    Static taskers are split over labelled node indices; temporal taskers
    are split over time steps.  The resulting DataLoaders are exposed as
    self.train / self.dev / self.test.
    '''
    def __init__(self,args,tasker):
        
        ## pdb.set_trace()        
        if tasker.is_static: #### For static datsets
            assert args.train_proportion + args.dev_proportion < 1, \
                'there\'s no space for test samples'
            #only the training one requires special handling on start, the others are fine with the split IDX.
            
            # NOTE(review): random_perm is hard-coded off, so nodes are split
            # in sorted-index order rather than randomly — confirm intended.
            random_perm=False
            indexes = tasker.data.nodes_with_label
            
            if random_perm:
                perm_idx = torch.randperm(indexes.size(0))
                perm_idx = indexes[perm_idx]
            else:
                print ('tasker.data.nodes',indexes.size())
                perm_idx, _ = indexes.sort()
            #print ('perm_idx',perm_idx[:10])
            
            # Contiguous proportional split of the (sorted or permuted) node ids.
            self.train_idx = perm_idx[:int(args.train_proportion*perm_idx.size(0))]
            self.dev_idx = perm_idx[int(args.train_proportion*perm_idx.size(0)): int((args.train_proportion+args.dev_proportion)*perm_idx.size(0))]
            self.test_idx = perm_idx[int((args.train_proportion+args.dev_proportion)*perm_idx.size(0)):]
            # print ('train,dev,test',self.train_idx.size(), self.dev_idx.size(), self.test_idx.size())
            
            train = static_data_split(tasker, self.train_idx, test = False)
            train = DataLoader(train, shuffle=True,**args.data_loading_params)
            
            dev = static_data_split(tasker, self.dev_idx, test = True)
            dev = DataLoader(dev, shuffle=False,**args.data_loading_params)
            
            test = static_data_split(tasker, self.test_idx, test = True)
            test = DataLoader(test, shuffle=False,**args.data_loading_params)
                        
            self.tasker = tasker
            self.train = train
            self.dev = dev
            self.test = test

        else:  # Time-series datasets
            # Original time-series split logic kept unchanged.
            assert args.train_proportion + args.dev_proportion < 1, 'No space for test samples'
            ## pdb.set_trace()

            # Time-window split: train covers
            # [min_time + num_hist_steps, train_proportion * max_time).
            start = tasker.data.min_time + args.num_hist_steps
            end = int(np.floor(tasker.data.max_time * args.train_proportion))
            train = data_split(tasker, start, end, test=False)
            train = DataLoader(train, **args.data_loading_params)

            # Dev split: starts where train ended.
            start = end
            end = int(np.floor(tasker.data.max_time * (args.train_proportion + args.dev_proportion)))
            dev = data_split(tasker, start, end, test=True)
            # NOTE(review): dev/test loaders only forward num_workers, while the
            # train loader forwards all data_loading_params — confirm intended.
            dev = DataLoader(dev, num_workers=args.data_loading_params['num_workers'])

            # Test split: remaining time steps up to and including max_time.
            start = end
            end = int(tasker.data.max_time) + 1  # assumes max_time is an integer
            test = data_split(tasker, start, end, test=True)
            test = DataLoader(test, num_workers=args.data_loading_params['num_workers'])

            print('Dataset splits sizes:  train', len(train), 'dev', len(dev), 'test', len(test))

            # Additionally generate node-index splits (assumes the node set is
            # global and does not change over time).
            all_nodes = torch.arange(tasker.data.num_nodes)  # all node ids
            # NOTE(review): unseeded randperm — node splits are not reproducible
            # across runs unless the global torch seed is set by the caller.
            perm_idx = torch.randperm(all_nodes.size(0))  # random permutation

            # Proportional split of the permuted node indices.
            train_size = int(args.train_proportion * perm_idx.size(0))
            dev_size = int(args.dev_proportion * perm_idx.size(0))
            self.train_idx = perm_idx[:train_size]
            self.dev_idx = perm_idx[train_size:train_size + dev_size]
            self.test_idx = perm_idx[train_size + dev_size:]

            # Expose the tasker and the three DataLoaders.
            self.tasker = tasker
            self.train = train
            self.dev = dev
            self.test = test


class data_split(Dataset):
    def __init__(self, tasker, start, end, test, **kwargs):
        '''
        Dataset view over the tasker's samples with indices in [start, end).

        Item i of this split corresponds to tasker.get_sample(start + i);
        the `test` flag and any extra kwargs are forwarded on every call.
        '''
        self.tasker = tasker
        self.start = start
        self.end = end
        self.test = test
        self.kwargs = kwargs

    def __len__(self):
        # Number of indices covered by the half-open range [start, end).
        return self.end - self.start

    def __getitem__(self, idx):
        # Translate the split-local index into the tasker's global index.
        sample_idx = self.start + idx
        return self.tasker.get_sample(sample_idx, test=self.test, **self.kwargs)


class static_data_split(Dataset):
    def __init__(self, tasker, indexes, test):
        '''
        Dataset view over a fixed collection of node indices.

        `indexes` lists the node ids that belong to this split; each item
        is fetched via tasker.get_sample with the stored `test` flag.
        '''
        self.tasker = tasker
        self.indexes = indexes
        self.test = test
        # Cached from the tasker; shared by every split of the same tasker.
        self.adj_matrix = tasker.adj_matrix

    def __len__(self):
        return len(self.indexes)

    def __getitem__(self, idx):
        # Map the split-local position onto the stored node id.
        node_id = self.indexes[idx]
        return self.tasker.get_sample(node_id, test=self.test)
