import torch
import numpy as np

class transferToDeviceMemory:
    """Copy batches of NumPy arrays into GPU memory through a dedicated
    CUDA stream backed by a pre-reserved shared cache region.

    Supported input layouts for :meth:`process` (dispatch is by nesting
    depth and array rank):

      0: [Tensor, ...]             -> [Gpu_Tensor, ...]
      1: [Batch_Tensor, ...]       -> [Batch_Gpu_Tensor, ...]
      2: [[Tensor x 3], ...]       -> [[Gpu_Tensor x 3], ...]
      3: [[Batch_Tensor x 3], ...] -> [[Batch_Gpu_Tensor x 3], ...]

    where:
      Tensor           3-D NumPy array
      Batch_Tensor     4-D NumPy array
      Gpu_Tensor       tuple (C, H, W, pos, block_id)
      Batch_Gpu_Tensor tuple (B, C, H, W, pos, block_id)

    NOTE(review): ``torch.cuda.insert_shared_cache`` and
    ``torch.cuda.clear_shared_cache`` are not part of stock PyTorch — this
    class assumes a custom build that exposes them; confirm against the
    deployment environment.
    """

    def __init__(self):
        pass

    def start(self):
        """Create the dedicated CUDA stream used for host-to-device copies."""
        self.stream_for_data_trans = torch.cuda.Stream()

    def process(self, data, pos, size):
        """Dispatch ``data`` to the transfer routine matching its layout.

        Args:
            data: one of the four layouts documented on the class.
            pos:  position of the reserved shared-cache region.
            size: size of the reserved shared-cache region.

        Returns:
            The list produced by the matching ``process_0X`` method.

        Raises:
            ValueError: if the leading array's rank is neither 3 nor 4.
        """
        assert len(data) > 0
        item = data[0]
        if isinstance(item, list):  # layouts 2/3: list of 3-element groups
            assert len(item) > 0
            ndim = len(item[0].shape)
            if ndim == 3:
                return self.process_02(data, pos, size)
            if ndim == 4:
                return self.process_03(data, pos, size)
        else:  # layouts 0/1: flat list of arrays
            ndim = len(item.shape)
            if ndim == 3:
                return self.process_00(data, pos, size)
            if ndim == 4:
                return self.process_01(data, pos, size)
        # Was `assert False`; a real exception survives `python -O`.
        raise ValueError(f"unsupported array rank: {ndim}")

    def _transfer_single(self, np_array, pos, size):
        """Copy one array to the GPU inside its own cache window and return
        its shape tuple extended with ``(pos, 0)``."""
        host = torch.from_numpy(np_array)
        with torch.cuda.stream(self.stream_for_data_trans):
            torch.cuda.insert_shared_cache(pos, size)
            dev = host.cuda()
            torch.cuda.synchronize()  # wait until the copy has finished
            record = tuple(dev.shape) + (pos, 0)
            # Drop the device reference before releasing the cache window.
            del dev
            torch.cuda.clear_shared_cache()
        return record

    def _transfer_group(self, arrays, pos, size):
        """Copy a group of arrays to the GPU under one cache window; return
        one ``shape + (pos, block_id)`` tuple per array, block_id being the
        array's index within the group."""
        hosts = [torch.from_numpy(a) for a in arrays]
        with torch.cuda.stream(self.stream_for_data_trans):
            torch.cuda.insert_shared_cache(pos, size)
            devs = [h.cuda() for h in hosts]
            torch.cuda.synchronize()  # wait until all copies have finished
            group = [tuple(d.shape) + (pos, block_id)
                     for block_id, d in enumerate(devs)]
            del devs  # release device references before clearing the cache
            torch.cuda.clear_shared_cache()
        return group

    # 00: [Tensor, ...] -> [Gpu_Tensor, ...]
    def process_00(self, data, pos, size):
        """Transfer 3-D arrays; return ``[(C, H, W, pos, 0), ...]``."""
        return [self._transfer_single(arr, pos, size) for arr in data]

    # 01: [Batch_Tensor, ...] -> [Batch_Gpu_Tensor, ...]
    def process_01(self, data, pos, size):
        """Transfer 4-D arrays; return ``[(B, C, H, W, pos, 0), ...]``.

        Fixed: the original stashed the device tensor on ``self``
        (``self.item``), leaking transient per-call state onto the
        instance; a local is used instead.
        """
        return [self._transfer_single(arr, pos, size) for arr in data]

    # 02: [[Tensor x 3], ...] -> [[Gpu_Tensor x 3], ...]
    def process_02(self, data, pos, size):
        """Transfer groups of three 3-D arrays; each group yields
        ``[(C, H, W, pos, 0), (C, H, W, pos, 1), (C, H, W, pos, 2)]``."""
        res = []
        for group in data:
            assert len(group) == 3
            res.append(self._transfer_group(group, pos, size))
        return res

    # 03: [[Batch_Tensor x 3], ...] -> [[Batch_Gpu_Tensor x 3], ...]
    def process_03(self, data, pos, size):
        """Transfer groups of three 4-D arrays; same as :meth:`process_02`
        with a leading batch dimension in every record.

        Fixed: the original kept ``self.item0..2`` as instance attributes;
        locals are used instead.
        """
        res = []
        for group in data:
            assert len(group) == 3
            res.append(self._transfer_group(group, pos, size))
        return res

    def finish(self):
        """Release the shared cache region on the transfer stream."""
        with torch.cuda.stream(self.stream_for_data_trans):
            torch.cuda.clear_shared_cache()
        
                     