import torch
import numpy as np
import time

class transferToHostMemory:
    """Copy tensors resident in a CUDA shared-memory cache to host NumPy arrays.

    NOTE(review): relies on ``torch.cuda.insert_shared_cache`` and
    ``torch.cuda.clear_shared_cache``, which are not part of stock PyTorch --
    presumably a customized torch build; confirm against the runtime.

    Lifecycle: call ``start()`` once to create the transfer stream, then
    ``process()`` any number of times, then ``finish()`` to release the cache.
    """

    def __init__(self):
        pass

    # Acquire resources.
    def start(self):
        # Dedicated CUDA stream used for every device-to-host copy, so the
        # transfers do not serialize against work on the default stream.
        self.stream_for_data_trans = torch.cuda.Stream()

    # Accepted input layouts (dispatched on the shape of the first element):
    # 0: [Gpu_Tensor,Gpu_Tensor,...]             -> [ndarray(C,H,W), ...]
    # 1: [Batch_Gpu_Tensor,...]                  -> [ndarray(B,C,H,W), ...]
    # 2: [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...]-> [[ndarray(C,H,W) x3], ...]
    # 3: [[Batch_Gpu_Tensor x3], ...]            -> [[ndarray(B,C,H,W) x3], ...]
    # Gpu_Tensor:       tuple (C, H, W, pos, size)
    # Batch_Gpu_Tensor: tuple (B, C, H, W, pos, size)
    def process(self, data):
        """Dispatch *data* to the matching process_0X handler.

        Raises AssertionError on empty input or an unrecognized layout.
        """
        assert len(data) > 0
        item = data[0]
        # Fixed: the handlers were previously invoked through eval('...'),
        # which is both unsafe and unnecessary -- call them directly.
        if isinstance(item, list):  # grouped layouts 2 and 3
            assert len(item) > 0
            if len(item[0]) == 5:  # layout 2
                return self.process_02(data)
            if len(item[0]) == 6:  # layout 3
                return self.process_03(data)
            assert False
        elif isinstance(item, tuple):  # flat layouts 0 and 1
            if len(item) == 5:  # layout 0
                return self.process_00(data)
            if len(item) == 6:  # layout 1
                return self.process_01(data)
            assert False
        else:
            assert False

    def _fetch_group(self, shapes, pos, size):
        """Copy one shared-cache region to host and return NumPy arrays.

        Maps the region at (pos, size) into the CUDA allocator, views it as
        one tensor per entry in *shapes* (allocation order must match the
        producer's layout within the region -- TODO confirm), copies each to
        the CPU, then unmaps the region.
        """
        with torch.cuda.stream(self.stream_for_data_trans):
            # Map the shared region so the torch.empty() calls below are
            # served from it rather than from fresh device memory.
            torch.cuda.insert_shared_cache(pos, size)
            gpu_tensors = [torch.empty(shape, device='cuda:0') for shape in shapes]
            host_tensors = [t.cpu() for t in gpu_tensors]
            # Ensure the async copies finished before the region is released.
            torch.cuda.synchronize()
            # Drop the GPU views before clearing the mapped region.
            del gpu_tensors
            torch.cuda.clear_shared_cache()
        return [t.detach().numpy() for t in host_tensors]

    # 0: [Gpu_Tensor,Gpu_Tensor,...] -> [ndarray(C,H,W), ...]
    def process_00(self, data):
        res = []
        for item in data:
            assert len(item) == 5
            c, h, w, pos, size = item
            # Consistency fix: use the shared transfer stream like the other
            # handlers instead of allocating a fresh stream per element.
            res.append(self._fetch_group([[c, h, w]], pos, size)[0])
        return res

    # 1: [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [ndarray(B,C,H,W), ...]
    def process_01(self, data):
        res = []
        for item in data:
            assert len(item) == 6
            b, c, h, w, pos, size = item
            res.append(self._fetch_group([[b, c, h, w]], pos, size)[0])
        return res

    # 2: [[Gpu_Tensor x3], ...] -> [[ndarray(C,H,W) x3], ...]
    def process_02(self, data):
        res = []
        for group in data:
            assert len(group) == 3
            assert all(len(t) == 5 for t in group)
            # All three tensors of a group live in the same cached region,
            # addressed by the first descriptor's (pos, size).
            pos, size = group[0][3], group[0][4]
            shapes = [[t[0], t[1], t[2]] for t in group]
            res.append(self._fetch_group(shapes, pos, size))
        return res

    # 3: [[Batch_Gpu_Tensor x3], ...] -> [[ndarray(B,C,H,W) x3], ...]
    def process_03(self, data):
        res = []
        for group in data:
            assert len(group) == 3
            assert all(len(t) == 6 for t in group)
            # Shared region for the whole group, as in process_02.
            pos, size = group[0][4], group[0][5]
            shapes = [[t[0], t[1], t[2], t[3]] for t in group]
            res.append(self._fetch_group(shapes, pos, size))
        return res

    # Release resources.
    def finish(self):
        # Drop any region still mapped in the shared cache.
        with torch.cuda.stream(self.stream_for_data_trans):
            torch.cuda.clear_shared_cache()