import torch
import numpy as np
from repo.yolov5.models.experimental import attempt_load
from repo.yolov5.utils.general import non_max_suppression
import time

# 载入模型
# Model wrapper
class yolo:
    """YOLOv5 inference wrapper driven by dedicated CUDA streams.

    Manages three streams (data transfer, model loading, inference) and a
    shared GPU cache so inputs/outputs can live at fixed device positions.

    NOTE(review): relies on non-standard extensions
    ``torch.cuda.insert_shared_cache`` / ``torch.cuda.clear_shared_cache``
    from a patched PyTorch build — confirm availability at deploy time.
    """

    def __init__(self, width, height):
        """Remember the source frame size used to normalise detections.

        Args:
            width: frame width in pixels.
            height: frame height in pixels.
        """
        self.width = int(width)
        self.height = int(height)
        self.cnt = 0  # running count of pipeline-stage calls

    # Load the model
    def start(self):
        """Create CUDA streams, reserve shared-cache regions, load the
        model and warm up the GPU with one dummy batch."""
        # One stream per concern so transfer, loading and compute can overlap.
        self.stream_for_data_trans = torch.cuda.Stream()
        self.stream_for_load_model = torch.cuda.Stream()
        self.stream_for_inference = torch.cuda.Stream()
        # Reserve shared-cache space per stream: (position, size) in bytes.
        with torch.cuda.stream(self.stream_for_load_model):
            torch.cuda.insert_shared_cache(2 * 1024 * 1024 * 1024, 512 * 1024 * 1024)
        with torch.cuda.stream(self.stream_for_inference):
            torch.cuda.insert_shared_cache(3 * 1024 * 1024 * 1024, 6 * 1024 * 1024 * 1024)
        # Load the model on its dedicated stream.
        with torch.cuda.stream(self.stream_for_load_model):
            self.model = attempt_load('yolov5m', map_location=torch.device('cuda')).eval()
            # Tag every Detect head with this stream so tensors it creates
            # later stay on it and are not touched during computation.
            for name, module in self.model.named_children():
                for n, m in module.named_children():
                    if m.__class__.__name__ == 'Detect':
                        setattr(m, 'stream', self.stream_for_load_model)
            torch.cuda.synchronize()
        print("load yolo.")
        # Warm-up pass so the first real inference skips lazy initialisation.
        with torch.cuda.stream(self.stream_for_inference):
            input_tensor = torch.rand([8, 3, 384, 640], device='cuda:0')
            self.model(input_tensor)
        print("warm GPU.")

    # [Mat,Mat,...] -> [Tensor,Tensor,...]
    def preprocess(self, data):
        """Convert a non-empty list of HWC images to CHW float32 arrays
        scaled to [0, 1].

        Args:
            data: list of numpy images, HWC channel order — presumably
                uint8 BGR frames from OpenCV; TODO confirm with caller.

        Returns:
            List of float32 numpy arrays, CHW, values in [0, 1].
        """
        assert len(data) > 0
        # HWC -> CHW, cast to float32 and normalise in a single pass.
        imgs = [np.float32(img.transpose(2, 0, 1)) / 255 for img in data]
        self.cnt += 1
        return imgs

    # 0: [Tensor,Tensor,...] -> [Tensor,Tensor,...]
    # 1: [Batch_Tensor,Batch_Tensor,...] -> [Batch_Tensor,Batch_Tensor,...]
    # 2: [Gpu_Tensor,Gpu_Tensor,...] -> [Gpu_Tensor,Gpu_Tensor,...]
    # 3: [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
    # Tensor: numpy array, dim 3
    # Batch_Tensor: numpy array, dim 4
    # Gpu_Tensor: tuple, (C, H, W, pos, size) -> (C, H, W, pos, block_id)
    # Batch_Gpu_Tensor: tuple, (B, C, H, W, pos, size) -> (B, C, H, W, pos, block_id)
    def inference(self, data):
        """Dispatch to the matching inference_* variant by input layout.

        Direct method calls replace the original ``eval(...)`` dispatch —
        ``eval`` added nothing but an injection-shaped indirection.
        """
        assert len(data) > 0
        item = data[0]
        if isinstance(item, tuple):  # variants 2, 3: GPU cache descriptors
            if len(item) == 5:
                res = self.inference_02(data)
            elif len(item) == 6:
                res = self.inference_03(data)
            else:
                assert False
        else:  # variants 0, 1: host numpy arrays
            if len(item.shape) == 3:
                res = self.inference_00(data)
            elif len(item.shape) == 4:
                res = self.inference_01(data)
            else:
                assert False
        self.cnt += 1
        return res

    # [Tensor,Tensor,...] -> [Tensor,Tensor,...]
    def inference_00(self, data):
        """Stack single CHW arrays into one batch, run one forward pass on
        the GPU, then split predictions back into per-image arrays."""
        # Stack into a single (N, C, H, W) batch.
        input_tensor = torch.from_numpy(np.array(data))
        with torch.cuda.stream(self.stream_for_inference):
            # Host -> device transfer.
            input_tensor = input_tensor.cuda()
            with torch.no_grad():
                preds = self.model(input_tensor)
                # Device -> host copy of the detection tensor.
                self.prebs = preds[0].cpu().detach().numpy()
        # One (1, ...) array per input image.
        return [np.expand_dims(p, 0) for p in self.prebs]

    # 1: [Batch_Tensor,Batch_Tensor,...] -> [Batch_Tensor,Batch_Tensor,...]
    def inference_01(self, data):
        """Run each pre-batched (B, C, H, W) array through the model and
        return the per-batch prediction arrays on the host."""
        res = []
        for batch in data:
            input_tensor = torch.from_numpy(batch)
            with torch.cuda.stream(self.stream_for_inference):
                # Host -> device transfer.
                input_tensor = input_tensor.cuda()
                with torch.no_grad():
                    preds = self.model(input_tensor)
                    # Device -> host copy, appended in input order.
                    res.append(preds[0].cpu().detach().numpy())
        return res

    # 2: [Gpu_Tensor,Gpu_Tensor,...] -> [Gpu_Tensor,Gpu_Tensor,...]
    def inference_02(self, data):
        """Consume single images already resident in the shared GPU cache
        and leave predictions at the same cache position.

        NOTE(review): the insert/empty/del/clear sequence makes
        ``torch.empty`` map onto the shared cache region — the statement
        order is load-bearing; do not reorder.
        """
        res = []
        for item in data:
            assert len(item) == 5
            c, h, w, pos, size = item
            # Map the cached input region into a tensor on the transfer stream.
            with torch.cuda.stream(self.stream_for_data_trans):
                torch.cuda.insert_shared_cache(pos, size)
                self.input_tensor = torch.empty([c, h, w], device='cuda:0')
                torch.cuda.synchronize()
            # Run the model on the inference stream.
            with torch.cuda.stream(self.stream_for_inference):
                with torch.no_grad():
                    self.preds = self.model(self.input_tensor.unsqueeze(0))
                    self.preds = self.preds[0].unsqueeze(3).squeeze(0)
                torch.cuda.synchronize()
            # Copy the predictions into the output cache slot.
            with torch.cuda.stream(self.stream_for_data_trans):
                del self.input_tensor
                out = torch.empty(self.preds.shape, device='cuda:0')
                out.copy_(self.preds)
                torch.cuda.synchronize()
                # block_id is always 0 here — TODO confirm with consumers.
                res.append((out.shape[0], out.shape[1], out.shape[2], pos, 0))
                del out
                torch.cuda.clear_shared_cache()
        return res

    # 3: [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...]
    def inference_03(self, data):
        """Batched variant of :meth:`inference_02`: consume (B, C, H, W)
        batches resident in the shared GPU cache and leave predictions at
        the same cache position.

        NOTE(review): statement order is load-bearing, as in inference_02.
        """
        res = []
        for item in data:
            assert len(item) == 6
            b, c, h, w, pos, size = item
            # Map the cached input region into a tensor on the transfer stream.
            with torch.cuda.stream(self.stream_for_data_trans):
                torch.cuda.insert_shared_cache(pos, size)
                self.input_tensor = torch.empty([b, c, h, w], device='cuda:0')
                torch.cuda.synchronize()
            # Run the model on the inference stream.
            with torch.cuda.stream(self.stream_for_inference):
                with torch.no_grad():
                    self.preds = self.model(self.input_tensor)[0]
                    self.preds = self.preds.unsqueeze(3)
                    torch.cuda.synchronize()
            # Copy the predictions into the output cache slot.
            with torch.cuda.stream(self.stream_for_data_trans):
                del self.input_tensor
                out = torch.empty(self.preds.shape, device='cuda:0')
                out.copy_(self.preds)
                torch.cuda.synchronize()
                # block_id is always 0 here — TODO confirm with consumers.
                res.append((out.shape[0], out.shape[1], out.shape[2], out.shape[3], pos, 0))
                del out
                torch.cuda.clear_shared_cache()
            # Free the compute-side prediction tensor.
            with torch.cuda.stream(self.stream_for_inference):
                del self.preds
        return res

    # [Tensor,Tensor,...] -> [[Rect,Rect,...],[Rect,Rect,...],...]
    def postprocess(self, data):
        """Run NMS on raw predictions and return, per image, normalised
        (x, y, w, h) boxes for vehicles inside the region of interest.

        Returns:
            List (one entry per image) of lists of (x, y, w, h) tuples,
            all values expressed as fractions of the frame size.
        """
        conf = 0.25  # confidence threshold for NMS
        iou = 0.45   # IoU threshold for NMS
        target_classes = [2, 5, 7]  # presumably COCO car/bus/truck — TODO confirm
        rectangles_list = []
        # Normalise input rank to what non_max_suppression expects.
        data_tensor = torch.from_numpy(np.array(data))
        if len(data_tensor.shape) == 3:
            data_tensor = data_tensor.unsqueeze(1)
        elif len(data_tensor.shape) == 4:
            data_tensor = data_tensor.squeeze(3)
            if len(data_tensor.shape) == 3:
                data_tensor = data_tensor.unsqueeze(1)
        else:
            assert False
        # Non-maximum suppression; yields (x1, y1, x2, y2, conf, cls) rows.
        detections = non_max_suppression(data_tensor, conf_thres=conf, iou_thres=iou)
        for dets in detections:
            rectangles = []
            for item in dets:
                x = float(item[0])
                y = float(item[1])
                w = float(item[2]) - x
                h = float(item[3]) - y
                c = int(item[5])
                # Convert to frame-relative coordinates.
                xx, yy, ww, hh = x / self.width, y / self.height, w / self.width, h / self.height
                # Keep vehicles whose top-left corner lies in the ROI:
                # y in [0.5, 0.95], x in [0.02, 0.8].
                if 0.5 <= yy <= 0.95 and 0.02 <= xx <= 0.8 and c in target_classes:
                    # Packaged like a cv::Rect: (x, y, w, h).
                    rectangles.append((xx, yy, ww, hh))
            rectangles_list.append(rectangles)
        self.cnt += 1
        # print(rectangles_list)
        return rectangles_list

    def finish(self):
        """Release the shared-cache regions held by every stream."""
        for stream in (self.stream_for_data_trans,
                       self.stream_for_load_model,
                       self.stream_for_inference):
            with torch.cuda.stream(stream):
                torch.cuda.clear_shared_cache()