from repo.Retinanet.data import cfg_mnet  # import via the full path to avoid module-lookup clashes when several python functions run in one environment
from repo.Retinanet.models.retina import Retina
from repo.Retinanet.layers.functions.prior_box import PriorBox
from repo.Retinanet.utils.box_utils import decode_landm
from repo.Retinanet.utils.nms.py_cpu_nms import py_cpu_nms
import numpy as np
import torch
import time

class retinanet:
    def __init__(self):
        # Counts pipeline-stage invocations (bumped by preprocess /
        # inference / postprocess).
        self.cnt = 0
        # Precompute the prior (anchor) boxes once for the fixed 320x320
        # input so per-request inference does not rebuild them.
        anchor_generator = PriorBox(cfg_mnet, image_size=(320, 320))
        self.prior_data = anchor_generator.forward().data
        # Maps normalized (x1, y1, x2, y2) box coords back to pixels.
        self.scale = torch.Tensor([320, 320, 320, 320])

    def start(self, weights_path='/home/lx/SmartPipe/src/core/functions/Model/Retinanet/retinanet/weights/mnet_plate.pth'):
        """Create the CUDA streams, reserve shared-cache memory, load the
        RetinaNet weights, and warm up the GPU with one dummy batch.

        Args:
            weights_path: checkpoint file to load. Defaults to the path the
                original code hard-coded, so existing callers are unaffected.
        """
        # One stream per concern: host<->device copies, weight loading, inference.
        self.stream_for_data_trans = torch.cuda.Stream()
        self.stream_for_load_model = torch.cuda.Stream()
        self.stream_for_inference = torch.cuda.Stream()
        # Reserve shared-cache regions for the loading and inference streams.
        # NOTE(review): torch.cuda.insert_shared_cache is not stock PyTorch;
        # it presumably comes from a patched build — confirm its argument
        # semantics (looks like (offset/position, size) in bytes).
        with torch.cuda.stream(self.stream_for_load_model):
            torch.cuda.insert_shared_cache((2 * 1024 + 512) * 1024 * 1024, 256 * 1024 * 1024)
        with torch.cuda.stream(self.stream_for_inference):
            torch.cuda.insert_shared_cache(3 * 1024 * 1024 * 1024, 6 * 1024 * 1024 * 1024)
        self.model = Retina(cfg=cfg_mnet, phase='test')
        with torch.cuda.stream(self.stream_for_load_model):
            self.model.load_state_dict(torch.load(weights_path, map_location=torch.device('cuda')))
            self.model.cuda().eval()
            torch.cuda.synchronize()
        print("load retinanet.")
        # Run one dummy batch so kernels/algorithms are selected before the
        # first real request; the output itself is discarded.
        with torch.cuda.stream(self.stream_for_inference):
            input_tensor = torch.rand([8, 3, 320, 320], device='cuda:0')
            _ = self.model(input_tensor)
        print("warm GPU.")

    # [Mat,Mat,...] -> [Tensor,Tensor,...]
    def preprocess(self, data):
        # data的形式是[numpy, numpy, numpy], numpy的格式为RGB。
        # 拷贝一份
        imgs = [np.float32(i) for i in data]              # 转为浮点型
        imgs = [img - (104, 117, 123) for img in imgs]    # 减去平均值
        imgs = [img.transpose(2, 0, 1) for img in imgs]   # HWC转CHW
        imgs = [np.float32(i) for i in imgs]              # 转为float32
        self.cnt += 1
        return imgs

    # 0: [Tensor,Tensor,...] -> [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...]
    # 1: [Batch_Tensor,Batch_Tensor,...] -> [[Batch_Tensor,Batch_Tensor,Batch_Tensor], [Batch_Tensor,Batch_Tensor,Batch_Tensor],...]
    # 2: [Gpu_Tensor,Gpu_Tensor,...] -> [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...]
    # 3: [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor], [Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...]
    # DONE: 使其支持以上四种泛型
    def inference(self, data):
        assert len(data) > 0
        item = data[0]
        if isinstance(item, tuple): # 2,3
            if len(item) == 5: # 2
                res = eval('self.inference_02(data)')
            elif len(item) == 6: # 3
                res = eval('self.inference_03(data)')
            else:
                assert False
        else: # 0,1
            if len(item.shape) == 3: # 0
                res = eval('self.inference_00(data)')
            elif len(item.shape) == 4: # 1
                res = eval('self.inference_01(data)')
            else:
                assert False
        self.cnt += 1
        return res

    # 0: [Tensor,Tensor,...] -> [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...]
    def inference_00(self, data):
        """Run the model on a list of CHW host arrays; returns, per image,
        [loc, conf, landms] numpy arrays with a leading batch dim of 1."""
        res = []
        # Stack the per-image arrays into one (N, C, H, W) batch tensor.
        input_tensor = torch.from_numpy(np.array(data))
        # Run inference on the dedicated stream.
        with torch.cuda.stream(self.stream_for_inference):
            # Move the batch to the GPU.
            input_tensor = input_tensor.cuda()
            with torch.no_grad():
                self.loc, self.conf, self.landms = self.model(input_tensor)
                # Apply the box variances on the GPU (exp on the CPU is too
                # slow). FIX: slice the last (coordinate) axis of the batched
                # (N, priors, 4) output — [:, :, :2] — matching
                # inference_01/02/03; the original used [:, :2], which
                # sliced the priors axis and scaled the wrong values.
                self.loc[:, :, :2] = self.loc[:, :, :2] * 0.1
                self.loc[:, :, 2:] = torch.exp(self.loc[:, :, 2:] * 0.2)
                # Bring the results back to the host as numpy arrays.
                self.loc = self.loc.detach().cpu().numpy()
                self.conf = self.conf.detach().cpu().numpy()
                self.landms = self.landms.detach().cpu().numpy()
        # Split the batch back into per-image [loc, conf, landms] triples.
        for i in range(self.loc.shape[0]):
            res.append([np.expand_dims(self.loc[i], 0),
                        np.expand_dims(self.conf[i], 0),
                        np.expand_dims(self.landms[i], 0)])
        return res

    # 1: [Batch_Tensor,Batch_Tensor,...] -> [[Batch_Tensor,Batch_Tensor,Batch_Tensor], [Batch_Tensor,Batch_Tensor,Batch_Tensor],...]
    def inference_01(self, data):
        """Run the model on each pre-batched NCHW host array; appends one
        [loc, conf, landms] numpy triple per input batch."""
        res = []
        for batch in data:
            # Wrap the numpy batch as a tensor.
            input_tensor = torch.from_numpy(batch)
            # Run inference on the dedicated stream.
            with torch.cuda.stream(self.stream_for_inference):
                # Move the batch to the GPU.
                input_tensor = input_tensor.cuda()
                with torch.no_grad():
                    self.loc, self.conf, self.landms = self.model(input_tensor)
                    # Apply the box variances on the GPU; exp on the CPU is
                    # too slow.
                    self.loc[:, :, :2] = self.loc[:, :, :2] * 0.1
                    self.loc[:, :, 2:] = torch.exp(self.loc[:, :, 2:] * 0.2)
                    # Copy the outputs back to the host as numpy arrays.
                    self.loc = self.loc.detach().cpu().numpy()
                    self.conf = self.conf.detach().cpu().numpy()
                    self.landms = self.landms.detach().cpu().numpy()
            # Collect this batch's triple.
            res.append([self.loc, self.conf, self.landms])
        return res

    # 2: [Gpu_Tensor,Gpu_Tensor,...] -> [[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],[Gpu_Tensor,Gpu_Tensor,Gpu_Tensor],...]
    def inference_02(self, data):
        """Run the model on single images already resident in GPU shared
        cache, each described by a (c, h, w, pos, size) tuple; outputs are
        written back into the shared cache and returned as
        (dim0, dim1, dim2, pos, output_index) descriptor tuples."""
        res = []
        for i in data:
            assert len(i) == 5
            c, h, w, pos, size = i[0], i[1], i[2], i[3], i[4]
            # Map the shared-cache region and materialize the input tensor.
            # NOTE(review): torch.cuda.insert_shared_cache is not stock
            # PyTorch — presumably torch.empty then aliases the cached data;
            # confirm against the patched allocator.
            with torch.cuda.stream(self.stream_for_data_trans):
                torch.cuda.insert_shared_cache(pos, size)
                input_tensor = torch.empty([c, h, w], device='cuda:0')
                torch.cuda.synchronize()
            # Run inference on the compute stream.
            with torch.cuda.stream(self.stream_for_inference):
                with torch.no_grad():
                    loc, conf, landms = self.model(input_tensor.unsqueeze(0))
                    # Apply the box variances on the GPU (exp on the CPU is
                    # too slow). BUG FIX: the original mutated self.loc — a
                    # stale attribute left over from a previous call (or an
                    # AttributeError on the first call) — instead of the
                    # local `loc` produced just above.
                    loc[:, :, :2] = loc[:, :, :2] * 0.1
                    loc[:, :, 2:] = torch.exp(loc[:, :, 2:] * 0.2)
                    # Add a trailing singleton dim and drop the batch dim.
                    loc = loc.unsqueeze(3).squeeze(0)
                    conf = conf.unsqueeze(3).squeeze(0)
                    landms = landms.unsqueeze(3).squeeze(0)
                torch.cuda.synchronize()
            # Copy the outputs to their shared-cache output location.
            with torch.cuda.stream(self.stream_for_data_trans):
                del input_tensor
                loc_ = torch.empty(loc.shape, device='cuda:0')
                conf_ = torch.empty(conf.shape, device='cuda:0')
                landms_ = torch.empty(landms.shape, device='cuda:0')
                loc_.copy_(loc)
                conf_.copy_(conf)
                landms_.copy_(landms)
                torch.cuda.synchronize()
                res.append([(loc_.shape[0], loc_.shape[1], loc_.shape[2], pos, 0),
                            (conf_.shape[0], conf_.shape[1], conf_.shape[2], pos, 1),
                            (landms_.shape[0], landms_.shape[1], landms_.shape[2], pos, 2)])
                del loc_
                del conf_
                del landms_
                torch.cuda.clear_shared_cache()
        return res

    # 3: [Batch_Gpu_Tensor,Batch_Gpu_Tensor,...] -> [[Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor], [Batch_Gpu_Tensor,Batch_Gpu_Tensor,Batch_Gpu_Tensor],...]
    def inference_03(self, data):
        """Run the model on batches already resident in GPU shared cache.

        Each element of data is a (b, c, h, w, pos, size) tuple describing
        a batch tensor in the shared cache; the three network outputs are
        copied back into the cache and returned as
        (dim0, dim1, dim2, dim3, pos, output_index) descriptor tuples.
        """
        res = []
        # Fetch each descriptor and run it through the model.
        for i in data:
            assert len(i) == 6
            b, c, h, w, pos, size = i[0], i[1], i[2], i[3], i[4], i[5]
            # Obtain the input tensor from the shared cache.
            # NOTE(review): torch.cuda.insert_shared_cache is not stock
            # PyTorch — presumably it maps the region at (pos, size) so the
            # following torch.empty aliases the cached data; confirm against
            # the patched build.
            with torch.cuda.stream(self.stream_for_data_trans):
                torch.cuda.insert_shared_cache(pos, size)
                self.input_tensor = torch.empty([b,c,h,w], device='cuda:0')
                torch.cuda.synchronize()
            # Copy into the compute space and run inference.
            with torch.cuda.stream(self.stream_for_inference):
                with torch.no_grad():
                    self.loc, self.conf, self.landms = self.model(self.input_tensor)
                    # Apply the box variances on the GPU; exp on the CPU is too slow.
                    self.loc[:,:,:2] = self.loc[:,:,:2]*0.1
                    self.loc[:,:,2:] = torch.exp(self.loc[:,:,2:]*0.2)
                    # Add a trailing singleton dimension.
                    self.loc = self.loc.unsqueeze(3)
                    self.conf = self.conf.unsqueeze(3)
                    self.landms = self.landms.unsqueeze(3)
                    torch.cuda.synchronize()
            # Copy the outputs to their shared-cache output location.
            with torch.cuda.stream(self.stream_for_data_trans):
                # Free the input before allocating the output buffers.
                del self.input_tensor
                self.loc_ = torch.empty(self.loc.shape, device = 'cuda:0')
                self.conf_ = torch.empty(self.conf.shape, device = 'cuda:0')
                self.landms_ = torch.empty(self.landms.shape, device = 'cuda:0')
                self.loc_.copy_(self.loc)
                self.conf_.copy_(self.conf)
                self.landms_.copy_(self.landms)
                torch.cuda.synchronize()
                # Describe each output by its 4 dims, the cache position and
                # an output index (0 = loc, 1 = conf, 2 = landms).
                res.append([(self.loc_.shape[0], self.loc_.shape[1], self.loc_.shape[2], self.loc_.shape[3], pos, 0),(self.conf_.shape[0], self.conf_.shape[1], self.conf_.shape[2], self.conf_.shape[3], pos, 1),(self.landms_.shape[0], self.landms_.shape[1], self.landms_.shape[2], self.landms_.shape[3], pos, 2)])
                del self.loc_
                del self.conf_
                del self.landms_
                torch.cuda.clear_shared_cache()
            # Drop the compute-stream references so the memory can be reused.
            with torch.cuda.stream(self.stream_for_inference):
                del self.loc
                del self.conf
                del self.landms
        return res

    # [[Tensor,Tensor,Tensor],[Tensor,Tensor,Tensor],...] -> [[Rect,Rect,...],[Rect,Rect,...],...]
    def postprocess(self, data):
        """Decode raw network outputs into plate rectangles.

        data: list of [loc, conf, landms] numpy triples, one per image.
        Returns one list of (x, y, width, height) float tuples per image.
        """
        res = []
        for index in range(len(data)):
            # Wrap the numpy outputs as tensors for the decode math.
            loc = torch.from_numpy(data[index][0])
            conf = torch.from_numpy(data[index][1])
            landms = torch.from_numpy(data[index][2])
            # Drop the trailing singleton dim added by inference_02/03
            # ((priors, k, 1) -> (priors, k)).
            if len(loc.shape) == 3:
                loc = loc.squeeze(2)
            if len(conf.shape) == 3:
                conf = conf.squeeze(2)
            if len(data[index][2].shape) == 3:
                landms = landms.squeeze(2)

            # Decode centers/sizes against the priors (the variance factors
            # were already applied on the GPU during inference), then convert
            # (cx, cy, w, h) -> (x1, y1, x2, y2).
            boxes0 = self.prior_data[:, :2] + loc[:, :2] * self.prior_data[:, 2:]
            boxes1 = self.prior_data[:, 2:] * loc[:, 2:]
            boxes = torch.cat((boxes0, boxes1), 1)
            boxes[:, :2] -= boxes[:, 2:] / 2
            boxes[:, 2:] += boxes[:, :2]

            # Scale normalized coords to 320x320 pixels.
            boxes = boxes * self.scale
            boxes = boxes.numpy()

            # Column 1 is the plate/foreground score.
            scores = conf.data.numpy()[:, 1]

            # Decode the landmarks and scale them to pixel coordinates.
            # BUG FIX: the original reassigned from the raw `landms`,
            # discarding the decode_landm result. (The rectangles returned
            # below are unaffected either way — landmarks are only
            # concatenated into `dets` and never read afterwards.)
            landms_ = decode_landm(landms.data, self.prior_data, cfg_mnet['variance'])
            landms_ = landms_.numpy() * 320

            # Ignore low scores.
            inds = np.where(scores > 0.02)[0]
            boxes = boxes[inds]
            landms_ = landms_[inds]
            scores = scores[inds]

            # Keep top-K before NMS.
            order = scores.argsort()[::-1][:1000]
            boxes = boxes[order]
            landms_ = landms_[order]
            scores = scores[order]

            # Non-maximum suppression at IoU threshold 0.4.
            dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
            keep = py_cpu_nms(dets, 0.4)
            dets = dets[keep, :]
            landms_ = landms_[keep]
            # Keep top-K after NMS.
            dets = dets[:500, :]
            landms_ = landms_[:500, :]
            dets = np.concatenate((dets, landms_), axis=1)
            # Emit each plate as an (x, y, width, height) rectangle.
            plates = []
            for b in dets:
                plates.append((float(b[0]), float(b[1]),
                               float(b[2] - b[0]), float(b[3] - b[1])))
            res.append(plates)
        self.cnt += 1
        return res

    def finish(self):
        """Release the shared-cache reservations held by every stream.

        NOTE(review): torch.cuda.clear_shared_cache is not stock PyTorch;
        it presumably frees the regions reserved via insert_shared_cache.
        """
        all_streams = (self.stream_for_data_trans,
                       self.stream_for_load_model,
                       self.stream_for_inference)
        for stream in all_streams:
            with torch.cuda.stream(stream):
                torch.cuda.clear_shared_cache()

