import os
from ai_hub import inferServer
import json
import torch
import sys
import numpy as np
import cv2
import cv2 as cv
from mmcv import imnormalize
from mmdet.apis import init_detector, inference_detector


class myserver(inferServer):
    """Inference HTTP server for defect detection on image pairs.

    Receives a query image ('img') and a template image ('img_t'),
    aligns the template to the query with GPU SURF + a RANSAC
    homography, builds a 4-channel input (normalized color image plus a
    grayscale residual map) and runs an mmdetection model on it.

    NOTE: the hook names ``pre_process`` / ``pridect`` / ``post_process``
    and the ``dual_threshhold`` spelling are kept as-is — ``pridect`` is
    the method name the ai_hub inferServer framework invokes.
    """

    def __init__(self, model):
        """Store the detector and warm up the CUDA SURF/matcher pipeline.

        model: an initialized mmdetection detector (see init_detector).
        """
        super().__init__(model)
        print("init_myserver")
        self.model = model
        print("init_model_success")
        # Filename of the request currently in flight; written by
        # pre_process and read back by post_process (the server is
        # stateful, so requests are assumed to be handled serially).
        self.file_name = None

        # GPU SURF keypoint detector + brute-force L2 matcher used for
        # template/query alignment in img_align.
        self.surfGPU = cv.cuda.SURF_CUDA_create(10000, _nOctaves=4, _nOctaveLayers=3, _upright=True)
        self.matcherGPU = cv.cuda.DescriptorMatcher_createBFMatcher(cv.NORM_L2)
        self.std_height = 2048

        # Warm-up pass: exercise the whole SURF + match pipeline once on
        # a known image so the CUDA path is initialized before the first
        # real request arrives.
        warmup_path = '/debug_img/198_19_t20201119103227654_CAM3_2.jpg'
        warmup_bgr = cv2.imread(warmup_path)
        if warmup_bgr is None:
            # Fail fast with a clear message instead of the opaque
            # cv2.error that cvtColor raises on a None image.
            raise FileNotFoundError('warm-up image not found: %s' % warmup_path)
        test_img = cv2.cvtColor(warmup_bgr, cv2.COLOR_BGR2GRAY)

        gpu_im_ = cv.cuda_GpuMat(cv2.equalizeHist(test_img))
        gpu_im_t_ = cv.cuda_GpuMat(cv2.equalizeHist(test_img))
        kp1gpu, d1gpu = self.surfGPU.detectWithDescriptors(gpu_im_, None)
        kp2gpu, d2gpu = self.surfGPU.detectWithDescriptors(gpu_im_t_, None)
        # Results are intentionally discarded; this only warms the GPU path.
        self.surfGPU.downloadKeypoints(kp1gpu)
        self.surfGPU.downloadKeypoints(kp2gpu)
        self.matcherGPU.match(d1gpu, d2gpu)

        print("init_success")

    def result_submit(self, file_name, result, th):
        """Convert an mmdet per-class result into submission records.

        file_name: image name echoed into each record.
        result: sequence indexed by class; each entry is an array of
            [x1, y1, x2, y2, score] rows.
        th: minimum score (exclusive) for a box to be kept.
        Returns a list of dicts with name/category/bbox/score where
        category is 1-based (class index + 1) and score is rounded to
        5 decimals.
        """
        submit = []
        for cls, bbxes in enumerate(result):
            for bbx in bbxes:
                bbx = bbx.tolist()
                if bbx[4] > th:
                    # Only convert coordinates for boxes we actually keep.
                    x1, y1, x2, y2 = int(bbx[0]), int(bbx[1]), int(bbx[2]), int(bbx[3])
                    submit.append({"name": file_name,
                                   "category": cls + 1,
                                   "bbox": [x1, y1, x2, y2],
                                   "score": round(float(bbx[4]), 5)})
        return submit

    def img_align(self, im, im_t):
        """Warp the template image im_t onto the query image im.

        Both inputs are BGR images. Keypoints are matched on
        histogram-equalized, height-normalized grayscale copies with GPU
        SURF; a RANSAC homography maps template points onto the query.
        Returns the warped template, or the original template when
        alignment is not possible (too few matches / RANSAC failure).
        """
        print('img_align')  # fixed log typo ('img_laign')
        rgb_t = im_t

        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im_t = cv2.cvtColor(im_t, cv2.COLOR_BGR2GRAY)

        # Brightness-match the template to the query. Clip before the
        # uint8 cast: the original code wrapped out-of-range values
        # (e.g. 256 -> 0), corrupting bright/dark regions.
        im_t = np.clip(im_t + (np.mean(im) - np.mean(im_t)), 0, 255).astype(np.uint8)

        height, width = im_t.shape[:2]
        h_ratio = 1.0 * height / self.std_height
        std_width = int(round(width / h_ratio))
        w_ratio = 1.0 * width / std_width

        # Resize to a fixed working height so SURF behaves consistently
        # across input resolutions.
        im_ = cv2.equalizeHist(cv2.resize(im, (std_width, self.std_height)))
        im_t_ = cv2.equalizeHist(cv2.resize(im_t, (std_width, self.std_height)))

        gpu_im_ = cv.cuda_GpuMat(im_)
        gpu_im_t_ = cv.cuda_GpuMat(im_t_)
        kp1gpu, d1gpu = self.surfGPU.detectWithDescriptors(gpu_im_, None)
        kp2gpu, d2gpu = self.surfGPU.detectWithDescriptors(gpu_im_t_, None)
        kp1gpu = self.surfGPU.downloadKeypoints(kp1gpu)
        kp2gpu = self.surfGPU.downloadKeypoints(kp2gpu)

        matches_gpu = self.matcherGPU.match(d1gpu, d2gpu)
        # Keep only very close descriptor pairs (absolute L2 distance).
        matches_gpu = [m for m in matches_gpu if m.distance <= 0.7]

        no_of_matches = len(matches_gpu)
        if no_of_matches >= 4:  # homography needs >= 4 correspondences
            p1 = np.zeros((no_of_matches, 2))
            p2 = np.zeros((no_of_matches, 2))
            for i, m in enumerate(matches_gpu):
                p1[i, :] = kp1gpu[m.queryIdx].pt
                p2[i, :] = kp2gpu[m.trainIdx].pt

            # Map working-resolution coordinates back to full resolution.
            p1 = p1 * [[w_ratio, h_ratio]]
            p2 = p2 * [[w_ratio, h_ratio]]

            homography, _ = cv2.findHomography(p2, p1, cv2.RANSAC)
            # findHomography can return None when RANSAC fails; the
            # original code would then crash in warpPerspective.
            if homography is not None:
                return cv2.warpPerspective(rgb_t, homography, (width, height))

        # Too few matches or RANSAC failed: fall back to the raw template.
        return rgb_t

    def dual_threshhold(self, result, th):
        """Return result unchanged if any box scores above th, else None.

        Acts as an image-level gate: when even the best detection is
        weaker than th, the whole image is treated as defect-free.
        """
        max_score = 0
        for cls in result:
            for bbox in cls:
                if bbox[4] > max_score:
                    max_score = bbox[4]
        return None if max_score < th else result

    def pre_process(self, request):
        """Build the 4-channel float32 network input from the request.

        Expects multipart files 'img' (query) and 'img_t' (template).
        Side effect: stores the query filename on self.file_name for
        post_process.
        """
        print("my_pre_process.")

        file_ori = request.files['img']
        file_t = request.files['img_t']
        self.file_name = file_ori.filename
        img = cv2.imdecode(np.frombuffer(file_ori.read(), np.uint8), cv2.IMREAD_COLOR)
        img_temp = cv2.imdecode(np.frombuffer(file_t.read(), np.uint8), cv2.IMREAD_COLOR)

        # Align the template to the query, then compute a mean-centered
        # grayscale residual between the two.
        img_temp = self.img_align(img, img_temp)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        temp_gray = cv2.cvtColor(img_temp, cv2.COLOR_BGR2GRAY)
        res = (img_gray - np.mean(1.0 * img_gray)) - (temp_gray - np.mean(1.0 * temp_gray))
        # 14.47: presumably the residual std measured on the training
        # set — TODO confirm against the training pipeline.
        res = (res - 0.0) / 14.47

        # Standard ImageNet mean/std normalization, BGR -> RGB.
        img = imnormalize(img,
                          mean=np.array([123.675, 116.28, 103.53]),
                          std=np.array([58.395, 57.12, 57.375]),
                          to_rgb=True)
        concat = np.concatenate([img, np.expand_dims(res, axis=2)], axis=2).astype(np.float32)
        print('444444_channel')
        return concat

    def pridect(self, data):
        """Run the detector on the preprocessed input.

        (ai_hub framework hook — the misspelled name is dictated by the
        framework and must not be changed.)
        """
        print('pridect')
        return inference_detector(self.model, data)

    def post_process(self, data):
        """Gate the raw result at image level, then format kept boxes."""
        print('post_process')
        data = self.dual_threshhold(data, th=0.5)
        if data:
            return self.result_submit(self.file_name, data, th=0.001)
        return []

if __name__ == '__main__':
    config_file = '/my_config/cascad_S50.py'
    checkpoint_file = '/dcn_r101/latest.pth'
    print('detectort_init')
    model = init_detector(config_file, checkpoint_file, device='cuda:0')
    print('detectort_init_finish')
    # Use a distinct instance name: the original `myserver = myserver(model)`
    # shadowed the class, making it unusable afterwards.
    server = myserver(model)
    # Run the server; default ip=localhost, port=8080. 'debuge' is the
    # parameter spelling used by the ai_hub inferServer API.
    server.run(debuge=True)
