from ai_hub import inferServer
import torch
from mmdet.apis import init_detector, inference_detector_now

class myserver(inferServer):
    """ai_hub inference server that wraps an MMDetection detector.

    The framework drives the request lifecycle by calling, in order:
    ``pre_process`` -> ``pridect`` -> ``post_process``.  Note that
    ``pridect`` is the hook name the ai_hub framework expects (its own
    misspelling of "predict") — it must not be renamed.
    """

    def __init__(self, model):
        # `model` is an initialized MMDetection detector (see init_detector
        # in the __main__ block); the base class stores it as self.model.
        super().__init__(model)

    def pre_process(self, request):
        """Extract the image bytes from the incoming HTTP request.

        Also remembers the uploaded filename on ``self`` so that
        ``post_process`` can attach it to every detection record.
        """
        print("my pre process")
        file = request.files['img']
        # Touch 'img_t' so a request missing the secondary file still fails
        # fast.  The value itself is unused downstream — presumably a paired
        # (e.g. thermal/IR) companion image; TODO confirm with the caller.
        request.files['img_t']
        self.filename = file.filename
        print(self.filename)
        file_data = file.read()
        return file_data

    def pridect(self, data):
        """Run the detector on the raw image bytes from pre_process.

        NOTE: 'pridect' (sic) is the ai_hub framework's required hook name.
        """
        ret = inference_detector_now(self.model, data)
        return ret

    def post_process(self, data):
        """Convert raw detector output to submission-format dicts.

        Detections scoring below 0.5 are dropped (see output2result).
        """
        infer_score_thre = 0.5
        results = output2result(data, self.filename, infer_score_thre)
        return results


def output2result(result, name, infer_score_thre):
    """Flatten per-class detector output into a list of result dicts.

    ``result`` is indexed by class (class id = index + 1); each entry is an
    iterable of ``(xmin, ymin, xmax, ymax, score)`` boxes.  Boxes scoring
    below ``infer_score_thre`` are discarded.  Each kept box becomes
    ``{'name', 'category', 'score', 'bbox'}`` with integer-truncated
    coordinates and the score rounded to 6 decimal places.
    """
    records = []
    # Class ids are 1-based while the result list is 0-based.
    for class_id, class_boxes in enumerate(result, start=1):
        for box in class_boxes:
            x1, y1, x2, y2, conf = box
            if conf < infer_score_thre:
                continue
            records.append({
                'name': name,
                'category': class_id,
                'score': round(float(conf), 6),
                'bbox': [int(x1), int(y1), int(x2), int(y2)],
            })
    return records

if __name__ == '__main__':
    # Paths to the trained MMDetection config/checkpoint baked into the image.
    config_path = '/workspace/mmdetection/tools/work_dirs/69_b/69_b.py'
    model_path = '/workspace/mmdetection/tools/work_dirs/69_b/latest.pth'
    mymodel = init_detector(config_path, model_path, device='cuda:0')
    # Use a distinct name for the instance: the original code rebound the
    # class name `myserver` to the instance, shadowing the class.
    server = myserver(mymodel)
    # NOTE: 'debuge' (sic) is the keyword the ai_hub framework defines.
    server.run(debuge=True)
