import copy
import json
import math
import time
from typing import List, Any

import cv2
import numpy as np
import torch.nn as nn
import torch
import torch.optim as optim
import torchvision.models
from easydict import EasyDict
from torch.optim.lr_scheduler import StepLR, MultiStepLR
from pytorch_lightning.core import LightningModule
from torch.utils.data import DataLoader
from torchvision.models.resnet import BasicBlock

from chinese_lp import putTextPIL, correct_roi
from sys_mgr import GlobMgr
from FOTS.base import BaseModel
from FOTS.model.modules import shared_conv
from FOTS.model.modules.roi_rotate import ROIRotate
from FOTS.model.modules.crnn import CRNN
from FOTS.data_loader.datautils import de_normalize_image

from FOTS.utils.util import keys, str_label_converter
from FOTS.model.loss import FOTSLoss, eval_model
from FOTS.utils.bbox import Toolbox
from FOTS.utils.post_processor import PostProcessor
from FOTS.utils.util import visualize
from FOTS.rroi_align.functions.rroi_align import RRoiAlignFunction
from FOTS.utils.detect import get_boxes

import torch.nn.functional as F


def visualise_rois(image_tensor, rois, name, mul_scale=4.0):
    """Render every ROI on top of the de-normalised image and show it.

    Each row of *rois* is (batch_idx, cx, cy, w, h, angle) in feature-map
    units (presumably — confirm against the ROI producers); *mul_scale*
    maps the coordinates back to image space.
    """
    canvas = de_normalize_image(image_tensor)
    for idx, row in enumerate(rois.reshape(-1, 6)):
        cx, cy, w, h = (int(v * mul_scale) for v in row[1:5])
        canvas = draw_roi(canvas, ((cx, cy), (w, h), row[5]), idx=idx)
    cv2.imshow(name, canvas)


def show_rois_at_im(im, rois, name, mul_scale=4.0):
    """Draw ROIs onto a copy of *im* and display it for ~20 ms.

    *rois* is either an (N, 6) array of (batch_idx, cx, cy, w, h, angle)
    rows, or a list whose first element is a 5-value box — in that case a
    dummy batch index of 0 is prepended.
    """
    canvas = im.copy()
    if isinstance(rois, list):
        canvas_rois = [0]
        canvas_rois.extend(rois[0])
        rois = np.array(canvas_rois)
    for idx, row in enumerate(rois.reshape(-1, 6)):
        cx, cy, w, h = (int(v * mul_scale) for v in row[1:5])
        draw_roi(canvas, ((cx, cy), (w, h), row[5]), idx=idx)
    cv2.imshow(name, canvas)
    cv2.waitKey(20)


def draw_roi(im, roi, idx=None):
    """Draw one rotated ROI on *im* and return the labelled image.

    Draws: the axis-aligned bounding rectangle (green), the centre point
    (red dot), the four rotated-box edges (alternating blue/red), and an
    optional index label at the first corner.

    Args:
        im: BGR image (np.ndarray); cv2 calls draw in place, the label is
            applied via putTextPIL which returns a new image.
        roi: ((cx, cy), (w, h), angle_degrees) — the cv2.minAreaRect layout.
        idx: optional index to render next to the first corner.

    Returns:
        The image with the label applied.
    """
    (x, y), (w, h), min_rect_angle = roi

    # Axis-aligned extent of the unrotated box.
    cv2.rectangle(im, (int(x - w / 2), int(y - h / 2)),
                  (int(x + w / 2), int(y + h / 2)), (0, 255, 0), 2)

    # Hoisted: the original recomputed cos/sin for every corner coordinate.
    angle_rad = (min_rect_angle / 180) * math.pi
    cos_a = math.cos(angle_rad)
    sin_a = math.sin(angle_rad)
    half_w = w / 2
    half_h = h / 2

    # Corners of the rotated rectangle, in the original order.
    x1 = x + half_w * cos_a - half_h * sin_a
    y1 = y + half_w * sin_a + half_h * cos_a

    x2 = x + half_w * cos_a + half_h * sin_a
    y2 = y + half_w * sin_a - half_h * cos_a

    x3 = x - half_w * cos_a + half_h * sin_a
    y3 = y - half_w * sin_a - half_h * cos_a

    x4 = x - half_w * cos_a - half_h * sin_a
    y4 = y - half_w * sin_a + half_h * cos_a

    cv2.circle(im, (int(x), int(y)), 2, (0, 0, 255), thickness=2)
    cv2.line(im, (int(x1), int(y1)), (int(x2), int(y2)), (250, 0, 0), thickness=2, )
    cv2.line(im, (int(x2), int(y2)), (int(x3), int(y3)), (0, 0, 250), thickness=2)
    cv2.line(im, (int(x3), int(y3)), (int(x4), int(y4)), (250, 0, 0), thickness=2)
    cv2.line(im, (int(x4), int(y4)), (int(x1), int(y1)), (0, 0, 250), thickness=2)
    im = putTextPIL(im, str(idx), (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    return im


from torch.optim.lr_scheduler import _LRScheduler


class CosScheduler(_LRScheduler):
    """Warmup followed by piecewise cosine-annealing learning rate.

    For the first `warmup_steps` steps the LR ramps linearly from 0 up to
    `stages_lrs[0]`.  Afterwards the schedule walks through `stages`: the
    i-th stage lasts `stages[i]` steps and cosine-decays from
    `stages_lrs[i]` down to 0.  Once every stage is consumed the LR stays
    at the final stage's end point (0).
    """

    def __init__(self, optimizer, warmup_steps, stages_lrs: list, stages: list, last_epoch=-1):
        # LR reported before the very first scheduler step.
        self.warmup_lr_init = 0.0001
        self.warmup_steps: int = warmup_steps
        self.stages_lrs: list = stages_lrs  # peak LR of each cosine stage
        self.stages: list = stages          # duration (in steps) of each stage

        # NOTE: attributes must be set before super().__init__, which
        # immediately calls step() -> get_lr().  The deprecated positional
        # `verbose` argument has been dropped.
        super(CosScheduler, self).__init__(optimizer, last_epoch)

    def get_warmup_lr(self):
        """Linear ramp from 0 to stages_lrs[0] over warmup_steps."""
        alpha = float(self.last_epoch) / float(self.warmup_steps)
        return [self.stages_lrs[0] * alpha for _ in self.optimizer.param_groups]

    def get_stage_lr_idx(self):
        """Locate the current cosine stage.

        Returns:
            (stage_index, step_within_stage); once all stages are used up,
            (-1, stages[-1]) so get_lr() evaluates the last stage's cosine
            at its end point (an LR of 0).
        """
        cos_steps = (self.last_epoch - self.warmup_steps)
        idx = 0
        cos_steps = cos_steps - self.stages[idx]
        while cos_steps > 0:
            idx += 1
            if idx >= len(self.stages):
                return -1, self.stages[-1]
            cos_steps = cos_steps - self.stages[idx]
        return idx, cos_steps + self.stages[idx]

    def get_lr(self):
        if self.last_epoch == -1:
            return [self.warmup_lr_init for _ in self.optimizer.param_groups]
        if self.last_epoch < self.warmup_steps:
            return self.get_warmup_lr()
        # BUG FIX: the original wrapped this in `except Exception: print(ex)`,
        # which made get_lr() return None and broke the optimizer later with
        # no useful traceback.  Let errors propagate instead.
        idx, cos_step = self.get_stage_lr_idx()
        return [self.stages_lrs[idx] * (1 + math.cos(cos_step / self.stages[idx] * math.pi)) / 2
                for _ in self.optimizer.param_groups]

class FOTSModel(LightningModule):
    """End-to-end FOTS text spotter.

    A shared ResNet-50 backbone feeds a detection head (score + geometry
    maps) and a CRNN recognition head that reads rotated ROI crops pooled
    out of the shared feature map with rotated ROI-align.

    ``config['model']['mode']`` selects the training regime: 'detection',
    'recognition', or anything else for joint training.  Several methods
    open cv2 debug windows as a side effect.
    """

    def __init__(self, config):
        super(FOTSModel, self).__init__()
        self.config = config

        # 'detection' / 'recognition' / joint — drives forward()'s branches.
        self.mode = config['model']['mode']

        bbNet = torchvision.models.resnet50(pretrained=True)
        self.sharedConv = shared_conv.SharedConv(bbNet, config)

        # Character classes plus CTC 'blank' and '<other>'.
        nclass = len(keys) + 2
        self.recognizer = Recognizer(nclass, config)
        self.detector = Detector(config)
        self.roirotate = RRoiAlignFunction()
        # Height every rotated ROI is pooled to before recognition.
        self.pooled_height = 8
        self.spatial_scale = 1.0

        # Cap on the CRNN sequence length.
        self.max_length_crnn = 10
        # Recognition on predicted boxes runs only while every image yields
        # fewer than this many ROIs.
        self.max_roi_num = 2

        self.max_transcripts_pre_batch = self.config.data_loader.max_transcripts_pre_batch

        self.loss = FOTSLoss(config=config)

        self.postprocessor = PostProcessor()

        # Flipped off late in training (see training_step).
        self.use_gt_roi_box_training = True

    def do_decode_pred(self, preds, length):
        """Greedy-decode CRNN logits into strings.

        Args:
            preds: per-timestep class logits, shape (T, B, C).
            length: per-sample sequence lengths.

        Returns:
            A list of decoded strings with CTC duplicates removed.
        """
        pred_tensor = torch.softmax(preds, dim=-1).argmax(dim=-1).t().detach().cpu()
        decode_pred = str_label_converter.decode(pred_tensor, length.int().detach().cpu())
        if isinstance(decode_pred, list) is False:
            decode_pred = [decode_pred]
        decode_pred = str_label_converter.remove_duplicate(decode_pred)
        return decode_pred

    def get_str_by_single_feat(self, roi_feat, length):
        """Recognise a single ROI feature strip and return its string."""
        preds = self.recognizer(roi_feat, length.cpu())
        preds = preds.permute(1, 0, 2)  # B, T, C -> T, B, C

        decode_pred = self.do_decode_pred(preds, length)
        assert (len(decode_pred) == 1)
        return decode_pred[0]

    def configure_optimizers(self):
        """Adam at 1e-3 with cosine annealing down to 5e-6 over the run."""
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.trainer.max_epochs,
                                                                  eta_min=5e-6)
        return dict(optimizer=optimizer, lr_scheduler=lr_scheduler)

    def show_roi_feat_and_correspond_str(self, roi_feat_raw, pooled_width, lengths, i, name="pred_roi_feat"):
        """Debug helper: upscale one ROI feature strip, overlay its decoded
        string and show it in a window named ``name + i``."""
        roi_feat = F.interpolate(roi_feat_raw.detach().cpu().unsqueeze(0),
                                 [self.pooled_height * 4, pooled_width * 4])[:, 0:3, :, :]
        roi_feat = de_normalize_image(roi_feat[0])
        single_str = self.get_str_by_single_feat(roi_feat_raw.unsqueeze(0), lengths[i])
        roi_feat = putTextPIL(roi_feat, single_str, (5, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 0))
        cv2.imshow("{}{}".format(name, i), roi_feat)
        return i

    def _extract_rois_from_maps(self, score, geometry, scale=4):
        """Decode boxes from per-image score/geometry maps (numpy arrays).

        Shared by the training visualisation and the eval path, where this
        logic used to be duplicated verbatim.

        Returns:
            (pred_boxes, rois): parallel lists with one entry per image that
            had detections.  Each rois entry stacks rows of
            (batch_idx, cx, cy, w, h, angle) scaled down by *scale* into
            feature-map coordinates.
        """
        pred_boxes = []
        rois = []
        for i in range(score.shape[0]):
            bb = get_boxes(score[i], geometry[i], score_thresh=0.9)
            if bb is None:
                continue
            roi = []
            for gt in bb[:, :8].reshape(-1, 4, 2):
                rr = correct_roi(cv2.minAreaRect(gt))
                center, (w, h), min_rect_angle = rr
                roi.append([i, center[0] / scale, center[1] / scale,
                            w / scale, h / scale, min_rect_angle])
            pred_boxes.append(bb)
            rois.append(np.stack(roi))
        return pred_boxes, rois

    def _draw_pred_polys(self, im, pred_boxes, decode_pred, with_index=False):
        """Overlay predicted quadrilaterals and their transcripts on *im*."""
        if pred_boxes is None or len(pred_boxes) == 0:
            return im
        polys = []
        boxes = pred_boxes[:, :8].reshape((-1, 4, 2))
        for cnt, box in enumerate(boxes):
            box = box.detach().cpu().numpy()
            box = Toolbox.sort_poly(box.astype(np.int32))
            # Degenerate ("wrong direction") boxes are skipped.
            if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:
                continue
            poly = np.array([[box[0, 0], box[0, 1]], [box[1, 0], box[1, 1]],
                             [box[2, 0], box[2, 1]], [box[3, 0], box[3, 1]]])
            # BUG FIX: the original did `polys.append(polys)`, appending the
            # list into itself instead of collecting the polygon.
            polys.append(poly)
            p_area = Toolbox.polygon_area(poly)
            if p_area > 0:
                poly = poly[(0, 3, 2, 1), :]

            cv2.polylines(im, [box.astype(np.int32).reshape((-1, 1, 2))], True,
                          color=(0, 0, 255), thickness=2)
            label = decode_pred[cnt] + "_" + str(cnt) if with_index else decode_pred[cnt]
            im = putTextPIL(im, label, (box[0, 0], box[0, 1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
        return im

    def visualise_ret_in_train(self, score_map, geo_map, feature_map, image_tensor):
        """Debug view during training: decode boxes from the current detector
        output, recognise them and display the annotated first image."""
        score = score_map.detach().cpu().numpy()
        geometry = geo_map.detach().cpu().numpy()

        im = de_normalize_image(image_tensor[0])
        preds, lengths = None, None
        pred_boxes, rois = self._extract_rois_from_maps(score, geometry, scale=4)

        # Recognition only runs while each image has fewer than max_roi_num
        # detections.
        if len(rois) > 0 and np.array([roi.shape[0] for roi in rois]).max() < self.max_roi_num:
            pred_boxes = torch.as_tensor(np.concatenate(pred_boxes), dtype=feature_map.dtype,
                                         device=feature_map.device)
            rois = torch.as_tensor(np.concatenate(rois), dtype=feature_map.dtype, device=feature_map.device)
            visualise_rois(image_tensor[0], rois, "roi_pred")

            preds, lengths = self.get_recog_data_map(rois, feature_map)
            decode_pred = self.do_decode_pred(preds, lengths)
            print(" pred in visual:{} ".format(decode_pred))

            im = self._draw_pred_polys(im, pred_boxes, decode_pred, with_index=True)
        cv2.imshow("with x", im)
        cv2.waitKey(1)
        return preds, lengths, pred_boxes

    def rois_format_convert(self, feature_map, rois):
        """Convert (batch, cx, cy, w, h, angle) rows into the layout
        RRoiAlignFunction expects: width/height swapped and the angle
        negated.
        NOTE(review): convention inferred from usage — confirm against the
        rroi_align implementation.
        """
        feature_map = feature_map.contiguous()
        new_rois = rois.clone()
        new_rois[:, 3] = rois[:, 4]
        new_rois[:, 4] = rois[:, 3]
        new_rois[:, 5] = -new_rois[:, 5]
        return feature_map, new_rois

    def visualise_roirotate_inraw(self, image_tensor, rois):
        """Debug view: apply ROI-rotate directly to the raw input images and
        show each rotated crop; ESC pauses until the next keypress."""
        with torch.no_grad():
            ratios = rois[:, 3] / rois[:, 4]
            tmp_pooled_height = 18
            maxratio = ratios.max().item()
            pooled_width = np.ceil(tmp_pooled_height * maxratio).astype(int)

            image_tensor = image_tensor.contiguous()
            feature_map, rois_new = self.rois_format_convert(image_tensor, rois)
            rotated_image_tensors = self.roirotate.apply(feature_map, rois_new, tmp_pooled_height, pooled_width,
                                                         4)
            for i, rotated_im in enumerate(rotated_image_tensors):
                rotated_im = de_normalize_image(rotated_im)
                cv2.imshow("rotated_{}".format(i), rotated_im)
            c = cv2.waitKey(1)
            if c == 27:
                cv2.waitKey(0)

    def get_recog_data_map(self, rois, feature_map):
        """Pool each ROI from the feature map with rotated ROI-align and run
        the recognizer over the resulting strips.

        Returns:
            (preds, lengths): CRNN logits as (T, B, C) and per-ROI sequence
            lengths, clipped to max_length_crnn.
        """
        ratios = rois[:, 3] / rois[:, 4]
        maxratio = ratios.max().item()
        # All ROIs share one pooled width sized for the widest aspect ratio.
        pooled_width = np.ceil(self.pooled_height * maxratio).astype(int)

        feature_map, rois_new = self.rois_format_convert(feature_map, rois)
        roi_features = self.roirotate.apply(feature_map, rois_new
                                            , self.pooled_height, pooled_width, self.spatial_scale)
        lengths = torch.ceil(self.pooled_height * ratios)
        lengths = torch.clip(lengths, 0, self.max_length_crnn)
        preds = self.recognizer(roi_features, lengths.cpu())
        preds = preds.permute(1, 0, 2)  # B, T, C -> T, B, C
        return preds, lengths

    def forward(self, images, boxes=None, rois=None):
        """Run the model.

        Training: returns a dict of maps/transcripts whose filled-in keys
        depend on ``self.mode``.  Eval: when *rois* is given, only
        recognition runs; otherwise boxes are decoded from the detector
        output and, if few enough, recognised — the detection dict is
        returned, or None when nothing usable was found.
        """
        feature_map = self.sharedConv.forward(images)

        if self.training:
            self.visualise_roirotate_inraw(images, rois)

        if self.training and self.mode == 'recognition':
            # Recognition-only training skips the detector entirely.
            preds, lengths = self.get_recog_data_map(rois, feature_map)
            return dict(score_maps=None,
                        geo_maps=None,
                        transcripts=(preds, lengths),
                        bboxes=None,
                        mapping=None,
                        indices=None)
        score_map, geo_map = self.detector(feature_map)

        if self.training:
            visualise_rois(images[0], rois, "roi_gt")
            if self.mode == 'detection':
                return dict(score_maps=score_map,
                            geo_maps=geo_map,
                            transcripts=(None, None),
                            bboxes=boxes,
                            mapping=None,
                            indices=None)

            # Joint training: recognition still runs on the GT ROIs.
            self.visualise_ret_in_train(score_map, geo_map, feature_map, images)
            preds, lengths = self.get_recog_data_map(rois, feature_map)
            return dict(score_maps=score_map,
                        geo_maps=geo_map,
                        transcripts=(preds, lengths),
                        bboxes=boxes
                        )
        else:
            if rois is not None:
                # Caller supplied ROIs — recognition only.
                preds, lengths = self.get_recog_data_map(rois, feature_map)
                return dict(score_maps=None,
                            geo_maps=None,
                            transcripts=(preds, lengths),
                            bboxes=None,
                            mapping=None,
                            indices=None)

            score = score_map.detach().cpu().numpy()
            geometry = geo_map.detach().cpu().numpy()

            im = de_normalize_image(images[0])
            pred_boxes, rois = self._extract_rois_from_maps(score, geometry, scale=4)

            if len(rois) > 0 and np.array([roi.shape[0] for roi in rois]).max() < self.max_roi_num:
                pred_boxes = torch.as_tensor(np.concatenate(pred_boxes), dtype=feature_map.dtype,
                                             device=feature_map.device)
                rois = torch.as_tensor(np.concatenate(rois), dtype=feature_map.dtype, device=feature_map.device)
                pred_mapping = rois[:, 0]

                preds, lengths = self.get_recog_data_map(rois, feature_map)
                decode_pred = self.do_decode_pred(preds, lengths)
                print(" pred in visual:{}".format(decode_pred))

                output = dict(score_maps=score_map,
                              geo_maps=geo_map,
                              transcripts=(preds, lengths),
                              bboxes=pred_boxes,
                              mapping=pred_mapping,
                              indices=None)
                im = self._draw_pred_polys(im, pred_boxes, decode_pred, with_index=False)
                # BUG FIX: the original built `output` and then fell off the
                # end of forward(), implicitly returning None.
                return output
            # No usable detections.
            return None

    def training_step(self, *args, **kwargs):
        """One optimisation step: visualise the GT batch, run forward and
        combine detection and recognition losses."""
        input_data = args[0]
        bboxes = input_data['bboxes']
        rois = input_data['rois']

        # Show the ground-truth boxes and transcripts on the first image.
        image_copy = de_normalize_image(input_data['images'][0])
        transcripts = input_data['transcripts']
        decode_strs = str_label_converter.decode(transcripts[0], torch.tensor(transcripts[1]))
        if isinstance(decode_strs, list) is False:
            decode_strs = [decode_strs]
        rectangles = bboxes.detach().cpu().numpy()
        for i, rectangle in enumerate(rectangles):
            cv2.polylines(image_copy, [rectangle.astype(np.int32).reshape((-1, 1, 2))], True,
                          color=(255, 0, 0), thickness=2)
            image_copy = putTextPIL(image_copy, str(decode_strs[i]) + "_" + str(i),
                                    (int(rectangle[0][0]), int(rectangle[0][1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        cv2.imshow("training using im with gt ", image_copy)

        # Switch away from GT ROIs for the last quarter of training.
        if self.trainer.current_epoch > int((self.trainer.max_epochs) * 3 / 4):
            self.use_gt_roi_box_training = False
        output = self.forward(images=input_data['images'],
                              boxes=bboxes,
                              rois=rois)

        y_true_recog = (input_data['transcripts'][0],
                        input_data['transcripts'][1])

        loss_dict = self.loss(y_true_cls=input_data['score_maps'],
                              y_pred_cls=output['score_maps'],
                              y_true_geo=input_data['geo_maps'],
                              y_pred_geo=output['geo_maps'],
                              y_true_recog=y_true_recog,
                              y_pred_recog=output['transcripts'],
                              training_mask=input_data['training_masks'])

        loss = loss_dict['reg_loss'] + loss_dict['cls_loss'] + loss_dict['recog_loss']
        self.log('loss', loss, logger=True)
        self.log('reg_loss', loss_dict['reg_loss'], logger=True, prog_bar=True)
        self.log('cls_loss', loss_dict['cls_loss'], logger=True, prog_bar=True)
        self.log('recog_loss', loss_dict['recog_loss'], logger=True, prog_bar=True)
        self.log('lr', self.trainer.optimizers[0].param_groups[0]['lr'], logger=True, prog_bar=True)

        # ESC pauses training until another key is pressed.
        c = cv2.waitKey(1)
        if c == 27:
            cv2.waitKey(0)

        return loss


class Recognizer(BaseModel):
    """Recognition head: a CRNN run over pooled ROI feature strips."""

    def __init__(self, nclass, config):
        super().__init__(config)
        # CRNN(8, 32, nclass, 256) — presumably (input height, channels,
        # classes, hidden size); confirm against the CRNN signature.
        self.crnn = CRNN(8, 32, nclass, 256)

    def forward(self, rois, lengths):
        """Return per-timestep class logits for each ROI strip."""
        return self.crnn(rois, lengths)


class Detector(BaseModel):
    """Detection head: per-pixel text score plus geometry (four boundary
    distances and a rotation angle) from the shared 32-channel features."""

    def __init__(self, config):
        super().__init__(config)
        self.scoreMap = nn.Conv2d(32, 1, kernel_size=1)
        self.geoMap = nn.Conv2d(32, 4, kernel_size=1)
        self.angleMap = nn.Conv2d(32, 1, kernel_size=1)
        # Input image size used to rescale the normalised distances.
        self.size = config.data_loader.size

    def forward(self, *input):
        final, = input

        # Text/non-text confidence in [0, 1].
        score = torch.sigmoid(self.scoreMap(final))

        # Translated from the original note: the raw outputs are distances
        # to the four edges normalised to 0-1, but images are resized to
        # `size` x `size` while the GT stores absolute values — so scale
        # the distances back up here.
        geoMap = torch.sigmoid(self.geoMap(final)) * self.size  # TODO: 640 is the image size

        # Angle squashed into (-pi/2, pi/2).
        angleMap = (torch.sigmoid(self.angleMap(final)) - 0.5) * math.pi

        geometry = torch.cat([geoMap, angleMap], dim=1)
        return score, geometry


if __name__ == '__main__':
    # Smoke test: build the model from the pretrain config and repeatedly
    # time a forward pass on random input (Ctrl-C to stop).
    config = EasyDict(json.load(open("../../pretrain.json")))

    model = FOTSModel(config)
    model.cuda()
    model.eval()

    input_ten = torch.randn([1, 3, 640, 640], dtype=torch.float).cuda()
    while True:
        t0 = time.time()
        out = model(input_ten)
        t1 = time.time()
        print("cost: {}".format(t1 - t0))
