'''
Function:
    Implementation of Inferencer
'''
import os
import cv2
import copy
import torch
import warnings
import argparse
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from modules import (
    BuildDataset, BuildSegmentor, Logger, ConfigParser, touchdir, loadckpts
)
warnings.filterwarnings('ignore')


'''parse arguments in command line'''
'''parse arguments in command line'''
def parsecmdargs():
    '''Build the CLI parser, parse sys.argv and return the populated namespace.'''
    parser = argparse.ArgumentParser(description='SSSegmentation is an open source supervised semantic segmentation toolbox based on PyTorch')
    # declarative spec of every supported option: (flag, add_argument kwargs)
    arg_specs = [
        ('--videodir', dict(dest='videodir', help='video directory, which means we let the segmentor inference on the images existed in the given video directory', type=str)),
        ('--videopath', dict(dest='videopath', help='video path, which means we let the segmentor inference on the given video', type=str)),
        ('--outputdir', dict(dest='outputdir', help='directory to save output video(s)', type=str, default='inference_outputs')),
        ('--cfgfilepath', dict(dest='cfgfilepath', help='config file path you want to use', type=str, required=True)),
        ('--ckptspath', dict(dest='ckptspath', help='checkpoints you want to resume from', type=str, required=True)),
    ]
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()


'''Inferencer'''
class Inferencer():
    '''Run a trained segmentor over one video (--videopath) or every mp4 in a
    directory (--videodir) and write the class-color overlay next to each input
    as <name>_seg.mp4 inside --outputdir.'''
    def __init__(self):
        # parse command line arguments and the config file they point to
        self.cmd_args = parsecmdargs()
        self.cfg, self.cfg_file_path = ConfigParser()(self.cmd_args.cfgfilepath)
        # fixed: the message previously referred to imagepath/imagedir, but the arguments are videopath/videodir
        assert self.cmd_args.videopath or self.cmd_args.videodir, 'videopath or videodir should be specified'
        # open full fp32: disable TF32 on matmul and cudnn so results match fp32 training
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
    '''start'''
    def start(self):
        '''Build segmentor + dataset transforms, then segment every frame of every
        selected video and write the 50/50 blended overlay video(s).'''
        cmd_args, cfg, cfg_file_path = self.cmd_args, self.cfg, self.cfg_file_path
        # touch work dir and output dir
        touchdir(cmd_args.outputdir)
        # cuda detect
        use_cuda = torch.cuda.is_available()
        # initialize logger_handle
        logger_handle = Logger(cfg.SEGMENTOR_CFG['logfilepath'])
        # build segmentor; pretrained backbone weights are unnecessary since we resume from ckpts
        cfg.SEGMENTOR_CFG['backbone']['pretrained'] = False
        segmentor = BuildSegmentor(segmentor_cfg=cfg.SEGMENTOR_CFG, mode='TEST')
        if use_cuda: segmentor = segmentor.cuda()
        # build dataset; only its transforms and palette are used here, no images are loaded from it
        cfg.SEGMENTOR_CFG['dataset']['evalmode'] = 'server'
        dataset = BuildDataset(mode='TEST', logger_handle=logger_handle, dataset_cfg=cfg.SEGMENTOR_CFG['dataset'])
        # build palette (one BGR-convertible color per class id)
        palette = dataset.palette
        # load ckpts; fall back to strict=False so partially-matching checkpoints still load
        cmd_args.local_rank = 0
        ckpts = loadckpts(cmd_args.ckptspath)
        try:
            segmentor.load_state_dict(ckpts['model'])
        except Exception as e:
            logger_handle.warning(str(e) + '\n' + 'Try to load ckpts by using strict=False')
            segmentor.load_state_dict(ckpts['model'], strict=False)
        # set eval
        segmentor.eval()
        # collect the video paths to process
        FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        if not cmd_args.videodir:
            videopaths = [cmd_args.videopath]
        else:
            videonames = os.listdir(cmd_args.videodir)
            # fixed: names listed from videodir must be joined with videodir (was wrongly joined with videopath)
            videopaths = [os.path.join(cmd_args.videodir, name) for name in videonames]
        pbar = tqdm(range(len(videopaths)))
        for idx in pbar:
            videopath = videopaths[idx]
            # only mp4 inputs are supported
            if videopath.split('.')[-1] not in ['mp4']:
                continue
            pbar.set_description('Processing %s' % videopath)
            vid = cv2.VideoCapture(str(videopath))
            total_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))  # number of frames in the video
            # output video mirrors the input's fps and resolution
            output_video = cv2.VideoWriter(os.path.join(cmd_args.outputdir, os.path.basename(videopath).replace('.mp4', '_seg.mp4')),
                                           cv2.VideoWriter_fourcc(*'mp4v'), vid.get(cv2.CAP_PROP_FPS),
                                           (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))))
            print(f"{videopath}有{total_frames}帧")
            pbar_1 = tqdm(range(total_frames))
            for _ in pbar_1:
                ret, frame = vid.read()
                pbar_1.set_description(f'Processing {_}')
                # fixed: once read() fails the stream is exhausted and every later read fails too,
                # so break instead of spinning through the remaining frame count with continue
                if not ret:
                    break
                sample_meta = {
                    'image': frame, 'seg_target': None, 'width': frame.shape[1], 'height': frame.shape[0],
                }
                # keep the raw BGR frame for blending before transforms replace it with a tensor
                image = sample_meta['image']
                sample_meta = dataset.synctransforms(sample_meta)
                image_tensor = sample_meta['image'].unsqueeze(0).type(FloatTensor)
                output = segmentor.inference(image_tensor)
                # upsample logits back to the original frame size before taking argmax
                output = F.interpolate(output, size=(sample_meta['height'], sample_meta['width']), mode='bilinear', align_corners=segmentor.align_corners)
                pred = (torch.argmax(output[0], dim=0)).cpu().numpy().astype(np.int32)
                # paint each predicted class with its palette color, reversed RGB -> BGR for cv2
                mask = np.zeros((pred.shape[0], pred.shape[1], 3), dtype=np.uint8)
                for clsid, color in enumerate(palette):
                    mask[pred == clsid, :] = np.array(color)[::-1]
                # 50/50 blend of frame and color mask
                image = image * 0.5 + mask * 0.5
                image = image.astype(np.uint8)
                output_video.write(image)
            vid.release()
            output_video.release()

'''debug'''
def _run():
    # construct the inferencer (parses CLI args + config) and process the video(s)
    client = Inferencer()
    client.start()

if __name__ == '__main__':
    # inference only: disable autograd bookkeeping for the whole run
    with torch.no_grad():
        _run()
