#!/usr/bin/env python2

# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""




#from __future__ import absolute_import
#from __future__ import division
#from __future__ import print_function
#from __future__ import unicode_literals

from collections import defaultdict

import cv2 
#img = cv2.imread('/media/u/82AAFB2EAAFB1D7B/LabelAffixingRobot/res/48.jpg.jpg');

#fourcc = cv2.VideoWriter_fourcc(*"XVID")

import argparse
#import cv2  # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import time


#xvid='X'
#fourcc = cv2.VideoWriter_fourcc(*xvid)
#fourcc = cv2.VideoWriter_fourcc('X','V','I','D')
#fourcc=cv2.cv.CV_FOURCC('a','v','c','1')


from caffe2.python import workspace

from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.vis as vis_utils


import pycocotools.mask as mask_util

c2_utils.import_detectron_ops()

# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)

class pp(object):
    """Pairs one detected person's keypoints with their bounding box.

    Attributes:
        keypoint: keypoint data for a single detection.
        box: the matching detection row (x1, y1, x2, y2, score).
    """

    def __init__(self, keypoint, box):
        self.keypoint = keypoint
        self.box = box

    def __str__(self):
        # One line per field, each terminated by a newline.
        fields = (str(self.keypoint), str(self.box))
        return "\n".join(fields) + "\n"


def proce(
        im, boxes, segms=None, keypoints=None, thresh=0.9, kp_thresh=2):
    """Collect (and print) per-person keypoint/box pairs above a threshold.

    Args:
        im: input image; unused here, kept for interface parity with the
            vis_utils helpers this mirrors.
        boxes: Nx5 array of (x1, y1, x2, y2, score) rows, or the per-class
            list format produced by the detector (converted internally via
            vis_utils.convert_from_cls_format).
        segms: optional segmentation data (per-class list format).
        keypoints: per-detection keypoints; required to build `pp` entries.
        thresh: minimum detection score for a person to be kept.
        kp_thresh: keypoint score threshold (currently unused; kept for
            interface compatibility).

    Returns:
        List of `pp` objects for detections scoring >= thresh; empty list
        when there are no usable detections. (The original returned None;
        returning the collected people makes the function reusable while
        remaining backward compatible for callers that ignore the result.)
    """
    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
            boxes, segms, keypoints)

    people = []
    # im_detect_all can yield no detections; guard instead of crashing on
    # `len(None)` / `None[i]`.
    if boxes is None or keypoints is None:
        return people

    for i in range(len(boxes)):
        score = boxes[i, -1]
        if score < thresh:
            continue
        people.append(pp(keypoints[i], boxes[i]))

    for idx, person in enumerate(people):
        print(idx)
        print(person)

    return people


def dvp(path):
    """Split *path* into (folder, name) at its last '/'.

    Fixes an off-by-one in the original: for a path containing no '/',
    `path.rfind('/')` is -1, so `path[:-1]` silently chopped the last
    character off the "folder". A slash-free path now yields an empty
    folder and the whole string as the name.

    Args:
        path: POSIX-style path string.

    Returns:
        Tuple (folder, name); folder carries no trailing slash.
    """
    folder, _, name = path.rpartition('/')
    return folder, name


def main(path, config_yaml='/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_R-101-FPN_1x.yaml', model_pkl='https://s3-us-west-2.amazonaws.com/detectron/37697946/12_2017_baselines/e2e_keypoint_rcnn_R-101-FPN_1x.yaml.08_45_06.Y14KqbST/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl'):
    """Run keypoint R-CNN inference over a video and write an annotated copy.

    Args:
        path: input video file (anything cv2.VideoCapture accepts).
        config_yaml: Detectron config; default is e2e keypoint R-101-FPN 1x.
        model_pkl: URL (or local path) of trained weights; cached into
            cfg.DOWNLOAD_CACHE on first use.

    Side effects:
        Displays each annotated frame in an OpenCV window (press Esc to
        stop early) and writes "<path>_<config name>_out2.avi" next to the
        input video at a nominal 30 fps.
    """
    merge_cfg_from_file(config_yaml)
    cfg.NUM_GPUS = 1
    weights = cache_url(model_pkl, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)

    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'

    model = infer_engine.initialize_model_from_cfg(weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()

    cap = cv2.VideoCapture(path)
    ret = cap.isOpened()

    fo, fn = dvp(path)
    _, fn_cf = dvp(config_yaml)
    print(fo)
    print(fn)

    k = 0
    vid = None
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Loop until the stream ends or the user presses Esc (key code 27).
    # NOTE: the original used bitwise `&`; the logical `and` is both the
    # intended operator and short-circuits when the stream is exhausted.
    while ret and k != 27:
        ret, frame = cap.read()
        if ret:
            frame = cv2.resize(frame, (960, 540))
            timers = defaultdict(Timer)
            t = time.time()
            with c2_utils.NamedCudaScope(0):
                cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                    model, frame, None, timers=timers)
            print('Inference time: {:.3f}s'.format(time.time() - t))
            # Draw boxes/masks/keypoints directly onto `frame` in place.
            # (A dead `if False:` branch that round-tripped each frame
            # through a temp JPEG has been removed.)
            vis_utils.vis_one_image_opencv(
                frame,
                cls_boxes,
                cls_segms,
                cls_keyps,
                thresh=0.7,
                kp_thresh=2,
                show_box=True,
                dataset=dummy_coco_dataset,
                show_class=True
            )
            cv2.imshow('frame', frame)
            # Lazily create the writer once the frame size is known.
            if vid is None:
                vid = cv2.VideoWriter(
                    path + "_" + fn_cf + "_out2.avi", fourcc, float(30),
                    (frame.shape[1], frame.shape[0]), True)
            vid.write(frame)
            k = cv2.waitKey(1)
    cap.release()
    if vid is not None:
        vid.release()
    cv2.destroyAllWindows()


# boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
#     cls_boxes, cls_segms, cls_keyps)
#
# masks = mask_util.decode(segms)


        # print("------------------------------------im_name-------------------------------------------------------------")
        # print(type(im_name))
        # print(im_name)
        # print("------------------------------------cls_boxes-------------------------------------------------------------")
        # print(type(cls_boxes[1]))
        # print(cls_boxes)
        # print("------------------------------------cls_segms-------------------------------------------------------------")
        # print(type(cls_segms[1]))
        # print(cls_segms)
        # print("------------------------------------cls_keyps-------------------------------------------------------------")
        # print(type(cls_keyps))
        # print(cls_keyps)
        # print("------------------------------------boxes-------------------------------------------------------------")
        # print(type(boxes[1][0]))
        # print(len(boxes))
        # print(boxes)
        # with open("/home/u/boxes.txt", 'wb') as f:
        #     f.write(boxes)
        # print("------------------------------------segms-------------------------------------------------------------")
        # print(type(segms[1]['size'][0]))
        # print(segms)
        # print("------------------------------------keypoints-------------------------------------------------------------")
        # print(type(keypoints))
        # print(keypoints)
        # print("------------------------------------classes-------------------------------------------------------------")
        # print(type(classes[0]))
        # print(classes)
        # # with open("/home/u/classes.txt", 'w') as f:
        # #     f.write(classes)
        # print("------------------------------------masks-------------------------------------------------------------")
        # print(type(masks[0]))
        # print(masks[0])
        # cv2.imwrite("/home/u/mask.png", masks[:, :, 0])
        # print("------------------------------------end-------------------------------------------------------------")


if __name__ == '__main__':
    # Initialize the Caffe2 workspace before any model work.
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    # Each commented-out call below pairs a Detectron 12_2017 baseline config
    # with its matching pretrained-model URL; uncomment exactly one to swap
    # backbones/schedules. The active call uses e2e keypoint R-CNN
    # R-101-FPN s1x on a fixed local video path.
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/rpn_person_only_R-50-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/35998996/12_2017_baselines/rpn_person_only_R-50-FPN_1x.yaml.08_10_08.0ZWmJm6F/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/rpn_person_only_R-101-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/35999521/12_2017_baselines/rpn_person_only_R-101-FPN_1x.yaml.08_20_33.1OkqMmqP/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/rpn_person_only_X-101-64x4d-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/35999553/12_2017_baselines/rpn_person_only_X-101-64x4d-FPN_1x.yaml.08_21_33.ghFzzArr/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/rpn_person_only_X-101-32x8d-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/36760438/12_2017_baselines/rpn_person_only_X-101-32x8d-FPN_1x.yaml.06_04_23.M2oJlDPW/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_R-50-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37651787/12_2017_baselines/keypoint_rcnn_R-50-FPN_1x.yaml.20_00_48.UiwJsTXB/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_R-50-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37651887/12_2017_baselines/keypoint_rcnn_R-50-FPN_s1x.yaml.20_01_40.FDjUQ7VX/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_R-101-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37651996/12_2017_baselines/keypoint_rcnn_R-101-FPN_1x.yaml.20_02_37.eVXnKM2Q/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_R-101-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37652016/12_2017_baselines/keypoint_rcnn_R-101-FPN_s1x.yaml.20_03_32.z86wT97d/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_X-101-64x4d-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37731079/12_2017_baselines/keypoint_rcnn_X-101-64x4d-FPN_1x.yaml.16_40_56.wj7Hg7lX/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_X-101-64x4d-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37731142/12_2017_baselines/keypoint_rcnn_X-101-64x4d-FPN_s1x.yaml.16_41_54.e1sD4Frh/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_X-101-32x8d-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37730253/12_2017_baselines/keypoint_rcnn_X-101-32x8d-FPN_1x.yaml.16_34_24.3G9OcQuR/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37731010/12_2017_baselines/keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml.16_39_51.xt1oMzRk/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37697714/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_s1x.yaml.08_44_03.qrQ0ph6M/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_R-101-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37697946/12_2017_baselines/e2e_keypoint_rcnn_R-101-FPN_1x.yaml.08_45_06.Y14KqbST/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_R-101-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37698009/12_2017_baselines/e2e_keypoint_rcnn_R-101-FPN_s1x.yaml.08_45_57.YkrJgP6O/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-64x4d-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37732355/12_2017_baselines/e2e_keypoint_rcnn_X-101-64x4d-FPN_1x.yaml.16_56_16.yv4t4W8N/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-64x4d-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37732415/12_2017_baselines/e2e_keypoint_rcnn_X-101-64x4d-FPN_s1x.yaml.16_57_48.Spqtq3Sf/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37792158/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_1x.yaml.16_54_16.LgZeo40k/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
    #main('/media/u/B60C3E030C3DBF65/IMG_2335.MOV', '/home/u/detectron/configs/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml', 'https://s3-us-west-2.amazonaws.com/detectron/37732318/12_2017_baselines/e2e_keypoint_rcnn_X-101-32x8d-FPN_s1x.yaml.16_55_09.Lx8H5JVu/output/train/keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival/generalized_rcnn/model_final.pkl')
