#coding=utf-8
from __future__ import print_function
import os, sys

import datetime
import oss2
import commands
import glob
import redis
import shutil
import numpy as np
import cv2
import pickle
from moviepy.editor import VideoFileClip
from utils_undistortion import undist_dic
from multiprocessing import Manager
from PIL import Image
import yaml
import time
import multiprocessing
from multiprocessing import Pool

import _init_paths
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import skimage.io
import json
from scipy import misc
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import scipy.io as sio
import caffe
import argparse
import csv

import os
import errno
import socket
import random

import tensorflow as tf
import tensorflow.contrib.slim as slim

import httplib 
import json

import os.path as osp
import logging

# --- Module-level logging / environment setup (runs at import time) ---
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Log file is named after the start minute, e.g. "201712011530.log".
rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
logfile = rq + '.log'
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.DEBUG)  # handler threshold; effective level is the logger's INFO
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)

# Silence TensorFlow's C++ logging (3 = errors only).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
REPO_DIRNAME = os.path.dirname(os.path.abspath(__file__))

VIDEO_NUMBER_ONE_DAY = 3240  # 9h * 60 min * 6 videos per minute

def expPoster():
    """Stamp the shared ``resExpDict`` with a report time/level and log it.

    Reads the module-level globals ``resExpDict`` and ``config``.
    NOTE(review): the actual HTTP POST (``connt.request``) is commented out,
    so currently this only logs the exception payload; the connection object
    is created but never used.
    """
    reportTime = time.strftime("%Y-%m-%d %H:%M:%S")
    resExpDict["reportDate"] = str(reportTime)
    resExpDict["errorLevel"] = 1000
    logger.error(str(resExpDict))
    requrl = "http://" + str(config['returnUrl']['host']) + ":" + str(config['returnUrl']['exceptionPort'])
    connt = httplib.HTTPConnection(str(config['returnUrl']['host']),config['returnUrl']['exceptionPort'])
    headerdata = {"Content-type": "application/json"}
    #connt.request('POST',requrl,json.dumps(resExpDict),headerdata)

# OSS authorization setup.
def oss_auth():
    """Authenticate against Aliyun OSS and return the project bucket.

    Credentials are read from the module-level ``config`` dict.
    :return: an ``oss2.Bucket`` handle for the 'vdodev-smpv' bucket.
    """
    auth = oss2.Auth(config['oss']['access_key_id'], config['oss']['access_key_secret'])
    bucket = oss2.Bucket(auth, 'oss-cn-shanghai.aliyuncs.com', 'vdodev-smpv')
    return bucket

def read_video_frame(local_path, seconds=10, frame=4):
    """Read sampled frames from a video file.

    note: depends on moviepy, which must be installed separately.
    :param local_path: path to the video file
    :param seconds: video duration (seconds) to sample
    :param frame: frames to keep per second
    :return: list of BGR frames (np.uint8). On decode failure, a list of
        ``seconds * frame`` black placeholder frames is returned so that
        downstream code that indexes / iterates the result keeps working.
    """
    number = seconds * frame  # total frames to keep (default 10 * 4 = 40)
    try:
        clip = VideoFileClip(local_path)  # read video
    except Exception as e:
        resExpDict["errorCode"] = 1002
        resExpDict["errorDesc"] = "Read Video Error! see more: " + str(e)
        expPoster()
        # BUGFIX: the original returned a single zero array on the first loop
        # iteration; return one placeholder per expected frame instead,
        # matching the 40-empty-frame convention used by the caller.
        return [np.zeros((1000, 500, 3), np.uint8) for _ in range(number)]

    gen_iter_frames = clip.iter_frames()
    imgs = []
    count = 0
    for im in gen_iter_frames:  # read frames in order
        if count <= number - 1:
            imgs.append(cv2.cvtColor(im, cv2.COLOR_RGB2BGR))
            count += 1
            # The stream has a variable bitrate; randomly skip 2-3 frames.
            # next(gen, None) is Py2/Py3 safe and will not raise StopIteration
            # if the clip ends mid-skip.
            for _ in range(random.choice([2, 3])):
                next(gen_iter_frames, None)
        else:
            break
    clip.__del__()
    return imgs


# Shared frame-error detector state (sample counter + fault latch).
global detectCount
global detectErrStaus
detectCount=0
detectErrStaus=0
def camera_err_detection(img):
    """Every 60th call, inspect ``img`` for black / white / single-colour faults.

    Uses the module-level counters ``detectCount`` (call counter) and
    ``detectErrStaus`` (latch so each fault streak is reported only once).
    :param img: BGR frame (only examined on the sampled call).
    :return: False when a fault is detected, True otherwise.
    """
    global detectCount
    global detectErrStaus
    detectCount += 1
    if detectCount < 60:
        # Only one frame out of every 60 is actually analysed.
        return True

    print('detectCount is global =%d'%detectCount)
    detectCount = 0
    shrunk = cv2.resize(img, dsize=(320, 180))
    chan_b, chan_g, chan_r = cv2.split(shrunk)
    sumb = int(chan_b.sum() / 255)
    sumg = int(chan_g.sum() / 255)
    sumr = int(chan_r.sum() / 255)
    sumall = sumb + sumg + sumr
    print('sumb,sumg,sumr,sumall=',sumb,sumg,sumr,sumall)

    # Classify the fault, if any; thresholds are 1/8 and 7/8 of full scale.
    fault_msg = None
    if sumall <= int(320*180*3*1/8):
        fault_msg = 'black video error.pop to application'
    elif sumall >= int(320*180*3*7/8):
        fault_msg = 'white video error.pop to application'
    elif (sumb == 0) or (sumr == 0) or (sumg == 0):
        fault_msg = 'single color error.pop to application'

    if fault_msg is not None:
        if detectErrStaus == 0:
            # Report only the first occurrence of a fault streak.
            logger.error(fault_msg)
        detectErrStaus = 1
        return False

    # TODO: use a CNN to classify richer faults (e.g. mosaic).
    print('true')
    detectErrStaus = 0
    return True

def parse_path(path):
    """Split an OSS video path into shop id, date, and channel number.

    Example::

        testInput/SQ2108/20171122/Channel_09/Append/1414345345.mp4

    :param path: slash-separated video path
    :return: (shop_id, date, channel_num) with ``channel_num`` as an int
    """
    parts = path.split('/')
    channel_num = int(parts[3].rsplit('_', 1)[-1])
    return parts[1], parts[2], channel_num

def fetch_item_download(channelID, shopID, q, lock):
    """Fetch one batch of video paths from redis, decode them into frames,
    and enqueue the images for the detection stage.

    Pops paths from ``wait_fetch_list_<shopID>`` until ``batch`` videos
    matching this channel and the current date are collected, decodes each
    into frames, then pushes a result dict
    ``{0: images, 1: local_path, 2: channelID, 3: shopID}`` onto the shared
    queue ``q``. Finished / broken paths are recorded in the corresponding
    redis lists.

    Fixes vs. original: Python-2-only ``except X, e`` syntax replaced by
    ``except X as e`` and a tab-indented line normalised to spaces (both
    fatal under Python 3).

    :param channelID: camera channel handled by this worker
    :param shopID: shop the channel belongs to
    :param q: multiprocessing queue receiving the decoded frames
    :param lock: lock guarding access to ``q``
    """
    videolst = []
    resExpDict["entityCode"] = shopID
    resExpDict["cameraCode"] = channelID

    # Collect `batch` matching video paths for this channel and date.
    while True:
        try:
            item_key, video_path = conn.brpop(
                config['redis']['wait_fetch_list_' + str(shopID)], timeout=30)
            shop_id, date, channel_num = parse_path(video_path)
            if (channel_num != channelID) or (str(date) != str(currentDate)):
                continue
            videolst.append(video_path)
            if batch == len(videolst):
                break
        except Exception as e:
            # brpop returns None on timeout, so the unpacking above raises.
            logger.error("Fail to fetch down wait fetch list records.")
            return

    print("batch num :" + str(batch))
    for currentVideo in videolst:
        time4perVideo = time.time()
        reslut = {}
        pfx = config['pfx']

        print('--current video--' + currentVideo)

        local_video_path = os.path.join(config['oss_pfx'], currentVideo)
        camera_detect_img = {}
        local_path = os.path.join(pfx, currentVideo)

        try:
            startTime = time.time()
            camera_detect_img = read_video_frame(local_video_path)  # split into frames
            print("read_video_frame :" + str(time.time() - startTime))

            if False == camera_err_detection(camera_detect_img[0]):
                resExpDict["errorCode"] = 1003
                resExpDict["errorDesc"] = "Black/White/Single clor video error"
                expPoster()
                continue
        except KeyboardInterrupt as e:
            # User abort: requeue the current video so it is not lost.
            resExpDict["errorCode"] = 1004
            resExpDict["errorDesc"] = "User Stop Process. See more: " + str(e)
            expPoster()

            conn.rpush(config['redis']['wait_fetch_list_' + str(shopID)], currentVideo)
            return None
        except Exception as e:
            resExpDict["errorCode"] = 1005
            resExpDict["errorDesc"] = "Oss video abnormal. See more: " + str(e)
            expPoster()

            # Substitute 40 black frames so the downstream pipeline keeps its shape.
            for i in range(40):
                camera_detect_img[i] = np.zeros((1000, 500, 3), np.uint8)
            conn.rpush(config['redis']['error_handle_list_' + str(shopID)], currentVideo)
            reslut[0] = camera_detect_img
            reslut[1] = local_path
            reslut[2] = channelID
            reslut[3] = shopID
            lock.acquire()
            q.put(reslut)
            lock.release()
        else:
            conn.rpush(config['redis']['finish_handle_list_' + str(shopID)], currentVideo)
            reslut[0] = camera_detect_img
            reslut[1] = local_path
            reslut[2] = channelID
            reslut[3] = shopID
            lock.acquire()
            q.put(reslut)
            lock.release()
        finally:
            print(local_path + 'image fetch finally done!')
            print("fetch per video time:" + str(time.time() - time4perVideo))
    print("batch fetch image done")


class PersonDetector(object):
    """Faster R-CNN person detector backed by a caffe model.

    ``detect`` runs the network over a batch of frames and appends one
    MOT-format line per detection to a per-video text file.
    """

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    default_args = {
        'CLASSES': ('__background__', 'customer', 'sale'),
    }
    default_args['prototxt'] = os.path.join(REPO_DIRNAME, '../models/SAIC_Person/faster_rcnn_end2end/test.prototxt')#YYModel/test_agnostic.prototxt
    default_args['caffemodel'] = os.path.join(REPO_DIRNAME,
                                              '../models/SAIC_Person/faster_rcnn_end2end/vgg16_1122_faster_rcnn_iter_70000.caffemodel')#.resnet50_rfcn_iter_120000
    # default_args['output_file'] = '/data/program/rcnn_mtcnn/f-rcnn/output/det.txt'
    default_args['CONF_THRESH'] = 0.7
    default_args['NMS_THRESH'] = 0.2

    def __init__(self, CLASSES, prototxt, caffemodel, CONF_THRESH, NMS_THRESH):
        """Load the network on GPU 0 in test mode."""
        caffe.set_mode_gpu()
        caffe.set_device(0)
        self.classes = CLASSES
        self.conf_thresh = CONF_THRESH
        self.nms_thresh = NMS_THRESH
        self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    def detect(self, input):
        """Detect persons in every frame of ``input`` and write the results.

        :param input: sequence of (image dict/list, local video path,
            channel number, ...) as produced by ``fetch_item_download``.
        Output goes to ``<per_det_output_file>/<shop>/<date>/Channel_0<n>/Append/<ts>.txt``,
        one comma-separated MOT-style line per detection.
        """
        image_dic = input[0]
        local_path = input[1]
        channel = input[2]

        local_path_b = local_path.split('/')
        b_num = len(local_path_b)

        # Output directory mirrors the source path layout.
        output_file = config['per_det_output_file']+'/'+local_path_b[b_num - 5]+'/'+local_path_b[b_num - 4]+'/Channel_0'+str(channel)+'/Append'
        time_file = (local_path.split('/')[-1]).split('.')[0]
        if not os.path.exists(output_file):
            os.makedirs(output_file)

        file_temp = os.path.join(output_file, time_file + '.txt')

        # `with` guarantees the file is closed even if im_detect raises
        # (the original leaked the handle on exceptions).
        with open(file_temp, 'w') as f:
            for img_num in range(len(image_dic)):
                im = image_dic[img_num]
                tmpTime = time.time()
                scores, boxes = im_detect(self.net, im)
                print("imdetect_Time: " + str(time.time() - tmpTime))

                # NOTE(review): this flag is set whenever class index 1 exists
                # in the class list, so it is always 1 for the default classes;
                # kept for behavioural compatibility.
                customer_exist = 0
                for cls_ind, cls in enumerate(self.classes[1:]):
                    cls_ind += 1
                    if 1 == cls_ind:
                        customer_exist = 1
                if 0 == customer_exist:
                    continue

                for cls_ind, cls in enumerate(self.classes[1:]):
                    cls_ind += 1  # because we skipped background
                    cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
                    cls_scores = scores[:, cls_ind]
                    dets = np.hstack((cls_boxes,
                                      cls_scores[:, np.newaxis])).astype(np.float32)
                    keep = nms(dets, self.nms_thresh)
                    dets = dets[keep, :]
                    # vis_detections(im, cls, dets, thresh=CONF_THRESH)
                    inds = np.where(dets[:, -1] >= self.conf_thresh)[0]
                    if len(inds) == 0:
                        if 1 == cls_ind:
                            break
                        else:
                            continue
                    for i in inds:
                        result = []
                        name = "%07d" % int(img_num + 1)
                        f.write(name)
                        f.write(',')
                        result.extend([-1])
                        bbox = dets[i, :4]
                        # BUGFIX: wrap in list() — map() is a lazy iterator on
                        # Python 3 and cannot be index-assigned below.
                        bbox = list(map(int, bbox))
                        bbox[2] = bbox[2] - bbox[0]  # x2 -> width
                        bbox[3] = bbox[3] - bbox[1]  # y2 -> height
                        result.extend(bbox)
                        score = float(dets[i, -1])
                        result.extend([score])
                        result.extend([-1, -1])
                        f.write(",".join(repr(e) for e in result))
                        f.write("," + str(cls_ind))
                        f.write('\n')

def _batch_norm_fn(x, scope=None):
    """Apply slim batch norm to ``x``; scope defaults to ``<current_scope>/bn``."""
    bn_scope = scope if scope is not None else tf.get_variable_scope().name + "/bn"
    return slim.batch_norm(x, scope=bn_scope)


def create_link(
        incoming, network_builder, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(stddev=1e-3),
        regularizer=None, is_first=False, summarize_activations=True):
    """Build one residual link: pre-activation (bn + nonlinearity), the inner
    block, then a skip connection.

    :param incoming: input tensor.
    :param network_builder: callable(tensor, scope) that builds the inner block.
    :param scope: variable scope prefix for this link.
    :param nonlinearity: activation applied before the block (skipped when
        ``is_first`` is True).
    :param is_first: feed ``incoming`` straight into the block without
        pre-activation (used for the first block after the stem).
    :return: output tensor; when the block doubles the channel dimension a
        1x1 stride-2 projection is added on the skip path.
    """
    if is_first:
        network = incoming
    else:
        network = _batch_norm_fn(incoming, scope=scope + "/bn")
        network = nonlinearity(network)
        if summarize_activations:
            tf.summary.histogram(scope + "/activations", network)

    pre_block_network = network
    post_block_network = network_builder(pre_block_network, scope)

    incoming_dim = pre_block_network.get_shape().as_list()[-1]
    outgoing_dim = post_block_network.get_shape().as_list()[-1]
    if incoming_dim != outgoing_dim:
        # BUGFIX: the message previously formatted the tensor ``incoming``
        # with %d instead of ``incoming_dim``, which would itself fail
        # whenever the assertion fired.
        assert outgoing_dim == 2 * incoming_dim, \
            "%d != %d" % (outgoing_dim, 2 * incoming_dim)
        projection = slim.conv2d(
            incoming, outgoing_dim, 1, 2, padding="SAME", activation_fn=None,
            scope=scope + "/projection", weights_initializer=weights_initializer,
            biases_initializer=None, weights_regularizer=regularizer)
        network = projection + post_block_network
    else:
        network = incoming + post_block_network
    return network


def create_inner_block(
        incoming, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(1e-3),
        bias_initializer=tf.zeros_initializer(), regularizer=None,
        increase_dim=False, summarize_activations=True):
    """Two 3x3 convolutions with dropout in between; when ``increase_dim``
    is set, the first convolution doubles the channels and strides by 2."""
    num_filters = incoming.get_shape().as_list()[-1]
    first_stride = 1
    if increase_dim:
        num_filters *= 2
        first_stride = 2

    incoming = slim.conv2d(
        incoming, num_filters, [3, 3], first_stride,
        activation_fn=nonlinearity, padding="SAME",
        normalizer_fn=_batch_norm_fn,
        weights_initializer=weights_initializer,
        biases_initializer=bias_initializer,
        weights_regularizer=regularizer,
        scope=scope + "/1")
    if summarize_activations:
        tf.summary.histogram(incoming.name + "/activations", incoming)

    incoming = slim.dropout(incoming, keep_prob=0.6)

    incoming = slim.conv2d(
        incoming, num_filters, [3, 3], 1,
        activation_fn=None, padding="SAME",
        normalizer_fn=None,
        weights_initializer=weights_initializer,
        biases_initializer=bias_initializer,
        weights_regularizer=regularizer,
        scope=scope + "/2")
    return incoming


def residual_block(incoming, scope, nonlinearity=tf.nn.elu,
                   weights_initializer=tf.truncated_normal_initializer(1e-3),
                   bias_initializer=tf.zeros_initializer(), regularizer=None,
                   increase_dim=False, is_first=False,
                   summarize_activations=True):
    """Residual block: a ``create_link`` pre-activation wrapper around
    ``create_inner_block``.

    NOTE(review): the default weight initializer previously used
    ``truncated_normal_initializer(1e3)`` — stddev 1000. Every sibling
    builder in this file uses 1e-3 and callers pass their own initializer,
    so this was almost certainly a typo; fixed for consistency.
    """
    def network_builder(x, s):
        # One-line purpose: build the two-conv inner block for this link.
        return create_inner_block(
            x, s, nonlinearity, weights_initializer, bias_initializer,
            regularizer, increase_dim, summarize_activations)

    return create_link(
        incoming, network_builder, scope, nonlinearity, weights_initializer,
        regularizer, is_first, summarize_activations)


def _create_network(incoming, num_classes, reuse=None, l2_normalize=True,
                    create_summaries=True, weight_decay=1e-8):
    """Build the person re-identification CNN (stem + 3 residual stages + FC).

    :param incoming: input image tensor.
    :param num_classes: number of identity classes for the classifier head.
    :param reuse: variable-reuse flag forwarded to the "ball" scope.
    :param l2_normalize: when True, emit cosine-softmax logits computed over
        L2-normalised features; otherwise use a plain fully-connected head.
    :param create_summaries: whether to emit TensorBoard summaries.
    :param weight_decay: L2 regularisation strength for conv and FC weights.
    :return: ``(features, logits)`` tensors.
    """
    nonlinearity = tf.nn.elu
    conv_weight_init = tf.truncated_normal_initializer(stddev=1e-3)
    conv_bias_init = tf.zeros_initializer()
    conv_regularizer = slim.l2_regularizer(weight_decay)
    fc_weight_init = tf.truncated_normal_initializer(stddev=1e-3)
    fc_bias_init = tf.zeros_initializer()
    fc_regularizer = slim.l2_regularizer(weight_decay)

    def batch_norm_fn(x):
        # Batch norm scoped under the current variable scope.
        return slim.batch_norm(x, scope=tf.get_variable_scope().name + "/bn")

    # Stem: two 3x3 convolutions then 3x3/2 max pooling.
    network = incoming
    network = slim.conv2d(
        network, 32, [3, 3], stride=1, activation_fn=nonlinearity,
        padding="SAME", normalizer_fn=batch_norm_fn, scope="conv1_1",
        weights_initializer=conv_weight_init, biases_initializer=conv_bias_init,
        weights_regularizer=conv_regularizer)
    if create_summaries:
        tf.summary.histogram(network.name + "/activations", network)
        tf.summary.image("conv1_1/weights", tf.transpose(
            slim.get_variables("conv1_1/weights:0")[0], [3, 0, 1, 2]),
                         max_images=128)
    network = slim.conv2d(
        network, 32, [3, 3], stride=1, activation_fn=nonlinearity,
        padding="SAME", normalizer_fn=batch_norm_fn, scope="conv1_2",
        weights_initializer=conv_weight_init, biases_initializer=conv_bias_init,
        weights_regularizer=conv_regularizer)
    if create_summaries:
        tf.summary.histogram(network.name + "/activations", network)

    network = slim.max_pool2d(network, [3, 3], [2, 2], scope="pool1")

    # Stage 2: two residual blocks at the same dimensionality.
    network = residual_block(
        network, "conv2_1", nonlinearity, conv_weight_init, conv_bias_init,
        conv_regularizer, increase_dim=False, is_first=True,
        summarize_activations=create_summaries)
    network = residual_block(
        network, "conv2_3", nonlinearity, conv_weight_init, conv_bias_init,
        conv_regularizer, increase_dim=False,
        summarize_activations=create_summaries)

    # Stage 3: first block doubles the channels and downsamples.
    network = residual_block(
        network, "conv3_1", nonlinearity, conv_weight_init, conv_bias_init,
        conv_regularizer, increase_dim=True,
        summarize_activations=create_summaries)
    network = residual_block(
        network, "conv3_3", nonlinearity, conv_weight_init, conv_bias_init,
        conv_regularizer, increase_dim=False,
        summarize_activations=create_summaries)

    # Stage 4: same pattern, doubling channels again.
    network = residual_block(
        network, "conv4_1", nonlinearity, conv_weight_init, conv_bias_init,
        conv_regularizer, increase_dim=True,
        summarize_activations=create_summaries)
    network = residual_block(
        network, "conv4_3", nonlinearity, conv_weight_init, conv_bias_init,
        conv_regularizer, increase_dim=False,
        summarize_activations=create_summaries)

    feature_dim = network.get_shape().as_list()[-1]
    print("feature dimensionality: ", feature_dim)
    network = slim.flatten(network)

    # Head: dropout then a fully-connected embedding layer.
    network = slim.dropout(network, keep_prob=0.6)
    network = slim.fully_connected(
        network, feature_dim, activation_fn=nonlinearity,
        normalizer_fn=batch_norm_fn, weights_regularizer=fc_regularizer,
        scope="fc1", weights_initializer=fc_weight_init,
        biases_initializer=fc_bias_init)

    features = network

    if l2_normalize:
        # Cosine-softmax head: L2-normalise features and the class mean
        # vectors, scale the cosine similarities by a learned softplus scale.
        # Features in rows, normalize axis 1.
        features = slim.batch_norm(features, scope="ball", reuse=reuse)
        feature_norm = tf.sqrt(
            tf.constant(1e-8, tf.float32) +
            tf.reduce_sum(tf.square(features), [1], keep_dims=True))
        features = features / feature_norm

        with slim.variable_scope.variable_scope("ball", reuse=reuse):
            weights = slim.model_variable(
                "mean_vectors", (feature_dim, num_classes),
                initializer=tf.truncated_normal_initializer(stddev=1e-3),
                regularizer=None)
            scale = slim.model_variable(
                "scale", (num_classes,), tf.float32,
                tf.constant_initializer(0., tf.float32), regularizer=None)
            if create_summaries:
                tf.summary.histogram("scale", scale)
            scale = tf.nn.softplus(scale)

        # Each mean vector in columns, normalize axis 0.
        weight_norm = tf.sqrt(
            tf.constant(1e-8, tf.float32) +
            tf.reduce_sum(tf.square(weights), [0], keep_dims=True))
        logits = scale * tf.matmul(features, weights / weight_norm)

    else:
        # Plain softmax classifier head.
        logits = slim.fully_connected(
            features, num_classes, activation_fn=None,
            normalizer_fn=None, weights_regularizer=fc_regularizer,
            scope="softmax", weights_initializer=fc_weight_init,
            biases_initializer=fc_bias_init)

    return features, logits


def _network_factory(num_classes, is_training, weight_decay=1e-8):
    """Return a ``factory_fn(image, reuse, l2_normalize)`` closure that builds
    the re-identification network with the given training configuration."""
    def factory_fn(image, reuse, l2_normalize):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected,
                                 slim.batch_norm, slim.layer_norm],
                                reuse=reuse):
                # Summaries are only emitted while training.
                return _create_network(
                    image, num_classes, l2_normalize=l2_normalize,
                    reuse=reuse, create_summaries=is_training,
                    weight_decay=weight_decay)

    return factory_fn


def _preprocess(image, is_training=False, enable_more_augmentation=True):
    image = image[:, :, ::-1]  # BGR to RGB
    if is_training:
        image = tf.image.random_flip_left_right(image)
        if enable_more_augmentation:
            image = tf.image.random_brightness(image, max_delta=50)
            image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
            image = tf.image.random_saturation(image, lower=0.8, upper=1.2)
    return image


def _run_in_batches(f, data_dict, out, batch_size):
    data_len = len(out)
    num_batches = int(data_len / batch_size)

    s, e = 0, 0
    for i in range(num_batches):
        s, e = i * batch_size, (i + 1) * batch_size
        batch_data_dict = {k: v[s:e] for k, v in data_dict.items()}
        out[s:e] = f(batch_data_dict)
    if e < len(out):
        batch_data_dict = {k: v[e:] for k, v in data_dict.items()}
        out[e:] = f(batch_data_dict)


def extract_image_patch(image, bbox, patch_shape):
    """Extract image patch from bounding box.

    Parameters
    ----------
    image : ndarray
        The full image.
    bbox : array_like
        The bounding box in format (x, y, width, height).
    patch_shape : Optional[array_like]
        This parameter can be used to enforce a desired patch shape
        (height, width). First, the `bbox` is adapted to the aspect ratio
        of the patch shape, then it is clipped at the image boundaries.
        If None, the crop is returned at its natural size (no resize).

    Returns
    -------
    ndarray | NoneType
        An image patch showing the :arg:`bbox`, optionally reshaped to
        :arg:`patch_shape`.
        Returns None if the bounding box is empty or fully outside of the image
        boundaries.

    """
    bbox = np.array(bbox)
    if patch_shape is not None:
        # correct aspect ratio to patch shape
        target_aspect = float(patch_shape[1]) / patch_shape[0]
        new_width = target_aspect * bbox[3]
        bbox[0] -= (new_width - bbox[2]) / 2
        bbox[2] = new_width

    # convert to top left, bottom right
    bbox[2:] += bbox[:2]
    # BUGFIX: np.int was removed in NumPy 1.24; builtin int is equivalent.
    bbox = bbox.astype(int)

    # clip at image boundaries
    bbox[:2] = np.maximum(0, bbox[:2])
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
    if np.any(bbox[:2] >= bbox[2:]):
        return None
    sx, sy, ex, ey = bbox
    image = image[sy:ey, sx:ex]
    if patch_shape is not None:
        # BUGFIX: the original unconditionally did patch_shape[::-1] and so
        # raised TypeError when patch_shape was None, contradicting the
        # documented behaviour.
        image = cv2.resize(image, patch_shape[::-1])

    return image


def _create_image_encoder(preprocess_fn, factory_fn, image_shape, batch_size=32,
                          session=None, checkpoint_path=None,
                          loss_mode="cosine"):
    """Wire the network into a TF1 session and return a batched encoder.

    :param preprocess_fn: per-image preprocessing (BGR->RGB, augmentation).
    :param factory_fn: network factory from ``_network_factory``.
    :param image_shape: (height, width, channels) of the input patches.
    :param batch_size: mini-batch size used when encoding.
    :param session: optional existing tf.Session; when None a new session
        capped at 10% of GPU memory is created.
    :param checkpoint_path: optional checkpoint to restore variables from.
    :param loss_mode: "cosine" enables L2-normalised features.
    :return: callable mapping an array of uint8 images to feature vectors.
    """
    image_var = tf.placeholder(tf.uint8, (None,) + image_shape)

    # Preprocess each image individually (inference mode).
    preprocessed_image_var = tf.map_fn(
        lambda x: preprocess_fn(x, is_training=False),
        tf.cast(image_var, tf.float32))

    l2_normalize = loss_mode == "cosine"
    feature_var, _ = factory_fn(
        preprocessed_image_var, l2_normalize=l2_normalize, reuse=None)
    feature_dim = feature_var.get_shape().as_list()[-1]

    if session is None:
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
        session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    if checkpoint_path is not None:
       slim.get_or_create_global_step()
       init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
            checkpoint_path, slim.get_variables_to_restore())
       session.run(init_assign_op, feed_dict=init_feed_dict)

    def encoder(data_x):
        # Run the feature extractor over data_x in fixed-size batches.
        out = np.zeros((len(data_x), feature_dim), np.float32)
        _run_in_batches(
            lambda x: session.run(feature_var, feed_dict=x),
            {image_var: data_x}, out, batch_size)
        return out

    return encoder


def create_image_encoder(model_filename, batch_size=32, loss_mode="cosine",
                         session=None):
    """Build an image -> feature-vector encoder from a checkpoint file."""
    # 1501 identity classes; inference only (no summaries / augmentation).
    factory_fn = _network_factory(
        num_classes=1501, is_training=False, weight_decay=1e-8)
    return _create_image_encoder(
        _preprocess, factory_fn, (128, 64, 3), batch_size, session,
        model_filename, loss_mode)


def create_box_encoder(model_filename, batch_size=32, loss_mode="cosine"):
    """Return an ``encoder(image, boxes)`` that crops each box to 128x64
    and maps the patches to feature vectors."""
    image_shape = 128, 64, 3
    image_encoder = create_image_encoder(model_filename, batch_size, loss_mode)

    def encoder(image, boxes):
        patches = []
        for box in boxes:
            patch = extract_image_patch(image, box, image_shape[:2])
            if patch is None:
                # Fall back to random noise so the batch shape stays intact.
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)
            patches.append(patch)
        return image_encoder(np.asarray(patches))

    return encoder


def get_host_ip():
    """Return this host's outbound IP address.

    "Connects" a UDP socket to a public address so the OS selects the local
    interface that would be used; no packets are actually sent.

    BUGFIX: in the original, if ``socket.socket`` itself raised, the
    ``finally`` block referenced an unbound ``s`` (NameError); and if
    ``connect`` failed, ``ip`` was unbound at the return. The socket is now
    created before the try so cleanup is always valid.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]
    finally:
        s.close()


def _ensure_append_dir(base_dir, shop, date, channel):
    """Create (if needed) ``<base>/<shop>/<date>/Channel_0<ch>/Append`` and
    return its path; raise ValueError when creation fails for any reason
    other than the directory already existing."""
    path = os.path.join(
        base_dir + '/' + shop + '/' + date + '/Channel_0' + str(channel),
        'Append')
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise ValueError(
                "Failed to created output directory '%s'" % path)
    return path


def generate_detections(encoder, input ):
    """Generate detections with features.

    Parameters
    ----------
    encoder : Callable[image, ndarray] -> ndarray
        The encoder function takes as input a BGR color image and a matrix of
        bounding boxes in format `(x, y, w, h)` and returns a matrix of
        corresponding feature vectors.
    input : sequence
        (image dict/list, local video path, channel number, ...) as produced
        by ``fetch_item_download``. Detection files are read from
        ``config['per_det_output_file']`` and the per-video feature array is
        saved as .npy under ``config['args_output_dir']``.

    Fixes vs. original: ``np.int`` (removed in NumPy 1.24) replaced with
    builtin ``int``; the duplicated directory-creation code is factored into
    ``_ensure_append_dir``; the unused ``myip`` lookup (which performed a
    needless network call) and the unused ``image_filenames`` dict were
    removed.
    """
    detection_dir = config['per_det_output_file']
    output_dir = config['args_output_dir']

    image_dic = input[0]
    local_path = input[1]
    channel = input[2]
    time_dir = (local_path.split('/')[-1]).split('.')[0]
    local_path_b = local_path.split('/')
    b_num = len(local_path_b)
    shop = local_path_b[b_num - 5]
    date = local_path_b[b_num - 4]

    temp_det = _ensure_append_dir(detection_dir, shop, date, channel)
    temp_out = _ensure_append_dir(output_dir, shop, date, channel)

    detection_file = os.path.join(temp_det, time_dir + '.txt')
    output_filename = os.path.join(temp_out, time_dir + '.npy')
    detections_out = []

    # No detection file -> save an empty array and stop.
    if not os.path.exists(detection_file):
        np.save(
            output_filename, np.asarray(detections_out), allow_pickle=False)
        return

    detections_in = np.loadtxt(detection_file, delimiter=',')
    # A single detection line loads as 1-D; treat it as "no usable data",
    # matching the original behaviour.
    if len(detections_in.shape) < 2:
        np.save(
            output_filename, np.asarray(detections_out), allow_pickle=False)
        return

    frame_indices = detections_in[:, 0].astype(int)
    min_frame_idx = frame_indices.min()
    max_frame_idx = frame_indices.max()
    for frame_idx in range(min_frame_idx, max_frame_idx + 1):
        mask = frame_indices == frame_idx
        rows = detections_in[mask]
        # Only frames 1..40 have images in image_dic.
        if frame_idx > 40 or frame_idx < 1:
            print("WARNING could not find image for frame %d" % frame_idx)
            continue
        bgr_image = image_dic[frame_idx - 1]
        features = encoder(bgr_image, rows[:, 2:6].copy())
        detections_out += [np.r_[(row, feature)] for row, feature
                           in zip(rows, features)]

    np.save(
        output_filename, np.asarray(detections_out), allow_pickle=False)

def camera_frame_detection(camera_detect_img, motion_thd):
    """Frame-difference motion detection.

    Fixes vs. original: Python-2-only ``except Exception, e`` syntax and a
    tab-indented return (fatal under Python 3) replaced.

    NOTE(review): ``img1`` (the blurred first frame) is never updated inside
    the loop, so every later frame is compared against frame 0 rather than
    its predecessor; behaviour preserved as-is pending confirmation. The
    loop also stops at ``len - 2``, skipping the final frame.

    :param camera_detect_img: indexable collection of BGR frames
        ({int: np.array} or list).
    :param motion_thd: foreground-pixel-count threshold for motion.
    :return: 1 on motion, 0 when no motion detected, None when a frame
        cannot be converted to grayscale.
    """
    print(len(camera_detect_img))
    first = cv2.resize(camera_detect_img[0], dsize=(320, 180))
    try:
        gray_first = cv2.cvtColor(first, cv2.COLOR_BGR2GRAY)
    except Exception as e:
        return
    img1 = cv2.medianBlur(gray_first, 3)

    for i in range(len(camera_detect_img) - 2):
        small = cv2.resize(camera_detect_img[i + 1], dsize=(320, 180))
        try:
            gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
        except Exception as e:
            return
        img2 = cv2.medianBlur(gray, 3)

        # Absolute per-pixel difference.
        dst_per = cv2.absdiff(img1, img2)
        # Binarize with a fixed threshold (the original's "otsu" comment did
        # not match the code, which uses THRESH_BINARY at 100).
        ret, im_binary = cv2.threshold(dst_per, 100, 255, cv2.THRESH_BINARY)
        # Erode twice then dilate twice to suppress speckle noise.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        eroded = cv2.erode(im_binary, kernel)
        eroded = cv2.erode(eroded, kernel)
        dilated = cv2.dilate(eroded, kernel)
        dilated = cv2.dilate(dilated, kernel)
        # Count foreground pixels and compare with the threshold.
        sumall = int(dilated.sum() / 255)
        if sumall > motion_thd:
            return 1

    return 0

def double_deep(q, f, camera_number, lock):
    """Drain up to ``batch * camera_number`` items from ``q`` and run person
    detection + feature extraction on any item whose frames show motion.

    :param q: queue of result dicts produced by ``fetch_item_download``.
    :param f: box-feature encoder passed to ``generate_detections``.
    :param camera_number: number of cameras contributing to the batch.
    :param lock: lock guarding access to ``q``.
    :return: True when the queue stays empty after a grace period
        (producer finished/stalled), False after a full batch is processed.
    """
    for _ in range(batch * camera_number):
        if q.empty():
            # Give producers a grace period before declaring the queue dry.
            time.sleep(10)
            if q.empty():
                return True
            # Queue refilled during the wait; this iteration processes
            # nothing, matching the original if/else control flow.
            continue
        lock.acquire()
        work_item = q.get()
        lock.release()
        if 1 == camera_frame_detection(work_item[0], 500):
            starTime = time.time()
            person_det.detect(work_item)
            detctTime = time.time()
            print("person_det_detect_time :" + str(detctTime - starTime))
            generate_detections(f, work_item)
            print("generate_detections_time :" + str(time.time() - detctTime))
    logger.info("deep_learing batch 10 over")
    return False


# Module-level `global` statements are no-ops at module scope; they document
# the shared state that startEnd() initialises for the worker functions above
# (config dict, batch size, exception payload, date, redis connection,
# feature encoder, and the person detector).
global config
global batch
global resExpDict
global currentDate
global shop_number
global conn
global f
global person_det

def _write_done_marker(arg):
    """Append a '<arg>done' completion marker to result/track_result.txt.

    Creates the result directory if needed.  On any creation failure other
    than "already exists", posts an exception report via expPoster() and
    raises ValueError (same contract as the original inline code).
    """
    repo_dir = os.path.dirname(os.path.abspath(__file__))
    result_dir = os.path.join(repo_dir, 'result')
    try:
        os.makedirs(result_dir)
    except OSError as exception:
        # EEXIST on an existing directory is fine; anything else is fatal.
        if not (exception.errno == errno.EEXIST and os.path.isdir(result_dir)):
            resExpDict["entityCode"] = "SQ"
            resExpDict["cameraCode"] = "Channel"
            resExpDict["errorCode"] = 1006
            resExpDict["errorDesc"] = "Fail to create output directory"
            expPoster()
            raise ValueError("Failed to created output directory '%s'" % result_dir)
    # `with` guarantees the handle is closed (the original leaked it).
    with open(os.path.join(result_dir, 'track_result' + '.txt'), 'a+') as result_file:
        result_file.write(arg + "done")
        result_file.write('\n')
        result_file.flush()


def startEnd(arg):
    """Main pipeline: cache videos per shop, then loop running detection.

    arg -- path to the YAML config file.
    Returns the number of videos processed (videoNum * batch), or
    VIDEO_NUMBER_ONE_DAY when the day's quota is considered complete.
    Raises ValueError when the result directory cannot be created.
    """
    global config
    global batch
    global resExpDict
    global currentDate
    global shop_number
    global conn
    global f
    global person_det

    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects.  The config file is assumed local/trusted;
    # switch to yaml.safe_load if it can ever be user-supplied.
    config = yaml.load(open(arg))
    batch = config['batch']
    args_model = config['args_model']
    args_loss_mode = "cosine"
    camera_order = config['camera_order']
    shopId_order = config['shopId_order']
    resExpDict = {}

    currentDate = datetime.datetime.now().strftime('%Y%m%d')
    star_hour_time = datetime.datetime.now().hour
    print("star_hour_time: " + str(star_hour_time))
    shop_number = len(shopId_order)

    bucket = oss_auth()

    conn = redis.Redis(host=config['redis']['host'], password=config['redis']['password'],
                       db=config['redis']['db'])  # connect to the configured Redis host
    f = create_box_encoder(args_model, batch_size=1, loss_mode=args_loss_mode)
    person_det = PersonDetector(**PersonDetector.default_args)
    manager = multiprocessing.Manager()

    q = manager.Queue()
    lock = manager.Lock()

    # --- cache pre-fill: download one round synchronously ------------------
    logger.info("Start caching: " + str(time.time()))
    pool = Pool(processes=shop_number)
    temp_time = time.time()
    for x in range(shop_number):
        fetch_item_download(camera_order[x], shopId_order[x], q, lock)
    pool.close()
    pool.join()
    logger.info("caching done use " + str(time.time() - temp_time))
    # --- cache pre-fill end ------------------------------------------------

    videoNum = 0
    while True:
        # Kick off the next round of downloads in parallel while this
        # round's frames are consumed by double_deep() below.
        pool = Pool(processes=shop_number)
        for x in range(shop_number):
            pool.apply_async(fetch_item_download, (camera_order[x], shopId_order[x], q, lock))

        videoNum += 1
        nowHourTime = datetime.datetime.now().hour
        timeFlag = False
        if nowHourTime >= star_hour_time:
            runHourTime = nowHourTime - star_hour_time
        else:
            # Wrapped past midnight.
            runHourTime = (24 - star_hour_time) + nowHourTime
            if nowHourTime >= 2:
                timeFlag = True

        if runHourTime > 17 or timeFlag:
            _write_done_marker(arg)
            logger.warning("Step one run hour over 17")
            logger.info("videoNum: " + str(videoNum * batch))
            # NOTE(review): the freshly created pool is not closed on this
            # exit path (same as the original); the process is expected to
            # terminate shortly after return.
            return videoNum * batch

        finsh_label = double_deep(q, f, shop_number, lock)
        pool.close()
        pool.join()

        if finsh_label:
            # Queue drained: give the downloaders one more full round.
            pool2 = Pool(processes=shop_number)
            for x in range(shop_number):
                pool2.apply_async(fetch_item_download, (camera_order[x], shopId_order[x], q, lock))
            pool2.close()
            pool2.join()

            now_hour_time = datetime.datetime.now().hour
            if now_hour_time > star_hour_time:
                run_hour_time = now_hour_time - star_hour_time
            else:
                run_hour_time = (24 - star_hour_time) + now_hour_time

            if (now_hour_time > 18 or run_hour_time > 10) and q.empty():
                _write_done_marker(arg)
                return VIDEO_NUMBER_ONE_DAY

        logger.info('well Done continue next: ' + str(shop_number))


if __name__ == '__main__':
    # Script entry point: the single CLI argument is the path to the
    # YAML configuration file consumed by startEnd().
    config_path = sys.argv[1]
    videoNum = startEnd(config_path)