#!/usr/bin/env python
# coding=utf-8

import numpy as np
import cv2
from sklearn.utils.linear_assignment_ import linear_assignment
import json

import siltp
import channel_histogram

# import reid_config as deploy_config

# todo upper case, __private
# Candidate pedestrian-crop heights (rows) and the LOMO sliding-block geometry
# used for each: when a crop is snapped to row_num_list[i] rows, LOMO uses
# block_size_list[i] x block_size_list[i] tiles with stride step_list[i].
block_size_list = [4, 6, 8, 10, 12, 14, 16]
step_list = [2, 3, 4, 5, 6, 7, 8]
row_num_list = [50, 75, 100, 125, 150, 175, 200]
# Lookup tables: target row count -> block size / block step.
block_dict = dict([(r, b) for r, b in zip(row_num_list, block_size_list)])
step_dict = dict([(r, s) for r, s in zip(row_num_list, step_list)])

# Expected total length of the descriptor produced by LOMO_; used as a sanity
# check and as the zero-vector fallback size when an image is too small.
__FEATURE_LENGTH__ = 26960

# NOTE(review): machine-specific absolute path -- importing this module fails
# on any other machine. TODO make configurable (env var / argument).
CONFIG_JSON_PATH = '/Volumes/more/source/cv/Tracker/KCFcpp-py-wrapper/reid/deploy_config.json'
with open(CONFIG_JSON_PATH, 'r') as f:
    g_config = json.load(f)


def align_height(height):
    """Snap *height* to the closest canonical row count in ``row_num_list``.

    :param height: measured crop height in pixels.
    :return: the element of ``row_num_list`` nearest to *height* (on a tie,
        the first/smallest candidate -- same choice as the original
        ``argsort(...)[0]``).
    """
    dists = np.abs(height - np.array(row_num_list))
    # argmin is O(n) and says what is meant; the original sorted the whole
    # distance array just to take its first index.
    return row_num_list[int(np.argmin(dists))]


def padding_boundingbox_by_rows(height, cfg):
    """Return a copy of *cfg* whose LOMO block geometry matches *height*.

    Picks the canonical row count in ``row_num_list`` closest to *height*
    and writes the matching ``block_size`` / ``block_step`` plus the chosen
    ``target_height`` into the returned config.

    :param height: measured crop height in pixels.
    :param cfg: deploy-config dict with a nested ``'lomo'`` dict.
    :return: an adapted copy of *cfg*; *cfg* itself is left untouched.
    """
    index = int(np.argmin(np.abs(height - np.array(row_num_list))))
    config = cfg.copy()
    # BUG FIX: dict.copy() is shallow, so writing config['lomo'][...] used to
    # mutate the caller's nested dict (silently corrupting g_config). Copy the
    # nested dict before modifying it.
    config['lomo'] = dict(cfg['lomo'])
    config['lomo']['block_step'] = step_list[index]
    config['lomo']['block_size'] = block_size_list[index]
    config['target_height'] = row_num_list[index]
    print('source /target :{0}/ {1}'.format(height, config['target_height']))
    return config


def averagePooling(img):
    """2x2 average-pool *img*, halving each spatial dimension.

    A trailing odd row/column is dropped first. Integer inputs are pooled
    with floor division and returned in their original dtype; float inputs
    are pooled with true division.

    BUG FIX: the original summed neighbours in the input dtype, so uint8
    images (the usual case here) wrapped around (e.g. 200 + 200 -> 144),
    corrupting the pooled values. The sum is now accumulated in a wide dtype
    before dividing.
    """
    if img.shape[0] % 2 != 0:
        img = img[:-1]
    if img.shape[1] % 2 != 0:
        img = img[:, :-1]

    src = np.asarray(img)
    if np.issubdtype(src.dtype, np.integer):
        # Widen before adding to avoid overflow, then cast back.
        pooled = src[0::2].astype(np.int64) + src[1::2]
        pooled = pooled[:, 0::2] + pooled[:, 1::2]
        pooled //= 4
        return pooled.astype(src.dtype)

    pooled = src[0::2] + src[1::2]
    pooled = pooled[:, 0::2] + pooled[:, 1::2]
    pooled /= 4.0
    return pooled


"""
def get_pedestrian_by_frame(tree, frame_id, deploy_config=None):
    s = STR_PSTS_VIEW1_PATH + 'frame_{0:0>4}.jpg'.format(frame_id)
    print s
    img = mpimg.imread(s)
    xpath = './/frame[@number=\'{0}\']'.format(frame_id)
    print xpath
    frame = tree.iterfind(xpath).next()
    if frame == None:
        print 'frame doesnt exist. '
        return
    obj_list = frame[0]
    labels = []  # [[]]* num
    alpha_list = []
    target_height_list = []
    mean_width = 0
    mean_height = 0
    for obj in obj_list:
        print obj.keys()
        labels.append(obj.get('id'))
        [h, yc, w, xc] = [float(obj[0].get(k)) for k in obj[0].keys()]
        if not deploy_config is None:
            deploy_config = padding_boundingbox_by_rows(h, deploy_config)
            print('height: {}'.format(h))
            w = deploy_config['target_height'] * w / h
            h = deploy_config['target_height']
        mean_width += w
        mean_height += h
        imgb = img[int(round(yc - h / 2.0)):int(round(yc + h / 2.0)), int(xc - w // 2):int(xc + w // 2), :]
        alpha_list.append(imgb)
        target_height_list.append(int(deploy_config['target_height']))
    mean_width // len(labels)
    mean_height // len(labels)
    print('mean size : {0} x {1}'.format(mean_width, mean_height))
    return labels, alpha_list, target_height_list
"""


def LOMO_(img, config):
    """Extract a LOMO-style descriptor (SILTP texture + joint HSV histograms).

    Over three pyramid levels (the image is 2x2 average-pooled between
    levels) the image is tiled into overlapping blocks; each block yields a
    SILTP histogram per radius in R_list and a joint HSV histogram, which are
    max-pooled across the blocks of each row and concatenated.

    NOTE(review): written for Python 2 -- the bare '/' divisions below rely
    on integer floor division; under Python 3 they would yield floats and
    break range()/slicing.

    :param img: BGR image array (cv2.COLOR_BGR2HSV is applied per block).
    :param config: deploy-config dict with 'retinex' and 'lomo' sections.
    :return: 1-D feature vector; a zero vector of __FEATURE_LENGTH__ when
        the image is too small for even one block.
    """
    # Retinex parameters -- currently unused, see the disabled MSRCR call below.
    sigma_list = config['retinex']['sigma_list']
    G = config['retinex']['G']
    b = config['retinex']['b']
    alpha = config['retinex']['alpha']
    beta = config['retinex']['beta']
    low_clip = config['retinex']['low_clip']
    high_clip = config['retinex']['high_clip']
    # LOMO parameters: SILTP radii and threshold, HSV bin count, block geometry.
    R_list = config['lomo']['R_list']
    tau = config['lomo']['tau']
    hsv_bin_size = config['lomo']['hsv_bin_size']
    block_size = config['lomo']['block_size']
    block_step = config['lomo']['block_step']

    # img_retinex = retinex.MSRCR(img, sigma_list, G, b, alpha, beta, low_clip, high_clip)
    img_retinex = img  # todo ; trick for test;
    siltp_feat = np.array([])
    hsv_feat = np.array([])
    for pool in range(3):
        # Number of sliding-block positions at this pyramid level
        # (Python 2 integer division).
        row_num = (img.shape[0] - (block_size - block_step)) / block_step
        col_num = (img.shape[1] - (block_size - block_step)) / block_step
        if row_num == 0 or col_num == 0:
            # Image too small for even one block: bail out with a zero vector.
            return np.zeros(__FEATURE_LENGTH__)
        for row in range(row_num):
            for col in range(col_num):
                img_block = img[
                            row * block_step:row * block_step + block_size,
                            col * block_step:col * block_step + block_size
                            ]

                # One 3^4-bin SILTP histogram per radius, concatenated.
                siltp_hist = np.array([])
                for R in R_list:
                    siltp4 = siltp.SILTP4(img_block, R, tau)
                    unique, count = np.unique(siltp4, return_counts=True)
                    siltp_hist_r = np.zeros([3 ** 4])
                    for u, c in zip(unique, count):
                        siltp_hist_r[u] = c
                    siltp_hist = np.concatenate([siltp_hist, siltp_hist_r], 0)

                # Same block taken from the (currently identical) Retinex image.
                img_block = img_retinex[
                            row * block_step:row * block_step + block_size,
                            col * block_step:col * block_step + block_size
                            ]

                # Joint HSV histogram of the block.
                img_hsv = cv2.cvtColor(img_block, cv2.COLOR_BGR2HSV)
                hsv_hist = channel_histogram.jointHistogram_(
                    img_hsv,
                    [0, 255],
                    hsv_bin_size
                )

                # Element-wise max across the blocks of this row -- LOMO's
                # "local maximal occurrence" pooling.
                if col == 0:
                    siltp_feat_col = siltp_hist
                    hsv_feat_col = hsv_hist
                else:
                    siltp_feat_col = np.maximum(siltp_feat_col, siltp_hist)
                    hsv_feat_col = np.maximum(hsv_feat_col, hsv_hist)
            siltp_feat = np.concatenate([siltp_feat, siltp_feat_col], 0)
            hsv_feat = np.concatenate([hsv_feat, hsv_feat_col], 0)

        print('row_num, {0},height: {1}'.format(row_num, img.shape[0]))
        # Halve the image for the next pyramid level.
        img = averagePooling(img)
        img_retinex = averagePooling(img_retinex)
    # Log scaling, then L2-normalise each half of the SILTP feature
    # separately. NOTE(review): the half split presumably corresponds to one
    # radius each, i.e. assumes len(R_list) == 2 -- confirm against the
    # deployed config. Python 2 integer division in the slice indices.
    siltp_feat = np.log(siltp_feat + 1.0)
    siltp_feat[:siltp_feat.shape[0] / 2] /= np.linalg.norm(siltp_feat[:siltp_feat.shape[0] / 2])
    siltp_feat[siltp_feat.shape[0] / 2:] /= np.linalg.norm(siltp_feat[siltp_feat.shape[0] / 2:])

    hsv_feat = np.log(hsv_feat + 1.0)
    hsv_feat /= np.linalg.norm(hsv_feat)
    feat = np.concatenate([siltp_feat, hsv_feat], 0)
    print('162line, length of feature {}'.format(len(feat)))
    return feat


def adaptive_describe(img, figure_box):
    """LOMO feature for the pedestrian bounding box *figure_box* in *img*.

    :param img: full frame (H x W x 3 array).
    :param figure_box: (x, y, w, h) box; a crop of the snapped target height
        centred on the box is described.
    :return: feature vector, or None when the extracted feature has an
        unexpected length.
    :raises Exception: when h < 40 (too small to describe).
    """
    x, y, w, h = tuple(figure_box)
    if h < 40:
        raise Exception('height wrong')
    config = padding_boundingbox_by_rows(h, g_config)
    xc = x + w / 2.
    yc = y + h / 2.
    nh = config['target_height']
    imgb = img[int(round(yc - nh / 2.0)):int(round(yc + nh / 2.0)), int(xc - w // 2):int(xc + w // 2), :]
    # BUG FIX: pass the height-adapted config, not the global g_config --
    # the adapted block_size/block_step were otherwise discarded (it only
    # appeared to work via a shallow-copy mutation side effect).
    feat = LOMO_(imgb, config)
    if len(feat) != __FEATURE_LENGTH__:
        print('something wrong with feature length!')
        return None
    return feat


def lomo(img):
    """Compute the LOMO descriptor of *img*, adapting the block geometry
    in the global deploy config to the image's row count."""
    return LOMO_(img, padding_boundingbox_by_rows(img.shape[0], g_config))


def calc_lomo_dist_mat(figures1, figures2):
    """Match detections to trackers by LOMO feature distance.

    :param figures1: detection image crops (each an H x W x 3 array whose
        height must be one of the canonical heights in ``block_dict``).
    :param figures2: tracker image crops (same constraint).
    :return: (matched_indices, unmatched_detections, unmatched_trackers)
        where matched_indices is the N x 2 assignment array from
        linear_assignment and the unmatched lists hold indices into
        figures1 / figures2.  (The original computed these but returned
        nothing.)
    """
    with open('../viper_config.json', 'r') as f:
        config = json.load(f)

    def _describe(fig):
        # Adapt block geometry to this crop's height, then extract LOMO.
        height = int(fig.shape[0])
        config['target_height'] = height
        config['lomo']['block_size'] = block_dict[height]
        config['lomo']['block_step'] = step_dict[height]
        return LOMO_(fig, config)

    # Hoisted out of the pair loop: the original re-ran LOMO_ for every
    # (i, j) pair -- len1*len2 extractions instead of len1+len2.
    feats1 = [_describe(fig) for fig in figures1]
    feats2 = [_describe(fig) for fig in figures2]

    dist = np.zeros(shape=(len(figures1), len(figures2)))
    for i, f1 in enumerate(feats1):
        for j, f2 in enumerate(feats2):
            dist[i, j] = np.linalg.norm(f1 - f2)
    # py2-only `print dist` statements replaced with portable calls.
    print(dist)
    matched_indices = linear_assignment(dist)
    print(matched_indices)

    unmatched_detections = [d for d in range(len(figures1))
                            if d not in matched_indices[:, 0]]
    unmatched_trackers = [t for t in range(len(figures2))
                         if t not in matched_indices[:, 1]]
    # if len(unmatched_trackers) != 0:
    #     mpimg.imsave('../result_mpt/unmatch_track_img.jpg', figures2[unmatched_trackers[0]])
    # if len(unmatched_detections) != 0:
    #     mpimg.imsave('../result_mpt/unmatched_detections.jpg', figures1[unmatched_detections[0]])
    return matched_indices, unmatched_detections, unmatched_trackers
