from __future__ import print_function

from data import *
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from myssd import build_ssd
import os
import sys
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import random
from layers import *

import math
import numpy as np
import cv2
import argparse
from glob import glob
from timers import Timer, CudaTimer
import torch

from src.visualize import vis_utils as vis
from numpy.lib.recfunctions import structured_to_unstructured
from src.io.psee_loader import PSEELoader
import torch.nn.functional as F
import warnings

def _str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` is a trap: any non-empty string — including
    the literal string ``"False"`` — is truthy, so ``--cuda False`` would
    silently enable CUDA.  This helper parses the usual textual spellings.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')

parser.add_argument('--trained_model', default='weights/ssd_300_VOC0712.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.6, type=float,
                    help='Final confidence threshold')
# BUGFIX: was type=bool, which treats every non-empty string as True;
# the default (True) is unchanged, only explicit "--cuda false" now works.
parser.add_argument('--cuda', default=True, type=_str2bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT, help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()

# Route all newly created tensors to the GPU when requested and available.
if args.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

# makedirs(..., exist_ok=True) also creates intermediate directories and is
# race-free, unlike the exists()+mkdir() pair it replaces.
os.makedirs(args.save_folder, exist_ok=True)

warnings.filterwarnings("ignore")


def events_to_voxel_grid_pytorch(events, num_bins, width, height, device):
    """
    Build a voxel grid with bilinear interpolation in the time domain from a set of events.

    :param events: a [N x 4] NumPy array containing one event per row in the form: [timestamp, x, y, polarity]
    :param num_bins: number of bins in the temporal axis of the voxel grid
    :param width, height: dimensions of the voxel grid
    :param device: device to use to perform computations
    :return: a [1 x num_bins x 512 x 512] PyTorch tensor on the given device:
             the voxel grid resized to 512x512 and normalized to zero mean /
             unit std over its non-zero entries
    """
    DeviceTimer = CudaTimer if device.type == 'cuda' else Timer

    assert events.shape[1] == 4
    assert num_bins > 0
    assert width > 0
    assert height > 0

    with torch.no_grad():

        events_torch = torch.from_numpy(events)
        with DeviceTimer('Events -> Device (voxel grid)'):
            events_torch = events_torch.to(device)

        with DeviceTimer('Voxel grid voting'):
            voxel_grid = torch.zeros(num_bins, height, width,
                                     dtype=torch.float32, device=device).flatten()

            # Normalize the event timestamps so that they lie in [0, num_bins - 1].
            last_stamp = events_torch[-1, 0]
            first_stamp = events_torch[0, 0]
            deltaT = last_stamp - first_stamp

            if deltaT == 0:
                deltaT = 1.0  # degenerate window: all events share one timestamp

            events_torch[:, 0] = (num_bins - 1) * (events_torch[:, 0] - first_stamp) / deltaT
            ts = events_torch[:, 0]
            xs = events_torch[:, 1].long()
            ys = events_torch[:, 2].long()
            pols = events_torch[:, 3].float()
            pols[pols == 0] = -1  # polarity should be +1 / -1

            # Each event votes into its two neighbouring temporal bins,
            # weighted bilinearly by its fractional timestamp.
            tis = torch.floor(ts)
            tis_long = tis.long()
            dts = ts - tis
            vals_left = pols * (1.0 - dts.float())
            vals_right = pols * dts.float()

            valid_indices = tis < num_bins
            valid_indices &= tis >= 0
            # BUGFIX: the flattened index was cast with torch.cuda.LongTensor,
            # which crashes when `device` is the CPU even though the function
            # accepts an arbitrary device. `.long()` keeps the index on-device.
            voxel_grid.index_add_(
                dim=0,
                index=(xs[valid_indices] + ys[valid_indices] * width
                       + tis_long[valid_indices] * width * height).long(),
                source=vals_left[valid_indices])

            valid_indices = (tis + 1) < num_bins
            valid_indices &= tis >= 0

            voxel_grid.index_add_(
                dim=0,
                index=(xs[valid_indices] + ys[valid_indices] * width
                       + (tis_long[valid_indices] + 1) * width * height).long(),
                source=vals_right[valid_indices])

        voxel_grid = voxel_grid.view(num_bins, height, width).unsqueeze(0)
        x = F.interpolate(voxel_grid, size=([512, 512]), mode='bilinear').squeeze(0)

        # Normalize to zero mean / unit std computed over non-zero entries only,
        # leaving the zero (no-event) pixels untouched.
        nonzero_ev = (x != 0)
        num_nonzeros = nonzero_ev.sum()
        if num_nonzeros > 0:  # guard: an all-zero grid would divide by zero
            mean = x.sum() / num_nonzeros
            stddev = torch.sqrt((x ** 2).sum() / num_nonzeros - mean ** 2)
            mask = nonzero_ev.float()
            x = mask * (x - mean) / stddev

    return x.unsqueeze(0)




def test_voc(td_files, delta_t=50000):
    """Run the recurrent SSD detector over PSEE event recordings and
    visualize predictions (window 'pre') next to ground truth ('tru').

    Keyboard controls inside the OpenCV windows:
      'e' (101): quit           'f' (102): skip to next video
      'g' (103): rewind video   'h' (104): save the current frame

    :param td_files: list of *_td.dat event file paths; the matching *.npy
                     box file is located via the naming pattern
    :param delta_t: time window (us) used when loading ground-truth boxes
    """
    # Build and load the network.
    num_classes = 8
    net = build_ssd('train', 512, num_classes)  # initialize SSD

    if args.cuda:
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    net.load_state_dict(torch.load('weights/32000.pth', map_location="cuda:0"))

    if args.cuda:
        net = net.cuda()

    # train mode is kept so the recurrent states / loss behave as in training
    net.train()
    print('Finished loading model!')

    # BUGFIX: was `num_class`, which is not defined in this function and
    # raises a NameError; the local defined above is `num_classes`.
    criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                             False, args.cuda)

    num_classes = 8
    softmax = nn.Softmax(dim=-1)
    detect = Detect(num_classes, 512, 0, 200, 0.01, 0.25)

    videos = [PSEELoader(td_file) for td_file in td_files]
    # use the naming pattern to find the corresponding box file
    box_videos = [PSEELoader(glob(td_file.split('_td.dat')[0] + '*.npy')[0]) for td_file in td_files]

    len_videos = len(td_files)
    seq = 10
    det_t = int(60 * 1000000 / delta_t / seq)
    print(det_t)

    states = [None] * 5
    labels = ['pedestrian', 'bicycle', 'car', 'truck', 'bus', 'sign', 'light']
    show = True
    while True:
        for j in range(len_videos):
            states = [None] * 5  # reset the recurrent state per video
            last_chor = []       # track ids visible in the previous frame
            print(videos[j].name)
            t0 = time.time()
            ii = 0
            iii = 0
            # fixed id remapping used only for display text
            track = [3, 5, 1, 4, 6, 9, 8, 10, 0, 13, 12, 17, 18, 14, 21, 19, 11, 16, 15]
            while not videos[j].done:
                t0 = time.time()
                loss = 0
                loss_ll = 0
                loss_cc = 0

                print(iii)
                iii += 1

                # load a fixed number of events rather than a fixed time slice
                events = videos[j].load_n_events(500000)

                if events.shape[0] == 0:
                    break
                te = events['t']
                xe = events['x']
                # BUGFIX: clip to the last valid column. The grid is 1280 wide
                # (valid x: 0..1279); the old test `xe > 1280` let x == 1280
                # through, which wraps the flattened voxel index into the next
                # row.
                xe[xe > 1279] = 1279
                # vertically center the sensor rows inside the 1280-tall grid
                ye = events['y'] + 280
                pe = events['p']
                newe = np.stack((te, xe, ye, pe), axis=1)
                newe = newe.astype(np.float64)

                ne = newe[:, 1]  # x coords, reused below for density filtering
                me = newe[:, 2]  # y coords

                images = events_to_voxel_grid_pytorch(newe, 5, 1280, 1280, torch.device('cuda:0'))

                images = Variable(images.cuda())

                out, states = net(images, states)

                # Detach the recurrent states so gradients don't flow across frames.
                last_states = []
                for i in range(5):
                    tema = states[i][0].data
                    temb = states[i][1].data
                    last_states.append((tema, temb))

                states = last_states

                if show:
                    # Load ground-truth boxes for this window and apply the
                    # same 280px vertical offset used for the events.
                    box_e = box_videos[j].load_delta_t(delta_t)
                    box_e['y'] = box_e['y'] + 280

                    # Keep one box per track id; drop tiny boxes (w + h < 60).
                    new_box_e = []
                    tracked = []
                    for box in box_e:
                        if box['w'] + box['h'] < 60:
                            continue
                        if box['track_id'] in tracked:
                            continue
                        else:
                            new_box_e.append((box['x'], box['y'], box['w'], box['h'], box['class_id'], box['track_id']))
                            tracked.append(box['track_id'])

                    newbox = np.array(new_box_e)
                    if newbox.shape[0] == 0:
                        break
                    # (x, y, w, h) -> (x1, y1, x2, y2), clipped to the grid
                    newbox[:, 3] = newbox[:, 1] + newbox[:, 3]
                    newbox[:, 2] = newbox[:, 0] + newbox[:, 2]

                    newbox[newbox[:, 0] < 0, 0] = 0
                    newbox[newbox[:, 1] < 0, 1] = 0
                    newbox[newbox[:, 2] > 1279, 2] = 1279
                    newbox[newbox[:, 3] > 1279, 3] = 1279

                    xx1 = newbox[:, 0]
                    yy1 = newbox[:, 1]
                    xx2 = newbox[:, 2]
                    yy2 = newbox[:, 3]

                    # Event density inside each box; boxes with almost no
                    # events are dropped unless their track was kept last frame.
                    aa = (ne[None, :] >= xx1[:, None]) & (ne[None, :] <= xx2[:, None])
                    bb = (me[None, :] >= yy1[:, None]) & (me[None, :] <= yy2[:, None])
                    cc = aa & bb
                    ff = np.sum(cc != 0, axis=1)
                    ee = ff / (yy2 - yy1) / (xx2 - xx1)
                    dd = ee < 0.025
                    for i in range(newbox.shape[0]):
                        if dd[i] and newbox[i, 5] in last_chor:
                            dd[i] = False
                    newbox = np.delete(newbox, dd, 0)

                    # Normalize coordinates to [0, 1] for the loss.
                    newbox[:, 0:4] = newbox[:, 0:4] / 1280

                    # Drop degenerate boxes (zero/negative extent or x1 >= 1).
                    delete_line = []
                    for i in range(newbox.shape[0]):
                        if (newbox[i][0]) >= 1 or (newbox[i][2] - newbox[i][0]) <= 0 or (newbox[i][3] - newbox[i][1]) <= 0:
                            delete_line.append(i)
                    newbox = np.delete(newbox, delete_line, 0)
                    last_chor = list(newbox[:, 5])

                    newbox = torch.from_numpy(newbox[:, 0:5]).float()
                    targets = [newbox]
                    # NOTE: `volatile` is a no-op in modern PyTorch (kept as-is).
                    targets = [Variable(ann.cuda(), volatile=True) for ann in targets]

                    loss_l, loss_c = criterion(out, targets)
                    loss_ll += loss_l
                    loss_cc += loss_c
                    loss += loss_l + loss_c

                detections = detect(out[0], softmax(out[1]), out[2].data).data

                # Render channel 3 of the voxel grid as the backdrop image.
                nnn = images[0][3].cpu().numpy() * 255
                nnn = cv2.cvtColor(nnn, cv2.COLOR_GRAY2BGR)

                track_id = 1
                for i in range(detections.size(1)):  # iterate over classes
                    jj = 0

                    # detections are sorted by score; stop below threshold 0.3
                    while detections[0, i, jj, 0] >= 0.3:
                        score = detections[0, i, jj, 0]
                        label_name = labels[i - 1]
                        display_txt = '%s ID: %d' % (label_name, track[track_id])

                        # coordinates are normalized; scale to the 512px view
                        pt = (detections[0, i, jj, 1:] * 512).cpu().numpy().astype(np.int16)

                        jj += 1

                        if pt[3] + pt[2] - pt[1] - pt[0] < 60:
                            continue
                        track_id += 1
                        print(pt, score)
                        nnn = cv2.rectangle(nnn, (pt[0], pt[1]), (pt[2], pt[3]), (255, 0, 0), 2)
                        nnn = cv2.putText(nnn, display_txt, (pt[0], pt[1]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)

                cv2.imshow('pre', nnn)
                t1 = time.time()
                ii += 1
                if show:
                    # Draw the ground-truth boxes on a second window.
                    mmm = images[0][3].cpu().numpy() * 255
                    mmm = cv2.cvtColor(mmm, cv2.COLOR_GRAY2BGR)
                    for i in range(targets[0].size(0)):
                        t = targets[0][i]
                        lab = int(t[4])
                        label_name = labels[lab]
                        display_txt = '%s' % (label_name)
                        pt = (t[:4] * 512).cpu().numpy().astype(np.int16)
                        if pt[2] + pt[3] - pt[1] - pt[0] >= 0:
                            mmm = cv2.rectangle(mmm, (pt[0], pt[1]), (pt[2], pt[3]), (255, 0, 0), 2)
                            mmm = cv2.putText(mmm, display_txt, (pt[0], pt[1]), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)

                    cv2.imshow('tru', mmm)

                # keyboard control: e=quit, f=next video, g=rewind, h=save frame
                temp = cv2.waitKey(0)
                if temp == 101:
                    return
                if temp == 102:
                    break
                if temp == 103:
                    videos[j].seek_time(0)
                if temp == 104:
                    cv2.imwrite('u%d.jpg' % ii, nnn)


if __name__ == '__main__':

    # Collect every PSEE event file under the test directory; a file is
    # kept when '.dat' appears anywhere in its full path, matching the
    # loader's naming pattern.
    dataset_root = '/media/fei/two/two/prophesee/test'
    doc = []
    for dirpath, _dirnames, filenames in os.walk(dataset_root):
        candidates = (os.path.join(dirpath, name) for name in filenames)
        doc.extend(full_path for full_path in candidates if '.dat' in full_path)

    # Individual validation sequences kept around for quick spot checks.
    doc1 = ["/media/fei/two/two/val/moorea_2019-06-17_test_02_000_3111500000_3171500000_td.dat"]
    doc2 = ["/media/fei/two/two/val/moorea_2019-06-26_test_01_000_2379500000_2439500000_td.dat"]
    doc3 = ['/media/fei/two/two/val/moorea_2019-06-17_test_02_000_3294500000_3354500000_td.dat']
    test_voc(doc, delta_t=70000)