from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from data import *
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from myssd import build_ssd
import os
import sys
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
from layers import *
import torch.utils.data as data

import math
import numpy as np
import cv2
import argparse
from glob import glob
from timers import Timer, CudaTimer
import torch
import random

from src.visualize import vis_utils as vis
from numpy.lib.recfunctions import structured_to_unstructured
from src.io.psee_loader import PSEELoader
import torch.nn.functional as F

from tensorboardX import SummaryWriter

def str2bool(v):
    """Interpret common truthy strings ("yes", "true", "t", "1") as True; anything else is False."""
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy

# Command-line interface for the training script.
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Training With Pytorch')


parser.add_argument('-s', '--skip', default=0, type=int, help="skip the first n microseconds")
parser.add_argument('-d', '--delta_t', default=20000, type=int, help="load files by delta_t in microseconds")

# NOTE(review): this mutually-exclusive group is created but no arguments are
# ever added to it in this file — confirm whether it can be removed.
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],
                    type=str, help='VOC or COCO')
parser.add_argument('--dataset_root', default=VOC_ROOT,
                    help='Dataset root directory path')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth',
                    help='Pretrained base model')
parser.add_argument('--batch_size', default=32, type=int,
                    help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from')
parser.add_argument('--start_iter', default=0, type=int,
                    help='Resume training at this iter')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--lr', '--learning-rate', default=5e-5, type=float,
                    help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
                    help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
                    help='Gamma update for SGD')
parser.add_argument('--visdom', default=False, type=str2bool,
                    help='Use visdom for loss visualization')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models')
args = parser.parse_args()


# NOTE(review): unused in this file — train() defines its own local
# num_class = 8 — confirm this module-level constant is still needed.
num_classes = 8


# TensorBoard logger; event files are written under ./runs/result.
writer = SummaryWriter('./runs/result')

def events_to_voxel_grid_pytorch(events, num_bins, width, height, device):
    """
    Build a voxel grid with bilinear interpolation in the time domain from a set of events.

    :param events: a [N x 4] NumPy array containing one event per row in the form: [timestamp, x, y, polarity]
    :param num_bins: number of bins in the temporal axis of the voxel grid
    :param num_bins: number of bins in the temporal axis of the voxel grid
    :param width, height: dimensions of the voxel grid
    :param device: device to use to perform computations
    :return: normalized event tensor of shape [1, num_bins, 512, 512] (on the device specified)
    """
    DeviceTimer = CudaTimer if device.type == 'cuda' else Timer

    assert(events.shape[1] == 4)
    assert(num_bins > 0)
    assert(width > 0)
    assert(height > 0)

    with torch.no_grad():

        events_torch = torch.from_numpy(events)
        with DeviceTimer('Events -> Device (voxel grid)'):
            events_torch = events_torch.to(device)

        with DeviceTimer('Voxel grid voting'):
            # flat (num_bins * height * width) accumulator for index_add_
            voxel_grid = torch.zeros(num_bins, height, width, dtype=torch.float32, device=device).flatten()

            # normalize the event timestamps so that they lie between 0 and num_bins
            last_stamp = events_torch[-1, 0]
            first_stamp = events_torch[0, 0]
            deltaT = last_stamp - first_stamp

            if deltaT == 0:
                # all events share one timestamp; avoid division by zero
                deltaT = 1.0

            events_torch[:, 0] = (num_bins - 1) * (events_torch[:, 0] - first_stamp) / deltaT
            ts = events_torch[:, 0]
            xs = events_torch[:, 1].long()
            ys = events_torch[:, 2].long()
            pols = events_torch[:, 3].float()
            pols[pols == 0] = -1  # polarity should be +1 / -1

            tis = torch.floor(ts)
            tis_long = tis.long()
            dts = ts - tis
            # each event's vote is split bilinearly between two adjacent time bins
            vals_left = pols * (1.0 - dts.float())
            vals_right = pols * dts.float()

            # BUGFIX: the original cast indices via torch.cuda.LongTensor, which
            # crashes when `device` is the CPU even though the function accepts
            # any device; .long() is device-agnostic and identical on CUDA.
            valid_indices = tis < num_bins
            valid_indices &= tis >= 0
            voxel_grid.index_add_(dim=0,
                                  index=(xs[valid_indices] + ys[valid_indices]
                                  * width + tis_long[valid_indices] * width * height).long(),
                                  source=vals_left[valid_indices])

            valid_indices = (tis + 1) < num_bins
            valid_indices &= tis >= 0

            voxel_grid.index_add_(dim=0,
                                  index=(xs[valid_indices] + ys[valid_indices] * width
                                  + (tis_long[valid_indices] + 1) * width * height).long(),
                                  source=vals_right[valid_indices])

        voxel_grid = voxel_grid.view(num_bins, height, width).unsqueeze(0)
        # resize to the fixed 512x512 network input resolution
        x = F.interpolate(voxel_grid, size=([512, 512]), mode='bilinear').squeeze(0)

        # zero-mean / unit-std normalization computed over non-zero cells only
        nonzero_ev = (x != 0)
        num_nonzeros = nonzero_ev.sum()
        # BUGFIX: guard against an all-zero grid, which previously produced
        # NaNs via division by zero.
        if num_nonzeros > 0:
            mean = x.sum() / num_nonzeros
            stddev = torch.sqrt((x ** 2).sum() / num_nonzeros - mean ** 2)
            mask = nonzero_ev.float()
            x = mask * (x - mean) / stddev

    return x.unsqueeze(0)


def read_event(i, j, delta_t=50000):
    """
    Plays simultaneously files and their boxes in a rectangular format.

    NOTE(review): parameters `i` and `j` are accepted but never used, and
    `videos` / `box_videos` are read as globals that are only ever created as
    locals inside train() — calling this at module scope raises NameError.
    Confirm whether this helper is still in use.

    :param delta_t: slice duration in microseconds.
    :return: (voxel tensor from events_to_voxel_grid_pytorch, [Nx5 box tensor])
    """
    # open the video object for the input files

    # use the naming pattern to find the corresponding box file
    height, width = videos[0].get_size()


    # cv2.namedWindow('out', cv2.WINDOW_NORMAL)

    # while all videos have something to read
    if sum([video.done for video in videos]):
        for video in videos:
            video.reset()
        for box_video in box_videos:
            box_video.reset()


    # load events and boxes from all files
    events = [video.load_delta_t(delta_t) for video in videos]

    te = events[0]['t']
    xe = events[0]['x']
    ye = events[0]['y'] + 280  # pad y so the frame becomes a 1280x1280 square
    pe = events[0]['p']

    newe = np.stack((te, xe, ye, pe), axis=1)
    newe = newe.astype(np.float64)

    # NOTE(review): device is hard-coded to cuda:0 here.
    newe = events_to_voxel_grid_pytorch(newe, 5, 1280, 1280, torch.device('cuda:0'))




    box_events = [box_video.load_delta_t(delta_t) for box_video in box_videos]

    #print(box_events[0])

    # convert (x, y, w, h) boxes to normalized corner form: after this loop
    # 'w'/'h' hold x2/y2 and 'x'/'y' hold x1/y1, all divided by `width`.
    # NOTE(review): both axes divide by `width` (not height) — presumably
    # consistent with the square padded frame; confirm.
    for box_e in box_events:
        box_e['y'] = box_e['y'] + 280
        box_e['w'] = (box_e['w'] + box_e['x']) / width
        box_e['h'] = (box_e['h'] + box_e['y']) / width
        box_e['x'] = box_e['x'] / width
        box_e['y'] = box_e['y'] / width

        # clamp coordinates into [0, 1]
        box_e['x'][box_e['x'] < 0] = 0.0
        box_e['y'][box_e['y'] < 0] = 0.0
        box_e['w'][box_e['w'] > 1] = 1.0
        box_e['h'][box_e['h'] > 1] = 1.0

    # keep at most one box per track_id, skipping boxes whose combined
    # width+height is below the 40/512 threshold
    new_box_e = []
    tracked = []
    for box in box_events[0]:
        if box['w'] + box['h'] - box['x'] -box['y'] < (40 / 512):
            continue
        if box['track_id'] in tracked:
            continue
        else:
            new_box_e.append((box['x'], box['y'], box['w'], box['h'], box['class_id']))
            tracked.append(box['track_id'])

    newbox = np.array(new_box_e)
    delete_line = []
    # drop degenerate boxes: x1 out of range or non-positive width/height
    # NOTE(review): this loop variable shadows the unused parameter `i`.
    for i in range(newbox.shape[0]):
        if(newbox[i][0]) >= 1 or (newbox[i][2]-newbox[i][0]) <= 0 or (newbox[i][3]-newbox[i][1]) <= 0:
            delete_line.append(i)
    newbox = np.delete(newbox, delete_line, 0)

    newbox = torch.from_numpy(newbox).float()



    return newe, [newbox]

def read_n(n = 10):
    """Read *n* consecutive (events, boxes) chunks via read_event.

    :param n: number of chunks to read.
    :return: list of read_event results.

    BUGFIX: the original called read_event() with no arguments, but
    read_event(i, j, ...) requires two positional arguments, so every call
    raised TypeError. Indices are forwarded here even though read_event
    currently ignores them.
    """
    newes = []
    for i in range(n):
        newes.append(read_event(i, 0))
    return newes



# Pick the default tensor type up front so tensors created without an explicit
# device land on the GPU when CUDA training is requested.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't " +
              "using CUDA.\nRun with --cuda for optimal training speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

# BUGFIX: os.mkdir fails when --save_folder is a nested path or already being
# created by another process; makedirs(exist_ok=True) handles both cases.
os.makedirs(args.save_folder, exist_ok=True)



def train(td_files, delta_t=50000):
    """Train the recurrent SSD on Prophesee event recordings.

    :param td_files: list of *_td.dat event-file paths; the matching *.npy
        box file is located next to each one via its naming pattern.
    :param delta_t: event-slice duration in microseconds.
    """
    videos = [PSEELoader(td_file) for td_file in td_files]
    # use the naming pattern to find the corresponding box file
    box_videos = [PSEELoader(glob(td_file.split('_td.dat')[0] + '*.npy')[0]) for td_file in td_files]

    len_videos = len(td_files)
    seq = 10  # number of delta_t slices processed per backward pass
    # number of seq-length chunks that fit in a 60-second recording
    det_t = int(60 * 1000000 / delta_t/seq)
    print(det_t)

    input_order = list(range(len_videos*det_t))
    #random.shuffle(input_order)


    num_class = 8
    show = False


    net = build_ssd('train', 512, num_class)

    if args.cuda:
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    # resume from a fixed checkpoint when present (independent of --resume)
    model_path = './weights/32000.pth'
    if os.path.exists(model_path):
        print('loading******************')
        net.load_state_dict(torch.load(model_path, map_location="cuda:0"))
    #net.eval()


    if args.cuda:
        net = net.cuda()

    if not args.resume:
        print('Initializing weights...')
        # initialize newly added layers' weights with xavier method

    # NOTE(review): lr is hard-coded to 1e-4 here; the --lr flag is ignored.
    optimizer = optim.SGD(net.parameters(), lr=1e-4, momentum=0.9,
                          weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(num_class, 0.5, True, 0, True, 3, 0.5,
                             False, args.cuda)

    net.train()

    iii = 0  # global step counter across epochs (used for logging/saving)

    for iteration in range(0, 2000001):
        random.shuffle(input_order)
        io_idx = 0
        while io_idx != len_videos*det_t:

            # forward
            t0 = time.time()
            loss = 0
            loss_ll = 0
            loss_cc = 0
            states = [None] * 5  # recurrent states, reset at each sequence start

            j = input_order[io_idx] % len_videos
            # NOTE(review): with `from __future__ import division` this is
            # float division, so seek_time below receives a float timestamp;
            # integer division (//) was probably intended — confirm.
            k = input_order[io_idx] / det_t
            io_idx += 1
            # print(j, k)
            videos[j].seek_time(k * delta_t *seq)
            box_videos[j].seek_time(k * delta_t *seq)

            #print(targets)




            for i in range(seq):
    ##################################################################




                events = videos[j].load_delta_t(delta_t)
                te = events['t']
                xe = events['x']
                xe[xe > 1280] = 1279  # clamp x into the sensor width
                ye = events['y'] + 280  # pad y so the frame becomes 1280x1280
                pe = events['p']
                newe = np.stack((te, xe, ye, pe), axis=1)
                newe = newe.astype(np.float64)
                # print(newe.shape)
                # NOTE(review): device is hard-coded to cuda:0.
                newe = events_to_voxel_grid_pytorch(newe, 5, 1280, 1280, torch.device('cuda:0'))



                box_e = box_videos[j].load_delta_t(delta_t)

                # convert (x, y, w, h) boxes to normalized corners: 'w'/'h'
                # become x2/y2 and 'x'/'y' become x1/y1, all divided by 1280
                box_e['y'] = box_e['y'] + 280
                box_e['w'] = (box_e['w'] + box_e['x']) / 1280
                box_e['h'] = (box_e['h'] + box_e['y']) / 1280
                box_e['x'] = box_e['x'] / 1280
                box_e['y'] = box_e['y'] / 1280

                # clamp coordinates into [0, 1]
                box_e['x'][box_e['x'] < 0] = 0.0
                box_e['y'][box_e['y'] < 0] = 0.0
                box_e['w'][box_e['w'] > 1] = 1.0
                box_e['h'][box_e['h'] > 1] = 1.0

                # keep at most one box per track_id, skipping boxes whose
                # combined width+height is below the 30/512 threshold
                new_box_e = []
                tracked = []
                for box in box_e:
                    if box['w'] + box['h'] - box['x'] - box['y'] < (30 / 512):
                        continue
                    if box['track_id'] in tracked:
                        continue
                    else:
                        new_box_e.append((box['x'], box['y'], box['w'], box['h'], box['class_id']))
                        tracked.append(box['track_id'])

                newbox = np.array(new_box_e)
                delete_line = []
                # drop degenerate boxes (x1 out of range, non-positive extent).
                # NOTE(review): this loop variable shadows the seq-loop `i`;
                # harmless with range iterators but confusing — confirm.
                for i in range(newbox.shape[0]):
                    if (newbox[i][0]) >= 1 or (newbox[i][2] - newbox[i][0]) <= 0 or (newbox[i][3] - newbox[i][1]) <= 0:
                        delete_line.append(i)
                newbox = np.delete(newbox, delete_line, 0)
                newbox = torch.from_numpy(newbox).float()
                newbox = [newbox]
    ##################################################################################

                # NOTE(review): Variable and volatile=True are deprecated
                # no-ops in modern PyTorch (volatile emits a warning).
                images = Variable(newe.cuda())
                targets = [Variable(ann.cuda(), volatile=True) for ann in newbox]


                # recurrent forward: states carry over across the seq slices
                out, states = net(images, states)


                loss_l, loss_c = criterion(out, targets)
                loss_ll += loss_l
                loss_cc += loss_c
                loss += loss_l + loss_c

            # one optimizer step per sequence (loss accumulated over seq slices)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            writer.add_scalar('loss', loss.item(), iii)
            writer.add_scalar('loss_ll', loss_ll.item(), iii)
            writer.add_scalar('loss_cc', loss_cc.item(), iii)

            t1 = time.time()
            #loc_loss += loss_l.item()
            #conf_loss += loss_c.item()


            iii += 1
            if iii*seq % 100 == 0:
                print('timer: %.4f sec.' % (t1 - t0))
                print('iter ' + repr(iteration) + ' || iii ' + repr(iii) + ' || Loss: %.4f ||' % (loss.item())
                      + ' || Lossll: %.4f ||' % (loss_ll.item()) + ' || Losscc: %.4f ||' % (loss_cc.item()), end=' ')



            if iii*seq % 5000 == 0:
                print('Saving state, iter:', iteration)

                # NOTE(review): weights/v8/ must already exist or this raises.
                torch.save(net.state_dict(), 'weights/v8/' + repr(iii)  + '.pth')
    #torch.save(net.state_dict(),  args.save_folder + '' + args.dataset + '.pth')


def adjust_learning_rate(optimizer, gamma, step):
    """Sets the learning rate to the initial LR decayed by 10 at every
        specified step
    # Adapted from PyTorch Imagenet example:
    # https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    # decay the CLI base learning rate by gamma once per step
    decayed_lr = args.lr * (gamma ** step)
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr


def xavier(param):
    """Fill *param* in place with Xavier/Glorot uniform values.

    BUGFIX: init.xavier_uniform is deprecated and removed in recent PyTorch
    releases; init.xavier_uniform_ is the supported in-place API and computes
    the same initialization.
    """
    init.xavier_uniform_(param)


def weights_init(m):
    """Xavier-initialize Conv2d weights and zero their biases when present.

    Intended to be passed to Module.apply(); layers other than Conv2d are
    left untouched.
    """
    if isinstance(m, nn.Conv2d):
        xavier(m.weight.data)
        # BUGFIX: a Conv2d built with bias=False has m.bias == None; the
        # original dereferenced it unconditionally and crashed.
        if m.bias is not None:
            m.bias.data.zero_()


def create_vis_plot(_xlabel, _ylabel, _title, _legend):
    """Create an empty 3-series visdom line plot and return its window handle.

    NOTE(review): relies on a module-level `viz` visdom client that is not
    created anywhere in this file — confirm it is set up before calling.
    """
    plot_opts = dict(
        xlabel=_xlabel,
        ylabel=_ylabel,
        title=_title,
        legend=_legend
    )
    zero_x = torch.zeros((1,)).cpu()
    zero_y = torch.zeros((1, 3)).cpu()
    return viz.line(X=zero_x, Y=zero_y, opts=plot_opts)


def update_vis_plot(iteration, loc, conf, window1, window2, update_type,
                    epoch_size=1):
    """Append (loc, conf, loc+conf) at `iteration` to window1; seed window2 on iteration 0.

    NOTE(review): relies on a module-level `viz` visdom client that is not
    created anywhere in this file — confirm it is set up before calling.
    """
    y_row = torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu()
    x_row = torch.ones((1, 3)).cpu() * iteration
    viz.line(X=x_row, Y=y_row / epoch_size, win=window1, update=update_type)
    # initialize epoch plot on first iteration
    if iteration == 0:
        viz.line(X=torch.zeros((1, 3)).cpu(), Y=y_row, win=window2, update=True)



if __name__ == '__main__':

    # Collect every event recording (.dat) under the dataset root and train.
    doc = []
    path = '/media/fei/two/two/prophesee/test'
    for root, dirs, files in os.walk(path):
        for file in files:
            p = os.path.join(root, file)
            # BUGFIX: the original substring test ('.dat' in p) also matched
            # paths like 'x.dat.bak' or any directory name containing '.dat';
            # endswith restricts the list to actual .dat recordings.
            if p.endswith('.dat'):
                doc.append(p)

    print(len(doc))
    train(doc, delta_t=50000)
