import argparse
import os
import shutil
import time
import math
import timeit
from datetime import datetime
import socket
import glob
import warnings
warnings.filterwarnings("ignore")

import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.distributed as dist

try:
    from nvidia.dali.plugin.pytorch import DALIGenericIterator
    from nvidia.dali.pipeline import Pipeline
    import nvidia.dali.types as types
    import nvidia.dali.fn as fn
except ImportError:
    raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")

from dataloaders.dataset import VideoDataset
from dali_dataset import create_dali_pipeline
# from dali_dataset import VideoPipe
from network import C3D_model, R2Plus1D_model, R3D_model


def parse():
    """Build and parse the command-line arguments for training.

    Returns:
        argparse.Namespace: all training options, with defaults suitable for
        single-GPU UCF101 training.
    """
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    parser.add_argument('--data', metavar='DIR', default='../data/ILSVRC2012/',
                        help='path(s) to dataset (if one path is provided, it is assumed\n' +
                       'to have subdirectories named "train" and "val"; alternatively,\n' +
                       'train and val paths can be specified directly by providing both paths as arguments)')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--epochs', default=100, type=int, metavar='N',
                        help='number of total epochs to run (default: 100)')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    # Help text previously claimed "default: 256" while the default is 32.
    parser.add_argument('-b', '--batch-size', default=32, type=int,
                        metavar='N', help='mini-batch size per process (default: 32)')
    parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
                        metavar='LR', help='Initial learning rate.  Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256.  A warmup schedule will also be applied over the first 5 epochs.')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)')
    # Help text previously claimed "default: 10" while the default is 100.
    parser.add_argument('--print-freq', '-p', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--resume', default='jig_model_best.pth.tar', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: jig_model_best.pth.tar)')
    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                        help='use pre-trained model')

    parser.add_argument('--dali_cpu', action='store_true',
                        help='Runs CPU based version of DALI pipeline.')
    parser.add_argument('--prof', default=-1, type=int,
                        help='Only run 10 iterations for profiling.')
    parser.add_argument('--deterministic', action='store_true')

    parser.add_argument("--local_rank", default=0, type=int)
    parser.add_argument('--sync_bn', action='store_true',
                        help='enabling apex sync BN.')

    # BUGFIX: the old default "02" (zero-two) is a typo for the apex AMP
    # optimization level "O2" (letter O).
    parser.add_argument('--opt-level', type=str, default="O2")
    parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
    parser.add_argument('--loss-scale', type=str, default=None)
    # BUGFIX: type=bool treats every non-empty string (including "False") as
    # True; parse common truthy spellings explicitly instead.
    parser.add_argument('--channels-last',
                        type=lambda s: str(s).strip().lower() in ('1', 'true', 'yes'),
                        default=False,
                        help='use channels-last memory format (accepts true/false)')
    parser.add_argument('-t', '--test', action='store_true',
                        help='Launch test mode with preset arguments')
    parser.add_argument('--distributed', action='store_true',
                        help='Choose distribute training')
    # Help text previously said "Freeze the first n layers", which does not
    # match how the value is used (it is forwarded to the DALI video pipeline
    # as the frame-sampling stride).
    parser.add_argument('--stride', default=2, type=int,
                        help='Temporal stride between sampled frames (default: 2).')
    args = parser.parse_args()
    return args

def save_checkpoint(state, is_best, filename='stride{}_split{}_checkpoint.pth.tar'):
    """Persist a training checkpoint; mirror the best one to a 'best_' copy.

    Args:
        state (dict): serializable checkpoint payload (epoch, state_dict,
            optimizer state, ...).
        is_best (bool): when True, also copy the checkpoint to
            ``best_<basename>`` in the same directory.
        filename (str): target path. NOTE(review): the default still contains
            unformatted ``{}`` placeholders, so callers are expected to pass an
            explicit, formatted name (as main() does).
    """
    torch.save(state, filename)
    if is_best:
        # BUGFIX: prefix only the basename so a directory component in
        # `filename` still yields a valid path; the old 'best_' + filename
        # produced e.g. 'best_/tmp/ck.pth'. Identical result for bare names.
        head, tail = os.path.split(filename)
        shutil.copyfile(filename, os.path.join(head, 'best_' + tail))

def to_python_float(t):
    """Convert a scalar tensor (or 1-element sequence) to a Python number.

    ``item()`` is a relatively recent tensor API; older tensor types (and
    plain sequences) are handled by indexing the first element instead.
    """
    return t.item() if hasattr(t, 'item') else t[0]

class AverageMeter(object):
    """Tracks the most recent value plus a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count


def adjust_learning_rate(optimizer, epoch, step, len_epoch):
    """Step-decay LR schedule with a 5-epoch linear warmup.

    Decays the module-global ``args.lr`` by 10x every 30 epochs (with one
    extra decay once epoch >= 80), then — during the first 5 epochs — scales
    the result linearly from near zero up to the scheduled value, at
    per-iteration granularity. Writes the final LR into every param group.
    """
    decay_steps = epoch // 30
    if epoch >= 80:
        decay_steps += 1
    lr = args.lr * (0.1 ** decay_steps)

    # Linear warmup across the first 5 * len_epoch iterations.
    if epoch < 5:
        lr *= float(1 + step + epoch * len_epoch) / (5. * len_epoch)

    for group in optimizer.param_groups:
        group['lr'] = lr


def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    BUGFIX: the previous version hardcoded ``maxk = 1`` and silently ignored
    ``topk``. This version honors ``topk`` and also accepts a bare int,
    because callers in this file pass ``topk=(1)`` which is the int 1, not a
    tuple.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: int or tuple of ints; each requested k yields a top-k precision
            as a percentage in [0, 100].

    Returns:
        A single 1-element tensor when exactly one k is requested (backward
        compatible with the old top-1-only behavior), otherwise a list of
        such tensors, one per k.
    """
    if isinstance(topk, int):
        topk = (topk,)
    maxk = max(topk)
    batch_size = target.size(0)

    # Top-maxk predictions per sample, transposed to (maxk, batch) so that
    # correct[:k] selects the first k guesses for every sample.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res[0] if len(res) == 1 else res


def reduce_tensor(tensor):
    """Average a tensor across all distributed processes.

    Clones the input so the caller's tensor is left untouched, all-reduces
    the clone with SUM, then divides by the world size to get the mean.
    Requires an initialized process group and the module-global
    ``args.world_size`` to be set.
    """
    rt = tensor.clone()
    # BUGFIX: dist.reduce_op is deprecated (and removed in recent PyTorch);
    # dist.ReduceOp is the supported spelling.
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= args.world_size
    return rt


def main(split=1, f=None):
    """Train and validate a 3D video classifier on one dataset split.

    Builds the model and optimizer, writes DALI "path label" file lists for
    the split, constructs the train/val DALI video pipelines, then runs the
    epoch loop with checkpointing and early stopping.

    Args:
        split (int): dataset split index (1-3); selects the data directories
            and names the checkpoint files.
        f: unused; kept so existing callers that pass it keep working.
    """

    global best_prec, args
    best_prec = 0
    args = parse()
    args.split = split

    # Use GPU if available else revert to CPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device being used:", device)

    dataset = 'ucf101'  # Options: hmdb51 or ucf101

    if dataset == 'hmdb51':
        num_classes = 51
    elif dataset == 'ucf101':
        num_classes = 101
    else:
        print('We only implemented hmdb and ucf datasets.')
        raise NotImplementedError

    modelName = 'C3D'  # Options: C3D or R2Plus1D or R3D

    # Conv/fc backbones get the base LR; C3D/R2Plus1D use 10x LR on their
    # final classifier parameters (per the respective model helpers).
    if modelName == 'C3D':
        model = C3D_model.C3D(num_classes=num_classes, pretrained=False)
        train_params = [{'params': C3D_model.get_1x_lr_params(model), 'lr': args.lr},
                        {'params': C3D_model.get_10x_lr_params(model), 'lr': args.lr * 10}]
    elif modelName == 'R2Plus1D':
        model = R2Plus1D_model.R2Plus1DClassifier(num_classes=num_classes, layer_sizes=(2, 2, 2, 2))
        train_params = [{'params': R2Plus1D_model.get_1x_lr_params(model), 'lr': args.lr},
                        {'params': R2Plus1D_model.get_10x_lr_params(model), 'lr': args.lr * 10}]
    elif modelName == 'R3D':
        model = R3D_model.R3DClassifier(num_classes=num_classes, layer_sizes=(2, 2, 2, 2))
        train_params = model.parameters()
    else:
        print('We only implemented C3D and R2Plus1D models.')
        raise NotImplementedError
    criterion = nn.CrossEntropyLoss()  # standard crossentropy loss for classification
    optimizer = optim.SGD(train_params, lr=args.lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, threshold=1e-6, patience=5)

    print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
    model.to(device)
    criterion.to(device)

    # BUGFIX: these paths previously used the loop variable `i` leaked into
    # the global namespace by __main__, so main() crashed when called
    # standalone; use the `split` argument instead.
    train_dir = '../data/UCF101/split{}/train/'.format(split)
    val_dir = '../data/UCF101/split{}/test/'.format(split)

    # class name -> zero-based label index, from "label classname" lines.
    # (The file handle is NOT named `f` to avoid shadowing the parameter.)
    dic = {}
    with open('../data/ucfTrainTestlist/classInd.txt', 'r') as ind_file:
        for line in ind_file:
            label, act = line.strip().split(' ')
            dic[act] = int(label) - 1

    def _collect(videos_dir):
        # Gather [path, label] pairs; the parent directory name is the class.
        pairs = []
        for rootname, _, filenames in os.walk(videos_dir):
            for filename in filenames:
                pairs.append([os.path.join(rootname, filename),
                              dic[rootname.split('/')[-1]]])
        return pairs

    train_list = _collect(train_dir)
    val_list = _collect(val_dir)

    # DALI consumes the dataset as "path label" text file lists.
    with open('train_list.txt', 'w') as list_file:
        for elem in train_list:
            print(elem[0], ' ', elem[1], file=list_file)
    with open('val_list.txt', 'w') as list_file:
        for elem in val_list:
            print(elem[0], ' ', elem[1], file=list_file)

    print('Training model on {} dataset...'.format(dataset))
    pipe = create_dali_pipeline(
        file_list='train_list.txt',
        batch_size=args.batch_size,
        resize_x=128,
        resize_y=171,
        stride=args.stride,
        sequence_length=16,
        num_threads=4,
        device_id=0,
        crop_target_size=112,
        is_training=True,
        num_shards=1,
        shard_id=0,)
    pipe.build()
    train_dataloader = DALIGenericIterator(
        [pipe], ['data', 'label'],
        size=len(train_list))

    pipe = create_dali_pipeline(
        file_list='val_list.txt',
        batch_size=args.batch_size,
        resize_x=128,
        resize_y=171,
        stride=args.stride,
        sequence_length=16,
        num_threads=4,
        device_id=0,
        crop_target_size=112,
        is_training=False,
        num_shards=1,
        shard_id=0,)
    pipe.build()
    val_dataloader = DALIGenericIterator(
        [pipe], ['data', 'label'],
        size=len(val_list))
    print(len(train_dataloader), len(val_dataloader))

    total_time = AverageMeter()
    min_val_loss = float('inf')
    es = 0  # early-stopping counter: epochs since the last val-loss improvement
    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        train_loss, avg_train_time = train(train_dataloader, model, criterion, optimizer, epoch)
        total_time.update(avg_train_time)
        if args.test:
            break
        scheduler.step(train_loss)
        # evaluate on validation set
        prec, val_loss = validate(val_dataloader, model, criterion)

        # remember best prec@1 and save checkpoint
        if args.local_rank == 0:
            is_best = prec > best_prec
            best_prec = max(prec, best_prec)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': modelName,
                'state_dict': model.state_dict(),
                'best_prec': best_prec,
                'optimizer': optimizer.state_dict(),
            }, is_best, filename='stride{}_split{}_checkpoint.pth.tar'.format(args.stride, args.split))
            if epoch == args.epochs - 1:
                # BUGFIX: the original used format index {2} with only two
                # arguments (IndexError) and the never-assigned
                # args.total_batch_size (AttributeError). This runs
                # single-process (device_id=0), so batch_size is the global
                # batch size.
                print('##Top-1 {0}\n'
                      '##Perf  {1}'.format(
                      prec,
                      args.batch_size / total_time.avg))

        # DALI iterators must be reset before they can be re-used next epoch.
        train_dataloader.reset()
        val_dataloader.reset()

        # Early stopping: quit after 8 consecutive epochs without
        # validation-loss improvement.
        if val_loss < min_val_loss:
            min_val_loss = val_loss
            es = 0
        else:
            es += 1
            if es == 8:
                break



def train(train_dataloader, model, criterion, optimizer, epoch):
    """Run one training epoch over a DALI iterator.

    Args:
        train_dataloader: DALIGenericIterator yielding dicts with 'data' and
            'label' keys.
        model: the network being trained (already on the right device).
        criterion: loss function.
        optimizer: SGD optimizer.
        epoch (int): current epoch index, used only for logging.

    Returns:
        (avg loss, avg per-iteration wall time) over the sampled iterations.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()

    # Number of iterations per epoch; loop-invariant, so hoisted out of the
    # loop (it was recomputed on every iteration).
    train_dataloader_len = int(math.ceil(train_dataloader._size / args.batch_size))

    for i, data in enumerate(train_dataloader):
        inputs = data[0]["data"]  # renamed from `input` (shadowed a builtin)
        target = data[0]["label"].squeeze(-1).long()

        # compute output
        output = model(inputs)
        loss = criterion(output, target)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % args.print_freq == 0:
            # Every print_freq iterations, check the loss, accuracy, and speed.
            # For best performance, it doesn't make sense to print these metrics every
            # iteration, since they incur an allreduce and some host<->device syncs.

            # Measure accuracy (pass a real tuple; (1) is just the int 1)
            prec = accuracy(output.data, target, topk=(1,))

            # Average loss and accuracy across processes for logging
            reduced_loss = loss.data

            # to_python_float incurs a host<->device sync
            losses.update(to_python_float(reduced_loss), inputs.size(0))
            top1.update(to_python_float(prec), inputs.size(0))

            torch.cuda.synchronize()
            batch_time.update((time.time() - end) / args.print_freq)
            end = time.time()
            if args.local_rank == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                       epoch, i, train_dataloader_len, loss=losses, top1=top1))

        # Cap the epoch at 900 iterations (preserves original behavior).
        if i + 1 == 900:
            break

    return losses.avg, batch_time.avg

def validate(val_dataloader, model, criterion):
    """Evaluate the model on the validation DALI iterator.

    Args:
        val_dataloader: DALIGenericIterator yielding dicts with 'data' and
            'label' keys.
        model: the network to evaluate.
        criterion: loss function.

    Returns:
        (average top-1 precision, average loss) over the sampled iterations.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, data in enumerate(val_dataloader):
        inputs = data[0]["data"]  # renamed from `input` (shadowed a builtin)
        target = data[0]["label"].squeeze(-1).long()
        # (removed: a per-iteration `val_dataloader_len` that was never used)

        # compute output without building the autograd graph
        with torch.no_grad():
            output = model(inputs)
            loss = criterion(output, target)

        # measure accuracy and record loss (pass a real tuple; (1) is int 1)
        prec = accuracy(output.data, target, topk=(1,))
        reduced_loss = loss.data

        losses.update(to_python_float(reduced_loss), inputs.size(0))
        top1.update(to_python_float(prec), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # TODO:  Change timings to mirror train().
        if args.local_rank == 0 and i % args.print_freq == 0:
            print('Test: [{0}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                   i, loss=losses, top1=top1))

        # Cap validation at 300 iterations (preserves original behavior).
        if i + 1 == 300:
            break

    print(' * Prec@1 {top1.avg:.4f}'
        .format(top1=top1))

    return top1.avg, losses.avg


if __name__ == "__main__":
    with open('training_log.txt', 'a') as f:
        for i in range(1,4):
            main(split=i)