"""Run testing given a trained model."""

import argparse
import time
import datetime

import numpy as np
import torch.nn.parallel
import torch.optim
import torchvision

from dataset import CoviarDataSet
from model import Model, Coviar
from transforms import GroupCenterCrop
from transforms import GroupOverSample
from transforms import GroupScale

# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description="Standard video-level testing")
parser.add_argument('--data-name', type=str,
                    choices=['ucf101', 'hmdb51', 'ylimed', 'trecmed'])
parser.add_argument('--representation', type=str,
                    choices=['iframe', 'residual', 'mv'])
# NOTE(review): adjacent string literals below concatenate without a space
# ("of" + "motion" -> "ofmotion" in --help output) — runtime string, left as-is.
parser.add_argument('--no-accumulation', action='store_true',
                    help='disable accumulation of'
                    'motion vectors and residuals.')
# TODO: the name `lstm-infeature` is ambiguous (it is a *save path*, not a feature).
parser.add_argument('--lstm-infeature', type=str, default=None,
                    help='Collect the feature input to LSTM and'
                    'save to the dir lstm_infeature.')
parser.add_argument('--version', type=str, default='coviar-lstm',
                    choices=['coviar', 'coviar-lstm'],
                    help='Model version.')
# Path to save the features captured at the classifier's fc input (see hooks below).
parser.add_argument('--lstm-outfeature', type=str, default=None)
parser.add_argument('--data-root', type=str)
parser.add_argument('--test-list', type=str)
parser.add_argument('--weights', type=str)
parser.add_argument('--arch', type=str)
parser.add_argument('--save-scores', type=str, default=None)
# NOTE(review): uses an underscore unlike the other dashed flags; renaming would
# break existing invocations, so it is kept.
parser.add_argument('--test_segments', type=int, default=25)
parser.add_argument('--test-crops', type=int, default=10)
parser.add_argument('--hidden_size', default=1024, type=int,
                    help='lstm hidden_size.')
# TODO: implement batched collection of scores (code below assumes batch size 1
# in places).
parser.add_argument('--batch-size', default=1, type=int,
                    help='test batch size.')
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                    help='number of workers for data loader.')
parser.add_argument('--gpus', nargs='+', type=int, default=None)

args = parser.parse_args()

# Map dataset name to its number of action classes.
if args.data_name == 'ucf101':
    num_class = 101
elif args.data_name == 'hmdb51':
    num_class = 51
elif args.data_name == 'ylimed':
    num_class = 11
elif args.data_name == 'trecmed':
    num_class = 21
else:
    raise ValueError('Unknown dataset '+args.data_name)


def static_vars(**kwargs):
    """Decorator factory emulating C-style static variables.

    Each keyword argument is attached as an attribute on the decorated
    function (the closure remembers ``kwargs``), so the function can keep
    mutable state across calls via ``func.attr``.
    """
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate


@static_vars(outX=None, save=False)
def get_out_features(self, input, output):
    """Forward hook on the classifier ``fc`` layer: accumulate per-video rows.

    Averages the fc layer's input over axis 0 (the crop/segment axis of one
    video) and appends the resulting row to the running matrix
    ``get_out_features.outX``.  Once ``get_out_features.save`` is flipped to
    True (done by ``main`` on the last video), the matrix is written to
    ``args.lstm_outfeature``.
    """
    fself = get_out_features
    # `input` is the tuple of positional args the fc layer received;
    # average pooling over the crops/segments of the current video.
    avg = np.mean(input[0].data.cpu().numpy(), axis=0)  # input of fc
    # reshape(1, -1) instead of the original hard-coded (1, 1024) so the hook
    # works for any fc input width (backward compatible for width 1024).
    row = avg.reshape(1, -1)
    fself.outX = row if fself.outX is None else np.vstack((fself.outX, row))
    print("outX.shape:", fself.outX.shape)
    if fself.save:
        np.save(args.lstm_outfeature, fself.outX)


@static_vars(inX=None, save=False)
def get_in_features(self, input, output):
    """Forward hook on ``base_model.avgpool``: accumulate per-video rows.

    Averages the avgpool output over axis 0 (the crop/segment axis of one
    video) and appends the resulting row to ``get_in_features.inX``.  Once
    ``get_in_features.save`` is set to True (done by ``main`` on the last
    video), the matrix is written to ``args.lstm_infeature``.
    """
    fself = get_in_features
    avg = np.mean(output.data.cpu().numpy(), axis=0)  # output of avgpool
    # reshape(1, -1) instead of the original hard-coded (1, 2048) so the hook
    # works for any backbone feature width (backward compatible for 2048).
    row = avg.reshape(1, -1)
    fself.inX = row if fself.inX is None else np.vstack((fself.inX, row))
    print("inX.shape:", fself.inX.shape)
    if fself.save:
        np.save(args.lstm_infeature, fself.inX)


def main():
    """Video-level testing entry point.

    Builds the model selected by ``--version``, loads ``--weights``,
    optionally registers forward hooks that dump LSTM in/out features,
    forwards every test video (``test_segments`` x ``test_crops`` crops,
    averaged into one score vector per video), prints top-1 accuracy and
    optionally saves the raw scores with ``--save-scores``.
    """
    print('Testing arguments:')
    # Printed in the order the arguments were added to the parser.
    for k, v in vars(args).items():
        print('\t{}: {}'.format(k, v))

    start_tim = datetime.datetime.now()
    print('Start time:', start_tim.strftime('%Y-%m-%d %H:%M:%S'))

    # argparse `choices` guarantees exactly one of these branches runs.
    if args.version == 'coviar-lstm':
        net = Model(num_class, args.test_segments, args.representation,
                    args.hidden_size, base_model=args.arch)
    elif args.version == 'coviar':
        net = Coviar(num_class, args.test_segments, args.representation,
                    base_model=args.arch)

    # Hooks capture the features flowing into/out of the classifier head.
    # (Return handles are unused — the hooks stay registered for the whole run.)
    if args.lstm_infeature is not None:
        net.base_model.avgpool.register_forward_hook(get_in_features)
    if args.lstm_outfeature is not None:
        net.fc.register_forward_hook(get_out_features)

    checkpoint = torch.load(args.weights)
    print("model epoch {} best prec@1: {}".format(checkpoint['epoch'],
                                                  checkpoint['best_prec1']))

    # Strip the leading "module." prefix that DataParallel adds at train time.
    base_dict = {'.'.join(k.split('.')[1:]): v
                 for k, v in list(checkpoint['state_dict'].items())}
    net.load_state_dict(base_dict)

    if args.test_crops == 1:
        cropping = torchvision.transforms.Compose([
            GroupScale(net.scale_size),
            GroupCenterCrop(net.crop_size),
        ])
    elif args.test_crops == 10:  # default
        cropping = torchvision.transforms.Compose([
            GroupOverSample(net.crop_size, net.scale_size,
                            is_mv=(args.representation == 'mv'))
        ])
    else:
        raise ValueError(f"Only 1 and 10 crops are supported, but got {args.test_crops}.")

    data_loader = torch.utils.data.DataLoader(
        CoviarDataSet(
            args.data_root,
            args.data_name,
            video_list=args.test_list,
            num_segments=args.test_segments,
            representation=args.representation,
            transform=cropping,
            is_train=False,
            accumulate=(not args.no_accumulation),
            ),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers * 2, pin_memory=True)

    if args.gpus is not None:
        # NOTE(review): assumes len(args.gpus) >= args.workers; IndexError
        # otherwise — confirm the intended coupling of --gpus and --workers.
        devices = [args.gpus[i] for i in range(args.workers)]
    else:
        devices = list(range(args.workers))

    net = torch.nn.DataParallel(net.cuda(devices[0]), device_ids=devices)
    net.eval()

    total_num = len(data_loader.dataset)
    output = []

    def forward_video(data):
        # `volatile` is the pre-0.4 PyTorch way to disable autograd during
        # inference; on newer PyTorch wrap the call in torch.no_grad() instead.
        input_var = torch.autograd.Variable(data, volatile=True)
        scores = net(input_var)
        # Fold the test_segments * test_crops (e.g. 25*10) crop scores of each
        # video into one axis, then average them into a single score vector.
        scores = scores.view((-1, args.test_segments * args.test_crops)
                             + scores.size()[1:])
        scores = torch.mean(scores, dim=1)
        return scores.data.cpu().numpy().copy()

    proc_start_time = time.time()

    done = 0
    y = []  # collected ground-truth labels (for the feature dump)
    for i, (data, label) in enumerate(data_loader):
        data_time = time.time() - proc_start_time
        # data shape: (batch, test_crops * test_segments, C, H, W),
        # e.g. torch.Size([1, 250, 3, 224, 224])
        if args.lstm_infeature is not None or args.lstm_outfeature is not None:
            y.append(label)
            # NOTE(review): `i` is a batch index, so this last-video check is
            # only correct for batch_size == 1 (matches the --batch-size TODO).
            if i+1 == total_num:
                get_out_features.save = True
                get_in_features.save = True
                # Bug fix: the original called args.lstm_infeature('/') (a
                # TypeError) and crashed when only --lstm-outfeature was set.
                # Save the labels next to whichever feature file was requested.
                feature_path = (args.lstm_infeature
                                if args.lstm_infeature is not None
                                else args.lstm_outfeature)
                fn = '/'.join(feature_path.split('/')[:-1]) + '/feature_label.npy'
                np.save(fn, np.array(y))
        video_scores = forward_video(data)
        for vdo_score, lbl in zip(video_scores, label):
            output.append((vdo_score.reshape(1, -1), lbl))
        cnt_time = time.time() - proc_start_time
        done += data.shape[0]
        if i % 5 == 0:    # progress report every 5 batches
            print(f'{done}/{total_num} done, '
                  f'average {float(cnt_time)/done} sec/video, '
                  f'data time {float(data_time)/done} sec/video')

    video_pred = [np.argmax(x[0]) for x in output]
    video_labels = [x[1] for x in output]

    print('Accuracy {:.02f}% ({})'.format(
        float(np.sum(np.array(video_pred) == np.array(video_labels))) / len(video_pred) * 100.0,
        len(video_pred)))

    if args.save_scores is not None:
        # Re-order scores/labels alphabetically by video name so the saved
        # file has a deterministic, name-sorted layout.
        name_list = [x.strip().split()[0] for x in open(args.test_list)]
        order_dict = {e: i for i, e in enumerate(sorted(name_list))}

        reorder_output = [None] * len(output)
        reorder_label = [None] * len(output)
        reorder_name = [None] * len(output)

        for i in range(len(output)):
            idx = order_dict[name_list[i]]
            reorder_output[idx] = output[i]
            reorder_label[idx] = video_labels[i]
            reorder_name[idx] = name_list[i]

        np.savez(args.save_scores, scores=reorder_output,
                 labels=reorder_label, names=reorder_name)

    end_tim = datetime.datetime.now()
    print('End time:', end_tim.strftime('%Y-%m-%d %H:%M:%S'))
    total_tim = end_tim - start_tim  # datetime.timedelta
    print('Total time:', str(total_tim))


# Entry guard: argparse and num_class are evaluated at import time, but the
# heavy evaluation loop only runs when the script is executed directly.
if __name__ == '__main__':
    main()
