# -*- encoding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import scipy.io as sio
import os

import argparse
import time

import numpy as np
import torch.nn.parallel
import torch.optim
import torchvision

from dataset import CoviarDataSet
from model import Model
from transforms import GroupCenterCrop
from transforms import GroupOverSample
from transforms import GroupScale

"""
TODO:
Run train.
"""

# ---------------------------------------------------------------------------
# Command-line interface and dataset-dependent class count.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Standard video-level testing")
parser.add_argument('--data-name', type=str,
                    choices=['ucf101', 'hmdb51', 'ylimed'])
parser.add_argument('--representation', type=str,
                    choices=['iframe', 'residual', 'mv'])
parser.add_argument('--no-accumulation', action='store_true',
                    # Fix: trailing space so the two implicitly-concatenated
                    # literals don't render as "...ofmotion vectors...".
                    help='disable accumulation of '
                         'motion vectors and residuals.')
parser.add_argument('--data-root', type=str)
parser.add_argument('--test-list', type=str)
parser.add_argument('--weights', type=str)
parser.add_argument('--arch', type=str)
parser.add_argument('--save-scores', type=str, default=None)
parser.add_argument('--test_segments', type=int, default=25)
parser.add_argument('--test-crops', type=int, default=10)
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                    help='number of workers for data loader.')
parser.add_argument('--gpus', nargs='+', type=int, default=None)

args = parser.parse_args()

# Number of action classes for each supported dataset.
if args.data_name == 'ucf101':
    num_class = 101
elif args.data_name == 'hmdb51':
    num_class = 51
elif args.data_name == 'ylimed':
    num_class = 11
else:
    raise ValueError('Unknown dataset ' + args.data_name)



def main():
    """Run video-level evaluation.

    Loads a trained checkpoint into ``Model``, scores every video in
    ``args.test_list`` (averaging scores over segments and crops), prints
    top-1 accuracy, and optionally saves per-video scores to an ``.npz``.
    """
    net = Model(num_class, args.test_segments, args.representation,
                base_model=args.arch)

    checkpoint = torch.load(args.weights)
    print("model epoch {} best prec@1: {}".format(checkpoint['epoch'],
                                                  checkpoint['best_prec1']))

    # Drop the first dotted component of every parameter name (presumably the
    # 'module.' prefix added by DataParallel at training time — TODO confirm)
    # so the weights load into the bare, non-parallel model.
    base_dict = {'.'.join(k.split('.')[1:]): v
                 for k, v in list(checkpoint['state_dict'].items())}
    net.load_state_dict(base_dict)

    if args.test_crops == 1:
        # Single center crop after rescaling.
        cropping = torchvision.transforms.Compose([
            GroupScale(net.scale_size),
            GroupCenterCrop(net.crop_size),
        ])
    elif args.test_crops == 10:
        # 10-crop oversampling; the crop layout is defined by GroupOverSample.
        cropping = torchvision.transforms.Compose([
            GroupOverSample(net.crop_size, net.scale_size,
                            is_mv=(args.representation == 'mv'))
        ])
    else:
        raise ValueError("Only 1 and 10 crops are supported, but got {}.".format(args.test_crops))

    data_loader = torch.utils.data.DataLoader(
        CoviarDataSet(
            args.data_root,
            args.data_name,
            video_list=args.test_list,
            num_segments=args.test_segments,
            representation=args.representation,
            transform=cropping,
            is_train=False,
            accumulate=(not args.no_accumulation),
            ),
        batch_size=1, shuffle=False,
        num_workers=args.workers * 2, pin_memory=True)

    # NOTE(review): the number of GPU devices is tied to --workers here; when
    # --gpus is given, exactly args.workers ids are taken from it rather than
    # len(args.gpus). Confirm this coupling is intentional.
    if args.gpus is not None:
        devices = [args.gpus[i] for i in range(args.workers)]
    else:
        devices = list(range(args.workers))

    net = torch.nn.DataParallel(net.cuda(devices[0]), device_ids=devices)
    net.eval()

    total_num = len(data_loader.dataset)
    output = []  # list of (per-video score ndarray, label) pairs

    def forward_video(data):
        # Score one video. Variable(volatile=True) is the pre-0.4 PyTorch
        # idiom for inference without autograd history.
        input_var = torch.autograd.Variable(data, volatile=True)
        scores = net(input_var)
        # Fold the test_segments * test_crops inputs of this video (e.g.
        # 25*10) into one axis, then average over it to get video scores.
        scores = scores.view((-1, args.test_segments * args.test_crops)
                             + scores.size()[1:])
        scores = torch.mean(scores, dim=1)
        return scores.data.cpu().numpy().copy()

    proc_start_time = time.time()

    for i, (data, label) in enumerate(data_loader):
        video_scores = forward_video(data)
        output.append((video_scores, label[0]))
        cnt_time = time.time() - proc_start_time
        if (i + 1) % 100 == 0:
            print('video {} done, '
                  'total {}/{}, '
                  'average {} sec/video'.format(i, i+1,
                                                total_num,
                                                float(cnt_time) / (i+1)))

    # Video-level prediction = argmax of the averaged score vector.
    video_pred = [np.argmax(x[0]) for x in output]
    video_labels = [x[1] for x in output]

    print('Accuracy {:.02f}% ({})'.format(
        float(np.sum(np.array(video_pred) == np.array(video_labels))) / len(video_pred) * 100.0,
        len(video_pred)))

    if args.save_scores is not None:
        # Re-order results into the alphabetical order of video names so the
        # saved arrays line up across runs regardless of test-list order.
        name_list = [x.strip().split()[0] for x in open(args.test_list)]
        order_dict = {e: i for i, e in enumerate(sorted(name_list))}

        reorder_output = [None] * len(output)
        reorder_label = [None] * len(output)
        reorder_name = [None] * len(output)

        for i in range(len(output)):
            idx = order_dict[name_list[i]]
            reorder_output[idx] = output[i]
            reorder_label[idx] = video_labels[i]
            reorder_name[idx] = name_list[i]

        np.savez(args.save_scores, scores=reorder_output,
                 labels=reorder_label, names=reorder_name)


# NOTE(review): this file contains a second `if __name__ == '__main__'` guard
# at the bottom (calling read_mat()); when executed as a script BOTH run,
# main() first. Confirm that is intended.
if __name__ == '__main__':
    main()

def static_vars(**kwargs):
    """Decorator that attaches each keyword argument as an attribute on the
    decorated function, emulating C-style static variables.

    Example: ``@static_vars(flag=True)`` makes ``func.flag`` start as True.
    """
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate


@static_vars(flag=True)
def read_mat():
    """Read per-video metadata from the YLIMED .mat file and re-encode each
    extracted frame directory back into an .mp4 at its original frame rate.

    Side effects: loads 'metainfo_YLIMED_ev100sel_short.mat' from the current
    directory, walks a hard-coded frames directory, and shells out to ffmpeg
    once per Ev100 video directory. No return value.
    """
    # 101->110
    # features_struct = sio.loadmat('metainfo_YLIMED10_short.mat')
    # [features] = features_struct['metainfo_short']

    # 100
    features_struct = sio.loadmat('metainfo_YLIMED_ev100sel_short.mat')
    print(features_struct.keys())
    [features] = features_struct['metainfo_ev100_short']
    # print(type(features))
    # print(features.shape)
    d = {}  # video name -> integer fps, consumed by the ffmpeg command below
    # with open('metainfo_YLIMED10_short.txt', 'w') as f:
    # 101->110
    # for i in range(features.shape[0]):
    #     [name] = features[i][0]
    #     [path] = features[i][1]
    #     [[duration]] = features[i][2]
    #     [[bits]] = features[i][3]
    #     [[fps]] = features[i][4]
    #     [[frames]] = features[i][5]
    #     [[height]] = features[i][6]
    #     [[width]] = features[i][7]
    #     [event] = features[i][8]
    #     [Set] = features[i][9][0][0]
    #     [[idx]] = features[i][10]
    #     d[str(name)] = int(fps)
            # f.write(' '.join([str(name), str(path), str(duration), str(bits),
            #         str(fps), str(frames), str(height), str(width),
            #         str(event), str(Set), str(idx)]) + '\n')

    # Unpack each record of the nested MATLAB cell/struct array; the bracket
    # patterns peel off the extra array nesting loadmat produces. Field order
    # matches the 13-column 'metainfo_ev100_short' layout.
    for i in range(features.shape[0]):
        videoFormat = features[i][0]
        audioFormat = features[i][1]
        [name] = features[i][2]
        [path] = features[i][3]
        [[duration]] = features[i][4]
        [[bits]] = features[i][5]
        [[fps]] = features[i][6]
        [[frames]] = features[i][7]
        [[height]] = features[i][8]
        [[width]] = features[i][9]
        [event] = features[i][10]
        Set = features[i][11][0]
        [[idx]] = features[i][12]
        d[str(name)] = int(fps)
        # print(type(str(name)))
        # print(' '.join([str(videoFormat), str(audioFormat), str(name),
        #                 str(path), str(duration), str(bits),
        #                 str(fps), str(frames), str(height), str(width),
        #                 str(event), str(Set), str(idx)]) + '\n')

    # NOTE(review): machine-specific hard-coded paths — consider making these
    # CLI arguments.
    top = '/home/ping501b/data/YLIMED/WIP/frames_short/'
    out = '/home/ping501b/data/YLIMED/WIP/mp4_short/'
    # os.mkdir(out)

    # with open('YLIMED_short_mp4_dirs.txt', 'w') as f:
    for root, dirs, files in os.walk(top):
        # Only process .../Ev100/<video-name> leaf directories; the length
        # check presumably skips shallower directories — TODO confirm 70 is
        # right for this filesystem layout.
        if len(root) >= 70 and root.split('/')[-2] == 'Ev100':
            # if read_mat.flag is True:
            #     print(root.split('/')[-1])
            #     read_mat.flag = False
            # f.write(root+'\n')

            ld = os.listdir(root)
            ld.sort()
            # Numeric prefix of the first frame file (".jpg" stripped), used
            # as ffmpeg's -start_number.
            ldn = ld[0][:-4]

            # NOTE(review): os.system with string concatenation is fragile and
            # shell-injection prone; prefer subprocess.run with an arg list.
            os.system('ffmpeg' +
                      ' -start_number ' + ldn +
                      ' -framerate ' + str(d[root.split('/')[-1]]) +
                      ' -i ' + root + '/%04d.jpg ' +
                      # ' -c:v mpeg4 ' +
                      ' -y ' +
                      # ' -f rawvideo ' +
                      out + root.split('/')[-2] + '/' + root.split('/')[-1] + '.mp4')
      # ffmpeg -i $outname/%04d.jpg -c:v mpeg4 -f rawvideo  out.mp4
        # for file in files:
        #     print(file)
            # if os.path.splitext(file)[1] == '.mp4':
            #     # L.append(os.path.join(root, file)[2:-4]+'.avi'+
            #     # ' '+root[2:]+' '+root[4:])
            #     f.write(os.path.join(root, file)[2:-4]+'.avi'+
            #             ' '+root[2:]+' '+str(int(root[4:])-101)+'\n')
    # print(d)


# NOTE(review): second entry-point guard in the same file — read_mat() runs
# after main() whenever this script is executed directly. Confirm intended.
if __name__ == '__main__':
    read_mat()
