import timeit
from datetime import datetime
import socket
import os
import glob
from tqdm import tqdm
import shutil
import argparse

import torch
from tensorboardX import SummaryWriter
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.autograd import Variable

from dataloaders.dataset import TestVideoDataset
from network import C3D_model, R2Plus1D_model, R3D_model
import matplotlib.pyplot as plt

def main():
    """Evaluate saved C3D checkpoints and plot accuracy vs. stride.

    For every setting directory under ``res_dir`` (named so that the last
    two characters of each dash-separated half encode clip length and
    stride, e.g. ``clip16-stride02``), load ``best.pth.tar``, run the test
    split through the model, and record clip-level and video-level
    accuracy.  Finally plot both accuracy curves against stride and save
    the figure to ``act_reg_res.png``.

    Side effects: reads checkpoints and dataset files from disk, prints
    progress, writes ``act_reg_res.png``.
    """

    # Use GPU if available else revert to CPU
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    print("Device being used:", device)

    dataset = 'hmdb51'  # Options: hmdb51 or ucf101
    modelName = 'C3D'   # Options: C3D (only C3D evaluation is implemented below)

    if dataset == 'ucf101':
        num_classes = 101
        data_dir = '../data/UCF--101/split1/test/'
        res_dir = './checkpoint/'
    elif dataset == 'hmdb51':
        num_classes = 51
        data_dir = '../data/hmdb51/split1/test/'
        res_dir = './checkpoint1/'
    else:
        print('We only implemented hmdb and ucf datasets.')
        raise NotImplementedError

    # The loop below instantiates C3D unconditionally; fail fast instead of
    # silently evaluating C3D when another architecture is requested.
    if modelName != 'C3D':
        raise NotImplementedError('Only C3D evaluation is implemented.')

    settings = sorted(os.listdir(res_dir))
    x = []   # stride value per setting
    y1 = []  # clip-level accuracy per setting
    y2 = []  # video-level accuracy per setting
    for setting in settings:
        # Decode the setting from the directory name: last two characters
        # of each dash-separated half are the numeric values.
        clip_len, stride = setting.split('-')
        clip_len = int(clip_len[-2:])
        stride = int(stride[-2:])
        model = C3D_model.C3D(num_classes=num_classes, num_frames=clip_len // stride, pretrained=False)
        resume_file = os.path.join(res_dir, setting, 'best.pth.tar')
        # Load all tensors onto the CPU regardless of the device they were saved from.
        checkpoint = torch.load(resume_file, map_location=lambda storage, loc: storage)
        print("Initializing weights from: {}...".format(resume_file))
        model.load_state_dict(checkpoint['state_dict'])
        best_prec = checkpoint['best_prec']
        print('Validation Accuracy of setting is {}'.format(best_prec))

        val_dataloader = DataLoader(
            TestVideoDataset(dataset=dataset, split='test', clip_len=clip_len, stride=stride),
            batch_size=1, num_workers=1, shuffle=False)

        model.to(device)
        model.eval()
        clip_corrects = 0
        clip_total = 0
        vid_corrects = 0
        vid_total = 0
        with torch.no_grad():
            for inputs, labels in val_dataloader:
                # batch_size == 1: drop the batch dim so the per-video clip
                # dimension becomes the batch dimension for the model.
                # (The deprecated Variable wrapper is a no-op and was removed.)
                inputs = inputs.squeeze(0).to(device)
                labels = labels.squeeze(0).to(device)
                outputs = model(inputs)
                probs = nn.Softmax(dim=1)(outputs)
                preds = torch.max(probs, 1)[1]

                # Clip-level accuracy: each clip prediction counted individually.
                # .item() keeps the accumulators as Python ints so the final
                # division is true float division (not integer tensor division).
                clip_corrects += torch.sum(preds == labels.data).item()
                # Was hard-coded `+= 10`; count the actual number of clips
                # returned for this video instead.
                clip_total += labels.size(0)

                # Video-level accuracy: average the clip probabilities, then
                # take a single prediction for the whole video.
                probs = torch.mean(probs, dim=0)
                preds = torch.max(probs.unsqueeze(0), 1)[1]
                vid_corrects += torch.sum(preds == labels[0].data).item()
                vid_total += 1
            clip_acc = clip_corrects / clip_total
            vid_acc = vid_corrects / vid_total

            print('Clip_len: {}, Stride: {}, clip_acc is {}, vid_acc is {}'.format(clip_len, stride, clip_acc, vid_acc))
        x.append(stride)
        y1.append(clip_acc)
        y2.append(vid_acc)

    plt.title('Video and clip accuracy of different strides')
    plt.plot(x, y1, label='clip accuracy')
    plt.plot(x, y2, label='video accuracy')
    plt.xlabel('stride')
    plt.ylabel('accuracy')
    plt.legend()
    plt.savefig('act_reg_res.png', bbox_inches='tight')


# Script entry point: run the checkpoint evaluation and plotting pipeline
# only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()