import random
import os
import time
import numpy as np
from tqdm import tqdm
import argparse
import logging
from datetime import datetime
from collections import OrderedDict

import tensorflow as tf
import tensorboard
import torch

from action_dataset import Video_3D
from transforms import resize, get_center_crop, get_multi_scale_crop, get_random_horizontal_flip, stack_then_normalize
from econet import ECONet
from opts import parser


# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# old_v = tf.compat.v1.logging.get_verbosity()
# Silence TensorFlow's INFO/WARNING chatter during graph construction.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# tf.data pipeline tuning knobs: how many batches to prefetch and how many
# parallel map workers to use when decoding videos.
_PREFETCH_BUFFER_SIZE = 30
_NUM_PARALLEL_CALLS = 10

# Number of action classes for each supported dataset.
CLASS_INFO = {
    'ucf101': 101,
    'hmdb51': 51 
}

def get_timestamp():
    """Return the current local time formatted as 'YYMMDD-HHMMSS'."""
    now = datetime.now()
    return now.strftime('%y%m%d-%H%M%S')


def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original check-then-create
    (`os.path.exists` followed by `os.makedirs`), which is racy: another
    process could create the directory between the check and the call,
    making `os.makedirs` raise ``FileExistsError``.
    """
    os.makedirs(path, exist_ok=True)


def _get_data_label_from_info(train_info_tensor, is_training, num_segments):
    """Run `process_video` inside the graph via `tf.py_func`.

    Wraps the Python-side video decoding so it can be used from a
    tf.data map; yields a float32 clip tensor and an int64 label tensor.
    """
    outputs = tf.py_func(
        process_video,
        [train_info_tensor, is_training, num_segments],
        [tf.float32, tf.int64],
    )
    clip_holder, label_holder = outputs
    return clip_holder, label_holder


def process_video(data_info, is_training, num_segments, data_augment=None):
    """Decode one video record into a normalized frame stack plus label.

    Training applies stochastic augmentation (multi-scale crop + random
    horizontal flip); evaluation uses a deterministic resize + center crop.
    Both paths end with mean subtraction via `stack_then_normalize`.
    """
    video = Video_3D(data_info)
    frames, labels = video.get_frames(num_segments, is_training=is_training)

    if is_training:
        # Stochastic spatial augmentation for training clips.
        frames = get_multi_scale_crop(frames, patch_size=224, scales=[1, .875, .75, .66])
        frames = get_random_horizontal_flip(frames)
    else:
        # Deterministic preprocessing for evaluation clips.
        frames = resize(frames, patch_size=256)
        frames = get_center_crop(frames, patch_size=224)

    # Per-channel mean values subtracted during normalization
    # (presumably Caffe-style BGR means — confirm against transforms.py).
    frames = stack_then_normalize(frames, [104, 117, 128])

    return frames, labels


def load_pt_model(path):
    """Load a PyTorch ECO checkpoint and return its conv weights.

    Reads the checkpoint's ``state_dict`` and partitions the entries:
      * keys containing ``'num_batches_tracked'`` are dropped (BatchNorm
        bookkeeping, no TF counterpart),
      * keys containing ``'res'`` go to the 3D-ResNet group,
      * keys containing ``'fc'`` go to the classifier group (currently
        excluded from the returned dict),
      * everything else goes to the 2D-Inception group.

    The Inception and ResNet groups are merged in that order so the
    resulting OrderedDict matches the TensorFlow variable-scope ordering.

    Args:
        path: Filesystem path to the ``.pth.tar`` checkpoint.

    Returns:
        OrderedDict mapping PyTorch parameter names to tensors
        (Inception entries first, then ResNet entries; fc excluded).
    """
    # map_location='cpu' lets a checkpoint saved on GPU be loaded on a
    # CPU-only host; without it torch.load raises on missing CUDA.
    pt_model = torch.load(path, map_location='cpu')['state_dict']

    pt_model_inception = OrderedDict()
    pt_model_resnet = OrderedDict()
    pt_model_fc = OrderedDict()

    for k, v in pt_model.items():
        if 'num_batches_tracked' not in k:
            if 'res' in k:
                pt_model_resnet[k] = v
            elif 'fc' in k:
                pt_model_fc[k] = v
            else:
                pt_model_inception[k] = v

    # Update for matching tensorflow variable scopes
    pt_model_inception.update(pt_model_resnet)
    # pt_model_inception.update(pt_model_fc)

    print(len(pt_model_inception.keys()))

    return pt_model_inception

def main():
    """Convert a pre-trained PyTorch ECO checkpoint into a TensorFlow one.

    Builds the full ECONet training graph (tf.data input pipelines,
    placeholders, loss, LR schedule, optimizer, summaries), then copies
    weights from a PyTorch checkpoint into the TF global variables and
    saves the result with ``tf.train.Saver``.  The large commented-out
    section at the bottom is the original train/test loop, kept for
    reference.
    """
    # Load options
    args = parser.parse_args()
    

    # Initialize Logger
    format_str = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(format=format_str, level=logging.INFO)
    logger = logging.getLogger('econet')

    # Preload data filelist: each line is a whitespace-separated record
    # consumed later by Video_3D (via process_video).
    train_file = args.train_list
    test_file = args.val_list

    with open(train_file, 'r') as f:
        train_info = list()
        for line in f.readlines():
            train_info.append(line.strip().split(' '))

    with open(test_file, 'r') as f:
        test_info = list()
        for line in f.readlines():
            test_info.append(line.strip().split(' '))

    num_train_sample = len(train_info)
    num_test_sample = len(test_info)

    train_info_tensor = tf.constant(train_info)
    test_info_tensor = tf.constant(test_info)

    # Debug mode samples a single segment per video to speed iteration.
    num_segments = args.num_segments if not args.debug else 1

    # Build dataset
    train_info_dataset = tf.data.Dataset.from_tensor_slices(
        (train_info_tensor))
    # Full-dataset shuffle buffer: every epoch is a fresh permutation.
    train_info_dataset = train_info_dataset.shuffle(buffer_size=num_train_sample)
    train_dataset = train_info_dataset.map(lambda x: _get_data_label_from_info(
        x, is_training=True, num_segments=num_segments), num_parallel_calls=_NUM_PARALLEL_CALLS)
        
    train_dataset = train_dataset.repeat().batch(args.batch_size)
    train_dataset = train_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    # Phase 2 Testing — deterministic preprocessing, batch size 1.
    test_info_dataset = tf.data.Dataset.from_tensor_slices(
        (test_info_tensor))
    test_dataset = test_info_dataset.map(lambda x: _get_data_label_from_info(
        x, is_training=False, num_segments=num_segments), num_parallel_calls=_NUM_PARALLEL_CALLS)

    test_dataset = test_dataset.batch(1)
    test_dataset = test_dataset.prefetch(buffer_size=_PREFETCH_BUFFER_SIZE)

    # Reinitializable iterators so the same graph can switch between the
    # train and test datasets via the init ops below.
    train_iterator = tf.data.Iterator.from_structure(
        train_dataset.output_types, train_dataset.output_shapes)

    test_iterator = tf.data.Iterator.from_structure(
        test_dataset.output_types, test_dataset.output_shapes)

    train_init_op = train_iterator.make_initializer(train_dataset)
    test_init_op = test_iterator.make_initializer(test_dataset)

    train_clip_holder, train_label_holder = train_iterator.get_next()
    test_clip_holder, test_label_holder = test_iterator.get_next()

    # Flatten (batch, segments, H, W, C) into one frame axis so every
    # frame is a 224x224 3-channel crop — TODO confirm incoming rank.
    train_clip_holder = tf.reshape(train_clip_holder, [-1, 224, 224, 3])
    test_clip_holder = tf.reshape(test_clip_holder, [-1, 224, 224, 3])

    # Feed placeholders: data is pulled from the iterators with sess.run
    # and then fed back in, rather than wiring the iterators to the net.
    clip_holder = tf.placeholder(tf.float32, [None, 224, 224, 3], name='clip_holder')
    label_holder = tf.placeholder(tf.int64, [None,], name='label_holder')
    is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
    net2d_dropout_holder = tf.placeholder(tf.float32, name='net2d_dropout_holder')
    net3d_dropout_holder = tf.placeholder(tf.float32, name='net3d_dropout_holder')

    # Network definition
    num_classes = CLASS_INFO[args.dataset]

    net_opt = {
       'weight_decay': args.weight_decay, 
       'net2d_keep_prob': net2d_dropout_holder,
       'net3d_keep_prob': net3d_dropout_holder,
       'num_segments': num_segments,
       'num_classes': num_classes 
    }

    logits, end_points = ECONet(clip_holder, opt=net_opt, is_training=is_training)

    pred_classes = tf.argmax(logits, axis=1)

    # Streaming accuracy metrics (local variables; reset via
    # tf.local_variables_initializer in the training loop below).
    train_acc_op, train_acc_update = tf.metrics.accuracy(labels=label_holder, predictions=pred_classes)
    test_acc_op, test_acc_update = tf.metrics.accuracy(labels=label_holder, predictions=pred_classes)

    # Loss calculation, excluding l2-norm
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_holder, logits=logits))

    # Load pre-trainned model
    # saver = tf.train.Saver(var_list=variable_map, reshape=True)
    # saver2 = tf.train.Saver(max_to_keep=_SAVER_MAX_TO_KEEP)

    # Specific Hyperparams
    per_epoch_step = int(np.ceil(num_train_sample/args.batch_size))
    global_step = args.epochs * per_epoch_step

    # Set learning rate schedule by hand, also you can use an auto way
    # Step-wise LR decay at fixed iteration counts; global_index is the
    # optimizer's step counter (despite its 'learning_rate' name).
    global_index = tf.Variable(0, trainable=False, name='learning_rate')
    boundaries = [20000, 30000, 40000, 50000]
    values = [args.lr, 0.005, 0.003, 0.001, 0.0005]
    learning_rate = tf.train.piecewise_constant(
        global_index, boundaries, values)

    # Initialize tensorboard summary
    loss_summary = tf.summary.scalar('loss', loss)
    lr_summary = tf.summary.scalar('learning_rate', learning_rate)
    train_acc_summary = tf.summary.scalar('acc/train_acc', train_acc_op)

    test_acc_summary = tf.summary.scalar('acc/test_acc', test_acc_op)

    # Optimizer set-up
    # FOR batch normalization, we then use this updata_ops
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9).minimize(loss, global_step=global_index)
        # optimizer = tf.train.AdamOptimizer(learning_rate=0.001, momentum=_MOMENTUM).minimize(loss)
    sess = tf.Session()
    train_summary = tf.summary.merge([loss_summary, lr_summary, train_acc_summary])
    # test_summary = tf.summary.merge([test_acc_summary])

    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)

    saver = tf.train.Saver()

    if args.resume_path is not None:
        logger.info('Loading checkpoint from {}...'.format(args.resume_path))
        saver.restore(sess, args.resume_path)

    # NOTE(review): hard-coded, machine-specific checkpoint paths — these
    # should come from CLI args before this script is shared.
    pt_model_path = '/home/paper99/Projects/ECO-pytorch/ECO_Full_rgb_model_Kinetics.pth.tar'
    save_path = 'experiments/ECOfull/ECOfull_kinetics.ckpt'
    pt_model = load_pt_model(pt_model_path)

    vars_list = tf.global_variables() # shows every trainable variable being used.
    # vars_list = tf.trainable_variables() # shows every variable being used.
    
    # NOTE(review): assumes the first 484 global variables line up 1:1, in
    # order, with the filtered PyTorch state dict — confirm this magic
    # number whenever the architecture or variable creation order changes.
    for (k, v), variable in zip(pt_model.items(), vars_list[:484]):

        # Reorder axes from PyTorch to TF layout, presumably
        # (out,in,kH,kW)->(kH,kW,in,out) for conv2d and the analogous
        # permutations for conv3d / linear; the shape assert below is the
        # actual safety net.
        if len(v.size()) == 4:
            v = v.permute(2, 3, 1, 0).contiguous()
        elif len(v.size()) == 5:
            v = v.permute(2, 3, 4, 1, 0).contiguous()
        elif len(v.size()) == 2:
            v = v.permute(1, 0).contiguous()


        v_np = v.cpu().numpy()

        assert v_np.shape == variable.shape, \
            'Assigned variable shape {} not equals to original variable shape {}'.format(v_np.shape, variable.shape)
        print("Converting {} to {}".format(k, variable.name))
        sess.run(variable.assign(v_np))
            # print(variable.eval())
            # i += 1 
            # break

    # saver = tf.train.Saver()
    saver.save(sess, save_path)   
   
    # for epoch in range(args.epochs):
    #     logger.info('Epoch: [{}] - Start Training Phase...'.format(epoch))

    #     # local variables reinitilization for accuracy computation
    #     sess.run(tf.local_variables_initializer()) 
    #     sess.run(train_init_op)

    #     for idx in range(per_epoch_step):
    #         step += 1
    #         start_time = time.time()

    #         clip, label = sess.run([train_clip_holder, train_label_holder])
    #         _, loss_now, per_iter_acc, summary = sess.run([optimizer, loss, train_acc_update, train_summary], \
    #                                              feed_dict={clip_holder: clip, label_holder: label, 
    #                                                         is_training: True,
    #                                                         net2d_dropout_holder: args.net2d_dropout,
    #                                                         net3d_dropout_holder: args.net3d_dropout})

    #         summary_writer.add_summary(summary, step)
    #         duration = time.time() - start_time

    #         cur_lr = sess.run(learning_rate)
    #         # responsible for printing relevant results
    #         if idx % args.print_freq == 0:
    #             logger.info('Epoch: [%d] Iter: % -4d, loss: %-.4f, acc: %.2f, lr: %f \
    #                             ( %.2f sec/batch)' %
    #                         (epoch, idx, loss_now, per_iter_acc*100, cur_lr, float(duration)))
    
    #     train_acc = sess.run(train_acc_op)

    #     logger.info('Epoch: [{}] Avg. Training Acc.: {:.3f}'.format(epoch, train_acc*100))

    #     # Test Phase
    #     logger.info('Epoch: [{}] - Start Test Phase...'.format(epoch))
    #     sess.run(test_init_op)

    #     # start test process        
    #     with tqdm(range(num_test_sample)) as t:
    #         for _ in t:
    #             clip, label = sess.run([test_clip_holder, test_label_holder])
    #             test_iter_acc = sess.run(test_acc_update, \
    #                                      feed_dict={clip_holder: clip, label_holder: label, is_training: False,
    #                                                 net2d_dropout_holder: 1.,
    #                                                 net3d_dropout_holder: 1.})
    #             t.set_postfix_str('Acc: {:.2f}'.format(test_iter_acc*100))
    #             t.update()

    #     test_acc, summary = sess.run([test_acc_op, test_acc_summary], \
    #                                      feed_dict={clip_holder: clip, label_holder: label, is_training: False,
    #                                                 net2d_dropout_holder: 1.,
    #                                                 net3d_dropout_holder: 1.})                                        
    #     summary_writer.add_summary(summary, epoch)

    #     logger.info('Epoch: [{}], Avg Test Acc.: {:.2f}'.format(epoch, test_acc*100))

    #     summary_writer.flush()

    #     if epoch % args.save_freq == 0:
    #         logger.info('Saving checkpoint to {}...'.format(os.path.join(ckp_dir, 'epoch_{}.ckpt'.format(epoch))))
    #         saver.save(sess, os.path.join(ckp_dir, 'epoch_{}.ckpt'.format(epoch))) 

    #     if test_acc > best_acc:
    #         saver.save(sess, os.path.join(ckp_dir, 'best.ckpt'))
    #         logger.info('Best Acc. is {:.2f} in Epoch [{}]'.format(test_acc*100, epoch))


    sess.close()


if __name__ == '__main__':
    main()
