import importlib
import json
import logging
from pathlib import Path

from trainer import lr_schedule
import torch
import tqdm
import yaml
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader

from system import map
from trainer import make_encoder
from dataset.training import lif_dataset as ldata
from network import criterion
from utils import exp_util
import GPUtil


class TensorboardViz(object):
    """Thin convenience wrapper around a TensorBoard ``SummaryWriter``."""

    def __init__(self, logdir):
        # Directory where TensorBoard event files will be written.
        self.logdir = logdir
        # NOTE: attribute name kept as 'writter' (sic) so external code
        # that reaches into this object keeps working.
        self.writter = SummaryWriter(self.logdir)

    def text(self, _text):
        # Turn newlines into markdown hard line breaks followed by a tab so
        # TensorBoard renders the text as an indented code-like block.
        formatted = _text.replace('\n', '  \n\t')
        self.writter.add_text('Info', formatted)

    def update(self, mode, it, eval_dict):
        # Log a dict of named scalars under `mode` at global step `it`.
        self.writter.add_scalars(mode, eval_dict, global_step=it)

    def flush(self):
        # Force any pending events out to disk.
        self.writter.flush()

# Command-line interface: extends the experiment parser (which also reads a
# hyper-parameter file) with reconstruction / ray-marching options and
# per-term loss weights.
parser = exp_util.ArgumentParserX(add_hyper_arg=True)
parser.add_argument('-v', '--visualize', action='store_true', help='Visualize')
parser.add_argument('--num', type=int, help='using how many RGBD images to recon', default=4)
parser.add_argument('--max_hits', type=int, default=10, help="the max hit iter of ray")
parser.add_argument('--pixel_per_view', type=int, default=2048, help="sampling pixels of each view")
parser.add_argument('--raymarching_stepsize_ratio', type=float, default=0.125, help="per voxel sample point")
### Loss options consumed by network/criterion: per-term weights (a weight of
### 0.0 disables that term) and related switches.
parser.add_argument('--L1', action='store_true',
                    help='if enabled, use L1 instead of L2 for RGB loss')
parser.add_argument('--color-weight', type=float, default=256.0)
parser.add_argument('--depth-weight', type=float, default=0.0)
parser.add_argument('--depth-weight-decay', type=str, default=None,
                    help="""if set, use tuple to set (final_ratio, steps).
                            For instance, (0, 30000)    
                        """)
parser.add_argument('--alpha-weight', type=float, default=0.0)
parser.add_argument('--vgg-weight', type=float, default=0.0)
parser.add_argument('--eikonal-weight', type=float, default=0.0)
parser.add_argument('--regz-weight', type=float, default=0.0)
# VGG feature level used for the perceptual loss.
parser.add_argument('--vgg-level', type=int, choices=[1,2,3,4], default=2)
parser.add_argument('--eval-lpips', action='store_true',
                    help="evaluate LPIPS scores in validation")
parser.add_argument('--no-background-loss', action='store_true')




def main():
    """Entry point: parse args, select a GPU, and build the RGBD sequence dataset.

    Raises:
        RuntimeError: when ``--gpu auto`` is requested but no GPU satisfies
            the load/memory thresholds.
    """
    logging.basicConfig(level=logging.INFO)
    args = parser.parse_args()
    # Sub-configs arrive from the hyper file as plain dicts; convert them to
    # attribute-style namespaces for uniform access below.
    args.mapping = exp_util.dict_to_args(args.mapping)
    args.sequence_kwargs = exp_util.dict_to_args(args.sequence_kwargs)
    args.tracking = exp_util.dict_to_args(args.tracking)
    logging.info(args)

    ### GPU selection: auto-pick the device with the most free memory, or
    ### honor an explicit --gpu index.
    if args.gpu == "auto":
        device_ids = GPUtil.getAvailable(order='memory', limit=1,
            maxLoad=0.5, maxMemory=0.5, includeNan=False, excludeID=[],
            excludeUUID=[])
        if not device_ids:
            # Fail fast with a clear message instead of an IndexError.
            raise RuntimeError("No available GPU found (load/memory under 50%).")
        gpu = device_ids[0]
    else:
        gpu = args.gpu
    logging.info("in this exp, we use GPU: %s", gpu)
    device = torch.device("cuda:{}".format(gpu))

    # Epochs at which snapshots are written: every `snapshot_frequency`
    # epochs plus any explicitly requested extras, sorted ascending.
    checkpoints = list(range(args.snapshot_frequency, args.num_epochs + 1, args.snapshot_frequency))
    checkpoints.extend(args.additional_snapshots)
    checkpoints.sort()

    lr_schedules = lr_schedule.get_learning_rate_schedules(args)

    ### Dataset: the RGBD sequence. `sequence_type` is "package.ClassName",
    ### resolved dynamically under dataset.production.
    seq_package, seq_class = args.sequence_type.split(".")
    sequence_module = importlib.import_module("dataset.production." + seq_package)
    sequence_class = getattr(sequence_module, seq_class)
    seq_dataset = sequence_class(args=args, path=args.sequence_kwargs.path,
        start_frame=0, end_frame=-1, device=device, load_gt=True,
        first_tq=args.sequence_kwargs.first_tq)
    all_results = seq_dataset.forward(0)  # get all the results




# Run the training entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main()








