####################################################################
# This script defines the configuration class that saves/holds the hyperparameters
#
# Written by CathyQ
####################################################################

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json

from utils.datasets import Kalantari as DataSets


class Config(object):
    """Training/evaluation configuration for the HDR-merge experiments.

    Class attributes hold the static defaults; ``__init__`` derives the
    per-run values (batch size, buffer sizes, image geometry, dataset
    paths) from those defaults and from the ``DataSets`` description.
    """

    """DataSet"""
    scene = 'Dynamic'
    architecture = 'AHDR'
    norm_interval = False
    # mu-law tone-mapping parameter (presumably for HDR compression — TODO confirm)
    mu = 5000

    """Training"""
    batch_per_gpu = 4
    num_epochs = 200
    summary_freq = 1
    save_freq = 2
    val_whole = True
    sample_freq = 2
    sample_steps = 2
    test_freq = 25  # val for see
    view_freq = test_freq * 2  # train for see

    """Parallel processing of data"""
    num_GPUs = 2
    num_parallel_calls = num_GPUs

    """Loss"""
    loss_norm = 1

    """Checkpoint"""
    learning_rate = 1e-5
    learning_rate_scheduling = True

    """Loading"""
    restart = False
    load_rule = False  # True for best, False for last
    resume = "Models"

    """Log"""
    log_save_prefix = 'log'

    def __init__(self, **kwargs):
        super(Config, self).__init__(**kwargs)
        # Derived batching/buffering parameters.
        self.batch_size = int(self.num_GPUs * self.batch_per_gpu)
        self.prefetch_buffer_size = max(8, self.batch_size * self.num_parallel_calls)
        self.shuffle_buffer_size = max(self.prefetch_buffer_size * 2, 1000)

        # Dataset-dependent geometry and paths.
        d_set = DataSets()
        self.load_size = 256
        self.fine_size = self.load_size
        self.image_size = (self.fine_size, self.fine_size)
        self.c_dim = d_set.color_channel
        self.num_shots = d_set.num_seq
        self.ref_idx = int(self.num_shots / 2)  # middle exposure is the reference
        self.batch_size_per_file = d_set.batch_size_per_file

        self.data_set = d_set.path[self.scene]
        self.csv_file = d_set.csv_file
        self.data_set_test = d_set.test_path[self.scene]
        self.view_path = d_set.view_path
        scene_idx = d_set.scenes.index(self.scene)
        self.test_csv_file = d_set.test_csv[scene_idx]
        self.view_csv_file = d_set.view_csv[scene_idx]

        # BUG FIX: the original used `is "AHDR"`, which compares object
        # identity and only happened to work because of CPython string
        # interning. Equality (`==`) is the correct comparison.
        if self.architecture == "AHDR":
            self.loss = 'single_scale'
        if self.restart:
            self.resume = None  # force training from scratch, ignore checkpoints
        # Sample every step only when the per-GPU batch is at least 8.
        self.sample_steps = 1 if self.batch_per_gpu // 8 > 0 else 2


class ResetConfig(Config):
    """Rebuild a :class:`Config` from a previously saved config object.

    Every attribute of *config* is copied onto the new instance
    (overwriting the freshly computed defaults), then the values that
    depend on the current dataset layout are re-derived so that stale
    paths stored in the checkpoint are refreshed.
    """

    def __init__(self, config, **kwargs):
        super(ResetConfig, self).__init__(**kwargs)
        # Overwrite defaults with the saved configuration's attributes.
        for key, value in vars(config).items():
            setattr(self, key, value)
        # Re-derive sizes/paths that must reflect the current environment.
        self.prefetch_buffer_size = max(8, self.batch_size * self.num_parallel_calls)

        d_set = DataSets()
        self.data_set = d_set.path[self.scene]
        self.csv_file = d_set.csv_file
        self.data_set_test = d_set.test_path[self.scene]
        scene_idx = d_set.scenes.index(self.scene)
        self.test_csv_file = d_set.test_csv[scene_idx]
        self.view_csv_file = d_set.view_csv[scene_idx]

        # BUG FIX: the original used `is 'AHDR'`, which compares object
        # identity; use equality (`==`) for string comparison.
        if self.architecture == 'AHDR':
            self.loss = 'single_scale'
        if self.restart:
            self.resume = None  # force training from scratch, ignore checkpoints
        self.sample_steps = 1 if self.batch_per_gpu // 8 > 0 else 2
