import platform
import argparse

"""
Default paramters for experiemnt
"""


class ExpConfig:
    """Default parameters for the experiment.

    Every class attribute below is read by process_args() as an argparse
    default, so each value can be overridden from the command line.
    """

    GPU_ID = 0  # id of the GPU to use
    # phase of the experiment
    PHASE = 'test'  # one of ["train", "test"]
    VISUALIZE = True  # whether to visualize attentions (see --visualize / --no-visualize)

    # input and output
    DATA_BASE_DIR = "/home/haichao/Projects/chinese_ocr/Datasets/attention_ocr/images"  # base directory for the relative paths listed in DATA_PATH
    DATA_PATH = './train.txt' # path of file containing data file names and labels
    MODEL_DIR = 'weights' # the directory for saving and loading model parameters (structure is not stored)
    LOG_PATH = 'log.txt'  # log file path
    OUTPUT_DIR = 'results' # output directory
    STEPS_PER_CHECKPOINT = 500 # checkpointing (print perplexity, save model) per how many steps

    # Optimization
    NUM_EPOCH = 10  # number of training epochs
    BATCH_SIZE = 64
    INITIAL_LEARNING_RATE = 0.001 # initial learning rate; note that we use AdaDelta, so the initial value does not matter much

    # Network parameters
    CLIP_GRADIENTS = True # whether to perform gradient clipping
    MAX_GRADIENT_NORM = 5.0 # Clip gradients to this norm
    TARGET_EMBEDDING_SIZE = 10 # embedding dimension for each target
    ATTN_USE_LSTM = True # whether or not use LSTM attention decoder cell
    ATTN_NUM_HIDDEN=128 # number of hidden units in attention decoder cell
    ATTN_NUM_LAYERS = 2 # number of layers in attention decoder cell
                        # (Encoder number of hidden units will be ATTN_NUM_HIDDEN*ATTN_NUM_LAYERS)
    LOAD_MODEL = False  # whether to resume from a saved model in MODEL_DIR
    OLD_MODEL_VERSION = False  # presumably selects a legacy checkpoint format — not referenced by process_args; verify against model-loading code
    TARGET_VOCAB_SIZE = 26 + 10 + 3 # 0: PADDING, 1: GO, 2: EOS, >2: 0-9, a-z
    
def process_args(args, defaults):
    """Build the argparse parser for the experiment and parse `args`.

    Args:
        args: list of argument strings to parse (e.g. ``sys.argv[1:]``).
        defaults: object whose attributes supply the default values
            (typically the ``ExpConfig`` class).

    Returns:
        argparse.Namespace holding the parsed parameters.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--gpu-id', dest="gpu_id",
                        type=int, default=defaults.GPU_ID)

    parser.add_argument('--use-gru', dest='use_gru', action='store_true')

    parser.add_argument('--phase', dest="phase",
                        type=str, default=defaults.PHASE,
                        choices=['train', 'test'],
                        help=('Phase of experiment, can be either'
                            ' train or test, default=%s' % (defaults.PHASE)))
    parser.add_argument('--data-path', dest="data_path",
                        type=str, default=defaults.DATA_PATH,
                        help=('Path of file containing the path and labels'
                            ' of training or testing data, default=%s'
                            % (defaults.DATA_PATH)))
    # BUGFIX: help text previously interpolated defaults.DATA_PATH here,
    # reporting the wrong default for --data-base-dir.
    parser.add_argument('--data-base-dir', dest="data_base_dir",
                        type=str, default=defaults.DATA_BASE_DIR,
                        help=('The base directory of the paths in the file '
                            'containing the path and labels, default=%s'
                            % (defaults.DATA_BASE_DIR)))
    # Paired store_true/store_false flags share one dest; set_defaults below
    # provides the fallback when neither flag is given.
    parser.add_argument('--visualize', dest='visualize', action='store_true',
                        help=('Visualize attentions or not'
                            ', default=%s' % (defaults.VISUALIZE)))
    parser.add_argument('--no-visualize', dest='visualize', action='store_false')
    parser.set_defaults(visualize=defaults.VISUALIZE)
    parser.add_argument('--batch-size', dest="batch_size",
                        type=int, default=defaults.BATCH_SIZE,
                        help=('Batch size, default = %s'
                            % (defaults.BATCH_SIZE)))
    parser.add_argument('--initial-learning-rate', dest="initial_learning_rate",
                        type=float, default=defaults.INITIAL_LEARNING_RATE,
                        help=('Initial learning rate, default = %s'
                            % (defaults.INITIAL_LEARNING_RATE)))
    parser.add_argument('--num-epoch', dest="num_epoch",
                        type=int, default=defaults.NUM_EPOCH,
                        help=('Number of epochs, default = %s'
                            % (defaults.NUM_EPOCH)))
    parser.add_argument('--steps-per-checkpoint', dest="steps_per_checkpoint",
                        type=int, default=defaults.STEPS_PER_CHECKPOINT,
                        help=('Checkpointing (print perplexity, save model) per'
                            ' how many steps, default = %s'
                            % (defaults.STEPS_PER_CHECKPOINT)))
    parser.add_argument('--target-vocab-size', dest="target_vocab_size",
                        type=int, default=defaults.TARGET_VOCAB_SIZE,
                        help=('Target vocabulary size, default=%s'
                            % (defaults.TARGET_VOCAB_SIZE)))
    parser.add_argument('--model-dir', dest="model_dir",
                        type=str, default=defaults.MODEL_DIR,
                        help=('The directory for saving and loading model '
                            '(structure is not stored), '
                            'default=%s' % (defaults.MODEL_DIR)))
    parser.add_argument('--target-embedding-size', dest="target_embedding_size",
                        type=int, default=defaults.TARGET_EMBEDDING_SIZE,
                        help=('Embedding dimension for each target, default=%s'
                            % (defaults.TARGET_EMBEDDING_SIZE)))
    parser.add_argument('--attn-num-hidden', dest="attn_num_hidden",
                        type=int, default=defaults.ATTN_NUM_HIDDEN,
                        help=('number of hidden units in attention decoder cell'
                            ', default=%s'
                            % (defaults.ATTN_NUM_HIDDEN)))
    parser.add_argument('--attn-num-layers', dest="attn_num_layers",
                        type=int, default=defaults.ATTN_NUM_LAYERS,
                        help=('number of hidden layers in attention decoder cell'
                            ', default=%s'
                            % (defaults.ATTN_NUM_LAYERS)))
    parser.add_argument('--load-model', dest='load_model', action='store_true',
                        help=('Load model from model-dir or not'
                            ', default=%s' % (defaults.LOAD_MODEL)))
    parser.add_argument('--no-load-model', dest='load_model', action='store_false')
    parser.set_defaults(load_model=defaults.LOAD_MODEL)
    parser.add_argument('--log-path', dest="log_path",
                        type=str, default=defaults.LOG_PATH,
                        help=('Log file path, default=%s'
                            % (defaults.LOG_PATH)))
    parser.add_argument('--output-dir', dest="output_dir",
                        type=str, default=defaults.OUTPUT_DIR,
                        help=('Output directory, default=%s'
                            % (defaults.OUTPUT_DIR)))
    # BUGFIX: type must be float — the default (5.0) is non-integral and
    # type=int rejected inputs like "5.0" and truncated others.
    # The underscore spelling is kept as an alias for backward compatibility.
    parser.add_argument('--max-gradient-norm', '--max_gradient_norm',
                        dest="max_gradient_norm",
                        type=float, default=defaults.MAX_GRADIENT_NORM,
                        help=('Clip gradients to this norm'
                              ', default=%s'
                              % (defaults.MAX_GRADIENT_NORM)))
    # Dash-style name added for consistency; old mixed spelling kept as alias.
    parser.add_argument('--no-gradient-clipping', '--no-gradient_clipping',
                        dest='clip_gradients', action='store_false',
                        help=('Do not perform gradient clipping, default for '
                              'clip_gradients is %s' %
                              (defaults.CLIP_GRADIENTS)))
    parser.set_defaults(clip_gradients=defaults.CLIP_GRADIENTS)

    parameters = parser.parse_args(args)
    return parameters
