"""
Train the adas-lane detection model

Author: Ye Yuan
"""
import logging
import math
import os
import sys

# Prepend the local MXNet build so it wins over any system-wide install.
# sys.path.insert(0, "../../python")
sys.path.insert(0, '/home/xiaomin/wxm/mxnet/python')
import mxnet as mx

import ConfigParser
from datetime import datetime

from data_loader import DataLoader
import ex_utils
import symbol
from eval_metric import *


class Fitter:
    def __init__(self, config):
        try:
            self.config = config
            # [model]
            # Choose model type and data loader
            self.model_type = config.get('model', 'model_type')
            self.multi_thread = config.getboolean('model', 'multi_thread')

            # Fine-tune options
            self.finetune = config.getboolean('model', 'finetune')
            self.finetune_model_prefix = config.get('model', 'finetune_model_prefix')
            self.model_epoch = int(config.get('model', 'model_epoch'))

            # Save model options
            self.model_dir = config.get('model', 'model_dir')
            self.model_prefix = config.get('model', 'model_prefix')

            # [data]
            # Data directory
            self.data_path = config.get('data', 'data_path')
            self.folder = config.get('data', 'folder')
            self.train_list = config.get('data', 'train_list')
            self.val_list = config.get('data', 'val_list')

            # Data info
            self.img_width = int(config.get('data', 'img_width'))
            self.img_height = int(config.get('data', 'img_height'))
            self.num_label = int(config.get('data', 'num_label'))
            self.ignore_label = int(config.get('data', 'ignore_label'))
            self.input_shape = (1, self.img_width, self.img_height)

            # [train]
            # SGD options
            self.lr = float(config.get('train', 'learning_rate'))
            self.momentum = float(config.get('train', 'momentum'))
            self.num_epoch = int(config.get('train', 'num_epoch'))
            self.weight_decay = float(config.get('train', 'weight_decay'))
            self.lr_factor = float(config.get('train', 'lr_factor'))
            self.lr_factor_epoch = int(config.get('train', 'lr_factor_epoch'))

            # Training options
            self.checkpoint_period = int(config.get('train', 'checkpoint_period'))
            self.use_gpu_idx = config.get('train', 'use_gpu_idx')
            self.ctx = mx.cpu() if self.use_gpu_idx == 'None' else [
                mx.gpu(int(i)) for i in self.use_gpu_idx.split(',')]
            self.gpu_num = len(self.use_gpu_idx.split(','))
            self.kv_store = config.get('train', 'kv_store')
            self.numpy_batch_size = int(config.get('train', 'batch_size'))

            # Compute epoch size, input_shape, output_shape
            self.train_size = len([line for line in open(self.train_list, 'r')])
            self.batch_size = int(config.get('train', 'batch_size'))
            self.epoch_size = self.train_size / self.batch_size

        except ValueError:
            logging.error('Config parameter error')

    def choose_model(self):
        if self.model_type == 'lenet':
            net = symbol.get_lenet(self.num_label)
            return net
        elif self.model_type == 'neonscript_nornn':
            net = symbol.get_neonscript_symbol(self.num_label)
            return net
        elif self.model_type == 'neonscript_withrnn':
            net = symbol.get_neonscriptwithrnn_symbol(self.num_label)
            return net
        elif self.model_type == 'lenet_noBNnoDropout':
            net = symbol.get_lenet_noBNnoDropout(self.num_label)
            return net
        elif self.model_type == 'lenet_withDropout':
            net = symbol.get_lenet_withDropout(self.num_label)
            return net
        elif self.model_type == 'lenet_withrnn':
            net = symbol.get_lenet_withrnn(self.num_label)
            return net
        else:
            logging.error('Not valid model type')
            return None

    def build_model(self):
        net = self.choose_model()
        model_args = {}
        print self.finetune_model_prefix
        print self.model_epoch
        if self.finetune:
            arg_params, aux_params = ex_utils.load_params(self.finetune_model_prefix, self.model_epoch)
            model_args = {'arg_params': arg_params, 'aux_params': aux_params}
            begin_epoch = self.model_epoch
            # begin_epoch = 0
        else:
            begin_epoch = 0

        # Learning rate scheduler
        model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
            step=max(1, int(self.epoch_size * self.lr_factor_epoch)),
            factor=self.lr_factor
        )

        model = mx.model.FeedForward(
            ctx=self.ctx,
            symbol=net,
            num_epoch=self.num_epoch,
            learning_rate=self.lr,
            momentum=self.momentum,
            wd=self.weight_decay,
            initializer=mx.init.Xavier(rnd_type="gaussian", factor_type="in", magnitude=2.0),
            numpy_batch_size=self.numpy_batch_size,
            begin_epoch=begin_epoch,
            allow_extra_params=True,
            # arg_params=model_args['arg_params'],
            # aux_params=model_args['aux_params'],
            # learning_rate=args.lr,
            **model_args
        )
        # model = mx.model.FeedForward(
        #     ctx=self.ctx,
        #     symbol=net,
        #     num_epoch=self.num_epoch,
        #     learning_rate=self.lr,
        #     momentum=self.momentum,
        #     wd=self.weight_decay,
        #     initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
        #     numpy_batch_size=self.numpy_batch_size,
        #     begin_epoch=begin_epoch,
        #     allow_extra_params=True,
        #     **model_args
        # )
        return model

    def get_data_iter(self, input_args):
        # batch_size = 1
        # num_lstm_layer = 3
        # num_hidden = 128
        # init_h = [('LSTM_init_h', (batch_size, num_lstm_layer, num_hidden))]
        # init_c = [('LSTM_init_c', (batch_size, num_lstm_layer, num_hidden))]
        # init_states = init_c + init_h
        # train_iter = DataLoader(self.train_list, input_args, init_states=init_states)
        # val_iter = DataLoader(self.val_list, input_args, init_states=init_states)

        train_iter = DataLoader(self.train_list, input_args)
        val_iter = DataLoader(self.val_list, input_args)
        return train_iter, val_iter

    def fit(self):
        kv = mx.kvstore.create(self.kv_store)
        if 'local' in kv.type and self.gpu_num < 2:
            kv = None
        # Make a folder to save model
        model_path = os.path.join(self.model_dir, self.model_prefix)
        if not os.path.isdir(model_path):
            os.mkdir(model_path)
        model_full_path = os.path.join(model_path, datetime.now().strftime('%Y_%m_%d_%H:%M:%S'))
        if not os.path.isdir(model_full_path):
            os.mkdir(model_full_path)

        # Save config in model folder
        with open(os.path.join(model_full_path,
                               'train_' + datetime.now().strftime('%Y_%m_%d_%H:%M:%S') + '.cfg'), 'w') as f:
            self.config.write(f)
        ex_utils.save_log(model_full_path)  # Save event log

        # Build symbol and train
        def do_checkpoint(prefix):
            def _callback(iter_no, sym, arg, aux):
                if iter_no % self.checkpoint_period == 0:
                    mx.model.save_checkpoint(prefix, iter_no + 1, sym, arg, aux)

            return _callback

        checkpoint = do_checkpoint(os.path.join(model_full_path, self.model_type))
        model = self.build_model()

        input_args = {'data_path': self.data_path,
                      'input_shape': self.input_shape,
                      'multi_thread': self.multi_thread,
                      'batch_size': self.batch_size,
                      'im_size': (self.img_height, self.img_width),
                      'ignore_label': self.ignore_label,
                      'data_shape': (self.input_shape, (2,)),
                      'folder': self.folder,
                      'data_dir': self.data_path
                      }

        train, val = self.get_data_iter(input_args)
        eval_metric = CompositeEvalMetric(metrics=[
            Accuracy(),
            F1()
            # AccWithIgnoreMetric(ignore_label=self.ignore_label),
            # IoUMetric(ignore_label=self.ignore_label, label_num=self.num_label),
            # SoftmaxLoss(ignore_label=self.ignore_label, label_num=self.num_label),
        ])

        call_back = ex_utils.Speedometer(self.batch_size, 10)
        model.fit(
            X=train,
            eval_data=val,
            eval_metric=eval_metric,
            kvstore=kv,
            epoch_end_callback=checkpoint,
            batch_end_callback=call_back,
        )
        import time
        time.sleep(35)

if __name__ == '__main__':
    # Allow the config path to be passed on the command line; fall back to
    # the historical hard-coded default for backward compatibility.
    default_cfg = '/home/xiaomin/wxm/Code/KaggleSeizure2016/cfgs/ex_mxnet_cnn.cfg'
    config_path = sys.argv[1] if len(sys.argv) > 1 else default_cfg
    config = ConfigParser.RawConfigParser()
    config.read(config_path)
    fitter = Fitter(config)
    fitter.fit()
