from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import ast
import os
import random
import sys
import time

import numpy as np
import tensorflow as tf

import data_proc
from data_proc import EOS_ID
from model import Seq2SeqModel
from properties import Properties
from seq2seq import create_cell, get_batch, run_step
from shooter_spliter import process_shooter_data

def _check_restore_parameters(sess, path, saver):
    """Restore model weights from the newest checkpoint under *path*'s directory, if any."""
    checkpoint = tf.train.get_checkpoint_state(os.path.dirname(path))
    if checkpoint is None or not checkpoint.model_checkpoint_path:
        # No saved state found — the caller's variable initializer stands.
        print(">> initializing fresh parameters for the Chatbot")
        return
    print(">> loading parameters for the Chatbot")
    saver.restore(sess, checkpoint.model_checkpoint_path)

def _get_random_bucket(train_buckets_scale):
    """ Get a random bucket from which to choose a training sample """
    rand = random.random()
    return min([i for i in range(len(train_buckets_scale))
                if train_buckets_scale[i] > rand])

def _read_data(source_path, target_path, buckets, max_size=None):
    """Read parallel token-id files and place each pair into its smallest bucket.

    Each line of the source/target files is a whitespace-separated list of
    token ids; EOS_ID is appended to every target sequence. Pairs too long for
    every bucket are silently dropped. Stops after *max_size* pairs when given.
    Returns one list of [source_ids, target_ids] pairs per bucket.
    """
    data_set = [[] for _ in buckets]
    with tf.gfile.GFile(source_path, mode="r") as source_file, \
         tf.gfile.GFile(target_path, mode="r") as target_file:
        counter = 0
        source = source_file.readline()
        target = target_file.readline()
        while source and target and (not max_size or counter < max_size):
            counter += 1
            if counter % 100000 == 0:
                print(">> reading data line %d" % counter)
                sys.stdout.flush()
            source_ids = [int(token) for token in source.split()]
            target_ids = [int(token) for token in target.split()] + [EOS_ID]
            # First bucket large enough for both sides wins.
            for bucket_id, (source_size, target_size) in enumerate(buckets):
                if len(source_ids) < source_size and len(target_ids) < target_size:
                    data_set[bucket_id].append([source_ids, target_ids])
                    break
            source = source_file.readline()
            target = target_file.readline()
    return data_set


def train(props):
    """Train the chatbot seq2seq model until interrupted.

    Prepares the tokenized data, builds the model graph (with the backward
    pass), restores any existing checkpoint, then loops forever: sample a
    bucket, run one training step, and periodically report the windowed loss,
    decay the learning rate on plateau, save checkpoints, and evaluate on the
    dev set.

    Args:
        props: Properties object supplying all path and hyperparameter
            settings (bucket list, step intervals, checkpoint paths, ...).
    """
    print(">> preparing data... ")
    enc_train, dec_train, enc_dev, dec_dev, _, _ = data_proc.prepare_data(props)
    # Setup config to use the BFC allocator.
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    cell = create_cell(props.getProperties('Attrs.celltype'))
    # In train mode we need the backward pass, so forward_only is False.
    model = Seq2SeqModel(False, cell, props)
    model.build_graph()

    # BUG FIX: the BFC config was built but never handed to the session.
    with tf.Session(config=config) as sess:
        print('>> running session')
        sess.run(tf.global_variables_initializer())
        _check_restore_parameters(sess, props.getProperties('Path.workspace'), model.saver)

        # The buckets property is data (e.g. "[(5, 10), (10, 15)]"), not code:
        # parse it with literal_eval instead of eval.
        buckets = ast.literal_eval(props.getProperties('Attrs.buckets'))
        dev_set = _read_data(enc_dev, dec_dev, buckets)
        train_set = _read_data(enc_train, dec_train, buckets,
                               int(props.getProperties('Attrs.max_train_data_size')))
        train_bucket_sizes = [len(train_set[b]) for b in range(len(buckets))]
        train_total_size = float(sum(train_bucket_sizes))

        # A bucket scale is an increasing list of numbers from 0 to 1 used to
        # select a bucket; the width of [scale[i-1], scale[i]] is proportional
        # to the size of the i-th training bucket.
        train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                               for i in range(len(train_bucket_sizes))]

        iteration = model.global_step.eval()
        skip_step = int(props.getProperties('Attrs.print_steps'))
        validation_step = int(props.getProperties('Attrs.validation_steps'))
        chkpt_step = int(props.getProperties('Attrs.steps_per_checkpoint'))
        chkpt_path = os.path.join(props.getProperties('Path.checkpoint'), 'chatbot.ckpt')
        total_loss = 0
        previous_losses = []
        while True:
            bucket_id = _get_random_bucket(train_buckets_scale)
            encoder_inputs, decoder_inputs, decoder_masks = get_batch(props, train_set, bucket_id)
            start = time.time()
            _, step_loss, _ = run_step(sess, props, model, encoder_inputs,
                                       decoder_inputs, decoder_masks, bucket_id, False)
            total_loss += step_loss
            iteration += 1

            if iteration % skip_step == 0:
                # total_loss / skip_step is the average per-step loss, i.e. the
                # log-perplexity over the reporting window.
                print('Iter {} (lr={}): log perplexity {}, time {}'.
                      format(iteration, model.learning_rate.eval(),
                             total_loss / skip_step, time.time() - start))
                # Decay the learning rate when the windowed loss stops
                # improving relative to the recent history.
                if len(previous_losses) > 2 and total_loss > max(previous_losses[-5:]):
                    sess.run(model.learning_rate_decay_op)
                previous_losses.append(total_loss)
                total_loss = 0
            if iteration % chkpt_step == 0:
                model.saver.save(sess, chkpt_path, global_step=model.global_step)
            if iteration % validation_step == 0:
                # Run evals on the development set and print their loss.
                for bucket_id in range(len(buckets)):
                    if len(dev_set[bucket_id]) == 0:
                        print("  eval: empty bucket %d" % (bucket_id))
                        continue
                    encoder_inputs, decoder_inputs, target_weights = \
                        get_batch(props, dev_set, bucket_id)
                    _, eval_loss, _ = run_step(sess, props, model, encoder_inputs,
                                               decoder_inputs, target_weights,
                                               bucket_id, True)
                    print("  eval: bucket %d loss %f" % (bucket_id, eval_loss))
            sys.stdout.flush()


def main(argv=None):
    """Entry point: load configuration, build raw data if missing, then train."""
    props = Properties('standard.props')
    props.readProperties()
    encoded_train_path = props.getProperties('Path.train.train_enc')
    # First run: the encoded training file does not exist yet, so split the
    # raw shooter corpus before training.
    if not os.path.exists(encoded_train_path):
        process_shooter_data()
    train(props)

if __name__ == '__main__':
    # tf.app.run() parses command-line flags and then invokes main(argv).
    tf.app.run()