# -*- coding:utf-8 -*-
import os
import sys
import time

import numpy as np
import tensorflow as tf
from PEutils import Load

from PE.initialization import xavier_weight_init

# Command-line flags (TensorFlow's thin wrapper around argparse).
flags = tf.flags
logging = tf.logging
flags.DEFINE_string('model', 'small', 'type of model')
flags.DEFINE_string('action', 'train', 'type of action: train, pred')
flags.DEFINE_string('path', 'model/', 'path of data & model')
FLAGS = flags.FLAGS


class Config(object):
    """Hyper-parameters for the two-layer feed-forward classifier."""
    # number of examples per training batch
    batch_size = 5
    # number of units in the hidden layer
    hidden_size = 6
    # maximum number of training epochs
    max_epoch = 50
    # L2 regularization strength (guards against overfitting); falsy disables it
    l2 = 0.001
    # learning rate for the optimizer
    lr = 0.1
    # number of input features
    input_size = 10
    # number of output classes; -1 is a placeholder, overwritten from the
    # data loader in main() before the model is built
    target_size = -1
    # dropout keep probability (fed to tf.nn.dropout)
    dropout = 0.9


class Model(object):
    """Two-layer feed-forward (tanh) classifier with optional L2 regularization.

    The whole TF graph is built at construction time: input/target/dropout
    placeholders, two dense layers, softmax cross-entropy loss (plus the L2
    penalty terms collected in ``'total_loss'``) and an Adam training op.
    """

    def __init__(self, config):
        input_size = config.input_size
        hidden_size = config.hidden_size
        target_size = config.target_size
        lr = config.lr
        self.config = config

        # Placeholders; the batch dimension is left open.
        self._input_data = tf.placeholder(tf.float32, shape=[None, input_size], name='Input')
        self._target = tf.placeholder(tf.float32, shape=[None, target_size], name='Target')
        # Keep probability for tf.nn.dropout (feed 1.0 at inference time).
        self._dropout = tf.placeholder(tf.float32, name='Dropout')

        with tf.variable_scope('layer1', initializer=xavier_weight_init()):
            w1 = tf.get_variable('w1', [input_size, hidden_size])
            b1 = tf.get_variable('b1', [hidden_size])
            y1 = tf.nn.tanh(tf.matmul(self._input_data, w1) + b1)
            if self.config.l2:
                tf.add_to_collection('total_loss', 0.5 * self.config.l2 * tf.nn.l2_loss(w1))

        with tf.variable_scope('Layer2', initializer=xavier_weight_init()):
            w2 = tf.get_variable('w2', [hidden_size, target_size])
            b2 = tf.get_variable('b2', [target_size])
            y2 = tf.nn.tanh(tf.matmul(y1, w2) + b2)
            if self.config.l2:
                tf.add_to_collection('total_loss', 0.5 * self.config.l2 * tf.nn.l2_loss(w2))

        # NOTE(review): dropout is applied to the *output* layer and the
        # tanh-activated values are passed as "logits" below; preserved as-is
        # to keep behavior, but worth revisiting (logits are normally the
        # pre-activation values of a dropout-free output layer).
        output = tf.nn.dropout(y2, self._dropout)
        # Keyword arguments make the logits/labels order explicit (the old
        # positional form silently swaps meaning across TF versions).
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=self._target))
        tf.add_to_collection('total_loss', cross_entropy)
        self._loss = loss = tf.add_n(tf.get_collection('total_loss'))

        optimizer = tf.train.AdamOptimizer(lr)
        global_step = tf.Variable(0, name='global_step', trainable=False)

        self.prediction = tf.nn.softmax(output)
        one_hot_prediction = tf.argmax(self.prediction, 1)
        # Count of correctly classified examples in the current batch.
        correct_prediction = tf.equal(tf.argmax(self._target, 1), one_hot_prediction)
        self.correct_prediction = tf.reduce_sum(tf.cast(correct_prediction, 'int32'))

        self._train_op = optimizer.minimize(loss, global_step=global_step)

    @property
    def input_data(self):
        return self._input_data

    @property
    def target(self):
        return self._target

    @property
    def dropout(self):
        return self._dropout

    @property
    def train_op(self):
        return self._train_op

    def run_epoch(self, session, input_data, input_target, verbose=1):
        """Run one full training epoch.

        Args:
            session: active tf.Session with variables already initialized.
            input_data: training examples (sequence; batched by the loader).
            input_target: training labels aligned with input_data.
            verbose: report progress every `verbose` steps; falsy disables
                progress output (True behaves like 1, as before).

        Returns:
            Tuple of (mean loss over the epoch, training accuracy in [0, 1]).
        """
        # Integer division: total_steps is a step count, and '/' would
        # produce a float under Python 3.
        total_steps = len(input_data) // self.config.batch_size
        total_loss = []
        total_correct_examples = 0
        total_processed_examples = 0
        load = Load(path=FLAGS.path)
        for step, (x, y) in enumerate(load.data_iterator(input_data, input_target, batch_size=self.config.batch_size,
                                                         target_size=self.config.target_size)):
            feed_dict = {self._input_data: x, self._target: y, self._dropout: self.config.dropout}
            loss, num_correct, _ = session.run([self._loss, self.correct_prediction, self.train_op],
                                               feed_dict=feed_dict)
            total_processed_examples += len(x)
            total_correct_examples += num_correct
            total_loss.append(loss)
            if verbose and step % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(step, total_steps, np.mean(total_loss)))
                # Bug fix: was `sys.stdout.flush` (attribute access, a no-op);
                # the method must actually be called.
                sys.stdout.flush()
        if verbose:
            sys.stdout.write('\r')
            sys.stdout.flush()
        return np.mean(total_loss), total_correct_examples / float(total_processed_examples)


def main():
    """Load the data, build the model and (for action='train') train it.

    Training stops early once training accuracy exceeds 0.987, at which
    point the model is checkpointed to ./model/PEDNN.ckpt.
    """
    print("model config is {}".format(FLAGS.model))
    config = Config()
    load = Load(path=FLAGS.path)
    # The number of output classes comes from the data loader.
    config.target_size = load.get_target_num

    # Session config: grow GPU memory on demand, capped at 50% of the device.
    # (renamed from the original `confif` typo)
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5

    if FLAGS.action == 'train':
        train_input = load.get_input
        train_target = load.get_target
        with tf.Graph().as_default(), tf.Session(config=sess_config) as session:
            initializer = tf.truncated_normal_initializer(stddev=1.0)
            with tf.variable_scope("model", initializer=initializer):
                model = Model(config)
            # Deprecated in later TF releases, but matches the TF version
            # this file targets (kept for compatibility).
            tf.initialize_all_variables().run()
            saver = tf.train.Saver()
            for i in range(config.max_epoch):
                print('Epoch {}'.format(i))
                start = time.time()
                train_loss, train_acc = model.run_epoch(session, train_input, train_target)
                print('Training accuracy: {}'.format(train_acc))
                if train_acc > 0.987:
                    # Good enough: checkpoint the model and stop training.
                    if not os.path.exists("./model"):
                        os.makedirs("./model")
                    saver.save(session, './model/PEDNN.ckpt')
                    break
                print('Total time: {}'.format(time.time() - start))


# Script entry point.
if __name__ == '__main__':
    main()
