# encoding=utf8
# -*- coding: utf-8 -*
__author__ = 'mmfu.cn@gmail.com'

import yaml
import os
import tensorflow as tf
import numpy as np
import data_utils
# from sklearn.cross_validation import KFold
from sklearn.model_selection import KFold
from CNN import TextCNN
from RNN import TextRNN
from DCNN import TextDCNN
from EA import TextEA
from SWEM import TextSWEM
from RNN_CNN import RNN_CNN
import math
import pickle
import utils
import time
from collections import OrderedDict
import random

# Pin CUDA device enumeration to PCI bus order so device indices are stable,
# and expose only physical GPU 7 to TensorFlow (single-GPU training).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "7"

def _invert_dict(d):
    """Return a new dict mapping each value of *d* back to its key."""
    return {v: k for k, v in d.items()}


def _build_model(config):
    """Instantiate the text classifier selected by ``config['text_model']``.

    Raises:
        ValueError: for an unknown model name. (The original fell through
        with ``model = None`` and crashed later with an ``AttributeError``.)
    """
    registry = {
        'dcnn': TextDCNN,
        'cnn': TextCNN,
        'rnn': TextRNN,
        'rnn_cnn': RNN_CNN,
        'ea': TextEA,
        'swem': TextSWEM,
    }
    name = config['text_model']
    if name not in registry:
        raise ValueError('unknown text_model: %r' % name)
    return registry[name](config)


def main():
    """Train a dual-head (action + target) text-intent model.

    Loads config/data/embeddings, trains for ``nb_epoch`` epochs, and after
    every epoch evaluates on the test set, dumps per-example predictions,
    and saves both a TF checkpoint and a frozen ``.pb`` graph.
    """
    t = str(int(time.time()))

    # Model configuration file. safe_load avoids arbitrary object
    # construction (yaml.load without a Loader is unsafe and deprecated
    # since PyYAML 5.1).
    config = OrderedDict()
    with open('./config.yml', encoding='utf8') as file_config:
        config = yaml.safe_load(file_config)
    config['is_training'] = True

    logger = utils.get_logger('logs/log_'+t+'_'+config['text_model']+'.log')

    print("loading data......")
    # The full-corpus pass only contributes the label vocabularies; the
    # examples themselves are not used.
    _, action_label_dic, target_label_dic = data_utils.load_data(config['all_file'], config['sequence_length'])
    train_data, _, _ = data_utils.load_data(config['train_file'], config['sequence_length'])
    test_data, _, _ = data_utils.load_data(config['test_file'], config['sequence_length'])
    embeds, word2id = data_utils.load_embed(config['word_emb_file'], dim=config['embed_size'])

    # NOTE(review): when maps_file exists, word2id is *replaced* by the
    # pickled copy even though `embeds` was just built against the freshly
    # loaded word2id. If the embedding file ever changes, the two can get
    # out of sync — confirm this is intended before relying on cached maps.
    if os.path.exists(config['maps_file']):
        print("load maps...")
        with open(config['maps_file'], 'rb') as pkl_file:
            word2id, action_label_dic, target_label_dic = pickle.load(pkl_file)
    else:
        print("dump maps...")
        with open(config['maps_file'], 'wb') as maps_f:
            pickle.dump([word2id, action_label_dic, target_label_dic], maps_f)

    # Reverse maps for decoding predicted ids back to label strings.
    id2action = _invert_dict(action_label_dic)
    id2target = _invert_dict(target_label_dic)

    random.shuffle(train_data)
    train_actions, train_targets, train_querys = data_utils.map2id(train_data, word2id, action_label_dic, target_label_dic)
    test_actions, test_targets, test_querys = data_utils.map2id(test_data, word2id, action_label_dic, target_label_dic)
    config['action_num_classes'] = len(action_label_dic)
    config['target_num_classes'] = len(target_label_dic)
    config['vocab_size'] = len(word2id)

    train_actions, train_targets, train_querys = np.array(train_actions), np.array(train_targets), np.array(train_querys)
    test_actions, test_targets, test_querys = np.array(test_actions), np.array(test_targets), np.array(test_querys)

    print("load over !!!")

    utils.print_config(config, logger)

    # Limit GPU memory: grow allocation on demand instead of grabbing the
    # whole card up front.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        model = _build_model(config)
        sess.run(tf.global_variables_initializer())
        # Overwrite the randomly initialised embedding table with the
        # pretrained vectors.
        sess.run(model.Embedding.assign(embeds))

        # One Saver reused across epochs (creating a new Saver per epoch
        # keeps adding save/restore ops to the graph).
        saver = tf.train.Saver()

        bs = config['batch_size']
        train_steps = math.ceil(len(train_actions) / bs)
        logger.info("start training......")
        for epo in range(config['nb_epoch']):
            logger.info("第"+str(epo)+"次迭代......")

            #### train step ####
            losses, action_accs, target_accs = [], [], []
            for i in range(train_steps):
                lo, hi = i * bs, (i + 1) * bs
                batch_train_action = train_actions[lo:hi]
                batch_train_target = train_targets[lo:hi]
                batch_train_query = train_querys[lo:hi]

                step, batch_loss, _, batch_action_acc, batch_target_acc = sess.run(
                    [model.global_step, model.loss, model.train_op,
                     model.action_accuracy, model.target_accuracy],
                    {model.input_x: batch_train_query,
                     model.input_action: batch_train_action,
                     model.input_target: batch_train_target,
                     model.dropout_keep_prob: config['dropout_keep_prob']})

                losses.append(batch_loss)
                action_accs.append(batch_action_acc)
                target_accs.append(batch_target_acc)
                # Log running means at the check interval and at epoch end,
                # then reset the running windows.
                if step % config['steps_check'] == 0 or (i + 1) == train_steps:
                    logger.info("iteration:{} step:{}, "
                                    " loss:{:>9.6f}, action_acc:{:>9.6f}, target_acc:{:>9.6f}".format(
                            epo, step, np.mean(losses), np.mean(action_accs), np.mean(target_accs)))
                    losses, action_accs, target_accs = [], [], []

            #### test step ####
            logger.info("Testing......")
            test_steps = math.ceil(len(test_actions) / bs)
            test_intent_accs = []
            test_action_accs = []
            test_target_accs = []
            test_results = []
            for k in range(test_steps):
                lo, hi = k * bs, (k + 1) * bs
                batch_test_action = test_actions[lo:hi]
                batch_test_target = test_targets[lo:hi]
                batch_test_query = test_querys[lo:hi]

                # dropout_keep_prob is pinned to 1.0 at evaluation time.
                (test_step, test_actions_pred, test_targets_pred,
                 test_batch_action_acc, test_batch_target_acc, test_batch_intent_acc) = sess.run(
                    [model.global_step, model.action_predictions, model.target_predictions,
                     model.action_accuracy, model.target_accuracy, model.intent_accuracy],
                    {model.input_x: batch_test_query,
                     model.input_action: batch_test_action,
                     model.input_target: batch_test_target,
                     model.dropout_keep_prob: 1.0})

                # Decode ids back to label strings for the result dump:
                # [pred_action, gold_action, pred_target, gold_target, query].
                for pred_action, pred_target, gold_action, gold_target, query in zip(
                        test_actions_pred, test_targets_pred,
                        batch_test_action, batch_test_target, batch_test_query):
                    test_results.append([id2action[pred_action], id2action[gold_action],
                                         id2target[pred_target], id2target[gold_target], query])
                test_action_accs.append(test_batch_action_acc)
                test_target_accs.append(test_batch_target_acc)
                test_intent_accs.append(test_batch_intent_acc)

            mean_action = np.mean(test_action_accs)
            mean_target = np.mean(test_target_accs)
            mean_intent = np.mean(test_intent_accs)
            logger.info("#######test######")
            logger.info("iteration:{} , "
                            " action_acc:{:>9.6f}, target_acc:{:>9.6f}, intent_acc:{:>9.6f}".format(
                    epo, mean_action, mean_target, mean_intent))

            # Save per-example predictions. result_path was never created by
            # the original script even though model_path/pb_path were — fixed.
            os.makedirs(config['result_path'], exist_ok=True)
            filename = os.path.join(config['result_path'], str(epo)+'.txt')
            with open(filename, 'w', encoding='utf8') as f:
                for result in test_results:
                    f.write(result[0]+' '+result[1]+'\t'+result[2]+' '+result[3]+'\n')

            # Shared accuracy-stamped name for this epoch's artifacts.
            tag = (str(epo)
                   + '_action_acc' + str(round(mean_action, 4))
                   + '_target_acc' + str(round(mean_target, 4))
                   + '_intent_acc' + str(round(mean_intent, 4)))

            # Save the TF checkpoint.
            os.makedirs(config["model_path"], exist_ok=True)
            checkpoint_path = os.path.join(config["model_path"], tag + ".ckpt")
            saver.save(sess, checkpoint_path)
            print('Model Trained and Saved')

            # Freeze the graph to a .pb exposing only the inference outputs.
            os.makedirs(config["pb_path"], exist_ok=True)
            checkpoint_pb_path = os.path.join(config["pb_path"], tag + ".pb")
            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def,
                output_node_names=['action_predictions', 'target_predictions',
                                   'action_prob', 'target_prob'])
            with tf.gfile.FastGFile(checkpoint_pb_path, mode='wb') as f:
                f.write(output_graph_def.SerializeToString())
                print("Saved model checkpoint to {}\n".format(checkpoint_pb_path))



if __name__ == '__main__':
    # Script entry point: run the full load/train/evaluate/save loop.
    main()