import tensorflow as tf
from LSTMModel.model import Model
from LSTMModel.dataReader import DataReader
from LSTMModel.config import lstm_config
import os
from LSTMModel.utils import saveEpochFigure

# Dataset directory layout (all splits live under lstm_config.data_dir).
train_dir = os.path.join(lstm_config.data_dir, "train")          # training split
test_dir = os.path.join(lstm_config.data_dir, "test")            # test split
val_dir = os.path.join(lstm_config.data_dir, "val")              # validation split
ids_path = os.path.join(lstm_config.data_dir, "ids_three.json")  # label/id mapping file

# Make sure the checkpoint directory exists before training starts.
os.makedirs(lstm_config.weight_save_dir, exist_ok=True)
weights_save_path = os.path.join(lstm_config.weight_save_dir, "weight_ckpt")

print("loading data...........")
# One reader per split; all three share the label file and sequence limits.
common_reader_args = dict(ids_path=ids_path,
                          feature_len=lstm_config.feature_len,
                          seq_max_len=lstm_config.max_seq_len)
train_reader = DataReader(data_dir=train_dir, **common_reader_args)
test_reader = DataReader(data_dir=test_dir, **common_reader_args)
val_reader = DataReader(data_dir=val_dir, **common_reader_args)
print("success load data!")

# Build the computation graph: LSTM model, checkpoint saver, accuracy metric.
model_params = dict(num_layers=lstm_config.num_layers,
                    hidden_size=lstm_config.hidden_size,
                    max_seq_len=lstm_config.max_seq_len,
                    input_vec_len=lstm_config.input_vec_len,
                    outputs_size=lstm_config.outputs_size,
                    learning_rate=lstm_config.learning_rate,
                    bidirectional=lstm_config.bidirectional,
                    clip_grad=lstm_config.clip_grad)

graph = tf.Graph()
with graph.as_default():
    net = Model(**model_params)
    saver = tf.train.Saver()

    # Accuracy: fraction of samples whose argmax prediction matches the label.
    hits = tf.equal(tf.argmax(net.labels, 1), tf.argmax(net.logits, 1))
    accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

train_loss = []      # per-epoch mean training loss
train_accuracy = []  # per-epoch accuracy over the whole training split
val_loss = []        # per-epoch validation loss
val_accuracy = []    # per-epoch validation accuracy

with tf.Session(graph=graph) as sess:
    # Resume from a saved checkpoint (incremental training) or start fresh.
    if lstm_config.continue_train:
        print("load param from pretrained LSTMModel")
        # BUG FIX: the config attribute is `weight_save_dir` (used when creating
        # the checkpoint directory above); `weights_save_dir` raised AttributeError.
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir=lstm_config.weight_save_dir)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print("build network and program")
        sess.run(tf.global_variables_initializer())  # initialize all variables

    for epoch_id in range(lstm_config.epochs):
        train_batch = train_reader.getBatchData(lstm_config.batch_size)
        print("current pass: {}, start read data".format(epoch_id))
        running_loss = 0  # loss accumulated over the current 10-step window
        epoch_loss = 0    # loss accumulated over the whole epoch
        index = 0         # number of batches seen this epoch
        for i, (batch_data, batch_label) in enumerate(train_batch):
            _, loss = sess.run([net.train_op, net.loss],
                               feed_dict={net.inputs_data: batch_data,
                                          net.labels: batch_label,
                                          net.dropout: lstm_config.dropout})
            # BUG FIX: accumulate epoch_loss every step. The old code folded
            # running_loss into epoch_loss only on every 10th step, so the
            # trailing partial window was dropped from the epoch average.
            running_loss += loss
            epoch_loss += loss
            index += 1
            # Report windowed loss and current-batch accuracy every 10 steps.
            if i % 10 == 9:
                print("{} step loss:{:.5f}  train accuracy:{:.5f}%".format(
                    i + 1, running_loss / 10,
                    sess.run(accuracy,
                             feed_dict={net.inputs_data: batch_data,
                                        net.labels: batch_label,
                                        net.dropout: 1.0}) * 100))
                running_loss = 0
        # Guard against an empty epoch (reader yielded no batches).
        train_loss.append(epoch_loss / index if index else 0.0)
        train_accuracy.append(sess.run(accuracy,
                                       feed_dict={net.inputs_data: train_reader.data,
                                                  net.labels: train_reader.label,
                                                  net.dropout: 1.0}))

        # Validation metrics on the full validation split (dropout disabled).
        val_feed = {net.inputs_data: val_reader.data,
                    net.labels: val_reader.label,
                    net.dropout: 1.0}
        val_cost = sess.run(net.loss, feed_dict=val_feed)
        val_acc = sess.run(accuracy, feed_dict=val_feed)
        val_loss.append(val_cost)
        val_accuracy.append(val_acc)
        print("val loss:{:.5f} accuracy:{:.5f}%".format(val_cost, val_acc * 100))

        # Checkpoint every `save_epoch` epochs.
        if epoch_id % lstm_config.save_epoch == lstm_config.save_epoch - 1:
            print("======================================")
            saver.save(sess, weights_save_path)
            print("temp save {} epoch train result".format(epoch_id + 1))
            print("=======================================")

    # Final evaluation on the held-out test split.
    test_data, test_labels = test_reader.getAllData()
    print("test accuracy:{:.5f}%".format(
        sess.run(accuracy, feed_dict={net.inputs_data: test_data,
                                      net.labels: test_labels,
                                      net.dropout: 1.0}) * 100))

# Persist loss and accuracy curves for this run as PNG figures.
figure_specs = [
    ([train_loss, val_loss], "loss",
     ["train loss", "val loss"], "loss_v2_128_1_09.png"),
    ([train_accuracy, val_accuracy], "accuracy",
     ["train accuracy", "val accuracy"], "accuracy_v2_128_1_09.png"),
]
for curves, title, legends, filename in figure_specs:
    saveEpochFigure(draw_object=curves,
                    x_length=lstm_config.epochs,
                    title=title,
                    label_name=["epoch", title],
                    legend_name=legends,
                    save_path=os.path.join(lstm_config.tmp_dir, filename))



