import sys

# Raw string: the original non-raw literal only worked because "\p" is not a
# recognized escape; that raises a SyntaxWarning on modern Python and breaks
# silently if the path ever contains "\n", "\t", etc.
sys.path.append(r"G:\pycharm-workspace\precisiongene\crcmodel")
import tensorflow as tf
import pickle
import numpy as np

from src.config import config, params
from src.datas import OtuData
from src.network import network

# TensorBoard log directories for the train / test / validation splits.
train_log_dir,test_log_dir,val_log_dir = OtuData.get_logs_dir()

tf.logging.set_verbosity(tf.logging.INFO)  # print INFO-level logs

# Default hyperparameters (batch_size, num_timesteps, ...) — see params module.
hps = params.get_default_params()

# Load (features, labels) arrays for each split. Context managers guarantee
# the pickle file handles are closed even if deserialization raises
# (the original left them open).
with open(config.train_pkl, 'rb') as f:
    X_train, y_train = pickle.load(f)
with open(config.test_pkl, 'rb') as f:
    X_test, y_test = pickle.load(f)
# val data (enable when a validation split is available):
# with open(config.val_pkl, 'rb') as f:
#     X_val, y_val = pickle.load(f)

# Evaluate on at most one full batch of test data.
test_batch_size = min(X_test.shape[0], hps.batch_size)

# Input width of the model is the number of features per sample
# (assumes X_train is 2-D: samples x features — TODO confirm against OtuData).
hps.num_timesteps = X_train.shape[1]

# Wrap the raw arrays in dataset objects that expose next_batch() sampling.
train_dataset = OtuData.OtuData(
    X_train, y_train)
test_dataset = OtuData.OtuData(
    X_test, y_test)
"""
val data
val_dataset = OtuData.OtuData(
    X_val, y_val)
"""

# Build the CNN graph; tensors are returned grouped by role.
placeholders, metrics, others = network.create_cnn_model(
    hps, params.num_classes)

# inputs/outputs are the feed placeholders; keep_prob controls dropout.
inputs, outputs, keep_prob = placeholders
loss, accuracy = metrics
train_op, global_step, merged_summary,merged_summary_test = others

init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    test_writer = tf.summary.FileWriter(test_log_dir)
    val_writer = tf.summary.FileWriter(val_log_dir)
    # Create the Saver ONCE. Instantiating tf.train.Saver() inside the loop
    # (as the original did every 100 steps) adds a fresh set of save/restore
    # ops to the graph on each call, bloating the graph over a long run.
    saver = tf.train.Saver()
    for i in range(params.num_train_steps):
        # Fetch the next batch of training and test data.
        batch_inputs, batch_labels = train_dataset.next_batch(
            hps.batch_size)
        test_inputs, test_labels = test_dataset.next_batch(test_batch_size)
        if test_inputs.shape[0] < hps.batch_size:
            # Pad a short test batch up to batch_size by repeating its last
            # sample, so the fixed-size placeholders can still be fed.
            # (Equivalent to the original np.insert loop: the inserted rows
            # equal the last row, so the resulting array is identical.)
            pad = hps.batch_size - test_inputs.shape[0]
            test_inputs = np.concatenate(
                [test_inputs, np.repeat(test_inputs[-1:], pad, axis=0)], axis=0)
            test_labels = np.concatenate(
                [test_labels, np.repeat(test_labels[-1:], pad, axis=0)], axis=0)
        # val data (enable when a validation split is available):
        # val_inputs, val_labels = val_dataset.next_batch(hps.batch_size)

        eval_ops = [loss, accuracy, train_op, global_step]
        should_output_summary = ((i + 1) % params.output_summary_every_steps == 0)
        if should_output_summary:
            eval_ops.append(merged_summary)
        outputs_val = sess.run(eval_ops,
                               feed_dict={
                                   inputs: batch_inputs,
                                   outputs: batch_labels,
                                   keep_prob: params.train_keep_prob_value,
                               })
        loss_val, accuracy_val, _, global_step_val = outputs_val[0:4]
        if should_output_summary:
            train_summary_str = outputs_val[-1]
            train_writer.add_summary(train_summary_str, i + 1)

            # Periodically evaluate on the (padded) test batch; dropout is
            # disabled via the test keep-prob value.
            accuracy_test, test_summary_str = sess.run(
                [accuracy, merged_summary_test],
                feed_dict={
                    inputs: test_inputs,
                    outputs: test_labels,
                    keep_prob: params.test_keep_prob_value,
                })
            test_writer.add_summary(test_summary_str, i + 1)
            print("测试数据的准确率：{}".format(accuracy_test))
            # val data (enable when a validation split is available). NOTE:
            # the original draft named this result accuracy_val, which would
            # clobber the training accuracy logged below -- keep the rename.
            # val_accuracy, val_summary_str = sess.run(
            #     [accuracy, merged_summary_test],
            #     feed_dict={
            #         inputs: val_inputs,
            #         outputs: val_labels,
            #         keep_prob: params.test_keep_prob_value,
            #     })
            # val_writer.add_summary(val_summary_str, i + 1)
            # print("测试真实数据的准确率：{}".format(val_accuracy))
        if global_step_val % 20 == 0:
            tf.logging.info("Step: %5d, loss: %3.3f, accuracy: %3.3f"
                            % (global_step_val, loss_val, accuracy_val))

        # Checkpoint every 100 iterations (including step 0).
        if i % 100 == 0:
            saver.save(sess, config.save_path)
    train_writer.close()
    test_writer.close()
    val_writer.close()  # was leaked in the original (never closed)

