import sys

sys.path.append("/home/zxh/otu_classifier/")
from src.datas.dataPreprocessing import process_train_test_val, production_train_test_val
from src.datas.crossValidation import fractionaldata
from src.utils.fileUtils import del_model_and_logs
from src.datas.segmentationLabelsAndSample import labelAndSample
from sklearn.model_selection import train_test_split
import tensorflow as tf
import pickle
import numpy as np
import os

from src.config import config, params
from src.datas import OtuData, dataPreprocessing
from src.network.SENet import SeNet_model

del_model_and_logs()  # remove any previously saved model and logs files
train_log_dir, test_log_dir, val_log_dir = OtuData.get_logs_dir()

tf.logging.set_verbosity(tf.logging.INFO)  # enable INFO-level log output

hps = params.get_default_params()

# Load the pre-processed datasets. Context managers ensure every pickle
# file handle is closed (the original `pickle.load(open(...))` pattern
# leaked the handles).
with open(config.all_data_pkl, 'rb') as f:
    _, result_2, result_3, result_4, result_5, num_classes = pickle.load(f)
with open(config.train_pkl, 'rb') as f:
    X_train_1, y_train_1 = pickle.load(f)
with open(config.test_pkl, 'rb') as f:
    X_test_1, y_test_1 = pickle.load(f)

# Build train/test splits for the four alternative datasets used in the
# per-epoch rotation below.
X_train_2, X_test_2, y_train_2, y_test_2 = production_train_test_val(None, None, result_2)
X_train_3, X_test_3, y_train_3, y_test_3 = production_train_test_val(None, None, result_3)
X_train_4, X_test_4, y_train_4, y_test_4 = production_train_test_val(None, None, result_4)
X_train_5, X_test_5, y_train_5, y_test_5 = production_train_test_val(None, None, result_5)

# The SE-Net graph is parameterised by the second and third axes of the
# training tensor (per-sample feature dimensions).
dim_1, dim_2 = X_train_1.shape[1], X_train_1.shape[2]
placeholders, metrics, others = SeNet_model(hps, dim_1, dim_2)

# Unpack the graph handles returned by SeNet_model.
inputs, outputs, keep_prob, is_training = placeholders  # feed targets
loss, accuracy = metrics                                # evaluation tensors
train_op, global_step, merged_summary, merged_summary_test = others

# Variable-initialisation op and a saver for writing checkpoints.
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init_op)

    # TensorBoard writers: the train writer also records the graph itself.
    train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    test_writer = tf.summary.FileWriter(test_log_dir)
    # NOTE(review): range(1, 2) runs exactly one epoch; the trailing
    # "1,100" comment suggests range(1, 100) was intended -- confirm.
    for epoch in range(1, 2): # 1,100
        # Rotate between the five datasets based on the epoch number.
        # NOTE(review): because % 2 is tested first, every even epoch picks
        # dataset 2, and the % 4 branch is unreachable (any multiple of 4 is
        # even), so dataset 4 is never selected -- confirm this is intended.
        if epoch % 2 == 0:
            X_train, X_test, y_train, y_test = X_train_2, X_test_2, y_train_2, y_test_2
        elif epoch % 3 == 0:
            X_train, X_test, y_train, y_test = X_train_3, X_test_3, y_train_3, y_test_3
        elif epoch % 4 == 0:
            X_train, X_test, y_train, y_test = X_train_4, X_test_4, y_train_4, y_test_4
        elif epoch % 5 == 0:
            X_train, X_test, y_train, y_test = X_train_5, X_test_5, y_train_5, y_test_5
        else:
            X_train, X_test, y_train, y_test = X_train_1, X_test_1, y_train_1, y_test_1

        # Separate labels from sample ids for this epoch's dataset.
        y_train_lables, y_train_sample = labelAndSample(y_train)
        y_test_lables, y_test_sample = labelAndSample(y_test)

        hps.num_classes = num_classes
        train_dataset = OtuData.OtuData(
            X_train, y_train_lables, result_sample_id=y_train_sample)
        test_dataset = OtuData.OtuData(
            X_test, y_test_lables, result_sample_id=y_test_sample)

        for step in range(params.num_train_steps):
            # Fetch the next training and test batches.
            batch_inputs, batch_labels, _ = train_dataset.next_batch(
                hps.batch_size)
            test_inputs, test_labels, _ = test_dataset.next_batch(hps.batch_size)

            eval_ops = [loss, accuracy, train_op, global_step]
            # Emit TensorBoard summaries every `output_summary_every_steps` steps.
            should_output_summary = ((step + 1) % params.output_summary_every_steps == 0)
            if should_output_summary:
                eval_ops.append(merged_summary)
            # One optimisation step: run loss/accuracy/train_op in a single
            # session call (plus the summary op when scheduled).
            outputs_val = sess.run(eval_ops,
                                   feed_dict={
                                       inputs: batch_inputs,
                                       outputs: batch_labels,
                                       keep_prob: params.train_keep_prob_value,
                                       is_training: True
                                   })
            loss_val, accuracy_val, _, global_step_val = outputs_val[0:4]
            if should_output_summary:
                train_summary_str = outputs_val[-1]
                # Step counter continuous across epochs, so TensorBoard curves
                # do not restart at every epoch.
                global_step_tmp = params.num_train_steps * (epoch - 1) + step + 1
                train_writer.add_summary(train_summary_str, global_step_tmp)

                # Evaluate on one test batch with dropout disabled
                # (test keep-prob, is_training=False).
                accuracy_test, test_summary_str = sess.run([accuracy, merged_summary_test],
                                                           feed_dict={
                                                               inputs: test_inputs,
                                                               outputs: test_labels,
                                                               keep_prob: params.test_keep_prob_value,
                                                               is_training: False
                                                           })
                test_writer.add_summary(test_summary_str, global_step_tmp)
                print("测试数据的准确率：{}".format(accuracy_test))
            if global_step_val % 20 == 0:
                tf.logging.info("Step: %5d, loss: %3.3f, accuracy: %3.3f"
                                % (global_step_val, loss_val, accuracy_val))

            if step % 100 == 0:
                # Persist a checkpoint of the model variables (also fires at
                # step 0, before any training has happened).
                # saver.save(sess, os.path.join(config.latest_model_path, 'okp-%05d' % (step + 1)))
                saver.save(sess, config.save_path)
    train_writer.close()
    test_writer.close()
