import sys

sys.path.append("/home/zxh/otu_classifier/")
from src.utils.figureUtils import figureRoc
from src.datas.segmentationLabelsAndSample import labelAndSample
import tensorflow as tf
import pickle
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

from src.config import config, params
from src.datas import OtuData
import pandas as pd


def print_result(y_true, y_predict):
    """Print the confusion matrix and per-class ratios for a 3-class task.

    NOTE(review): each ratio divides a diagonal entry by its *column* sum,
    i.e. it is a precision-like (predicted-class-normalized) value, even
    though the printed labels say specificity/sensitivity — confirm which
    metric is intended. An empty column produces a NaN with a warning.
    """
    con_matrix = confusion_matrix(y_true, y_predict)
    # Diagonal entry over its column total, for each of the three classes.
    per_class_ratio = [
        con_matrix[c][c] / (con_matrix[0][c] + con_matrix[1][c] + con_matrix[2][c])
        for c in range(3)
    ]
    y_specificity_n, y_sensitivity_a, y_specificity_c = per_class_ratio
    print(con_matrix)
    print("特异性n:{}".format(y_specificity_n))
    print("敏感性a:{}".format(y_sensitivity_a))
    print("c:{}".format(y_specificity_c))
    # Complements of the three ratios (error rates per predicted class).
    for ratio in per_class_ratio:
        print(1 - ratio)
    print(accuracy_score(y_true, y_predict))


# Training hyper-parameters; needed here to pad each single test sample up
# to the trained batch size (see the `+ np.zeros(...)` broadcast below).
hps = params.get_default_params()

tf.reset_default_graph()
init = tf.global_variables_initializer()

# Validation set pickled as (features, labels); labelAndSample splits the
# raw labels into class labels and per-sample ids.
# NOTE(review): the pickle file handle is never closed — consider `with`.
X_test, y_test = pickle.load(open(config.val_2_pkl, 'rb'))
y_test_lables, y_test_sample = labelAndSample(y_test)

test_dataset = OtuData.OtuData(
    X_test, y_test_lables, result_sample_id=y_test_sample)

# Accumulators filled by the per-sample evaluation loop below.
y_predict = []            # predicted class index per sample
test_labels_list = []     # ground-truth class index per sample
test_sample_id_list = []  # sample id per sample (used for per-source grouping)
roc_scores = []           # raw network output rows, used as ROC scores
roc_test = []             # one-hot ground-truth rows for the ROC plot
with tf.Session() as sess:
    sess.run(init)
    # Restore the most recent checkpoint: graph structure from the .meta
    # file, then the trained variable values.
    latest_file = tf.train.latest_checkpoint(config.latest_model_path)
    saver = tf.train.import_meta_graph(latest_file + '.meta')
    saver.restore(sess, latest_file)

    # Tensor handles looked up by name in the restored graph.
    # NOTE(review): the tensor name is model-version specific — the trailing
    # comment lists an alternative name used by another checkpoint layout.
    y_probability = sess.graph.get_tensor_by_name("fully_connected_11/Relu:0") #fully_connected_11/Relu:0 fc/fc2/BiasAdd:0
    y_pred = sess.graph.get_tensor_by_name("y_pred_model:0")

    # Evaluate one sample at a time.
    for i in range(X_test.shape[0]):
        test_inputs, test_labels, test_sample_id = test_dataset.next_batch(1)
        # Broadcast the single sample to a full batch of identical rows so
        # the feed matches the batch size the graph was trained with.
        test_inputs = test_inputs + np.zeros(
            (hps.batch_size, test_inputs.shape[1], test_inputs.shape[2], test_inputs.shape[3]), dtype="float32")
        y_pred_val, y_probability_val = sess.run([y_pred, y_probability],
                                                 feed_dict={"inputs:0": test_inputs,
                                                            "keep_prob:0": params.test_keep_prob_value,
                                                            "is_training:0":False})
        # Per-sample log line: id, first two class scores, prediction, truth.
        print("{}\t{}\t{}\t{}\t{}".format(test_sample_id[0], y_probability_val[0][0], y_probability_val[0][1],
                                          y_pred_val[0], test_labels[0]))
        # One-hot encode the ground-truth label for the ROC plot.
        if test_labels[0] == 0:
            roc_test.append([1, 0, 0])
        elif test_labels[0] == 1:
            roc_test.append([0, 1, 0])
        else:
            roc_test.append([0, 0, 1])
        roc_scores.append(y_probability_val[0].tolist())
        test_labels_list.append(test_labels[0])
        y_predict.append(y_pred_val[0])
        test_sample_id_list.append(test_sample_id[0])
# Overall metrics across the whole validation set.
print("汇总")
print_result(test_labels_list, y_predict)

# Per-source breakdown: group each test sample by the data source recorded
# in the mapping file, then report metrics for every source.
mapData = pd.read_csv(config.map_file, sep="\t", header=0)
sample_id_map_list_mi = mapData.loc[:, ["#SampleID", "Type", "source"]].values  # sample id / class / source mapping

# Build the sample-id -> source lookup ONCE. The original rescanned the
# whole mapping table for every test sample (O(N*M)) and, when an id was
# missing, left `source` unbound (NameError on the first sample) or stale
# from the previous iteration, silently mis-grouping samples.
sample_to_source = dict(zip(mapData["#SampleID"], mapData["source"]))

# Deterministic source order (the original iterated an unordered set).
sources = sorted(set(mapData["source"].tolist()))
all_sample_result = [{"key": s, "y_true": [], "y_pred": []} for s in sources]
results_by_source = {entry["key"]: entry for entry in all_sample_result}

for labels, predict, sample_id in zip(test_labels_list, y_predict, test_sample_id_list):
    source = sample_to_source.get(sample_id)
    if source is None:
        # Sample id absent from the map file: report and skip instead of
        # silently reusing the previous sample's source.
        print("sample id {} not found in map file, skipped".format(sample_id))
        continue
    entry = results_by_source[source]
    entry["y_true"].append(labels)
    entry["y_pred"].append(predict)

for result in all_sample_result:
    if len(result["y_true"]) > 6:
        # Enough samples for the full confusion-matrix report.
        print("数据集{}，的结果：".format(result["key"]))
        print_result(result["y_true"], result["y_pred"])
    elif len(result["y_true"]) > 0:
        # Too few samples for a stable confusion matrix; accuracy only.
        print("数据集{}，的结果：".format(result["key"]))
        print("准确率：{}".format(accuracy_score(result["y_true"], result["y_pred"])))

# 3-class ROC curves from one-hot ground truth and raw network scores.
figureRoc(3, np.array(roc_test), np.array(roc_scores))
