import tensorflow as tf
import numpy as np
from src.utils.read import ReadImg
from src.utils.shuffle import Shuffle
from src.utils.txt import Txt
import time

# -------------------Parameters------------------------
width = 256
height = 256
channel = 3
learning_rate = 0.0002
n_epoch = 100  # number of passes over the whole training set
train_batch_size = 85
val_batch_size = 100

# -------------------Parameters------------------------

# -----------------Build the network----------------------
# Placeholders: graph inputs whose values are supplied via feed_dict at run time.
x = tf.placeholder(tf.float32, shape=[None, width, height, channel], name='x')
y1_ = tf.placeholder(tf.float32, shape=[None, 4], name='y1_')
# BUG FIX: this placeholder was misnamed 'y1_' (copy-paste), colliding with the
# score-label placeholder above (TF silently uniquified it to 'y1__1');
# it now carries its own graph name 'y2_'.
y2_ = tf.placeholder(tf.float32, shape=[None, 3], name='y2_')

# ********Multi_task network: seven layers********

# First convolutional layer.
# A small stddev keeps the initial weights close to zero.
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=16,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv1)

# Pooling layer (halves the spatial resolution).
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
print(pool1)


# Second convolutional layer.
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv2)

# Pooling layer.
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
print(pool2)


# Third convolutional layer.
conv3 = tf.layers.conv2d(
    inputs=pool2,
    filters=64,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv3)

# Pooling layer.
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
print(pool3)


# Fourth convolutional layer.
conv4 = tf.layers.conv2d(
    inputs=pool3,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv4)

# Pooling layer.
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
print(pool4)


# Flatten: after four 2x2 poolings a 256x256 input is 16x16 spatially, 128 channels.
re1 = tf.reshape(pool4, [-1, 16 * 16 * 128])


# Fully connected layers with L2 weight regularization.
dense1 = tf.layers.dense(inputs=re1,
                         units=4096,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

print(dense1)


dense2 = tf.layers.dense(inputs=dense1,
                         units=2048,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

# Dropout on the penultimate layer only (helps when training data is scarce).
# IMPROVEMENT: keep_prob is now a placeholder defaulting to the original
# training value 0.8, so existing callers are unchanged, while evaluation /
# inference code may additionally feed {keep_prob: 1.0} to disable dropout.
# NOTE(review): the original applied dropout at inference time as well,
# which skews predictions — callers should feed 1.0 when not training.
keep_prob = tf.placeholder_with_default(0.8, shape=(), name='keep_prob')
dropout_dense2 = tf.nn.dropout(dense2, keep_prob=keep_prob)

print(dropout_dense2)

# Output heads: two softmax classifiers sharing the same trunk
# (only one is used when running a single task).
W_fc1 = tf.Variable(tf.truncated_normal([2048, 4], stddev=0.1))  # truncated-normal weights, stddev 0.1
b_fc1 = tf.Variable(tf.constant(0.1, shape=[4]))    # bias initialized to 0.1
softmax1 = tf.nn.softmax(tf.matmul(dropout_dense2, W_fc1) + b_fc1)


W_fc2 = tf.Variable(tf.truncated_normal([2048, 3], stddev=0.1))  # truncated-normal weights, stddev 0.1
b_fc2 = tf.Variable(tf.constant(0.1, shape=[3]))    # bias initialized to 0.1
softmax2 = tf.nn.softmax(tf.matmul(dropout_dense2, W_fc2) + b_fc2)

# ---------------------------End of network---------------------------


# ---------------------------Loss / metric definitions---------------------------

# Multi-task network: the two task losses are summed and minimized jointly.
# *****HER2 score head*****
# BUG FIX: tf.log(0) yields -inf and the loss becomes NaN once a softmax
# probability underflows; clipping keeps the log finite while leaving
# ordinary probability values untouched.
cross_entropy1 = tf.reduce_mean(
    -tf.reduce_sum(y1_ * tf.log(tf.clip_by_value(softmax1, 1e-10, 1.0)),
                   reduction_indices=[1]))  # cross-entropy loss
loss1 = cross_entropy1
# argmax over the class axis compares predicted and true class indices.
correct_prediction1 = tf.equal(tf.argmax(softmax1, 1), tf.argmax(y1_, 1))
accuracy1 = tf.reduce_mean(tf.cast(correct_prediction1, tf.float32))   # mean accuracy over the batch
# *****HER2 score head*****

# *****HER2 magnification head*****
cross_entropy2 = tf.reduce_mean(
    -tf.reduce_sum(y2_ * tf.log(tf.clip_by_value(softmax2, 1e-10, 1.0)),
                   reduction_indices=[1]))  # cross-entropy loss
loss2 = cross_entropy2
correct_prediction2 = tf.equal(tf.argmax(softmax2, 1), tf.argmax(y2_, 1))
accuracy2 = tf.reduce_mean(tf.cast(correct_prediction2, tf.float32))   # mean accuracy over the batch
# *****HER2 magnification head*****

# Single Adam optimizer minimizing the sum of both task losses.
train_op_all = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss1 + loss2)


def train_model(training_data_path, testing_data_path):
    """Train the multi-task network and keep the single best checkpoint.

    Runs ``n_epoch`` passes over the training data, evaluates on the test
    data after each epoch, and saves the model whenever BOTH task
    accuracies improve on their running maxima and the losses are finite.

    Args:
        training_data_path: directory of training data read by ReadImg.read_data2.
        testing_data_path: directory of test data read by ReadImg.read_data2.
    """
    # ---------------------------Session start---------------------------
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # max_to_keep=1: only the most recent (best) checkpoint is retained.
    saver = tf.train.Saver(max_to_keep=1)
    ri = ReadImg(width, height)

    train_op = train_op_all
    save_models_path = '../../data/models/Her2_Multi_task_model'

    max_scores_acc = 0
    max_magnification_acc = 0
    for epoch in range(n_epoch):

        # ---- training pass ----
        train_scores_loss, train_magnification_loss, train_scores_acc, train_magnification_acc, train_batch = 0, 0, 0, 0, 0
        print("*********第 %s 代***********" % (str(epoch + 1)))
        for x_train_a, y1_train_a, y2_train_a in ri.read_data2(training_data_path, train_batch_size, True):
            # One optimization step; also fetch per-task loss and accuracy.
            _, train_scores_err, train_magnification_err, train_scores_ac, train_magnification_ac = sess.run(
                [train_op, loss1, loss2, accuracy1, accuracy2],
                feed_dict={x: x_train_a, y1_: y1_train_a, y2_: y2_train_a})

            train_scores_loss += train_scores_err
            train_magnification_loss += train_magnification_err
            train_scores_acc += train_scores_ac
            train_magnification_acc += train_magnification_ac
            train_batch += 1
        print("   train scores loss: %f" % (train_scores_loss / train_batch))
        print("   train scores acc: %f" % (train_scores_acc / train_batch))
        print("   train magnification loss: %f" % (train_magnification_loss / train_batch))
        print("   train magnification acc: %f" % (train_magnification_acc / train_batch))

        test_scores_loss, test_magnification_loss, test_scores_acc, test_magnification_acc, test_batch = 0, 0, 0, 0, 0

        # ---- evaluation pass (no train_op, weights are not updated) ----
        # NOTE(review): this reuses train_batch_size for the test set —
        # confirm val_batch_size was not intended here.
        for x_test_a, y1_test_a, y2_test_a in ri.read_data2(testing_data_path, train_batch_size, True):
            test_scores_err, test_magnification_err, test_scores_ac, test_magnification_ac = sess.run(
                [loss1, loss2, accuracy1, accuracy2],
                feed_dict={x: x_test_a, y1_: y1_test_a, y2_: y2_test_a})
            test_scores_loss += test_scores_err
            test_magnification_loss += test_magnification_err
            test_scores_acc += test_scores_ac
            test_magnification_acc += test_magnification_ac
            test_batch += 1
        print("   test scores loss: %f" % (test_scores_loss / test_batch))
        print("   test scores acc: %f" % (test_scores_acc / test_batch))
        print("   test magnification loss: %f" % (test_magnification_loss / test_batch))
        print("   test magnification acc: %f" % (test_magnification_acc / test_batch))

        # ---- checkpoint the best model ----
        avg_scores_acc = train_scores_acc / train_batch
        avg_magnification_acc = train_magnification_acc / train_batch
        # BUG FIX: the original compared floats against the string 'nan'
        # (always unequal, so the NaN guard never fired) and divided the
        # magnification loss by test_batch instead of train_batch; use
        # np.isnan on the correctly averaged training losses.
        if (avg_scores_acc > max_scores_acc
                and avg_magnification_acc > max_magnification_acc
                and not np.isnan(train_scores_loss / train_batch)
                and not np.isnan(train_magnification_loss / train_batch)):
            max_scores_acc = avg_scores_acc
            max_magnification_acc = avg_magnification_acc
            saver.save(sess, save_models_path, global_step=epoch + 1)

    sess.close()

def run_model(model_path, val_data_path, kind):
    """Restore the latest checkpoint and evaluate one task on validation data.

    Args:
        model_path: directory containing the checkpoint files.
        val_data_path: directory of validation data read by ReadImg.read_data.
        kind: 1 -> HER2 score head, 2 -> magnification head.

    Returns:
        (original_labels, predicted_labels): two flat lists of class indices.

    Raises:
        ValueError: if ``kind`` is neither 1 nor 2.
    """
    # ---------------------------Session start---------------------------
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=1)  # keep only the latest checkpoint

    ri = ReadImg(width, height)
    error, ac = 0, 0
    call_loss, call_acc = 0, 0
    call_batch_output = []
    call_original_label = []
    call_result_label = []
    epoch = 0

    # Select the graph nodes belonging to the requested task.
    if kind == 1:
        loss = loss1
        accuracy = accuracy1
        softmax = tf.argmax(softmax1, 1)
    elif kind == 2:
        loss = loss2
        accuracy = accuracy2
        softmax = tf.argmax(softmax2, 1)
    else:
        # BUG FIX: an unknown kind previously fell through silently and
        # crashed later with a NameError on `loss`; fail fast instead.
        raise ValueError('kind must be 1 or 2, got %r' % (kind,))

    print('*********模型测试***********')
    model_file = tf.train.latest_checkpoint(model_path)
    saver.restore(sess, model_file)

    for call_HER2_data, call_HER2_label in ri.read_data(val_data_path, val_batch_size, kind, False):

        # Record each ground-truth class index (position of the max in the one-hot row).
        call_HER2_label_list = call_HER2_label.tolist()
        for single_label in call_HER2_label_list:
            max_index = single_label.index(max(single_label))
            call_original_label.append(max_index)

        if kind == 1:
            call_loss, call_acc, call_batch_output = sess.run(
                [loss, accuracy, softmax],
                feed_dict={x: call_HER2_data, y1_: call_HER2_label})
        elif kind == 2:
            call_loss, call_acc, call_batch_output = sess.run(
                [loss, accuracy, softmax],
                feed_dict={x: call_HER2_data, y2_: call_HER2_label})

        error += call_loss
        ac += call_acc
        epoch += 1
        call_result_label.extend(call_batch_output)
        print('第%d批的统计结果：val_loss:%f, val_acc:%f' % (epoch, call_loss, call_acc))

    print('所有图片的统计结果：val_loss:%f, val_acc:%f' % (error / epoch, ac / epoch))
    print('*********模型测试结束***********')
    sess.close()

    return call_original_label, call_result_label

def use_model(model_path, classify_img_path, degree0_pro_txt_path=None, degree1_pro_txt_path=None, degree2_pro_txt_path=None, degree3_pro_txt_path=None):
    """Classify a folder of image patches and dump per-degree probabilities to txt.

    Args:
        model_path: directory containing the checkpoint files.
        classify_img_path: folder of patches cut from one annotated slide region.
        degree0_pro_txt_path..degree3_pro_txt_path: optional output paths, one
            per HER2 degree; a file is written only when its path is not None.
    """
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=1)  # keep only the latest checkpoint

    ri = ReadImg(width, height)
    txt = Txt()
    all_degree_output = []

    i = 0  # batch counter (progress indicator only)

    print('*********模型分类运行开始***********')

    model_file = tf.train.latest_checkpoint(model_path)
    saver.restore(sess, model_file)

    for use_HER2_data in ri.read_data3(classify_img_path, val_batch_size, False):
        # Only the HER2-score head (softmax1) is evaluated here.
        use_degree_output = sess.run([softmax1], feed_dict={x: use_HER2_data})

        use_degree_output_list = use_degree_output[0].tolist()
        all_degree_output.extend(use_degree_output_list)
        i += 1
        print(i)
    sess.close()

    # Write one txt file per HER2 degree with that degree's probability column.
    # IDIOM FIX: compare against None with `is not None`, not `!=`.
    if degree0_pro_txt_path is not None:
        txt.write1(degree0_pro_txt_path, all_degree_output, 1)
    if degree1_pro_txt_path is not None:
        txt.write1(degree1_pro_txt_path, all_degree_output, 2)
    if degree2_pro_txt_path is not None:
        txt.write1(degree2_pro_txt_path, all_degree_output, 3)
    if degree3_pro_txt_path is not None:
        txt.write1(degree3_pro_txt_path, all_degree_output, 4)

    print('*********模型分类运行结束***********')


def save_result(original_label, result_label, kind):
    """Write true and predicted label lists to the task-specific txt files.

    Args:
        original_label: iterable of ground-truth class indices.
        result_label: iterable of predicted class indices.
        kind: 1 -> HER2 score task, 2 -> magnification task.

    Raises:
        ValueError: if ``kind`` is neither 1 nor 2.
    """
    if kind == 1:
        original_label_txt = '../../data/txt/model_result/Multi_task_original_scores_label.txt'
        result_label_txt = '../../data/txt/model_result/Multi_task_result_scores_label.txt'
    elif kind == 2:
        original_label_txt = '../../data/txt/model_result/Multi_task_original_magnification_label.txt'
        result_label_txt = '../../data/txt/model_result/Multi_task_result_magnification_label.txt'
    else:
        # BUG FIX: an unknown kind previously crashed with UnboundLocalError
        # at the open() below; fail fast with a clear message instead.
        raise ValueError('kind must be 1 or 2, got %r' % (kind,))

    # `with` guarantees the files are closed even if a write fails.
    with open(original_label_txt, 'w') as f:
        f.writelines(str(i) + '\n' for i in original_label)

    with open(result_label_txt, 'w') as f:
        f.writelines(str(i) + '\n' for i in result_label)

if __name__ == '__main__':
    # Which task the single-task helpers operate on (1 = HER2 score, 2 = magnification).
    kind = 1

    # --- Train the network ---
    # training_data_path = 'K:/formal_data/HER2/thrid_generation/train/'
    # testing_data_path = 'K:/formal_data/HER2/thrid_generation/test/'
    # train_model(training_data_path, testing_data_path)

    # --- Evaluate a trained model on patches and save true/predicted labels as txt ---
    # val_data_path = 'K:/formal_data/HER2/thrid_generation/val/'
    # model_path = '../../data/models/'
    # original_label, result_label = run_model(model_path, val_data_path, kind)
    # save_result(original_label, result_label, kind)

    # --- Run the model and store the per-degree HER2 probability files ---
    model_path = '../../data/models/'
    # Folder holding the patches cut from one annotated slide region.
    classify_img_path = r'K:\Original_Data\patches\1705206-5'
    degree_txt_paths = [
        '../../data/txt/roi/Multi_task_degree%d.txt' % level for level in range(4)
    ]
    use_model(model_path, classify_img_path, *degree_txt_paths)

