# 多任务HER2等级，放大倍数检测，基于test2
# （最后是一个全连接层（貌似））

import tensorflow as tf
import numpy as np
from src.utils.read import ReadImg
from src.utils.shuffle import Shuffle
from src.utils.read import Extract
import time

# Input image dimensions (larger values may exhaust GPU memory).
# NOTE(review): "weight" is presumably a typo for "width"; kept as-is because
# the name is referenced throughout this file.
weight = 256
height = 256
channel = 3

# Dataset directories (Windows paths, hard-coded).
train_img_path = 'G:/formal_data/HER2/first_generation/train/'
test_img_path = 'G:/formal_data/HER2/first_generation/test/'


# Load the training and test datasets.
ri_train = ReadImg(train_img_path,weight,height)
ri_test = ReadImg(test_img_path,weight,height)

# Returns images plus two label arrays: HER2 degree and magnification.
# NOTE(review): the float arguments (0.2 / 0.5) are passed straight through to
# read_dataSet_in_multi_task2; their meaning (likely a sampling or split ratio)
# cannot be confirmed from this file — verify against the ReadImg implementation.
train_HER2_data, train_HER2_degree_label, train_HER2_magnification_label = ri_train.read_dataSet_in_multi_task2(0.2)
test_HER2_data, test_HER2_degree_label, test_HER2_magnification_label = ri_test.read_dataSet_in_multi_task2(0.5)

train_num_example = train_HER2_data.shape[0]  # number of training images
test_num_example = test_HER2_data.shape[0]  # number of test images

# Shuffle both datasets; shuffle_data3 presumably keeps the three arrays
# aligned (data + both label sets permuted together) — TODO confirm.
sf = Shuffle()
shuffled_train_HER2_data , shuffled_train_HER2_degree_label, shuffled_train_HER2_magnification_label = sf.shuffle_data3(train_HER2_data,train_HER2_degree_label,train_HER2_magnification_label)
shuffled_test_HER2_data , shuffled_test_HER2_degree_label, shuffled_test_HER2_magnification_label = sf.shuffle_data3(test_HER2_data,test_HER2_degree_label,test_HER2_magnification_label)


# Shuffled training and evaluation splits consumed by the graph/session below.
x_train, y1_train, y2_train = (
    shuffled_train_HER2_data,
    shuffled_train_HER2_degree_label,
    shuffled_train_HER2_magnification_label,
)

x_test, y1_test, y2_test = (
    shuffled_test_HER2_data,
    shuffled_test_HER2_degree_label,
    shuffled_test_HER2_magnification_label,
)



# ----------------- Network definition ----------------------
# Placeholders: graph inputs whose values are supplied per step via feed_dict.
x = tf.placeholder(tf.float32, shape=[None, weight,height,channel], name='x')  # input images, NHWC
y1_ = tf.placeholder(tf.int32, shape=[None ], name='y1_')  # HER2 degree class ids (sparse labels)
y2_ = tf.placeholder(tf.int32, shape=[None ], name='y2_')  # magnification class ids (sparse labels)


# tensorboard_image1 = tf.summary.image('yeyex', x)

# ----------------- Network body -----------------
# Four conv+pool stages (16/32/64/128 filters) followed by two dense layers,
# dropout, and two task heads (HER2 degree: 4 classes; magnification: 2).


def _conv_pool(inputs, filters, kernel_size):
    """One conv stage: conv2d (same padding, ReLU) followed by 2x2 max-pool.

    Prints both tensors so the console shows the layer shapes, matching the
    original script's debug output.
    """
    conv = tf.layers.conv2d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        padding="same",
        activation=tf.nn.relu,
        # small stddev keeps the initial weights near zero
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    print(conv)
    pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)
    print(pool)
    return pool


def _dense(inputs, units, activation=None):
    """Fully connected layer with the shared initializer/L2-regularizer setup."""
    return tf.layers.dense(
        inputs=inputs,
        units=units,
        activation=activation,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))


pool1 = _conv_pool(x, 16, [5, 5])
pool2 = _conv_pool(pool1, 32, [5, 5])
pool3 = _conv_pool(pool2, 64, [3, 3])
pool4 = _conv_pool(pool3, 128, [3, 3])

# Flatten: the 256x256 input is halved by each of the four pools -> 16x16
# spatial, 128 channels.
re1 = tf.reshape(pool4, [-1, 16 * 16 * 128])

dense1 = _dense(re1, 1024, activation=tf.nn.relu)
print(dense1)

dense2 = _dense(dense1, 512, activation=tf.nn.relu)

# Dropout before the heads.
# BUG FIX: the original hard-coded keep_prob=0.8, so dropout was also active
# at evaluation time. keep_prob is now a placeholder defaulting to 0.8 —
# behavior is unchanged when nothing is fed (so training is identical), but
# evaluation code can feed {keep_prob: 1.0} to disable dropout.
keep_prob = tf.placeholder_with_default(0.8, shape=[], name='keep_prob')
dropout_dense2 = tf.nn.dropout(dense2, keep_prob=keep_prob)

print(dropout_dense2)

# Task heads: raw logits, no activation (softmax is applied inside the loss).
logits1 = _dense(dropout_dense2, 4)   # HER2 degree scores
logits2 = _dense(dropout_dense2, 2)   # magnification scores

# --------------------------- end network ---------------------------


# --------------------------- Loss and metrics ---------------------------


def _accuracy(logits, labels):
    """Mean fraction of rows where argmax over the logits equals the label."""
    predicted = tf.cast(tf.argmax(logits, 1), tf.int32)
    return tf.reduce_mean(tf.cast(tf.equal(predicted, labels), tf.float32))


# Per-task sparse softmax cross-entropy losses; the joint training objective
# is simply their sum.
HER2_scores_loss = tf.losses.sparse_softmax_cross_entropy(labels=y1_, logits=logits1)
HER2_magnification_loss = tf.losses.sparse_softmax_cross_entropy(labels=y2_, logits=logits2)

loss = HER2_scores_loss + HER2_magnification_loss

# Adam optimizer on the combined loss.
train_op = tf.train.AdamOptimizer(learning_rate=0.0002).minimize(loss)

# Per-task accuracies, each averaged over the batch.
HER2_scores_acc = _accuracy(logits1, y1_)
HER2_magnification_acc = _accuracy(logits2, y2_)

# --------------------------- end loss/metrics ---------------------------



# --------------------------- Training session ---------------------------

n_epoch = 80     # passes over the full training set
batch_size = 40  # images per minibatch

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# To persist the model, create tf.train.Saver(max_to_keep=1) here and call
# saver.save(sess, path, global_step=n_epoch) after the loop.

et = Extract()

for epoch in range(n_epoch):
    start_time = time.time()

    train_loss, train_scores_acc, train_magnification_acc, n_batch = 0, 0, 0, 0

    print("*********第 %s 代***********" % (str(epoch+1)))

    # Training pass: minibatches are re-shuffled each epoch (last arg True).
    for x_train_a, y1_train_a, y2_train_a in et.multi_task_minibatches(x_train, y1_train, y2_train, batch_size, True):
        _, err, train_HER2_scores_ac, train_HER2_magnification_ac = sess.run(
            [train_op, loss, HER2_scores_acc, HER2_magnification_acc],
            feed_dict={x: x_train_a, y1_: y1_train_a, y2_: y2_train_a})

        train_loss += err
        train_scores_acc += train_HER2_scores_ac
        train_magnification_acc += train_HER2_magnification_ac
        n_batch += 1

    print("   train loss: %f" % (train_loss / n_batch))
    print("   train_scores acc: %f" % (train_scores_acc / n_batch))
    print("   train_magnification acc: %f" % (train_magnification_acc / n_batch))

    test_loss, test_scores_acc, test_magnification_acc, n_batch = 0, 0, 0, 0

    # Evaluation pass: train_op is not run, so the weights are not updated.
    # Shuffling the test set does not affect the aggregate metrics.
    # NOTE(review): dropout remains enabled during evaluation (no override is
    # fed for the dropout rate) — confirm this is intended.
    for x_test_a, y1_test_a, y2_test_a in et.multi_task_minibatches(x_test, y1_test, y2_test, batch_size, True):
        test_err, test_HER2_scores_ac, test_HER2_magnification_ac = sess.run(
            [loss, HER2_scores_acc, HER2_magnification_acc],
            feed_dict={x: x_test_a, y1_: y1_test_a, y2_: y2_test_a})

        test_loss += test_err
        test_scores_acc += test_HER2_scores_ac
        test_magnification_acc += test_HER2_magnification_ac
        n_batch += 1

    print("   test loss: %f" % (test_loss / n_batch))
    print("   test_scores acc: %f" % (test_scores_acc / n_batch))
    print("   test_magnification acc: %f" % (test_magnification_acc / n_batch))

    # BUG FIX: start_time was recorded but the elapsed time was never
    # reported; print the per-epoch duration.
    print("   epoch time: %.2f s" % (time.time() - start_time))

sess.close()

# --------------------------- end session ---------------------------