# 多任务HER2等级，放大倍数检测，基于test2
# （最后是softmax层）

import tensorflow as tf
import numpy as np
from src.utils.read import ReadImg
from src.utils.shuffle import Shuffle
from src.utils.read import Extract
import time

# Input image dimensions (increasing them may exhaust GPU memory).
# NOTE(review): "weight" is presumably a typo for "width" — kept as-is
# because the rest of the file references this name.
weight = 256
height = 256
channel = 3


# ----------------- Build the network ----------------------
# Placeholders: graph inputs whose values are supplied via feed_dict at run time.
#   x   - input image batch, shape (batch, 256, 256, 3)
#   y1_ - one-hot HER2 score label, 4 classes
#   y2_ - one-hot magnification label, 2 classes
x = tf.placeholder(tf.float32, shape=[None, weight,height,channel], name='x')
y1_ = tf.placeholder(tf.float32, shape=[None,4 ], name='y1_')
y2_ = tf.placeholder(tf.float32, shape=[None,2 ], name='y2_')


# tensorboard_image1 = tf.summary.image('yeyex', x)

# Convolutional feature extractor: four conv+pool stages take the
# 256x256x3 input down to a 16x16x128 feature map, followed by two
# fully-connected layers.

# Conv stage 1: 5x5 kernels, 16 filters, ReLU.
# Weights initialized from a truncated normal (stddev 0.01; smaller stddev
# keeps initial activations small).
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=16,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv1)


# (tensorboard experiment, disabled)
# tf.summary.image('conv1', conv1,8)

# 2x2 max pooling, stride 2: 256x256x16 -> 128x128x16
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
print(pool1)


# Conv stage 2: 5x5 kernels, 32 filters.
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv2)

# 2x2 max pooling: 128x128x32 -> 64x64x32
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
print(pool2)


# Conv stage 3: 3x3 kernels, 64 filters.
conv3 = tf.layers.conv2d(
    inputs=pool2,
    filters=64,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv3)

# 2x2 max pooling: 64x64x64 -> 32x32x64
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
print(pool3)


# Conv stage 4: 3x3 kernels, 128 filters.
conv4 = tf.layers.conv2d(
    inputs=pool3,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv4)

# 2x2 max pooling: 32x32x128 -> 16x16x128
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
print(pool4)


# Flatten: 256 / 2^4 = 16, so each image becomes a 16*16*128 feature vector.
re1 = tf.reshape(pool4, [-1, 16 * 16 * 128])


# Fully-connected layer 1: 1024 units, ReLU, L2 weight regularization (0.003).
dense1 = tf.layers.dense(inputs=re1,
                         units=1024,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

print(dense1)


# Fully-connected layer 2: 512 units, shared trunk feeding both output heads.
dense2 = tf.layers.dense(inputs=dense1,
                         units=512,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

# Dropout before the two output heads.
# BUG FIX: the original used keep_prob=0.8 unconditionally. This script only
# restores a checkpoint and evaluates (no training step is ever run), and
# dropout must be disabled at inference time or it injects random noise into
# every prediction. keep_prob=1.0 makes the layer an identity op; the dropout
# node is kept so the graph structure still matches the training script.
dropout_dense2 = tf.nn.dropout(dense2, keep_prob=1.0)

print(dropout_dense2)



# softmax层
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape, initialized
    from a truncated normal distribution with stddev 0.1 (values beyond
    two standard deviations from the mean are re-drawn)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


def bias_variable(shape):
    """Create a trainable bias tensor of the given shape with every
    element initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
# Output head 1: HER2 score classification (4 classes).
# softmax1 holds class probabilities, shape (batch, 4).
W_fc1 = weight_variable([512, 4])
b_fc1 = bias_variable([4])
softmax1 = tf.nn.softmax(tf.matmul(dropout_dense2, W_fc1) + b_fc1)

print(softmax1)


# Output head 2: magnification classification (2 classes).
# softmax2 holds class probabilities, shape (batch, 2).
# NOTE(review): taking log(softmax) downstream can produce NaN when a
# probability saturates to 0; tf.nn.softmax_cross_entropy_with_logits on the
# pre-softmax logits would be the numerically stable formulation.
W_fc2 = weight_variable([512, 2])
b_fc2 = bias_variable([2])
softmax2 = tf.nn.softmax(tf.matmul(dropout_dense2, W_fc2) + b_fc2)

print(softmax2)


# ---------------------------网络结束---------------------------


# --------------------------- Losses and optimizer ---------------------------

# Cross-entropy computed manually from the softmax outputs.
# BUG FIX: tf.log(0) is -inf, so the loss became NaN whenever a softmax
# output saturated to exactly 0 (the original comments already observed NaN
# losses). Clipping the probabilities into [1e-10, 1.0] before the log
# prevents that while leaving ordinary values untouched.
HER2_scores_loss = -tf.reduce_sum(
    y1_ * tf.log(tf.clip_by_value(softmax1, 1e-10, 1.0)))
HER2_magnification_loss = -tf.reduce_sum(
    y2_ * tf.log(tf.clip_by_value(softmax2, 1e-10, 1.0)))

# Multi-task objective: unweighted sum of the two head losses.
loss = HER2_scores_loss + HER2_magnification_loss

# Adam optimizer. Never run in this evaluation-only script, but building it
# keeps the graph's variable set consistent with the training script so the
# checkpoint restores cleanly.
train_op = tf.train.AdamOptimizer(learning_rate=0.00005).minimize(loss)

# Accuracy metrics.
# tf.argmax(..., 1) picks the predicted class per row; comparing it with the
# argmax of the one-hot label yields a boolean per sample, and the mean of
# those booleans (cast to float) is the batch accuracy.
correct_scores_prediction = tf.equal(tf.argmax(softmax1,1), tf.argmax(y1_,1))
HER2_scores_acc = tf.reduce_mean(tf.cast(correct_scores_prediction, tf.float32))

correct_magnification_prediction = tf.equal(tf.argmax(softmax2,1), tf.argmax(y2_,1))
HER2_magnification_acc = tf.reduce_mean(tf.cast(correct_magnification_prediction, tf.float32))
# (tensorboard summary merge, disabled)
# merge = tf.summary.merge_all()

# --------------------------- End of metric ops ---------------------------



# --------------------------- Session: restore and evaluate ---------------------------
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())


# max_to_keep limits how many checkpoints the saver retains; only the latest
# is needed here.
saver = tf.train.Saver(max_to_keep=1)

# Checkpoint directory (the trailing "model" in saved files is the filename prefix).
call_models_path = '../../data/models/'
call_img_path = 'G:/formal_data/HER2/second_generation/val/'

# Number of images read from disk per batch.
read_img_batch_size = 100

ri_call = ReadImg(call_img_path, weight, height)
error, degree_accuracy, magnification_accuracy = 0, 0, 0
call_loss, call_HER2_degree_acc, call_HER2_magnification_acc = 0, 0, 0
call_output1 = []
call_output2 = []
epoch = 0
print('*********模型测试***********')

# BUG FIX: the original called latest_checkpoint + saver.restore inside the
# batch loop, re-reading the whole model from disk for every batch. Restoring
# once before the loop is equivalent and far cheaper.
model_file = tf.train.latest_checkpoint(call_models_path)
saver.restore(sess, model_file)

for call_HER2_data, call_HER2_degree_label, call_HER2_magnification_label in ri_call.read_some_dataSet_for_softmax_in_multi_task3(read_img_batch_size):
    # Evaluate loss, both accuracies, and both probability outputs for the batch.
    call_loss, call_HER2_degree_acc, call_HER2_magnification_acc, call_output1, call_output2 = sess.run(
        [loss, HER2_scores_acc, HER2_magnification_acc, softmax1, softmax2],
        feed_dict={x: call_HER2_data, y1_: call_HER2_degree_label, y2_: call_HER2_magnification_label})
    error += call_loss
    degree_accuracy += call_HER2_degree_acc
    magnification_accuracy += call_HER2_magnification_acc
    epoch += 1
    print('第%d批的统计结果：val_loss:%f, val_degree_acc:%f, val_magnification_acc:%f'%(epoch,call_loss, call_HER2_degree_acc, call_HER2_magnification_acc))


# BUG FIX: guard against ZeroDivisionError when the reader yields no batches.
if epoch > 0:
    print('所有图片的统计结果：val_loss:%f, val_degree_acc:%f, val_magnification_acc:%f' % (error/epoch, degree_accuracy/epoch, magnification_accuracy/epoch))
else:
    print('No validation batches were read.')
print('*********模型测试结束***********')
sess.close()

# ---------------------------会话结束---------------------------