# 多任务HER2等级，放大倍数检测，基于test2
# （最后是softmax层）

import tensorflow as tf
import numpy as np
from src.utils.read import ReadImg
from src.utils.shuffle import Shuffle
from src.utils.read import Extract
import time

# Input tile dimensions (larger sizes may exhaust GPU memory).
# NOTE(review): "weight" is presumably a typo for "width" — kept as-is
# because it is referenced throughout the file.
weight = 256
height = 256
channel = 3


# -----------------Network construction----------------------
# NOTE: layer/variable creation ORDER must not change — the checkpoint
# restored below matches variables by their auto-generated graph names.

# Placeholders; concrete values are supplied via feed_dict when the graph runs.
x = tf.placeholder(tf.float32, shape=[None, weight, height, channel], name='x')
y1_ = tf.placeholder(tf.float32, shape=[None, 4], name='y1_')  # HER2 score label, one-hot over 4 classes
y2_ = tf.placeholder(tf.float32, shape=[None, 2], name='y2_')  # magnification label, one-hot over 2 classes

# Conv block 1: 5x5 conv, 16 filters, ReLU; 2x2 max-pool halves spatial size.
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=16,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv1)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
print(pool1)

# Conv block 2: 5x5 conv, 32 filters.
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv2)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
print(pool2)

# Conv block 3: 3x3 conv, 64 filters.
conv3 = tf.layers.conv2d(
    inputs=pool2,
    filters=64,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv3)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
print(pool3)

# Conv block 4: 3x3 conv, 128 filters. After four 2x pools a 256x256 input
# is reduced to a 16x16x128 feature map.
conv4 = tf.layers.conv2d(
    inputs=pool3,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print(conv4)
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
print(pool4)

# Flatten the final feature map for the dense layers.
re1 = tf.reshape(pool4, [-1, 16 * 16 * 128])

# Fully connected layers with L2 weight regularization.
dense1 = tf.layers.dense(inputs=re1,
                         units=1024,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
print(dense1)

dense2 = tf.layers.dense(inputs=dense1,
                         units=512,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

# BUG FIX: this script only performs inference (it restores a trained model
# and classifies tiles), so dropout must be a no-op here. The original used
# keep_prob=0.8, which made predictions stochastic and mis-scaled the
# activations at test time.
dropout_dense2 = tf.nn.dropout(dense2, keep_prob=1.0)
print(dropout_dense2)



# softmax层
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape, initialized
    from a truncated normal distribution with stddev 0.1."""
    init_vals = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_vals)


def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, filled with 0.1."""
    init_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_vals)
# Two independent softmax output heads on top of the shared 512-unit trunk.
# Variable creation order must match the checkpoint being restored.

# Head 1 — HER2 score (4 classes).
W_fc1 = weight_variable([512, 4])
b_fc1 = bias_variable([4])
logits1 = tf.matmul(dropout_dense2, W_fc1) + b_fc1
softmax1 = tf.nn.softmax(logits1)
print(softmax1)

# Head 2 — magnification (2 classes).
W_fc2 = weight_variable([512, 2])
b_fc2 = bias_variable([2])
logits2 = tf.matmul(dropout_dense2, W_fc2) + b_fc2
softmax2 = tf.nn.softmax(logits2)
print(softmax2)


# ---------------------------网络结束---------------------------


# ---------------------------Loss / metrics---------------------------

# Cross-entropy loss for each head. BUG FIX: the softmax output is clipped
# away from 0 before the log — the original -sum(y * log(softmax)) form
# produces NaN as soon as any predicted probability underflows to 0 (the
# original comment itself noted NaN losses at higher learning rates).
_EPS = 1e-10
HER2_scores_loss = -tf.reduce_sum(y1_ * tf.log(tf.clip_by_value(softmax1, _EPS, 1.0)))
HER2_magnification_loss = -tf.reduce_sum(y2_ * tf.log(tf.clip_by_value(softmax2, _EPS, 1.0)))

# Joint multi-task loss: unweighted sum of the two heads.
loss = HER2_scores_loss + HER2_magnification_loss

# Adam optimizer (unused in this inference script, but kept so the graph —
# including Adam's slot variables — matches the training-time checkpoint).
train_op = tf.train.AdamOptimizer(learning_rate=0.00005).minimize(loss)

# Per-head accuracy: fraction of samples whose argmax matches the one-hot label.
correct_scores_prediction = tf.equal(tf.argmax(softmax1, 1), tf.argmax(y1_, 1))
HER2_scores_acc = tf.reduce_mean(tf.cast(correct_scores_prediction, tf.float32))

correct_magnification_prediction = tf.equal(tf.argmax(softmax2, 1), tf.argmax(y2_, 1))
HER2_magnification_acc = tf.reduce_mean(tf.cast(correct_magnification_prediction, tf.float32))

# ---------------------------End loss / metrics---------------------------



# ---------------------------会话开始---------------------------
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())


# max_to_keep 参数，这个是用来设置保存模型的个数，默认为5，只保存最新的，就设置1即可
saver=tf.train.Saver(max_to_keep=1)

# 此处路径最后的model是保存的模型的名字的前缀！！！！
use_models_path = '../../data/models/'
classify_img_path = r'G:\formal_data\HER2\ROI\1705206-5'  # 此处是某张切片标注区域切下来的区块集的文件夹
point_txt_path = '../../data/txt/roi/result_roi.txt'
output_txt_path = '../../data/txt/roi/recognized_roi.txt'
recognized_degree = 3  # 此处选择的等级决定了你热图保存哪些等级的坐标点（g0g1g2g3分别对应0123）

# 一次从硬盘中读取多少图片（分类时一张一张读）
read_img_batch_size = 500

# # 调用模型对标注区域的切片进行判定，若满足要求，则写入txt文件中

ri_use = ReadImg(classify_img_path,weight,height)
use_degree_output = []
use_magnification_output = []
all_degree_output = []

# Read the coordinate txt file that accompanies the tiles.
# Each line is "x,y" or "x,y,image_name"; the optional third field (the
# image name) is ignored, and lines with any other field count are skipped.
# FIX: the file is opened with a context manager — the original left the
# handle open for the rest of the script.
with open(point_txt_path) as txt_file:
    all_data_in_txt = txt_file.read()
data_list_in_txt = all_data_in_txt.split("\n")

all_seperate_data = []
for single_data in data_list_in_txt:
    seperate_data = single_data.split(",")
    if len(seperate_data) not in (2, 3):
        continue
    # Keep only the integer x,y coordinates.
    all_seperate_data.append([int(i) for i in seperate_data[:2]])


i = 0  # number of batches processed so far

# Run the restored model over the tiles, batch by batch.
print('*********模型分类运行开始***********')

# Restore the latest checkpoint once, before the loop.
model_file = tf.train.latest_checkpoint(use_models_path)
saver.restore(sess, model_file)

# FIX: build the prediction (argmax) ops once. The original constructed
# tf.argmax(...) inside sess.run on every iteration, adding two new nodes
# to the graph per batch — unbounded graph growth and a steady slowdown.
degree_pred_op = tf.argmax(softmax1, 1)
magnification_pred_op = tf.argmax(softmax2, 1)

for use_HER2_data, use_HER2_degree_label, use_HER2_magnification_label in ri_use.read_single_folder_for_softmax_in_multi_task3(read_img_batch_size):
    use_degree_output, use_magnification_output = sess.run(
        [degree_pred_op, magnification_pred_op],
        feed_dict={x: use_HER2_data, y1_: use_HER2_degree_label, y2_: use_HER2_magnification_label})
    all_degree_output.extend(use_degree_output)
    i += 1
    print(i)
sess.close()

# Write out the coordinates of every tile classified as the selected grade.
# FIX: the output file is opened with a context manager (the original relied
# on a manual close), and zip() guards against a length mismatch between
# predictions and coordinates — the original indexed all_seperate_data[i]
# blindly and would raise IndexError if there were fewer coordinate lines
# than classified tiles.
with open(output_txt_path, 'w') as txt_file:
    for degree, point in zip(all_degree_output, all_seperate_data):
        if degree == recognized_degree:
            txt_file.write(str(point[0]) + ',' + str(point[1]) + '\n')

print('*********模型分类运行结束***********')
# ---------------------------Session end---------------------------
