# 单任务HER2等级，HER2分数检测
# （根据网上代码和LENET5结构推导的LENET5结构）

import tensorflow as tf
import numpy as np
from src.utils.read import ReadImg
from src.utils.shuffle import Shuffle
from src.utils.read import Extract
import time

# Image side length in pixels (larger values may run out of GPU memory).
# NOTE(review): "weight" is presumably a typo for "width" — kept because
# it is referenced throughout the file.
weight = 256
height = 256
channel = 3

# Image directories
train_img_path = 'G:/formal_data/HER2/first_generation/train/20X/'
test_img_path = 'G:/formal_data/HER2/first_generation/test/20X/'


# Datasets to train and test on.
# The float argument below is presumably the fraction of the directory to
# load — TODO confirm against ReadImg.read_some_dataSet_for_softmax_in_single_task2.
ri_train = ReadImg(train_img_path,weight,height)
ri_test = ReadImg(test_img_path,weight,height)

train_HER2_data, train_HER2_degree_label = ri_train.read_some_dataSet_for_softmax_in_single_task2(0.2)
test_HER2_data, test_HER2_degree_label = ri_test.read_some_dataSet_for_softmax_in_single_task2(0.5)

train_num_example = train_HER2_data.shape[0]  # number of training images
test_num_example = test_HER2_data.shape[0]  # number of test images

# Shuffle both sets once up front (minibatching below shuffles again).
sf = Shuffle()
shuffled_train_HER2_data, shuffled_train_HER2_degree_label = sf.shuffle_data2(train_HER2_data,train_HER2_degree_label)
shuffled_test_HER2_data, shuffled_test_HER2_degree_label = sf.shuffle_data2(test_HER2_data,test_HER2_degree_label)


# Shuffled train / test splits actually used by the rest of the script.
x_train = shuffled_train_HER2_data
y1_train = shuffled_train_HER2_degree_label

x_test = shuffled_test_HER2_data
y1_test = shuffled_test_HER2_degree_label

# Un-shuffled alternative kept by the original author:
# x_train = train_HER2_data
# y1_train = train_HER2_label
#
# x_test = test_HER2_data
# y1_test = test_HER2_label



# ----------------- Build the network ----------------------
# Placeholders receive their values through feed_dict when the graph runs.
# x: input image batch, NHWC layout (batch, 256, 256, 3).
x = tf.placeholder(tf.float32, shape=[None, weight,height,channel], name='x')
# y1_: one-hot HER2 degree label over 4 classes.
y1_ = tf.placeholder(tf.float32, shape=[None,4 ], name='y1_')

# tensorboard_image1 = tf.summary.image('yeyex', x)

# ******** LeNet-5-style architecture (seven layers) ********

# First convolution layer: 5x5 kernels, 6 filters, ReLU, 'same' padding.
# stddev controls the spread of the randomly initialised weights.
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=6,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
    bias_initializer= tf.constant_initializer(0.0))
print(conv1)


# tensorboard experiment
# tf.summary.image('conv1', conv1,8)

# 2x2 max pooling, stride 2: halves the spatial size (256 -> 128).
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding='same')
print(pool1)


# Second convolution layer: 5x5 kernels, 16 filters.
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=16,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
    bias_initializer=tf.constant_initializer(0.0))
print(conv2)

# Second pooling: 128 -> 64.
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, padding='same')
print(pool2)



# Flatten the feature maps: 64 * 64 * 16 matches two stride-2 poolings of a
# 256x256 input, so this constant silently depends on weight = height = 256.
re1 = tf.reshape(pool2, [-1, 64 * 64 * 16])


# First fully connected layer, 120 units.
dense1 = tf.layers.dense(inputs=re1,
                         units=120,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                         bias_initializer=tf.constant_initializer(0.1))

print(dense1)

# NOTE(review): keep_prob is hard-coded, so dropout is also active during
# the evaluation loop below; feeding a keep_prob placeholder (1.0 at test
# time) would fix that, but requires a matching change in the session loop.
dropout_dense1 = tf.nn.dropout(dense1,keep_prob=0.8)

print(dropout_dense1)


# Second fully connected layer, 84 units.
dense2 = tf.layers.dense(inputs=dropout_dense1,
                         units=84,
                         activation=tf.nn.relu,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                         bias_initializer=tf.constant_initializer(0.1))

# 最后一层
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape.

    Values are drawn from a truncated normal distribution with mean 0 and
    standard deviation 0.1 (samples beyond two stddevs are redrawn).
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, filled with 0.1."""
    values = tf.constant(0.1, shape=shape)
    return tf.Variable(values)
# Output layer: fully connected 84 -> 4 (one logit per HER2 degree class).
W_fc1 = weight_variable([84, 4])
b_fc1 = bias_variable([4])
logits1 = tf.matmul(dense2, W_fc1) + b_fc1
# Class probabilities; used only for the accuracy metric below.
softmax1 = tf.nn.softmax(logits1)

# --------------------------- end of network ---------------------------


# --------------------------- Loss and metrics ---------------------------

# Cross-entropy computed from the raw logits with the fused TF op.
# The previous hand-rolled form
#     -reduce_sum(y1_ * log(softmax1))
# produces NaN as soon as any predicted probability underflows to 0;
# the fused op is numerically stable and mathematically equivalent.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y1_, logits=logits1))

loss = cross_entropy

# Adam optimizer; other gradient-descent variants would also work here.
train_op = tf.train.AdamOptimizer(learning_rate=0.0002).minimize(loss)

# A prediction is correct when the argmax of the predicted probabilities
# matches the argmax of the one-hot label.
correct_prediction = tf.equal(tf.argmax(softmax1, 1), tf.argmax(y1_, 1))

# Fraction of correct predictions in the batch.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Collect all tensorboard summary nodes
# merge = tf.summary.merge_all()

# --------------------------- end of loss and metrics ---------------------------



# --------------------------- Session ---------------------------

# Run the training and evaluation loops; n_epoch can be raised for longer runs.

n_epoch = 80       # number of full passes over the training set
batch_size = 40
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())


# max_to_keep: how many checkpoints to retain; 1 keeps only the newest.
# saver=tf.train.Saver(max_to_keep=1)

# Destination for tensorboard summary files.
# summary_dir = 'I:/pyCharmProjectSet/deepLearning/advanced_research/src/main/prototype/logs/'
# summary_writer = tf.summary.FileWriter(logdir=summary_dir, graph=sess.graph)

et = Extract()

for epoch in range(n_epoch):
    start_time = time.time()

    print("*********第 %s 代***********" % (str(epoch+1)))

    # ---- training pass: optimise and accumulate per-batch loss/accuracy ----
    train_loss, train_scores_acc, n_batch = 0, 0, 0
    for x_train_a, y1_train_a in et.minibatches(x_train, y1_train, batch_size, True):
        _, err, train_HER2_scores_ac = sess.run(
            [train_op, loss, accuracy],
            feed_dict={x: x_train_a, y1_: y1_train_a})

        train_loss += err
        train_scores_acc += train_HER2_scores_ac
        n_batch += 1

        # summary_writer.add_summary(summary)

    print("   train loss: %f" % (train_loss / n_batch))
    print("   train_scores acc: %f" % (train_scores_acc / n_batch))

    # ---- evaluation pass: loss/accuracy only, no optimisation step ----
    # (shuffling the test minibatches does not change the aggregate metrics)
    test_loss, test_scores_acc, n_batch = 0, 0, 0
    for x_test_a, y1_test_a in et.minibatches(x_test, y1_test, batch_size, True):
        test_err, test_HER2_scores_ac = sess.run(
            [loss, accuracy],
            feed_dict={x: x_test_a, y1_: y1_test_a})

        test_loss += test_err
        test_scores_acc += test_HER2_scores_ac
        n_batch += 1

        # summary_writer.add_summary(summary)

    print("   test loss: %f" % (test_loss / n_batch))
    print("   test_scores acc: %f" % (test_scores_acc / n_batch))
    # start_time was previously computed but never used; report epoch duration.
    print("   epoch time: %.1fs" % (time.time() - start_time))

# summary_writer.close()


# Saving only the final model would go here (outside the epoch loop);
# the trailing "model" in the path is the checkpoint filename prefix.
# save_models_path = './models/model'
#
# saver.save(sess,save_models_path , global_step=n_epoch)



sess.close()

# ---------------------------会话结束---------------------------