# 单任务放大倍数识别，效果很好 (single-task magnification-level recognition; works well)


from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
from main.utils.read import ReadImg
from main.utils.shuffle import Shuffle
from main.utils.read import Extract
import time

# Input image size (larger values may exhaust GPU memory).
# NOTE(review): "weight" is presumably a misspelling of "width"; kept as-is
# because the name is referenced throughout the file.
weight = 256   # image width in pixels
height = 256   # image height in pixels
channel = 3    # RGB channels
#
# # 图片路径
# call_img_path = ''
#
#
# # 要训练和测试的数据集
# ri_call = ReadImg(call_img_path,weight,height)
#
# call_HER2_data, call_HER2_label = ri_call.read_dataSet_in_single_task2()
#
# call_num_example = call_HER2_data.shape[0]  # 训练图片数量
#
# # 打乱训练集
# sf = Shuffle()
# shuffled_call_HER2_data , shuffled_call_HER2_label = sf.shuffle_data(call_HER2_data,call_HER2_label)
#
#
# # 打乱的训练集和验证集
# x_train = shuffled_call_HER2_data
# y1_train = shuffled_call_HER2_label


# 没打乱的训练集和验证集
# x_train = train_HER2_data
# y1_train = train_HER2_label



# ----------------- Network construction ----------------------

# Placeholders: values are supplied at run time through feed_dict.
x = tf.placeholder(tf.float32, shape=[None, weight, height, channel], name='x')
y1_ = tf.placeholder(tf.int32, shape=[None, ], name='y1_')
# y2_ = tf.placeholder(tf.int32, shape=[None, ], name='y2_')

# Dropout keep probability.  Defaults to 1.0 (dropout disabled) so that
# evaluation/inference is deterministic; feed {keep_prob: 0.5} while training.
# (The original hard-coded keep_prob=0.5, which wrongly applied dropout at
# inference time as well.)  Adds no variables, so old checkpoints still load.
keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')


def _conv_pool(inputs, filters, kernel_size):
    """One 'same'-padded ReLU conv layer followed by 2x2/stride-2 max pooling.

    Returns (conv, pool); pooling halves each spatial dimension.
    """
    conv = tf.layers.conv2d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        padding="same",
        activation=tf.nn.relu,
        # small stddev keeps the initial weights close to zero
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)
    return conv, pool


def _dense_dropout(inputs, units):
    """L2-regularized fully-connected ReLU layer followed by dropout.

    Returns (dense, dropped); dropout strength is controlled by `keep_prob`.
    """
    dense = tf.layers.dense(
        inputs=inputs,
        units=units,
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return dense, tf.nn.dropout(dense, keep_prob=keep_prob)


# Four conv/pool stages: 16 -> 32 -> 64 -> 128 filters, each halving H and W.
conv1, pool1 = _conv_pool(x, 16, [5, 5])
print(conv1)
print(pool1)

conv2, pool2 = _conv_pool(pool1, 32, [5, 5])
print(conv2)
print(pool2)

conv3, pool3 = _conv_pool(pool2, 64, [3, 3])
print(conv3)
print(pool3)

conv4, pool4 = _conv_pool(pool3, 128, [3, 3])
print(conv4)
print(pool4)


# Flatten: after 4 rounds of 2x2 pooling each spatial dimension is /16.
# (Computed from weight/height instead of the original hard-coded 16*16*128,
# so other input sizes divisible by 16 also work.)
flat_dim = (weight // 16) * (height // 16) * 128
re1 = tf.reshape(pool4, [-1, flat_dim])


# Two fully-connected layers with dropout.
dense1, dropout_dense1 = _dense_dropout(re1, 1024)
print(dense1)
print(dropout_dense1)

dense2, dropout_dense2 = _dense_dropout(dropout_dense1, 512)
print(dropout_dense2)


# Output head: raw logits over the 4 magnification classes
# (softmax is applied inside the loss op, not here).
logits1 = tf.layers.dense(
    inputs=dropout_dense2,
    units=4,
    activation=None,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
    kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

print(logits1)

# --------------------------- End of network ---------------------------


# --------------------------- Loss / metric ops ---------------------------

# Sparse softmax cross-entropy for the single HER2 classification head:
# labels are plain integer class ids, logits1 holds the raw dense outputs.
#  reference: https://blog.csdn.net/qq_32166627/article/details/52734387
HER2_loss = tf.losses.sparse_softmax_cross_entropy(labels=y1_, logits=logits1)
loss = HER2_loss

# Adam optimizer (one of several gradient-descent variants; see
# https://blog.csdn.net/xierhacker/article/details/53174558 for a survey).
train_op = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(loss)

# Accuracy: fraction of samples whose arg-max prediction equals the label.
# tf.argmax(..., 1) picks the winning class per row; tf.cast converts the
# boolean matches to floats so their mean is the accuracy.
predicted_class = tf.cast(tf.argmax(logits1, 1), tf.int32)
HER2_acc = tf.reduce_mean(tf.cast(tf.equal(predicted_class, y1_), tf.float32))

# (tensorboard summary merging is currently disabled)
# merge = tf.summary.merge_all()

# --------------------------- End of loss / metric ops ---------------------------






# ---------------------------会话开始---------------------------

# 正式进行训练和测试数据，可将n_epoch设置更大一些

# n_epoch = 10  # 应该是所有训练集数据训练几遍的意思
# batch_size = 20
# Create the session (InteractiveSession installs itself as the default
# session) and initialize every variable in the graph.
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# Checkpoint saver; max_to_keep=1 keeps only the newest model on disk
# (the default would retain the 5 most recent checkpoints).
saver = tf.train.Saver(max_to_keep=1)

# 存入tensorboard文件的位置
# summary_dir = 'I:/pyCharmProjectSet/deepLearning/advanced_research/src/main/prototype/logs/'
# summary_writer = tf.summary.FileWriter(logdir=summary_dir, graph=sess.graph)

# et = Extract()
#
#
# for epoch in range(n_epoch):
#     start_time = time.time()
#
#     # 这里是计算损失值和准确率的地方。
#
#     # 因为是多任务的，所以这里需要将两种数据先提出来，因为后面损失值计算是两者相加后的损失值计算，所以这里得先取出来，好写代码
#     train_loss, train_acc, n_batch =  0, 0, 0
#
#
#     print("*********第 %s 代***********" % (str(epoch+1)))
#
#     # training，训练集
#     # 【感觉这里shufflw应该是false，原来是true】
#     for x_train_a, y1_train_a in et.minibatches(x_train, y1_train, batch_size, True):
#         # 计算分类的损失值和准确度
#         _, err, HER2_ac = sess.run([train_op, loss, HER2_acc],
#                                              feed_dict={x: x_train_a, y1_: y1_train_a})
#         # _, summary ,err, class_ac = sess.run([train_op, merge ,loss, class_acc], feed_dict={x: x_train_a, y1_: y1_train_a})
#
#
#         train_loss += err;
#         # train_acc += ac;
#         train_acc += HER2_ac;
#         n_batch += 1
#
#         # summary_writer.add_summary(summary)
#
#
#     print("   train loss: %f" % (train_loss / n_batch))
#     print("   train acc: %f" % (train_acc / n_batch))
#
#     test_loss, test_acc, n_batch = 0,0,0
#
#     output_set = []
#
#     for x_test_a, y1_test_a in et.minibatches(x_test, y1_test, batch_size, True):
#         # 计算分类的损失值和准确度
#         # err, HER2_ac = sess.run([loss, HER2_acc],
#         #                             feed_dict={x: x_test_a, y1_: y1_test_a})
#         test_err, test_HER2_ac = sess.run([loss, HER2_acc],
#                                 feed_dict={x: x_test_a, y1_: y1_test_a})
#         # _, summary ,err, class_ac = sess.run([train_op, merge ,loss, class_acc], feed_dict={x: x_train_a, y1_: y1_train_a})
#
#
#         test_loss += test_err;
#         # train_acc += ac;
#         test_acc += test_HER2_ac;
#         n_batch += 1
#
#         # summary_writer.add_summary(summary)
#
#
#     print("   test loss: %f" % (test_loss / n_batch))
#     print("   test acc: %f" % (test_acc / n_batch))

# summary_writer.close()


# Directory that holds the saved checkpoints (the original path's trailing
# 'model' component was only the checkpoint file-name prefix).
call_models_path = './models/'
# NOTE(review): hard-coded absolute Windows path — parameterize for portability.
call_img_path = 'C:/Users/Liu/Desktop/train/'

# Load the evaluation images and labels.
ri_call = ReadImg(call_img_path, weight, height)
call_HER2_data, call_HER2_label = ri_call.read_dataSet_in_single_task2()

call_num_example = call_HER2_data.shape[0]  # total number of images
call_batch_size = 100
# Number of full batches; integer division silently drops the last partial
# batch (fewer than call_batch_size images).
call_epoch = int(call_num_example / call_batch_size)
if call_epoch == 0:
    # Previously this fell through to a ZeroDivisionError at the final print.
    raise ValueError('Need at least %d images, found %d in %s'
                     % (call_batch_size, call_num_example, call_img_path))

# Shuffle the data set before batching.
sf = Shuffle()
shuffled_call_HER2_data, shuffled_call_HER2_label = sf.shuffle_data(call_HER2_data, call_HER2_label)

# Restore the latest checkpoint ONCE, before the loop.  The original restored
# the same model on every iteration, which only wasted time.
model_file = tf.train.latest_checkpoint(call_models_path)
saver.restore(sess, model_file)

# Accumulate loss and accuracy over all full batches.
error, accuracy = 0, 0
for i in range(call_epoch):
    call_HER2_data = shuffled_call_HER2_data[i * call_batch_size:(i + 1) * call_batch_size]
    call_HER2_label = shuffled_call_HER2_label[i * call_batch_size:(i + 1) * call_batch_size]

    call_loss, call_HER2_acc, call_output1 = sess.run(
        [loss, HER2_acc, logits1],
        feed_dict={x: call_HER2_data, y1_: call_HER2_label})
    error += call_loss
    accuracy += call_HER2_acc


print('*********模型测试***********')
print('val_loss:%f, val_class_acc:%f' % (error / call_epoch, accuracy / call_epoch))


# model_file = tf.train.latest_checkpoint(call_models_path)
# saver.restore(sess, model_file)
# call_loss, call_HER2_acc,call_output1= sess.run([loss, HER2_acc ,logits1], feed_dict={x: call_HER2_data, y1_: call_HER2_label})
# print('*********模型测试***********')
# print(call_output1)
# print('val_loss:%f, val_class_acc:%f' % (call_loss, call_HER2_acc))




sess.close()  # release the TF session's resources

# ---------------------------会话结束---------------------------