import tensorflow as tf
from tensorflow.contrib.slim import nets
import train_data
import test_data
import time
import os
import numpy as np
import xlwt
import random

slim = tf.contrib.slim  # TF-Slim alias: third-party helper that simplifies model definition, saving and restoring
# Directory of TFRecord-formatted training data.
train_data_path = "../data/train/tfrecords"
# Directory of the original (uncropped) training images.
train_data_path_origin = "../data/train/origin"
# Directory of test images.
test_data_path = "../data/test"
# Checkpoint path prefix for the fine-tuned model.
model_save_path = '../data/models/model.ckpt'
# Pre-trained ResNet-50 v2 checkpoint used to initialize the backbone.
resnet50_v2_path = "../data/models/resnet_v2_50.ckpt"
# Directory for TensorBoard summaries and text logs.
logs_path = "../data/logs"


# Build the ResNet-50 v2 backbone with a fresh classification head.
def get_train_model(num_classes=2, is_training=True):
    """Build a ResNet-50 v2 graph topped with a new fully-connected classifier.

    Args:
        num_classes: Number of output classes of the new head.
        is_training: Ignored (kept for backward compatibility). Training mode
            is controlled at run time through the returned placeholder.

    Returns:
        Tuple ``(logits, labels, inputs, is_training_ph)`` where the last
        element is the boolean placeholder that switches train/eval mode.
    """
    inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='inputs')
    labels = tf.placeholder(tf.int32, shape=[None], name='labels')
    # A placeholder (not the Python argument, which the original shadowed)
    # drives batch-norm/dropout mode so a single graph serves both phases.
    is_training_ph = tf.placeholder(tf.bool, name='is_training')
    # Disable ResNet's own final layer (num_classes=None) so we can attach
    # our own head below.
    with slim.arg_scope(nets.resnet_v2.resnet_arg_scope()):
        net, endpoints = nets.resnet_v2.resnet_v2_50(inputs, num_classes=None, is_training=is_training_ph)
    # Append a fully-connected head with num_classes outputs.
    # The 'logits' variable scope separates the original ResNet parameters
    # from the newly added layer's parameters (used when restoring).
    with tf.variable_scope('logits'):
        net = tf.squeeze(net, axis=[1, 2])  # (N, 1, 1, C) -> (N, C)
        net = slim.dropout(net, keep_prob=0.5, scope='scope')
        logits = slim.fully_connected(net, num_outputs=num_classes,
                                      activation_fn=None, scope='fc')
    return logits, labels, inputs, is_training_ph


def _standardize_batch(images):
    """Numpy port of tf.image.per_image_standardization, applied per image.

    Doing this in numpy (instead of calling the tf op inside the training
    loop) fixes two defects of the original code: new graph ops were added
    on every step, and the op's symbolic Tensor output was fed into the
    `inputs` placeholder, which feed_dict does not accept.
    """
    images = np.asarray(images, dtype=np.float32)
    out = np.empty_like(images)
    for idx, img in enumerate(images):
        # Match TF semantics: clamp stddev to 1/sqrt(N) to avoid divide-by-zero.
        adjusted_std = max(float(img.std()), 1.0 / np.sqrt(img.size))
        out[idx] = (img - img.mean()) / adjusted_std
    return out


def train():
    """Fine-tune ResNet-50 v2 on the TFRecord training set.

    Restores the pre-trained backbone, trains it together with the new
    `logits` head, logs loss/accuracy summaries, and checkpoints the model
    every 5000 steps to `model_save_path`.
    """
    # Dataset: 2022 original images * 440 crops each.
    batch_size = 32
    binary_classfication = True
    num_classes = 2 if binary_classfication else 11
    num_steps = 60000  # roughly 30000 steps per epoch
    # Training data pipeline.
    traindata = train_data.TrainData()
    traindata.open(train_data_path, batch_size=batch_size, binary_classfication=binary_classfication)

    # Build the network.
    # with tf.device('/gpu:0'):
    #     logits, labels, inputs, is_training = get_train_model(num_classes=num_classes)
    # Local-test variant (no explicit device placement):
    logits, labels, inputs, is_training = get_train_model(num_classes=num_classes)

    # Cross-entropy loss over the new head's logits.
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits
    )
    loss = tf.reduce_mean(losses)

    # Strategy 2 (disabled): down-weight the loss to 0.2 for all-positive batches.
    # weight = tf.constant(0.2)
    # loss = tf.cond(1 not in labels,lambda :tf.multiply(weight,loss),lambda:tf.identity(loss))

    tf.summary.scalar('loss', loss)

    # Accuracy over a batch of crops (per-crop, not per-original-image).
    logits = tf.nn.softmax(logits, name='test_logits')
    classes = tf.argmax(logits, axis=1, name='classes')
    accuracy = tf.reduce_mean(tf.cast(
        tf.equal(tf.cast(classes, dtype=tf.int32), labels), dtype=tf.float32
    ), name='accuracy')
    tf.summary.scalar('accuracy_1', accuracy)

    # Optimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    train_op = optimizer.minimize(loss)

    # Saver for the full model (backbone + new head).
    saver = tf.train.Saver(tf.global_variables())

    # Split variables into pre-trained backbone vs. new 'logits' head so the
    # restore below only touches variables present in the ResNet checkpoint.
    var_list = []
    logtis_list = []
    for var in slim.get_model_variables():
        if var.op.name.startswith('logits'):
            logtis_list.append(var)
        else:
            var_list.append(var)
    # saver_restore covers only the original ResNet-50 pre-trained weights.
    saver_restore = tf.train.Saver(var_list)
    init = tf.global_variables_initializer()
    train_loss_writer = open(logs_path + "/train_loss.txt", mode="w")
    try:
        # Local-test session variant; see the commented config for device logging.
        with tf.Session() as sess:
            # with tf.Session(config=tf.ConfigProto(
            #         log_device_placement=True,
            #         allow_soft_placement=True)) as sess:
            sess.run(init)
            # Initialize the backbone from the pre-trained checkpoint.
            saver_restore.restore(sess, resnet50_v2_path)
            merged = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(logs_path, graph=sess.graph)
            for i in range(num_steps):
                start_time = time.time()
                images, _train_labels = traindata.get_batch()
                # Per-image standardization (numpy; see _standardize_batch).
                images = _standardize_batch(images)
                train_labels = [t[0] for t in _train_labels]
                # Strategy 1 (disabled): under-sample all-positive batches
                # with 80% drop probability.
                # if 1 not in train_labels and (i+1)%12 != 0:
                #     if random.random() < 0.8:
                #         continue
                train_dict = {inputs: images,
                              labels: train_labels,
                              is_training: True}
                _, _loss, summary = sess.run([train_op, loss, merged], feed_dict=train_dict)
                summary_writer.add_summary(summary, global_step=i)
                # Write the evaluated loss value; the original passed the
                # Tensor `loss` plus a second argument, raising TypeError.
                train_loss_writer.write(str(_loss) + '\n')
                if i < 100 or i % 100 == 0:
                    print('第', i, '轮，loss=', _loss, '耗时:', time.time() - start_time)
                if (i + 1) % 5000 == 0:  # periodic checkpoint
                    saver.save(sess, model_save_path, global_step=i + 1)
                    print('save model to {}'.format(model_save_path))
            summary_writer.close()
    finally:
        # Close resources even if training aborts mid-run.
        train_loss_writer.close()
        traindata.close()


def test(path=test_data_path):
    """Evaluate the saved model on every .jpg image under `path`.

    Each image is consumed as 440 crops in 55 batches of 8; an image is
    labelled defective when any crop's defect probability exceeds 0.8.
    Per-image results are written to an Excel sheet under `logs_path`.

    Args:
        path: Directory containing the .jpg images to evaluate.
    """
    # Dataset: 662 images * 440 crops each; crops run in 55 batches of 8
    # (so one image takes 55 inference steps).
    pic_list = [pic for pic in os.listdir(path) if pic.endswith(".jpg")]
    pic_number = len(pic_list)
    if pic_number == 0:
        # Guard: avoid ZeroDivisionError in the final accuracy print.
        print('no .jpg files found in', path)
        return
    # Excel workbook for per-image results.
    excel = xlwt.Workbook(encoding="utf-8")
    excel_sheet = excel.add_sheet('测试结果')
    excel_sheet.write(0, 0, label="文件名")
    excel_sheet.write(0, 1, label="标签")
    excel_sheet.write(0, 2, label="测试结果")
    excel_sheet.write(0, 3, label="相对概率")
    acc = 0
    # Local-test session variant; see commented config for device logging.
    with tf.Session() as sess:
        # with tf.Session(config=tf.ConfigProto(
        #         log_device_placement=True,
        #         allow_soft_placement=True)) as sess:
        # Restore the trained graph and its parameters.
        saver = tf.train.import_meta_graph('../data/models/model.ckpt-60000.meta')
        saver.restore(sess, tf.train.latest_checkpoint('../data/models'))
        graph = tf.get_default_graph()
        # Recover input / label / mode placeholders and the softmax output
        # from the restored graph.
        inputs = graph.get_tensor_by_name("inputs:0")
        labels = graph.get_tensor_by_name("labels:0")
        is_training = graph.get_tensor_by_name("is_training:0")
        # accuracy = graph.get_tensor_by_name("accuracy:0")
        test_logits = graph.get_tensor_by_name("test_logits:0")
        for n, pic in enumerate(pic_list):
            print("pic:", n, '/', pic_number)
            # BUG FIX: read from `path` (the function argument), not the
            # module-level test_data_path — the original made
            # test(train_data_path_origin) silently evaluate the test set.
            testdata = test_data.TestData(os.path.abspath(path) + "/" + pic, binary_classfication=True)
            _images, _labels = testdata.get_batch()
            _images = np.asarray(_images).reshape((55, 8, 224, 224, 3))
            _labels = np.asarray(_labels).reshape((440))
            # The whole image is positive if any crop label is positive.
            label = 1 if 1 in _labels else 0
            # A defect probability above 80% classifies a crop as defective.
            start_time = time.time()

            # Scan the 440 crops for defects; early-exit on the first hit.
            # NOTE(review): whether "any crop > 0.8" is the right aggregation
            # is still open for discussion (as in the original comment).
            test_result = 0
            for image in _images:
                # The first 8 crops are scaled-down copies of the full image.
                _test_logits = sess.run(test_logits, feed_dict={inputs: image, labels: _labels, is_training: False})
                # Column 1 of the softmax output = defect probability.
                _test_logits = _test_logits.T.reshape((2, -1))[1]
                for t in _test_logits:
                    if t > 0.8:
                        test_result = 1
                if test_result == 1:
                    break

            if label == test_result:
                acc += 1
            print(time.time() - start_time)
            excel_sheet.write(n + 1, 0, label=pic)
            excel_sheet.write(n + 1, 1, label=label)
            excel_sheet.write(n + 1, 2, label=test_result)
            excel_sheet.write(n + 1, 3, label="")

            # # Per-crop accuracy variant (disabled):
            # start_time = time.time()
            # _labels = np.asarray(_labels).reshape((55, 8))
            # for image, label in zip(_images, _labels):
            #     # The first 8 crops are scaled-down copies of the full image.
            #     _acc = sess.run(accuracy, feed_dict={inputs: image, labels: label, is_training: False})
            #     acc += _acc
            # print(time.time() - start_time)
    excel.save(logs_path + "/test_result.xls")
    print('测试集正确率：', acc / pic_number)


if __name__ == '__main__':
    train()
    # Evaluate accuracy on the training set first, then on the test set.
    for data_dir in (train_data_path_origin, test_data_path):
        test(data_dir)
