#coding:utf-8
# __user__ = hiicy redldw
# __time__ = 2019/9/18
# __file__ = DenseNet
# __desc__ =
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
from tensorflow import Summary
from tensorflow import contrib
import numpy as np
import os
from pathlib import Path

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"


class BRConv(keras.layers.Layer):
    """DenseNet-BC composite function H_l: BN -> ReLU -> 1x1 conv (4k) -> BN -> ReLU -> 3x3 conv (k).

    Maps any number of input channels to exactly ``k`` output channels while
    preserving the spatial dimensions (both convs use 'same' padding, stride 1).
    """

    def __init__(self, k, **kwargs):
        """
        :param k: growth rate -- number of feature maps this unit emits.
        """
        super(BRConv, self).__init__(**kwargs)
        self.k = k
        self.bn = keras.layers.BatchNormalization(axis=-1, name='norm1')
        self.relu = keras.layers.ReLU(name='relu1')
        # 1x1 bottleneck with 4k filters limits the channel count fed to the 3x3 conv.
        self.conv1 = keras.layers.Conv2D(4 * k, (1, 1), padding='same', name='conv1')
        self.bn2 = keras.layers.BatchNormalization(axis=-1, name='norm2')
        self.relu2 = keras.layers.ReLU(name='relu2')
        self.conv3 = keras.layers.Conv2D(k, (3, 3), padding='same', name="conv2")

    def call(self, inputs):
        """Apply the two pre-activation conv stages in order."""
        x = self.bn(inputs)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.conv3(x)
        return x

    def compute_output_shape(self, input_shape):
        # Same H/W as the input; channel dimension becomes the growth rate k.
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.k
        return tf.TensorShape(shape)

    def get_config(self):
        # Serialize under the constructor argument name so from_config() can
        # rebuild the layer (the previous 'output_dim' key did not match
        # __init__'s `k` parameter, breaking deserialization).
        base_config = super(BRConv, self).get_config()
        base_config['k'] = self.k
        return base_config


class DenseBlock(keras.layers.Layer):
    """A DenseNet block: ``nums_layers`` BRConv units with dense connectivity.

    Each unit receives the concatenation of the block input and every previous
    unit's output and contributes ``k`` new feature maps; H/W are unchanged.
    """

    def __init__(self, k, nums_layers, **kwargs):
        """
        :param k: growth rate (channels added per inner BRConv unit).
        :param nums_layers: number of BRConv units inside the block.
        """
        super(DenseBlock, self).__init__(**kwargs)
        self.num_layers = nums_layers
        self.k = k
        self.concat = keras.layers.Concatenate()
        # Create the sub-layers ONCE here rather than inside call(): a layer
        # instantiated inside call() gets fresh, untracked weights on every
        # invocation (the original code's own comment flagged this).
        self.brconvs = [BRConv(k, name=f"layer_{i}")
                        for i in range(1, nums_layers + 1)]

    def call(self, inputs, block=1):
        history = [inputs]
        for i, brconv in enumerate(self.brconvs):
            if i > 0:
                # Dense connectivity: feed the concat of all previous outputs.
                inputs = self.concat(history)
            outputs = brconv(inputs)
            history.append(outputs)  # keep every intermediate output
        return self.concat(history)

    # A dense block never changes the spatial dimensions.
    def compute_output_shape(self, input_shape):
        shape = tf.TensorShape(input_shape).as_list()
        # call() concatenates the input plus num_layers outputs of k channels
        # each, so the channel count grows by k * num_layers (the previous
        # k * (num_layers - 1) undercounted by one layer).
        shape[-1] = shape[-1] + self.k * self.num_layers
        return tf.TensorShape(shape)

    def get_config(self):
        # Serialize the constructor arguments so from_config() can rebuild the
        # layer (self.output_shape is undefined before the layer is built).
        base_config = super(DenseBlock, self).get_config()
        base_config['k'] = self.k
        base_config['nums_layers'] = self.num_layers
        return base_config


class Transition(keras.layers.Layer):
    """Transition layer between dense blocks: BN -> ReLU -> 1x1 conv -> 2x2 avg pool.

    Compresses the channel count to ``out_channel`` and halves height/width.
    """

    def __init__(self, out_channel, **kwargs):
        """
        :param out_channel: number of filters of the 1x1 compression conv.
        """
        super().__init__(**kwargs)
        self.bn = keras.layers.BatchNormalization()
        self.relu = keras.layers.ReLU()
        self.conv = keras.layers.Convolution2D(out_channel,
                                               (1, 1), padding='same')
        self.pool = keras.layers.AvgPool2D((2, 2), strides=2)

    def call(self, inputs: tf.Tensor):
        # Run the four stages in sequence (pre-activation ordering).
        out = inputs
        for stage in (self.bn, self.relu, self.conv, self.pool):
            out = stage(out)
        return out


class DenseNet_(keras.models.Model):
    """DenseNet-BC style classifier.

    Stem conv/pool, alternating DenseBlock / Transition stages, a final
    BatchNorm, then GlobalAveragePooling + softmax Dense head.
    """

    def __init__(self, k=32, theta=0.5, num_classes=2, dense_blocks=(6, 12, 32, 32), *args, **kwargs):
        """
        :param k: growth rate shared by all dense blocks.
        :param theta: compression factor applied by each transition layer.
        :param num_classes: size of the softmax output.
        :param dense_blocks: BRConv units per dense block (tuple default avoids
            the mutable-list default-argument pitfall; same values as before).
        """
        # For ImageNet-style 224x224 input, the stem conv uses 2k filters.
        super(DenseNet_, self).__init__(*args, **kwargs)
        self.num_classes = num_classes
        self.features = keras.models.Sequential([
            keras.layers.Conv2D(2 * k, (7, 7), strides=(2, 2), name='conv0'),
            keras.layers.BatchNormalization(name='norm0'),
            keras.layers.ReLU(name='relu0'),
            keras.layers.MaxPooling2D((3, 3), strides=(2, 2), name='pool0'),
        ], name='featurer')
        num_features = 2 * k
        # Stack dense blocks with a compressing transition between each pair.
        for i, num_layers in enumerate(dense_blocks, 1):
            block = DenseBlock(k, num_layers, name=f'dense_block_{i}')
            self.features.add(block)
            # A dense block concatenates its input with num_layers outputs of
            # k channels each, so it ADDS k * num_layers features (the old
            # k * (num_layers - 1) undercounted by one layer, making the
            # transition widths disagree with theta * actual channels).
            num_features = num_features + k * num_layers
            if i != len(dense_blocks):
                num_features = int(num_features * theta)  # DenseNet-BC compression
                transition = Transition(num_features, name=f"transition_{i}")
                self.features.add(transition)
        self.features.add(keras.layers.BatchNormalization(name="norm3"))

        # NOTE(review): attribute keeps the original spelling "classifer" in
        # case external code references it by name.
        self.classifer = keras.models.Sequential([
            keras.layers.GlobalAveragePooling2D(),
            keras.layers.Dense(num_classes, activation='softmax'),
        ], name='output')

    def call(self, inputs, training=None, mask=None):
        features = self.features(inputs)
        out = keras.layers.ReLU()(features)  # activation after the final BN
        out = self.classifer(out)
        return out


def Model_test():
    """Smoke test: push one random 224x224 RGB batch through a fresh DenseNet_."""
    model = DenseNet_()
    batch = tf.random_normal(shape=[1, 224, 224, 3])
    print(batch)
    with tf.GradientTape() as tape:
        predictions = model(batch)
    print(predictions.shape)


##### model build done!

from functools import partial

# Input-pipeline / training hyper-parameters used by the dataset code below
# and by train().
batch_size = 8
prefetch_buffer_size = 2  # number of batches tf.data keeps prefetched
epochs = 5000


def precessing_img(image: tf.Tensor, size=(224, 224)):
    """Decode a JPEG string tensor, resize to ``size``, apply light random
    augmentation, and scale pixel values to [0, 1].

    :param image: scalar string tensor holding raw JPEG bytes.
    :param size: target (height, width) for the resize.
    """
    decoded = tf.image.decode_jpeg(image, channels=3)
    decoded = tf.image.convert_image_dtype(decoded, tf.uint8)
    resized = tf.image.resize(decoded, size=size)
    # Augmentation: random vertical flip, then a random brightness shift in
    # [-max_delta, max_delta). NOTE(review): the shift is applied while pixels
    # are still on the 0-255 scale, so delta=0.5 is a very small change --
    # confirm this is intended.
    augmented = tf.image.random_flip_up_down(resized)
    augmented = tf.image.random_brightness(augmented, max_delta=0.5)
    # Further augmentations (currently disabled): tf.image.random_hue(img, 0.3)
    # for hue, tf.image.random_saturation(img, 0.1, 4) for saturation.
    return augmented / 255.


def parse_img(*data, need="precessing"):
    """Map a (filename, label) pair to an (image, one-hot label) pair.

    :param data: (image_file, label) as produced by the dataset.
    :param need: any value other than None/"None" enables the augmented
                 training preprocessing; None/"None" selects a deterministic
                 resize + rescale path (used for validation).
    """
    image_file, label = data[0], data[1]
    image = tf.read_file(image_file)
    # BUG FIX: the old check `need != "None"` never matched the Python None
    # the validation pipeline passes via partial(parse_img, need=None), so
    # validation images were randomly augmented too.
    if need not in (None, "None"):
        img = precessing_img(image)
    else:
        # Deterministic path: decode, resize to the model input size, rescale
        # to [0, 1] -- no random augmentation. (The previous bare decode_jpeg
        # produced variable-size, unnormalized uint8 images that could not be
        # batched or meaningfully evaluated.)
        img = tf.image.decode_jpeg(image, channels=3)
        img = tf.image.resize(img, size=(224, 224))
        img = img / 255.
    label = tf.one_hot(label, 2)
    return img, label


# NOTE(review): this runs at import time and expects a local directory layout
# of base_dir/{train,val}/<class_name>/<images>; Path.iterdir() order decides
# which child is train vs val -- confirm on the target machine.
base_dir = r'F:\bigphoto\cat_dog'
train_dir, val_dir = Path(base_dir).iterdir()
# One [path, class_index] record per image; the class index is the position of
# the animal's directory within the split directory.
train_content = [[str(image), i] for i, animal_dir in enumerate(Path(train_dir).iterdir())
                 for image in Path(animal_dir).iterdir()]
val_content = [[str(image), i] for i, animal_dir in enumerate(Path(val_dir).iterdir())
               for image in Path(animal_dir).iterdir()]

train_img, train_label = list(zip(*train_content))
val_img, val_label = list(zip(*val_content))
num_train = len(train_label)
num_val = len(val_label)

# Wrap the Python lists as constant tensors before building the datasets.
train_sets = tf.data.Dataset.from_tensor_slices((tf.constant(train_img), tf.constant(train_label)))
val_sets = tf.data.Dataset.from_tensor_slices((tf.constant(val_img), tf.constant(val_label)))

# Build the input pipelines.
# PIPELINE ORDER FIX: shuffle individual SAMPLES before batching -- shuffling
# after .batch() only permutes whole batches -- and keep prefetch as the very
# last transformation so input production overlaps training (the original
# applied shuffle after prefetch/batch despite its own comment saying prefetch
# should be the final step).
train_sets = train_sets.map(parse_img)
train_sets = train_sets.shuffle(buffer_size=1000)
train_sets = train_sets.batch(batch_size=batch_size)
train_sets = train_sets.prefetch(buffer_size=prefetch_buffer_size)

# Validation: deterministic preprocessing selected via functools.partial
# (need=None disables augmentation in parse_img).
val_sets = val_sets.map(partial(parse_img, need=None))
val_sets = val_sets.shuffle(600)
val_sets = val_sets.batch(batch_size)

import cv2
def data_test():
    """Visual sanity check: pull two batches from train_sets and display the
    first image of each with OpenCV."""
    batch_op = train_sets.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        for _ in range(2):
            images, _ = sess.run(batch_op)
            first = images[0]
            print(first.shape)
            cv2.imshow('kj', first)
            cv2.waitKey(50000)

# data_test()
####### data prepare done!


def binary_crossentropy(y_true: tf.Tensor, y_pred: tf.Tensor):
    """Negative mean of the predicted probability mass on the true class.

    :param y_true: (B, n_class) one-hot labels.
    :param y_pred: (B, n_class) predicted probabilities.
    :return: scalar loss tensor.

    NOTE(review): despite its name this omits the log() of a real
    cross-entropy. loss_test() compares it against torch's NLLLoss applied to
    raw probabilities, which computes the same quantity -- presumably
    intentional, but worth confirming.
    """
    # Because y_true is one-hot, the matrix product picks out the predicted
    # probability assigned to each sample's true class.
    picked = K.batch_dot(y_true, K.transpose(y_pred))
    return -K.mean(picked)


def loss_test():
    """Compare binary_crossentropy() against sklearn's log_loss, torch's
    NLLLoss on raw probabilities, and Keras' binary_crossentropy.

    Prints four values; the custom loss should match the torch NLL-on-probs
    figure (both omit the log), while sklearn/Keras compute true log losses.
    """
    from sklearn.metrics import log_loss
    import torch
    import torch.nn as nn

    j = [[1, 0], [0, 1]]          # one-hot targets
    a = [[0.4, 0.6], [0.3, 0.7]]  # predicted probabilities

    # torch NLLLoss applied to raw probabilities (no log), scaled to a sum
    # over the batch. (Removed the unused torch.nn.functional import and the
    # unused `atp` transpose from the original.)
    toloss = nn.NLLLoss()
    tj, ta = torch.from_numpy(np.array([0, 1])), torch.from_numpy(np.array(a))
    tj = tj.long()
    tchloss = toloss(ta, tj).item() * len(a)

    jt = tf.constant(j, dtype=tf.float32)
    at = tf.constant(a, dtype=tf.float32)
    skloss = log_loss([0, 1], a)
    keloss = -K.mean(K.binary_crossentropy(jt, at))

    with tf.Session() as sess:
        myloss = binary_crossentropy(jt, at)
        print(sess.run(myloss))
        print(sess.run(keloss))
        print(tchloss)
        print(skloss)


##### loss done!

def Accuracy(y_true, y_pred):
    """Element-wise accuracy: fraction of positions where y_pred == y_true.

    Expects label tensors (not probabilities) of compatible shapes; y_pred is
    cast to y_true's dtype before comparison.
    """
    y_pred.get_shape().assert_is_compatible_with(y_true.get_shape())
    if y_true.dtype != y_pred.dtype:
        y_pred = K.cast(y_pred, y_true.dtype)
    matches = K.cast(K.equal(y_true, y_pred), K.floatx())
    return K.mean(matches)


def acc_test():
    """Expect 0.75: three of the four predictions match the labels."""
    labels = tf.constant([1, 2, 1, 2])
    preds = tf.constant([0, 2, 1, 2])
    acc = Accuracy(labels, preds)
    with tf.Session() as sess:
        print(sess.run(acc))


######## metrics acc done!


def train(lr=0.01,
          min_lr=1e-5,
          factor=0.8,
          ckptpath=r"E:\memory\dense\mymodel",
          logpath=r"E:\memory\dense\mylog"):
    """Compile and fit a DenseNet_ on the module-level train/val datasets.

    :param lr: initial SGD learning rate.
    :param min_lr: lower bound for ReduceLROnPlateau.
    :param factor: multiplicative LR decay applied on plateau.
    :param ckptpath: directory receiving ModelCheckpoint files.
    :param logpath: TensorBoard log directory.
    """
    densenet = DenseNet_()
    ckpt = keras.callbacks.ModelCheckpoint(os.path.join(ckptpath, 'mymodel'), save_best_only=True,
                                           period=10)  # checkpoint at most every 10 epochs
    loger = keras.callbacks.TensorBoard(logpath, write_images=True)
    lrschle = keras.callbacks.ReduceLROnPlateau(min_lr=min_lr,
                                                factor=factor,
                                                patience=2,
                                                )
    er = keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True)

    optimizer = keras.optimizers.SGD(lr, decay=1e-6, momentum=0.9)

    densenet.compile(optimizer=optimizer,
                     loss="categorical_crossentropy",
                     metrics=["accuracy"])
    # BUG FIX: fit/evaluate steps are counted in BATCHES. The original passed
    # validation_steps=num_val (and evaluate steps=num_val), i.e. one step per
    # SAMPLE, asking for far more batches than the val dataset contains.
    val_steps = max(1, -(-num_val // batch_size))  # ceil(num_val / batch_size)
    # TODO: multi-GPU support
    densenet.fit(train_sets, epochs=epochs, steps_per_epoch=num_train // batch_size,
                 validation_data=val_sets, validation_steps=val_steps,
                 callbacks=[ckpt, loger, lrschle, er])
    densenet.evaluate(val_sets, steps=val_steps)


train(0.001)
