# _*_ coding:utf-8 _*_
"""
__Author__    :  yuan
__Date__      :  2020/4/4
__File__      :  net.py
__Desc__      :
"""
import warnings
import os

warnings.filterwarnings('ignore')
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter("ignore", category=Warning)

import tensorflow as tf
import tensorflow.contrib.slim as slim



__all__ = ["se_resnet"]


def senet_module(x: tf.Tensor, rank, r=16, name=None):
    """Squeeze-and-Excitation block: compute per-channel gating weights.

    :param x: input feature map, shape [None, H, W, C] (NHWC)
    :param rank: identifier embedded in the name scope for graph readability
    :param r: channel reduction ratio of the excitation bottleneck
    :param name: unused; kept for interface compatibility
    :return: gating tensor of shape (N, 1, 1, C), values in (0, 1)
    """
    with tf.name_scope(f"senet_{rank}"):
        _, H, W, C = x.shape
        # Squeeze: global average pooling collapses each channel to 1x1.
        x = slim.avg_pool2d(x, [H, W], stride=1)
        rC = C // r  # bottleneck width (Dimension arithmetic, resolved in-graph)
        # Excitation: two 1x1 convs stand in for the paper's FC layers.
        with slim.arg_scope([slim.conv2d], padding='SAME',
                            stride=1, data_format="NHWC"):
            x = slim.conv2d(x, rC, (1, 1), activation_fn=tf.nn.relu)
            # FIX: the SE paper uses a sigmoid here so each channel gets a
            # bounded (0, 1) gate; ReLU would produce unbounded scale factors.
            scale = slim.conv2d(x, C, (1, 1), activation_fn=tf.nn.sigmoid)
        return scale  # (N, 1, 1, C)


def res_se_module(x: tf.Tensor, rank, downsample=None, stride=1, r=16, nout=64, expand=4):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with an SE branch.

    :param x: input feature map, [None, H, W, C] (NHWC)
    :param rank: identifier embedded in the name scope
    :param downsample: truthy when the shortcut must be projected (channel
        count or spatial size of the main path differs from the input)
    :param stride: stride of the middle 3x3 conv (and of the shortcut projection)
    :param r: SE channel reduction ratio, forwarded to senet_module
    :param nout: bottleneck width; block output has nout * expand channels
    :param expand: channel expansion factor of the last 1x1 conv
    :return: activated block output, [None, H/stride, W/stride, nout * expand]
    """
    with tf.name_scope(f"res_senet_{rank}"):
        identity = tf.identity(x)
        with slim.arg_scope([slim.conv2d], stride=1,
                            padding="SAME", activation_fn=None):
            x = slim.conv2d(x, nout, [1, 1])
            x = slim.batch_norm(x, activation_fn=tf.nn.relu)
            x = slim.conv2d(x, nout, [3, 3], stride=stride)
            x = slim.batch_norm(x, activation_fn=tf.nn.relu)
            x = slim.conv2d(x, nout * expand, [1, 1])
            x = slim.batch_norm(x, activation_fn=None)

            scale = senet_module(x, rank, r=r)
            # FIX: `slim.math_ops` is not part of slim's public surface;
            # use the supported tf API for the same elementwise ops.
            x = tf.multiply(x, scale)  # channel-wise re-weighting by SE gates
            if downsample:
                # Project the shortcut so its channels/stride match the main path.
                identity = slim.conv2d(identity, nout * expand, [1, 1], stride=stride, padding="SAME")
                identity = slim.batch_norm(identity, activation_fn=None)
            x = tf.add(x, identity)
            x = tf.nn.relu(x)
            return x


def se_resnet(x: tf.Tensor, blocks=None, r=16, numclass=3):
    """Build an SE-ResNet classifier graph and return its logits.

    :param x: input images, [None, H, W, C] (NHWC)
    :param blocks: blocks per stage; defaults to [3, 4, 6, 3] (ResNet-50 layout)
    :param r: SE channel reduction ratio, forwarded to each residual block
    :param numclass: number of output classes
    :return: logits tensor of shape [None, numclass] (no softmax applied)
    """
    if blocks is None:
        # FIX: avoid a mutable default argument; same layout as before.
        blocks = [3, 4, 6, 3]
    # Single-element list acts as a mutable cell so the closure below can
    # track the running channel count across stages.
    inplances = [64]

    def _make_layers(x: tf.Tensor, plane, block, expand=4, stride=1, rank=0):
        """Stack `block` residual units; the first may downsample/project."""
        downsample = None
        # Project the shortcut whenever stride or channel count would
        # otherwise mismatch the residual output.
        if stride != 1 or inplances[0] != plane * expand:
            downsample = True
        x = res_se_module(x, f"{rank}", downsample, stride, r, nout=plane)
        inplances[0] = plane * expand
        for i in range(1, block):
            x = res_se_module(x, f"{rank}_{i}", nout=plane)
        return x

    # Stem: 7x7/2 conv + BN/ReLU, then 3x3/2 max pool (spatial size / 4).
    x = slim.conv2d(x, inplances[0], [7, 7], stride=2, padding="SAME", activation_fn=None)
    x = slim.batch_norm(x, activation_fn=tf.nn.relu)
    x = slim.max_pool2d(x, [3, 3], stride=2, padding="SAME")
    # Four residual stages (debug print removed).
    x = _make_layers(x, 64, blocks[0], rank=0)
    x = _make_layers(x, 128, blocks[1], stride=2, rank=1)
    x = _make_layers(x, 256, blocks[2], stride=2, rank=2)
    x = _make_layers(x, 512, blocks[3], stride=2, rank=3)
    # Global average pool over the remaining spatial extent, then classify.
    x = slim.avg_pool2d(x, [x.shape[1], x.shape[2]], stride=1)
    x = slim.flatten(x)
    logit = slim.fully_connected(x, numclass, activation_fn=None)
    return logit


if __name__ == "__main__":
    import numpy as np

    # Smoke test: build the graph for 900x900 RGB inputs and run one
    # forward pass on a random batch of 6 images.
    H = 900
    W = 900
    inputs = tf.placeholder(tf.float32, [None, H, W, 3])
    labels = tf.placeholder(tf.uint8, [None, 3])
    logit = se_resnet(inputs)
    batch = np.random.rand(6, H, W, 3)
    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        sess.run(logit, feed_dict={inputs: batch})
