# _*_ coding:utf-8 _*_
"""
__Author__    :  yuan
__Date__      :  2020/4/4
__File__      :  net.py
__Desc__      :
"""
import warnings
import os
import numpy as np
warnings.filterwarnings('ignore')
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter("ignore", category=Warning)

import tensorflow as tf
import tensorflow.contrib.slim as slim
__all__ = ["pry_stn_seresnet"]


# Multi-scale "pyramid" feature extraction via dilated (atrous) convolutions,
# concatenated along the channel axis.
def pyramid(x: tf.Tensor, rates=(1, 2, 3, 4), n_filter=64):
    """Apply one 3x3 dilated conv per rate and concat the resized branches.

    With padding="VALID", a larger dilation rate yields a smaller output,
    so every branch is bilinearly resized to the size of the smallest
    (last, largest-rate) branch before concatenation.

    :param x: input feature map, NHWC. Static H/W must be known, since the
        resize target is taken from ``get_shape()``.
    :param rates: dilation rates, one conv branch per rate; keep them in
        increasing order so the last branch is the smallest.
    :param n_filter: output channels per branch.
    :return: tensor of shape (N, H', W', n_filter * len(rates)).
    """
    with tf.name_scope("pyramid"):
        with slim.arg_scope([slim.conv2d], kernel_size=(3, 3),
                            stride=(1, 1), padding="VALID", data_format="NHWC"):
            branches = [slim.conv2d(x, n_filter, rate=rate) for rate in rates]
        # Resize all larger branches down to the last (smallest) branch.
        target = branches[-1].get_shape()
        resized = [
            tf.image.resize_bilinear(b, (target[1], target[2]), align_corners=True)
            for b in branches[:-1]
        ]
        resized.append(branches[-1])
        return tf.concat(resized, axis=-1)


def affine_grid_generator(height, width, theta):
    """Generate a batch of affine sampling grids.

    Used with the bilinear sampler on an input feature map, the grid
    produces an output feature map that is an affine transformation of
    the input (Jaderberg et al., "Spatial Transformer Networks").

    Parameters
    ----------
    - height, width: spatial size of the desired output grid.
    - theta: affine transform matrices of shape (num_batch, 2, 3).

    Returns
    -------
    - normalized grid (-1, 1) of shape (num_batch, 2, H, W).
      The 2nd dimension has 2 components: (x, y) which are the
      sampling points of the original image for each point in the
      target image.
    """
    N = tf.shape(theta)[0]

    # Normalized target coordinates in [-1, 1].
    x = tf.linspace(-1., 1., width)
    y = tf.linspace(-1., 1., height)
    x_t, y_t = tf.meshgrid(x, y)

    x_t_flat = tf.reshape(x_t, [-1])
    y_t_flat = tf.reshape(y_t, [-1])

    # Homogeneous form [x_t, y_t, 1] so a 2x3 affine matrix can act on it.
    ones = tf.ones_like(x_t_flat)
    sampling_grid = tf.stack([x_t_flat, y_t_flat, ones])

    # repeat grid num_batch times -> (N, 3, H*W)
    sampling_grid = tf.expand_dims(sampling_grid, 0)
    sampling_grid = tf.tile(sampling_grid, tf.stack([N, 1, 1]))

    theta = tf.cast(theta, 'float32')
    sampling_grid = tf.cast(sampling_grid, 'float32')

    # transform the sampling grid - batch multiply: (N,2,3) x (N,3,H*W)
    batch_grids = tf.matmul(theta, sampling_grid)
    # batch grid has shape (num_batch, 2, H*W)

    # reshape to (num_batch, 2, H, W)
    batch_grids = tf.reshape(batch_grids, [N, 2, height, width])
    return batch_grids


def get_pixel_value(img, x, y):
    """Gather pixel values from *img* at per-batch integer coordinates.

    Input
    -----
    - img: tensor of shape (B, H, W, C)
    - x: integer tensor of shape (B, H, W), column indices
    - y: integer tensor of shape (B, H, W), row indices

    Returns
    -------
    - output: tensor of shape (B, H, W, C)
    """
    coord_shape = tf.shape(x)
    n_batch = coord_shape[0]
    n_rows = coord_shape[1]
    n_cols = coord_shape[2]

    # Build a (B, H, W) tensor whose value at [b, i, j] is b, so every
    # (y, x) pair is paired with its own batch index for gather_nd.
    batch_range = tf.reshape(tf.range(0, n_batch), (n_batch, 1, 1))
    batch_map = tf.tile(batch_range, (1, n_rows, n_cols))

    gather_idx = tf.stack([batch_map, y, x], 3)
    return tf.gather_nd(img, gather_idx)


def bilinear_sample(img, x, y):
    """
    Performs bilinear sampling of the input images according to the
    normalized coordinates provided by the sampling grid. Note that
    the sampling is done identically for each channel of the input.
    To test if the function works properly, output image should be
    identical to input image when theta is initialized to identity
    transform.

    - img: (B, H, W, C) input feature map.
    - x, y: (B, H, W) normalized sampling coordinates in [-1, 1],
      i.e. the grid produced by affine_grid_generator.
    Returns the sampled (B, H, W, C) feature map.
    """
    H = tf.shape(img)[1]
    W = tf.shape(img)[2]
    max_y = tf.cast(H - 1, 'int32')
    max_x = tf.cast(W - 1, 'int32')
    zero = tf.zeros([], dtype='int32')

    # Rescale x and y from [-1, 1] to pixel coordinates [0, W-1] / [0, H-1].
    # Fix: the previous `max_x - 1` / `max_y - 1` mapped the grid to
    # [0, W-2] x [0, H-2], so an identity theta did NOT reproduce the
    # input, contradicting the contract stated in this docstring.
    x = tf.cast(x, 'float32')
    y = tf.cast(y, 'float32')
    x = 0.5 * ((x + 1.) * tf.cast(max_x, 'float32'))
    y = 0.5 * ((y + 1.) * tf.cast(max_y, 'float32'))

    # grab 4 nearest corner points for each (x_i, y_i)
    x0 = tf.cast(tf.floor(x), 'int32')
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), 'int32')
    y1 = y0 + 1

    # clip to range [0, W-1] / [0, H-1] to not violate img boundaries
    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)

    # get pixel value at corner coords
    Ia = get_pixel_value(img, x0, y0)
    Ib = get_pixel_value(img, x0, y1)
    Ic = get_pixel_value(img, x1, y0)
    Id = get_pixel_value(img, x1, y1)

    # recast as float for delta calculation
    x0 = tf.cast(x0, 'float32')
    x1 = tf.cast(x1, 'float32')
    y0 = tf.cast(y0, 'float32')
    y1 = tf.cast(y1, 'float32')

    # bilinear weights: area of the sub-rectangle opposite each corner
    wa = (x1 - x) * (y1 - y)
    wb = (x1 - x) * (y - y0)
    wc = (x - x0) * (y1 - y)
    wd = (x - x0) * (y - y0)

    # add channel dimension so weights broadcast against (B, H, W, C)
    wa = tf.expand_dims(wa, axis=3)
    wb = tf.expand_dims(wb, axis=3)
    wc = tf.expand_dims(wc, axis=3)
    wd = tf.expand_dims(wd, axis=3)

    # weighted sum of the four corner values
    out = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])

    return out


def stn_transform(x: tf.Tensor, theta, out_dims=None, **kwargs):
    """Apply a spatial transformer: affine grid + bilinear sampling.

    :param x: input feature map (B, H, W, C).
    :param theta: flat affine parameters, reshaped to (B, 2, 3).
    :param out_dims: optional (out_H, out_W) for the output grid;
        defaults to the input's spatial size.
    :return: transformed feature map.
    """
    input_shape = tf.shape(x)
    num_batch = input_shape[0]
    in_h = input_shape[1]
    in_w = input_shape[2]

    # Each sample's 6 parameters form a [2, 3] affine matrix.
    theta = tf.reshape(theta, [num_batch, 2, 3])

    if out_dims:
        grids = affine_grid_generator(out_dims[0], out_dims[1], theta)
    else:
        grids = affine_grid_generator(in_h, in_w, theta)

    # Split into x / y sampling coordinates, each (B, H, W).
    xs = grids[:, 0, :, :]
    ys = grids[:, 1, :, :]

    # Sample the input with the grid to produce the output.
    return bilinear_sample(x, xs, ys)


def stn_location(x: tf.Tensor):
    """Localization feature extractor for the STN.

    Three strided 3x3 conv layers (64/128/256 channels), flattened and
    projected to a 128-dim feature vector that the caller maps to the
    6 affine parameters.
    """
    with slim.arg_scope([slim.conv2d], kernel_size=(3, 3),
                        stride=(2, 2), padding="SAME", activation_fn=tf.nn.relu):
        feat = slim.stack(x, slim.conv2d, [64, 128, 256])
    # arg_scope above only targets slim.conv2d, so these sit outside it.
    feat = slim.flatten(feat)
    # FIXME: the 128-dim bottleneck width is provisional.
    return slim.fully_connected(feat, 128, activation_fn=tf.nn.relu)


def stn_net(x: tf.Tensor):
    """Spatial transformer network component.

    A localization branch regresses 6 affine parameters per sample.
    The regression weights are initialized to zeros and the bias to a
    45-degree rotation matrix, so the initial transform is exactly that
    rotation regardless of the input.

    Trainable parameters added by the STN: the conv+FC localization
    weights/biases, plus the 128 -> 6 projection (w_loc, b_loc).

    :param x: input feature map (B, H, W, C).
    :return: the affinely transformed feature map, same shape as x.
    """
    degree = np.deg2rad(45)
    theta = np.array([
        [np.cos(degree), -np.sin(degree), 0],
        [np.sin(degree), np.cos(degree), 0]
    ])
    with tf.name_scope("spatial_transformer"):
        theta = theta.astype("float32")
        theta = theta.flatten()
        x_par = stn_location(x)
        loc_out = 6  # number of affine transform parameters
        # Zero weights + theta bias => the initial fc_loc output is the
        # fixed 45-degree rotation for every sample.
        w_loc = tf.Variable(tf.zeros([128, loc_out]), name='w_loc')
        b_loc = tf.Variable(initial_value=theta, name='b_loc')
        fc_loc = tf.matmul(x_par, w_loc) + b_loc
        h_trans = stn_transform(x, fc_loc)
        return h_trans


def senet_module(x: tf.Tensor, rank, r=16, name=None):
    """Squeeze-and-Excitation channel attention.

    :param x: input feature map (N, H, W, C).
    :param rank: suffix for the name_scope.
    :param r: channel reduction ratio for the excitation bottleneck.
    :param name: unused; kept for interface compatibility.
    :return: per-channel scale of shape (N, 1, 1, C).
    """
    with tf.name_scope(f"senet_{rank}"):
        _, _, _, C = x.get_shape()
        # Squeeze: global average pooling -> (N, 1, 1, C).
        x = tf.reduce_mean(x, [1, 2], keep_dims=True)
        rC = C // r  # reduced channel count
        # Excitation: two 1x1 convs standing in for the FC layers.
        with slim.arg_scope([slim.conv2d], padding='SAME',
                            stride=1, data_format="NHWC"):
            x = slim.conv2d(x, rC, (1, 1), activation_fn=tf.nn.relu)
            # Fix: SE-Net gates with a sigmoid so scales lie in (0, 1);
            # the previous ReLU left the channel scale unbounded.
            scale = slim.conv2d(x, C, (1, 1), activation_fn=tf.nn.sigmoid)
        return scale  # (N, 1, 1, C)


def res_se_module(x: tf.Tensor, rank, downsample=None, stride=1, r=16, nout=64, expand=4):
    """SE-ResNet bottleneck block: 1x1 -> 3x3 -> 1x1 convs + SE scaling
    + residual shortcut.

    :param x: input feature map (N, H, W, C).
    :param rank: suffix for the name_scope.
    :param downsample: truthy when the shortcut needs a 1x1 projection
        (stride != 1 or channel mismatch with nout * expand).
    :param stride: stride of the middle 3x3 conv (and the projection).
    :param r: SE channel-reduction ratio.
    :param nout: bottleneck width; the block outputs nout * expand channels.
    :param expand: bottleneck expansion factor.
    :return: output feature map (N, H', W', nout * expand).
    """
    with tf.name_scope(f"res_senet_{rank}"):
        identity = tf.identity(x)
        with slim.arg_scope([slim.conv2d], stride=1,
                            padding="SAME", activation_fn=None):
            x = slim.conv2d(x, nout, [1, 1])
            x = slim.batch_norm(x, activation_fn=tf.nn.relu)
            x = slim.conv2d(x, nout, [3, 3], stride=stride)
            x = slim.batch_norm(x, activation_fn=tf.nn.relu)
            x = slim.conv2d(x, nout * expand, [1, 1])
            x = slim.batch_norm(x, activation_fn=None)

            # Channel re-weighting: multiply each channel by its SE scale.
            # Use public tf ops; `slim.math_ops` is a private TF module that
            # only happens to leak through contrib.slim's namespace.
            scale = senet_module(x, rank, r=r)
            x = tf.multiply(x, scale)
            if downsample:
                # 1x1 projection so the shortcut matches shape and stride.
                identity = slim.conv2d(identity, nout * expand, [1, 1], stride=stride, padding="SAME")
                identity = slim.batch_norm(identity, activation_fn=None)
            x = tf.add(x, identity)
            x = tf.nn.relu(x)
            return x


def pry_stn_seresnet(x: tf.Tensor, blocks=(3, 4, 6, 3), r=16, numclass=3):
    """Pyramid + STN + SE-ResNet classifier.

    :param x: input images (N, H, W, C).
    :param blocks: number of SE-ResNet bottlenecks per stage; the default
        matches ResNet-50's 3-4-6-3 layout. (Tuple instead of the original
        mutable-list default argument.)
    :param r: SE channel-reduction ratio for the first block of each stage.
    :param numclass: number of output classes.
    :return: logits tensor of shape (N, numclass).
    """
    # One-element list so _make_layers can update the running channel count
    # across stages (a plain int closure would not be assignable here).
    inplances = [64]

    def _make_layers(x: tf.Tensor, plane, block, expand=4, stride=1, rank=0):
        # Project the shortcut when stride or channel count changes,
        # otherwise the residual add would mismatch shapes.
        downsample = None
        if stride != 1 or inplances[0] != plane * expand:
            downsample = True
        x = res_se_module(x, f"{rank}", downsample, stride, r, nout=plane)
        inplances[0] = plane * expand
        for i in range(1, block):
            x = res_se_module(x, f"{rank}_{i}", nout=plane)
        return x

    # Stem: 7x7/2 conv + BN/ReLU, then 3x3/2 max-pool (1/4 resolution).
    x = slim.conv2d(x, inplances[0], [7, 7], stride=2, padding="SAME", activation_fn=None)
    x = slim.batch_norm(x, activation_fn=tf.nn.relu)
    x = slim.max_pool2d(x, [3, 3], stride=2, padding="SAME")
    # Dilated-convolution pyramids for multi-scale context.
    x = pyramid(x, rates=[1, 3, 5, 7], n_filter=64)
    x = pyramid(x, rates=[1, 3, 5, 7], n_filter=64)
    x = pyramid(x, rates=[1, 2, 3, 4], n_filter=64)
    x = pyramid(x, rates=[1, 2, 3, 4], n_filter=64)
    # Spatial transformer component.
    x = stn_net(x)
    # Four SE-ResNet stages.
    x = _make_layers(x, 64, blocks[0], rank=0)
    x = _make_layers(x, 128, blocks[1], stride=2, rank=1)
    x = _make_layers(x, 256, blocks[2], stride=2, rank=2)
    x = _make_layers(x, 512, blocks[3], stride=2, rank=3)
    # Global average pooling + linear classifier head.
    x = tf.reduce_mean(x, [1, 2], keep_dims=True)
    x = slim.flatten(x)
    logit = slim.fully_connected(x, numclass, activation_fn=None)
    return logit


if __name__ == "__main__":
    # Smoke test: build the graph and run one forward pass on random data.
    # (numpy is already imported at module level as np; the duplicate
    # local import was removed.)
    H = 900
    W = 900
    inputs = tf.placeholder(tf.float32, [None, H, W, 3])
    labels = tf.placeholder(tf.uint8, [None, 3])
    logit = pry_stn_seresnet(inputs)
    x = np.random.rand(6, H, W, 3)
    with tf.Session() as sess:
        sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
        sess.run(logit, feed_dict={inputs: x})
