from functools import reduce
import math
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.initializers import RandomNormal, TruncatedNormal
from tensorflow.keras.regularizers import L2

def compose(*blocks):
    """Chain callables left-to-right: compose(f, g)(x) == g(f(x))."""
    def chain(inner, outer):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed
    return reduce(chain, blocks)

def _compose(*blocks): # ! ADD
    return reduce(lambda node_1,node_2: lambda *args, **kwargs: node_1(*args,**kwargs) + node_2(*args,**kwargs), blocks)

def Focus(x,c1,c2,k=1,s=1,p=None,g=1,act=True,node=0,name="Focus"):
    """Focus layer: slice x (B,H,W,C) into four pixel-interleaved quarters,
    concatenate them on the channel axis (C -> 4C at half resolution), then
    apply a Conv block mapping c1*4 -> c2 channels."""
    # ! name
    if name != "Focus": name = "%s.Focus"%name

    # ! Structure: the four interleaved sub-images (even/odd rows x cols)
    quarters = [
        x[:, ::2, ::2,:],
        x[:,1::2, ::2,:],
        x[:, ::2,1::2,:],
        x[:,1::2,1::2,:],
    ]
    stacked = layers.Concatenate(name="%d.%s.Concat"%(node,name))(quarters)
    return Conv(c1*4,c2,k,s,p,g,act,node=node,name=name)(stacked)

def Conv(c1,c2,k=1,s=1,p=None,g=1,act=True,node=0,name="Conv"):
    """Standard convolution block: ZeroPad -> Conv2D -> BatchNorm -> activation.

    c1: input channels (not used by the Keras layers; kept for signature
        parity with the torch original)
    c2: output channels; k: kernel size; s: stride
    p: total padding, split across the two sides; None -> SAME-like (k-s)
    g: convolution groups
    act: True -> SiLU, an Activation layer -> used as-is, otherwise identity
    """
    # ! name
    if name != "Conv": name = "%s.Conv"%name
    if p is None:
        # SAME-like padding: distribute (k-s) over the two sides; the
        # bottom/right side gets the larger half when (k-s) is odd.
        e = (k-s) // 2
        b = (k-s) - e
    else:
        # NOTE(review): p is treated here as the TOTAL pad split across both
        # sides, unlike the torch autopad which is per-side — confirm callers.
        e = p // 2
        b = p - e

    # ! Structure
    if act is True:
        activation = layers.Activation(activation="swish",name="%d.%s.SiLU"%(node,name))
    elif isinstance(act,layers.Activation):
        activation = act
    else:
        activation = layers.Activation(activation="linear",name="%d.%s.Identy"%(node,name))

    return compose(
        layers.ZeroPadding2D([(b,e),(b,e)],name="%d.%s.Pad"%(node,name)),
        layers.Conv2D(c2,(k,k),(s,s),"VALID",
            # fix: g was accepted but never forwarded, so grouped convolution
            # was silently ignored and DWConv degenerated to a dense conv.
            groups=g,
            use_bias=True,
            # kernel_initializer=TruncatedNormal(stddev=0.02),
            # kernel_regularizer=L2(5e-4),
            name="%d.%s"%(node,name)
        ),
        layers.BatchNormalization(
            # momentum=0.97,
            # epsilon=0.001,
            name="%d.%s.BN"%(node,name)
        ),
        activation
    )

def C3(x,c1,c2,n=1,shortcut=True,g=1,e=0.5,node=0,name="C3"):
    """CSP bottleneck with 3 convolutions: two parallel 1x1 branches (cv1,
    cv2), n Bottlenecks stacked on the cv1 branch, channel-concat of both,
    then a final 1x1 Conv (cv3) to c2 channels."""
    # ! name
    if name != "C3": name = "%s.C3"%name

    # ! Structure
    c_ = int(c2 * e)  # hidden channels
    cv1 = Conv(c1,c_,1,1,node=node,name="%s.cv1"%(name))
    cv2 = Conv(c1,c_,1,1,node=node,name="%s.cv2"%(name))
    cv3 = Conv(2*c_,c2,1,node=node,name="%s.cv3"%(name))
    concat = layers.Concatenate(name="%d.%s.Concat"%(node,name))

    branch = cv1(x)
    for i in range(n): # m
        branch = Bottleneck(branch,c_,c_,shortcut,g,e=1.0,node=node,name="%s.m.%d"%(name,i))
    merged = concat([branch,cv2(x)])
    return cv3(merged)

def Bottleneck(x,c1,c2,shortcut=True,g=1,e=0.5,node=0,name="Bottleneck"):
    """Standard bottleneck: 1x1 Conv then 3x3 (grouped) Conv, with a residual
    add when shortcut is requested and the in/out channel counts match."""
    # ! name
    if name != "Bottleneck": name = "%s.Bottleneck"%name

    # ! Structure
    c_ = int(c2 * e)  # hidden channels
    y = Conv(c1,c_,1,1,    node=node,name="%s.cv1"%name)(x)
    y = Conv(c_,c2,3,1,g=g,node=node,name="%s.cv2"%name)(y)
    if shortcut and c1 == c2:
        return x + y
    return y

def SPP(x,c1,c2,k=(5,9,13),node=0,name="SPP"):
    """Spatial pyramid pooling: 1x1 Conv, parallel stride-1 SAME max-pools at
    each kernel size in k, channel-concat of all branches (input included),
    then a final 1x1 Conv to c2 channels."""
    # ! name
    if name != "SPP": name = "%s.SPP"%name

    # ! Structure
    c_ = c1 // 2  # hidden channels
    cv1 = Conv(c1,           c_,1,1,node=node,name="%s.cv1"%(name))
    cv2 = Conv(c_*(len(k)+1),c2,1,1,node=node,name="%s.cv2"%(name))
    pools = []
    for i,kx in enumerate(k):
        pools.append(layers.MaxPool2D((kx,kx),strides=(1,1),padding="SAME",
                                      name="%d.%s.MaxPool.%d"%(node,name,i)))
    concat = layers.Concatenate(name="%d.%s.Concat"%(node,name))

    y = cv1(x)
    branches = [y]
    for pool in pools:
        branches.append(pool(y))
    return cv2(concat(branches))

def DWConv(c1,c2,k=1,s=1,act=True,node=0,name="DWConv"):
    """Depth-wise-style convolution: a Conv whose group count is gcd(c1, c2)."""
    if name != "DWConv": name = "%s.DWConv"%name
    groups = math.gcd(c1,c2)
    return Conv(c1,c2,k=k,s=s,g=groups,act=act,node=node,name=name)

def TransformerLayer(x,c,num_heads,node=0,name="TransformerLayer"):
    """Single transformer layer over a rank-3 sequence x of shape (B, w*h, c):
    dot-product attention with a residual add, then a 2-layer bias-free MLP
    with a second residual add.

    NOTE(review): num_heads is currently unused — layers.Attention is
    single-head scaled dot-product attention, not multi-head.
    """
    if name != "TransformerLayer": name = "%s.TransformerLayer"%name
    q = layers.Dense(c,use_bias=False,name="%d.%s.q"%(node,name))
    k = layers.Dense(c,use_bias=False,name="%d.%s.k"%(node,name))
    v = layers.Dense(c,use_bias=False,name="%d.%s.v"%(node,name))
    ma = layers.Attention(name="%d.%s.ma"%(node,name))
    fc1 = layers.Dense(c,use_bias=False,name="%d.%s.fc1"%(node,name))
    fc2 = layers.Dense(c,use_bias=False,name="%d.%s.fc2"%(node,name))
    # fix: keras layers.Attention expects inputs [query, value, key]; the
    # original passed [q, k, v], silently swapping the value and key
    # projections relative to the torch MultiheadAttention(q, k, v) intent.
    y = ma([q(x),v(x),k(x)]) + x
    return fc2(fc1(y)) + y
    
def TransformerBlock(x,c1,c2,num_heads,num_layers,node=0,name="TransformerBlock"):
    """Transformer stack over a feature map: optional channel-matching Conv,
    flatten to (B, h*w, c2), add a learned per-token Dense residual (position
    embedding), apply num_layers TransformerLayers, reshape back to
    (B, h, w, c2).

    Assumes the spatial dims h and w are statically known at graph-build time.
    """
    if name != "TransformerBlock": name = "%s.TransformerBlock"%name
    conv = None
    if c1 != c2:
        conv = Conv(c1,c2,node=node,name="%s.conv"%name) # ! (B,_,_,c2)
    linear = layers.Dense(c2,name="%d.%s.linear"%(node,name))

    if conv is not None:
        y = conv(x)
    else:
        y = x

    # static spatial dims (batch and channels discarded)
    _,h,w,_ = y.get_shape()

    y = layers.Reshape([-1,c2],name="%d.%s.Reshape.1"%(node,name))(y)
    # learned position embedding applied per token, added residually
    y = y + linear(y)

    for i in range(num_layers):
        y = TransformerLayer(y,c2,num_heads,node=node,name="%s.%d"%(name,i))

    # fix: the final Reshape layer was returned WITHOUT being applied, so
    # callers received a Layer object instead of a tensor — apply it to y.
    return layers.Reshape([h,w,c2],name="%d.%s.Reshape.2"%(node,name))(y)
   
def BottleneckCSP(x,c1,c2,n=1,shortcut=True,g=1,e=0.5,node=0,name="BottleneckCSP"):
    """CSP bottleneck: main branch cv1 -> n Bottlenecks -> cv3 (plain conv),
    bypass branch cv2 (plain conv, no BN/act), channel-concat of both, then
    BN -> LeakyReLU -> cv4 to c2 channels."""
    if name != "BottleneckCSP": name = "%s.BottleneckCSP"%name
    c_ = int(c2 * e)  # hidden channels
    cv1 = Conv(c1,c_,1,1,node=node,name="%s.cv1"%(name))
    cv2 = layers.Conv2D(c_,(1,1),(1,1),use_bias=False,
                        name="%d.%s.cv2"%(node,name),
                        kernel_initializer=TruncatedNormal(stddev=0.02),
                        kernel_regularizer=L2(5e-4))
    cv3 = layers.Conv2D(c_,(1,1),(1,1),use_bias=False,
                        name="%d.%s.cv3"%(node,name),
                        kernel_initializer=TruncatedNormal(stddev=0.02),
                        kernel_regularizer=L2(5e-4))
    cv4 = Conv(2*c_,c2,1,1,node=node,name="%s.cv4"%(name))
    bn = layers.BatchNormalization(momentum=0.97,epsilon=0.001,
                                   name="%d.%s.BN"%(node,name))
    act = layers.Activation("leaky_relu",name="%d.%s.LeakyReLU"%(node,name))
    concat = layers.Concatenate(name="%d.%s.Concat"%(node,name))

    y = cv1(x)
    for i in range(n):
        # fix: name was "%d.%s.m.%d"%(node,name,i), which duplicated the node
        # prefix (Bottleneck's inner Convs prepend node themselves) and was
        # inconsistent with the C3 naming scheme ("%s.m.%d").
        y = Bottleneck(y,c_,c_,shortcut,g,e=1.0,node=node,name="%s.m.%d"%(name,i))
    y1 = cv3(y)
    y2 = cv2(x)
    return cv4(act(bn(concat([y1,y2]))))


def C3TR(x,c1,c2,n=1,shortcut=True,g=1,e=0.5,node=0,name="C3TR"):
    """C3 variant whose inner stack is a TransformerBlock (4 heads, n layers)
    instead of n Bottlenecks."""
    if name != "C3TR": name = "%s.C3TR"%name
    c_ = int(c2 * e)  # hidden channels
    cv1 = Conv(c1,c_,1,1,node=node,name="%s.cv1"%(name))
    cv2 = Conv(c1,c_,1,1,node=node,name="%s.cv2"%(name))
    cv3 = Conv(2*c_,c2,1,node=node,name="%s.cv3"%(name))
    concat = layers.Concatenate(name="%d.%s.Concat"%(node,name))
    main = cv1(x)
    main = TransformerBlock(main,c_,c_,4,n,node=node,name="%s.m"%name) # m
    return cv3(concat([main,cv2(x)]))

def Detect(xs,nc,imgsz,strides=[8,16,32],anchors=None,ch=[128,256,512],node=0,name="Detect"):
    """Detection head: one 1x1 conv per feature level, each reshaped to
    (B, nx*ny, na, no).

    xs: per-level feature maps from the backbone/neck
    nc: number of classes
    imgsz: image size as (w, h)
    strides: grid stride per level
    anchors: 3 lists of flattened (w, h) anchor pairs; defaults to the
        standard COCO anchors
    ch: input channel count per level

    The raw xc,yc offsets and w,h scales are NOT sigmoid-squashed or decoded
    here — that happens in the loss or post-processing stage. The per-level
    outputs are deliberately left unconcatenated, since different levels may
    feed different downstream algorithms.
    """
    if name != "Detect": name = "%s.Detect"%name
    if anchors is None:
        anchors = [
            [ 10,13,  16, 30,  33, 23],
            [ 30,61,  62, 45,  59,119],
            [116,90, 156,198, 373,326],
        ]
    else:
        assert len(anchors) == 3
    nl = len(anchors)                # number of detection levels
    assert len(xs) == len(strides) == nl
    na = len(anchors[0]) // 2        # anchors per level
    no = nc + 5                      # outputs per anchor: box(4) + obj + classes

    ys = []
    for idx in range(len(ch)):
        head = layers.Conv2D(no*na,(1,1),(1,1),"SAME",
                        #   kernel_initializer=TruncatedNormal(stddev=0.02),
                        #   kernel_regularizer=L2(5e-4),
                          name="%d.%s.m.%d"%(node,name,idx))
        out = head(xs[idx])
        # (B, ny, nx, na*no) -> (B, nx*ny, na, no); the flattened cell order
        # must match the grid layout produced by _make_grid.
        nx, ny = imgsz[0]//strides[idx], imgsz[1]//strides[idx]
        out = layers.Reshape([nx*ny,na,no],name="%d.%s.Reshape.%d"%(node,name,idx))(out)
        ys.append(out)
    return ys

# * * *

def _make_grid(imgsz,na=3,strides=[8,16,32],dtype=tf.float32):
    """Build one (ny*nx, na, 2) tensor of cell (x, y) coordinates per stride.

    imgsz: image size as (w, h). The flattened cell order is x-fastest
    (left-to-right, then top-to-bottom), which must match the Reshape layout
    of the detection-head outputs.
    """
    grids = []
    for i in range(len(strides)):
        nx, ny = imgsz[0]//strides[i], imgsz[1]//strides[i]
        # tf.meshgrid(cols, rows) yields (ny, nx) tensors: xv varies along
        # the second axis, yv along the first.
        xv, yv = tf.meshgrid(tf.range(nx,dtype=dtype),tf.range(ny,dtype=dtype))
        # stack to (ny, nx, 2) where each cell holds (x, y); x varies fastest
        # in the flattened scan, i.e. the image is swept left-to-right,
        # top-to-bottom. (A transpose here would silently change that order.)
        cells = tf.stack([xv,yv],axis=-1)
        # flatten to (ny*nx, 1, 2) — scan order is preserved — then tile
        # across the anchor axis to (ny*nx, na, 2). The grid must be laid
        # out exactly like the conv output it will be added to.
        grids.append(tf.tile(tf.reshape(cells,[-1,1,2]),[1,na,1]))
    return grids

# * * *