from functools import reduce
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.initializers import RandomNormal, TruncatedNormal
from tensorflow.keras.regularizers import L2

def autopad(x, k, p=None, name='autopad', node=0):
    """Zero-pad `x` so a k-sized VALID conv keeps 'same' spatial size.

    Args:
        x: input tensor (N,H,W,C).
        k: kernel size — an int or a per-axis iterable.
        p: explicit padding override — an int or per-axis iterable; None
           derives k//2 'same'-style padding from the kernel size.
    Returns: the padded tensor.
    """
    if name != autopad.__name__:
        name = "%s.%s" % (name, autopad.__name__)

    if p is not None:
        # Caller supplied the padding explicitly.
        if isinstance(p, int):
            pad = [(p, p), (p, p)]
        else:
            pad = [(px, px) for px in p]
    else:
        # Derive symmetric 'same' padding from the kernel size.
        if isinstance(k, int):
            pad = [(k // 2, k // 2), (k // 2, k // 2)]
        else:
            pad = [(kx // 2, kx // 2) for kx in k]

    return layers.ZeroPadding2D(pad, name="%d.%s.Pad" % (node, name))(x)

def DWConv(x, c1, c2, k=1, s=1, act=True, name='DWConv', node=0):
    """Depth-wise-style convolution: a grouped Conv with groups = gcd(c1, c2)."""
    if name != DWConv.__name__:
        name = "%s.%s" % (name, DWConv.__name__)

    groups = math.gcd(c1, c2)
    return Conv(x, c1, c2, k, s, g=groups, act=act, name=name, node=node)

def Conv(x, c1, c2, k=1, s=1, p=None, g=1, act=True, name='Conv', node=0):
    """Standard YOLOv5 conv block: autopad -> Conv2D -> BatchNorm -> activation.

    Args:
        x: input tensor (N,H,W,C).
        c1: input channel count (kept for signature parity with the torch port).
        c2: output channel count.
        k, s: kernel size and stride (ints).
        p: explicit padding override for autopad (None -> k//2 'same' padding).
        g: number of convolution groups.
        act: True -> SiLU; a layers.Activation instance -> used as-is;
             any falsy value -> identity.
    Returns: the activated feature map.
    """
    if name != Conv.__name__:
        name = "%s.%s" % (name, Conv.__name__)

    # BUG FIX: the original tested truthiness first, so a caller-supplied
    # layers.Activation instance (which is truthy) was silently replaced by
    # SiLU and the isinstance branch was unreachable. Check the instance
    # case before the boolean one.
    if isinstance(act, layers.Activation):
        activation = act
    elif act:
        activation = layers.Activation('swish', name="%d.%s.SiLU" % (node, name))
    else:
        activation = layers.Activation('linear', name='%d.%s.Identy' % (node, name))

    y = autopad(x, k, p, name=name, node=node)
    # NOTE(review): use_bias=True directly before BatchNorm is redundant and
    # upstream YOLOv5 uses bias=False; kept as-is to preserve compatibility
    # with existing checkpoints — confirm before changing.
    y = layers.Conv2D(c2, (k, k), (s, s), padding="VALID", groups=g,
                      use_bias=True, name="%d.%s" % (node, name))(y)
    y = layers.BatchNormalization(momentum=0.9, epsilon=0.00001,
                                  name="%d.%s.BN" % (node, name))(y)
    out = activation(y)

    return out

def TransformerLayer(x, c, num_heads, name='TransformerLayer', node=0):
    """Single transformer layer (LayerNorm omitted, as in YOLOv5's design).

    Args:
        x: input tensor whose last dimension is the embedding width c.
        c: width of the q/k/v projections and the feed-forward Dense layers.
        num_heads: attention head count.
    Returns: a tensor of the same shape as x (two residual adds).
    """
    if name != TransformerLayer.__name__:
        name = "%s.%s" % (name, TransformerLayer.__name__)

    q = layers.Dense(c, use_bias=False, name="%d.%s.q" % (node, name))
    k = layers.Dense(c, use_bias=False, name="%d.%s.k" % (node, name))
    v = layers.Dense(c, use_bias=False, name="%d.%s.v" % (node, name))
    ma = layers.MultiHeadAttention(num_heads=num_heads, key_dim=c, name="%d.%s.ma" % (node, name))
    fc1 = layers.Dense(c, use_bias=False, name="%d.%s.fc1" % (node, name))
    fc2 = layers.Dense(c, use_bias=False, name="%d.%s.fc2" % (node, name))

    # BUG FIX: keras MultiHeadAttention is called as (query, value, key) and
    # returns a tensor — not torch's (output, weights) tuple. The original
    # ma(q(x), k(x), v(x))[0] therefore both swapped key/value and sliced
    # away the first dimension of the attention output before the residual.
    y = ma(query=q(x), value=v(x), key=k(x)) + x
    out = fc2(fc1(y)) + y

    return out

def TransformerBlock(x, c1, c2, num_heads, num_layers, name='TransformerBlock', node=0):
    """Vision-transformer block: optional channel-matching Conv, flatten the
    spatial grid into a token sequence, add a learned (Dense) position term,
    run `num_layers` TransformerLayers, then restore the (N,H,W,c2) layout.

    NOTE(review): the sequence is transposed to (HW, N, C) before the
    TransformerLayers — that mirrors torch's (seq, batch, C) layout, but
    keras MultiHeadAttention treats the FIRST axis as batch, so attention
    would mix across the batch axis N rather than across the HW positions.
    Confirm this layout is intentional for the layers used downstream.

    NOTE(review): tf.reshape uses the static batch size n from y.shape; this
    fails when the batch dimension is dynamic (None) — verify models are
    built with a fixed batch size.
    """
    if name != TransformerBlock.__name__: name = "%s.%s"%(name, TransformerBlock.__name__)

    # Learned position embedding, applied token-wise.
    linear = layers.Dense(c2, name="%d.%s.linear"%(node, name))

    # Match channel counts first if necessary.
    y = Conv(x, c1, c2, name=name, node=node) if c1 != c2 else x #! (N,H,W,C)
    n, h, w, _ = y.shape
    p = tf.reshape(y,[n,-1,c2]) #! (N,HW,C) flatten spatial grid to tokens
    p = tf.transpose(p,[1,0,2]) #! (HW,N,C) torch-style seq-first layout
    e = linear(p)               #! (HW,N,C) position embedding term
    y = p + e                   #! (HW,N,C)

    for i in range(num_layers):
        y = TransformerLayer(y, c2, num_heads, name="%d.%s"%(i, name), node=node) #! (HW,N,C)

    y = tf.expand_dims(y,3)        #! (HW,N,C,1)
    y = tf.transpose(y,[1,0,2,3])  #! (N,HW,C,1)
    out = tf.reshape(y,[n,h,w,c2]) #! back to (N,H,W,c2)

    return out

def Bottleneck(x, c1, c2, shortcut=True, g=1, e=0.5, name='Bottleneck', node=0):
    """Standard bottleneck: 1x1 Conv -> 3x3 (grouped) Conv, with an optional
    residual add that is applied only when requested AND c1 == c2.

    e is the hidden-channel expansion ratio relative to c2.
    """
    if name != Bottleneck.__name__:
        name = "%s.%s" % (name, Bottleneck.__name__)

    hidden = int(c2 * e)
    use_residual = shortcut and c1 == c2

    h1 = Conv(x, c1, hidden, 1, 1, name="%s.cv1" % name, node=node)
    h2 = Conv(h1, hidden, c2, 3, 1, g=g, name="%s.cv2" % name, node=node)

    if use_residual:
        return x + h2
    return h2

def BottleneckCSP(x, c1, c2, n=1, shortcut=True, g=1, e=0.5, name='BottleneckCSP', node=0):
    """CSP bottleneck: the input is split into a stacked-bottleneck branch and
    a plain 1x1-conv bypass, concatenated, normalized + LeakyReLU'd, then
    fused by a final 1x1 Conv block.

    n is the number of stacked Bottlenecks; e scales the hidden width.
    """
    if name != BottleneckCSP.__name__:
        name = "%s.%s" % (name, BottleneckCSP.__name__)

    c_ = int(c2 * e)

    # Branch 1: 1x1 Conv block, n bottlenecks, then a bare 1x1 conv (cv3).
    branch = Conv(x, c1, c_, 1, 1, name="%s.cv1" % name, node=node)
    for i in range(n):
        branch = Bottleneck(branch, c_, c_, shortcut, g, e=1.0,
                            name="%d.%s" % (i, name), node=node)
    branch = layers.Conv2D(c_, (1, 1), (1, 1), use_bias=False,
                           name="%d.%s.cv3" % (node, name))(branch)

    # Branch 2: bare 1x1 conv bypass (cv2) straight from the input.
    bypass = layers.Conv2D(c_, (1, 1), (1, 1), use_bias=False,
                           name="%d.%s.cv2" % (node, name))(x)

    merged = layers.Concatenate(name="%d.%s.Concat" % (node, name))([branch, bypass])
    merged = layers.BatchNormalization(momentum=0.9, epsilon=0.00001,
                                       name="%d.%s.BN" % (node, name))(merged)
    merged = layers.LeakyReLU(alpha=0.1, name="%d.%s.LeakyReLU" % (node, name))(merged)

    return Conv(merged, 2 * c_, c2, 1, 1, name="%s.cv4" % name, node=node)
    
def C3(x, c1, c2, n=1, shortcut=True, g=1, e=0.5, name="C3", node=0):
    """C3 CSP block: two parallel 1x1 Conv blocks, n stacked Bottlenecks on
    the first branch, concatenation, then a fusing 1x1 Conv block.
    """
    if name != C3.__name__:
        name = "%s.%s" % (name, C3.__name__)

    c_ = int(c2 * e)

    main = Conv(x, c1, c_, 1, 1, name="%s.cv1" % name, node=node)
    side = Conv(x, c1, c_, 1, 1, name="%s.cv2" % name, node=node)

    for i in range(n):
        main = Bottleneck(main, c_, c_, shortcut, g, e=1.0,
                          name="m.%d.%s" % (i, name), node=node)

    fused = layers.Concatenate(name="%d.%s.Concat" % (node, name))([main, side])
    return Conv(fused, 2 * c_, c2, 1, name="%s.cv3" % name, node=node)

def C3TR(x, c1, c2, n=1, shortcut=True, g=1, e=0.5, name="C3TR", node=0):
    """C3 variant whose bottleneck stack is replaced by a TransformerBlock
    with 4 attention heads and n transformer layers.

    shortcut and g are accepted for signature parity with C3 but are not
    used by the transformer branch.
    """
    if name != C3TR.__name__:
        name = "%s.%s" % (name, C3TR.__name__)

    c_ = int(c2 * e)

    y1 = Conv(x, c1, c_, 1, 1, name="%s.cv1" % name, node=node)
    y2 = Conv(x, c1, c_, 1, 1, name="%s.cv2" % name, node=node)

    # BUG FIX: the original passed the literal string "m.%s" (the % name
    # interpolation was missing), so every C3TR instance produced identical,
    # colliding layer names.
    y1 = TransformerBlock(y1, c_, c_, 4, n, name="m.%s" % name, node=node)

    y = layers.Concatenate(name="%d.%s.Concat" % (node, name))([y1, y2])
    out = Conv(y, 2 * c_, c2, 1, name="%s.cv3" % name, node=node)

    return out

def SPP(x, c1, c2, k=(5, 9, 13), name="SPP", node=0):
    """Spatial pyramid pooling: 1x1 Conv, parallel stride-1 SAME max-pools of
    sizes k, concat (input included), then a fusing 1x1 Conv block.
    """
    if name != SPP.__name__:
        name = "%s.%s" % (name, SPP.__name__)

    c_ = c1 // 2

    y = Conv(x, c1, c_, 1, 1, name="%s.cv1" % name, node=node)

    pooled = []
    for i, kx in enumerate(k):
        pool = layers.MaxPool2D(pool_size=(kx, kx), strides=(1, 1), padding="SAME",
                                name="%d.%d.%s.MaxPool" % (node, i, name))
        pooled.append(pool(y))

    z = layers.Concatenate(name="%d.%s.Concat" % (node, name))([y] + pooled)
    return Conv(z, c_ * (len(k) + 1), c2, 1, 1, name="%s.cv2" % name, node=node)

def Focus(x, c1, c2, k=1, s=1, p=None, g=1, act=None, name="Focus", node=0):
    """Focus (space-to-depth) layer: sample every other pixel at the four
    phase offsets, stack them on the channel axis (H/2, W/2, 4*C), then run
    a Conv block.

    NOTE(review): act defaults to None (identity activation) here, while
    upstream YOLOv5's Focus conv defaults to SiLU — confirm this divergence
    is intentional before relying on the default.
    """
    if name != Focus.__name__:
        name = "%s.%s" % (name, Focus.__name__)

    patches = [
        x[:, ::2, ::2, :],
        x[:, 1::2, ::2, :],
        x[:, ::2, 1::2, :],
        x[:, 1::2, 1::2, :],
    ]
    y = layers.Concatenate(name="%d.%s.Concat" % (node, name))(patches)

    return Conv(y, c1 * 4, c2, k, s, p, g, act, name=name, node=node)

def Detect(feats, imgsz, nc=80, ch=(), strides=(), anchors=(), node=0, name="Detect"):
    """
        YOLOv5 detection head: one 1x1 prediction conv per feature level,
        then decode xy/wh back to input-image (imgsz) scale.

        feats: n,h,w,c out of conv — one feature map per detection level
        imgsz: (w,h) network input size
        ch: filters of inputs (per level; defaults to [128, 256, 512])
        strides: downsample factor per level (defaults to [8, 16, 32])
        anchors: (nl, na, 2) anchor sizes in input-image pixels; any
            non-ndarray value falls back to the built-in COCO anchors

        Returns a list of per-level tensors shaped (B, na, h*w, nc+5).
        NOTE(review): only xy/wh pass through sigmoid here; objectness and
        class scores (y[..., 4:]) are emitted as raw logits — confirm a
        downstream step applies sigmoid to them.
    """
    if name != Detect.__name__: name = "%s.%s"%(name, Detect.__name__)

    if not ch: ch = [128, 256, 512]
    if not strides: strides = [8, 16, 32]
    if not isinstance(anchors, np.ndarray):
        # Default COCO anchors: one row per level, flattened (w,h) pairs.
        anchors = [[ 10,13,  16, 30,  33, 23],[ 30,61,  62, 45,  59,119],[116,90, 156,198, 373,326]]
        nl = len(anchors)
        na = len(anchors[0]) // 2
        anchors = np.array(anchors).reshape([nl, na, 2])
    else: nl, na, _ = anchors.shape
    assert len(feats) == len(strides) == nl == 3

    no = nc + 5  # outputs per anchor: xywh + objectness + nc class scores
    grid, anchor_grid = _make_grid(imgsz, nl, na, strides=strides, anchors=anchors)

    outs = []
    for (i, in_filter) in enumerate(ch):
        # Per-level 1x1 prediction conv; decoding happens below, not here.
        y = layers.Conv2D(na*no, (1,1), (1,1), \
                          name="%d.m.%d.%s"%(node, i, name))(feats[i]) #! (B,h,w,na*no)
        
        w, h = imgsz[0]//strides[i], imgsz[1]//strides[i]
        y = layers.Reshape([h*w, na, no], name="%d.%d.%s.Reshape"%(node, i, name))(y)   #! (B, h*w, na, no)
        y = tf.transpose(y,[0,2,1,3])                                                   #! (B, na, h*w, no)

        #! Following some versions of the official yolov5 repo: xy and wh are
        #! decoded back to input-image (imgsz) scale here.
        xy = (tf.sigmoid(y[..., 0:2]) * 2 - 0.5 + grid[i]) * strides[i] #! restore box centers to imgsz scale
        wh = (tf.sigmoid(y[..., 2:4]) * 2) ** 2 * anchor_grid[i]        #! anchor sizes are measured at imgsz scale
        out = layers.Concatenate(name="%d.%d.%s.Concat"%(node, i, name))([xy,wh,y[..., 4:]])

        outs.append(out)

    return outs

def _make_grid(imgsz, nl, na, strides=(), anchors=(), dtype=tf.float32):
    """Build per-level cell-offset grids and anchor grids for Detect.

    Args:
        imgsz: (w, h) network input size.
        nl, na: number of levels / anchors per level (recomputed when the
            default anchors are used).
        strides: per-level downsample factors (defaults to [8, 16, 32]).
        anchors: (nl, na, 2) ndarray of anchor sizes; any non-ndarray value
            falls back to the built-in COCO anchors.
    Returns:
        (grids, anchor_grids) — per level, a (na, ny*nx, 2) tensor of cell
        x/y offsets and a (na, 1, 2) tensor of anchor sizes.
    """
    if not strides:
        strides = [8, 16, 32]
    if not isinstance(anchors, np.ndarray):
        # Fall back to the default COCO anchors (flattened (w,h) pairs).
        anchors = [[ 10,13,  16, 30,  33, 23],[ 30,61,  62, 45,  59,119],[116,90, 156,198, 373,326]]
        nl = len(anchors)
        na = len(anchors[0]) // 2
        anchors = np.array(anchors).reshape([nl, na, 2])

    anchors = tf.convert_to_tensor(anchors, dtype=dtype)

    grids = []
    anchor_grids = []
    for i in range(nl):
        nx = imgsz[0] // strides[i]
        ny = imgsz[1] // strides[i]
        # tf.meshgrid(cols, rows): both outputs have shape (ny, nx).
        xv, yv = tf.meshgrid(tf.range(nx, dtype=dtype), tf.range(ny, dtype=dtype))
        cell_xy = tf.stack([xv, yv], axis=-1)                #! (ny, nx, 2)
        cell_xy = tf.tile(cell_xy[None], [na, 1, 1, 1])      #! (na, ny, nx, 2)
        grids.append(tf.reshape(cell_xy, [na, ny * nx, 2]))  #! (na, ny*nx, 2)

        anchor_grids.append(anchors[i][:, None])             #! (na, 1, 2)

    return grids, anchor_grids