# Silence Python-level warnings (e.g. deprecation noise from TF/Keras).
import warnings
warnings.filterwarnings("ignore")
# Suppress TensorFlow C++ backend logs ("3" = errors only); must be set
# before tensorflow is imported to take effect.
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="3"


# Project-local Swin building blocks (patch embedding, transformer block,
# patch merging). NOTE(review): Conv2D and Model are imported but only used
# in commented-out code below.
from nets.custom_layer import (PatchEmbeddingLayer,SwinTransformerBlockLayer,
                            PatchMergingLayer,)
from tensorflow.keras import Input,Model
from tensorflow.keras.layers import (Conv2D,)
import math
import tensorflow as tf


def _to_feature_map(x):
    """Reshape a token sequence (B, N, C) into a square feature map (B, s, s, C).

    Assumes N is a perfect square (guaranteed for square inputs whose side is
    divisible by the patch size).
    """
    _, num_patches, dims = x.shape
    side = int(math.sqrt(num_patches))
    return tf.reshape(x, [-1, side, side, dims])


def _swin_stage(x, num_heads, window_size, depth):
    """Run one Swin stage: `depth` pairs of (regular, shifted-window) blocks.

    Channel count and spatial resolution are derived from the static shape of
    `x` (B, N, C), so the same helper serves every stage.
    NOTE(review): each `depth` unit is a PAIR of blocks, so depth=2 means
    4 transformer blocks — this mirrors the original code exactly.
    """
    _, num_patches, dims = x.shape
    resolution = int(math.sqrt(num_patches))
    for _ in range(depth):
        # A regular-window block followed by a shifted-window block, as in
        # the Swin Transformer architecture.
        for shift in (0, window_size // 2):
            x = SwinTransformerBlockLayer(dim=dims,
                                          input_resolution=[resolution, resolution],
                                          num_heads=num_heads,
                                          window_size=window_size,
                                          shift_size=shift,
                                          mlp_ratio=4)(x)
    return x


def swin_encoder(input_shape=None, patch_size=4, window_size=7,
                 embed_dims=48, num_heads=8):
    """Build a 4-stage Swin Transformer encoder backbone.

    Parameters
    ----------
    input_shape : sequence of 3 ints, optional
        (H, W, C) of the input image; H must equal W. Defaults to
        [512, 512, 1].  NOTE(review): with the default window_size=7,
        512 // 4 = 128 is NOT divisible by 7, so the default raises —
        use window_size=8 for 512-px inputs (see original comments).
    patch_size : int
        Side of the square patch used by the patch embedding (default 4).
    window_size : int
        Attention window side (default 7, suited to 224-px inputs).
    embed_dims : int
        Channel width after patch embedding; must be divisible by num_heads.
    num_heads : int
        Attention heads used by every block.

    Returns
    -------
    (inputs, feat2, feat3, feat4, feat5)
        The Keras Input tensor and the four stage outputs reshaped to
        (B, s, s, C) feature maps (taken BEFORE each patch merging), for use
        as skip connections in a segmentation decoder.

    Raises
    ------
    ValueError
        If the input is not square, not divisible by patch_size, the patch
        grid is not divisible by window_size, or embed_dims is not divisible
        by num_heads.
    """
    # Avoid a mutable default argument.
    if input_shape is None:
        input_shape = [512, 512, 1]

    # All later token->map reshapes use sqrt(N), which requires H == W.
    if input_shape[0] != input_shape[1]:
        raise ValueError("swin_encoder requires a square input (H == W)")
    if input_shape[0] % patch_size != 0:
        raise ValueError("patch_embedding不能整除")
    if (input_shape[0] // patch_size) % window_size != 0:
        raise ValueError("划分窗口不能整除")
    if embed_dims % num_heads != 0:
        raise ValueError("embed_dims不能整除num_heads")

    inputs = Input(shape=input_shape)

    # Patch embedding: (H, W, C) image -> (H/p * W/p) tokens of embed_dims.
    x = PatchEmbeddingLayer(img_size=input_shape[:-1],
                            patch_size=[patch_size, patch_size],
                            embed_dims=embed_dims)(inputs)

    # Four stages with pair-depths (2, 2, 6, 2); patch merging (2x spatial
    # downsample, 2x channel widen) after every stage except the last.
    # Each stage's token output is also reshaped to a square feature map
    # and collected as a decoder skip connection.
    feats = []
    for depth, merge_after in ((2, True), (2, True), (6, True), (2, False)):
        x = _swin_stage(x, num_heads, window_size, depth)
        feats.append(_to_feature_map(x))
        if merge_after:
            _, num_patches, dims = x.shape
            side = int(math.sqrt(num_patches))
            x = PatchMergingLayer([side, side], dims)(x)

    feat2, feat3, feat4, feat5 = feats
    return inputs, feat2, feat3, feat4, feat5


        
# model = swin_model(input_shape=[224,224,3],num_classes=3)
# model.summary()
# print(f'layer length={len(model.layers)}')

