#
from functools import partial
from typing import Dict, Optional

import torch
import torch.nn as nn

from tost import VisionTransformer, AttentionTSSA, ConvPatchEmbed

class WbtApp(object):
    """Driver for white-box Transformer (TSSA) experiments.

    Each study builds a model/module from ``tost`` and runs a dummy forward
    pass on a zero image batch, printing the resulting shapes.
    """

    def __init__(self):
        # Fully-qualified class path, used as an app identifier.
        self.name = 'apps.wbt.wbt_app.WbtApp'

    @staticmethod
    def startup(params: Optional[Dict] = None) -> None:
        """Entry point: print the banner and run the enabled study.

        Args:
            params: optional config dict forwarded to the study methods
                (currently unused by them).
        """
        # Avoid the mutable-default-argument pitfall: never share a dict
        # instance across calls; build a fresh one per call instead.
        params = {} if params is None else params
        print('白盒Transformer研究 v0.0.2')  # banner; no placeholders, so plain print
        # Toggle between studies here:
        # WbtApp.do_patch_embed(params=params)
        WbtApp.study_VisionTransformer(params=params)

    @staticmethod
    def do_patch_embed(params: Optional[Dict] = None) -> None:
        """Run ConvPatchEmbed on a zero batch and print token/grid shapes.

        Args:
            params: optional config dict; currently unused.
        """
        params = {} if params is None else params
        img_size = 224
        embed_dim = 384
        patch_size = 16
        patch_embed = ConvPatchEmbed(img_size=img_size, embed_dim=embed_dim,
                                     patch_size=patch_size)
        # Dummy batch: 2 RGB images of img_size x img_size.
        x = torch.zeros((2, 3, img_size, img_size), dtype=torch.float32)
        # ConvPatchEmbed returns the token tensor plus the patch-grid size
        # (Hp, Wp) -- presumably img_size // patch_size each; confirm in tost.
        z, (Hp, Wp) = patch_embed(x)
        print(f'z: {z.shape}; Hp: {type(Hp)} {Hp}, Wp: {type(Wp)} {Wp};')

    @staticmethod
    def study_VisionTransformer(params: Optional[Dict] = None) -> None:
        """Build a VisionTransformer with TSSA attention and print forward shapes.

        Args:
            params: optional config dict; currently unused.
        """
        params = {} if params is None else params
        kwargs = {'num_classes': 100, 'drop_rate': 0.0, 'drop_path_rate': 0.05}
        model = VisionTransformer(
            patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True,
            attn_layer=AttentionTSSA, **kwargs
        )
        img_size = 224
        x = torch.zeros((2, 3, img_size, img_size), dtype=torch.float32)
        # NOTE(review): the model apparently returns a pair of tensors --
        # confirm against tost.VisionTransformer.forward.
        x1, x2 = model(x)
        print(f'x1: {type(x1)} {x1.shape}; x2: {type(x2)} {x2.shape};')
