import torch
from torch import nn

from transformer import TransformerEncoder
from gxl_ai_utils.utils import utils_file
import os
import sys

# Directory containing this script.
current_dir = os.path.dirname(os.path.abspath(__file__))

# Append the PARENT of this script's directory to sys.path so sibling modules
# (zipformer, scaling) can be imported below.
# NOTE(review): the `transformer` import above runs BEFORE this append —
# confirm it resolves without this path tweak, or move this block up.
sys.path.append(os.path.dirname(current_dir))
from zipformer import Zipformer2
from scaling import ScheduledFloat


def _to_int_tuple(s: str):
    return tuple(map(int, s.split(",")))


def dotest1():
    """Smoke-test the project TransformerEncoder and a Zipformer2 encoder.

    For each model: print its structure and parameter count, push one small
    random batch through it, and print the resulting shapes.
    """
    small_enc = TransformerEncoder(
        input_size=128,
        output_size=128,
        linear_units=258,
        num_blocks=4,
    )
    print(small_enc)
    utils_file.print_model_size(small_enc)

    feats = torch.randn(4, 10, 128)
    feat_lens = torch.tensor([10, 8, 9, 7])
    enc_out, enc_mask = small_enc(feats, feat_lens)
    print(enc_out.shape)
    print(enc_mask.shape)
    print(enc_mask)

    zipformer = Zipformer2(
        output_downsampling_factor=2,
        downsampling_factor=_to_int_tuple("1,2,4,8,4,2"),
        num_encoder_layers=_to_int_tuple("2,2,3,4,3,2"),
        encoder_dim=_to_int_tuple("192,256,384,512,384,256"),
        encoder_unmasked_dim=_to_int_tuple("192,192,256,256,256,192"),
        query_head_dim=_to_int_tuple("32"),
        pos_head_dim=_to_int_tuple("4"),
        value_head_dim=_to_int_tuple("12"),
        pos_dim=48,
        num_heads=_to_int_tuple("4,4,4,8,4,4"),
        feedforward_dim=_to_int_tuple("512,768,1024,1536,1024,768"),
        cnn_module_kernel=_to_int_tuple("31,31,15,15,15,31"),
        warmup_batches=4000.0,
        causal=False,
        chunk_size=_to_int_tuple("16,32,64,-1"),
        left_context_frames=_to_int_tuple("64,128,256,-1"),
    )
    print(zipformer)
    utils_file.print_model_size(zipformer)

    # NOTE(review): feature dim here is 80 while encoder_dim[0] is 192 —
    # confirm Zipformer2 projects/embeds the input internally.
    zip_feats = torch.randn(4, 10, 80)
    zip_feat_lens = torch.tensor([10, 8, 9, 7])
    zip_out, zip_out_lens = zipformer(zip_feats, zip_feat_lens)
    print(zip_out.shape)
    print(zip_out_lens.shape)
    print(zip_out_lens)


if __name__ == "__main__":
    # Plain PyTorch TransformerEncoder as a size/shape baseline.
    # batch_first=True so the (batch, seq, feature) tensor below is read
    # with the intended axis order (the default is (seq, batch, feature)).
    transformer = nn.TransformerEncoder(
        nn.TransformerEncoderLayer(
            128, 4, dim_feedforward=int(128 * 1.5), batch_first=True
        ),
        4,
    )
    print(transformer)
    utils_file.print_model_size(transformer)
    input_x = torch.randn(4, 10, 128)  # (batch, seq, feature)
    input_x_lens = torch.tensor([10, 8, 9, 7])
    # nn.TransformerEncoder.forward takes no length tensor; its second
    # positional arg is a square attention mask, so passing the lengths
    # directly raises a shape error. Build a boolean key-padding mask
    # instead: True marks padded (ignored) positions past each length.
    padding_mask = torch.arange(10).unsqueeze(0) >= input_x_lens.unsqueeze(1)
    output_x = transformer(input_x, src_key_padding_mask=padding_mask)
    print(output_x.shape)