import torch
from transformers import LlamaModel, LlamaConfig
from gxl_ai_utils.utils import utils_file

# Smoke test: build a small LLaMA model for codec-token modeling, time the
# config/model construction, report the parameter count, and prepare sample
# inputs (embeddings and token ids) for a forward pass.
utils_file.hello_gxl()
utils_file.logging_print("do_test")
utils_file.logging_print("开始得到config")
timer = utils_file.GxlTimer()
# NOTE(fix): the original passed GPT-2 kwargs (n_ctx, activation_function,
# resid_pdrop, embd_pdrop, attn_pdrop, layer_norm_epsilon) that LlamaConfig
# does not recognize — PretrainedConfig stores them as attributes but the
# model never reads them, so the intended activation/dropout/epsilon were
# silently ignored. They are mapped to the real LlamaConfig parameters below.
config = LlamaConfig(
    # padding=0, text ids, codec ids, 2x BOS/EOS pairs
    vocab_size=4096 + 5,
    max_position_embeddings=100 * 60,  # longest supported sequence length (n_ctx was GPT-2-only; dropped)
    hidden_size=1024,  # hidden dim
    intermediate_size=2048,
    num_hidden_layers=20,  # number of layers
    num_attention_heads=8,
    hidden_act='gelu_new',  # was 'activation_function' (GPT-2 name, ignored by LLaMA)
    attention_dropout=0.1,  # was 'attn_pdrop' (GPT-2 name, ignored by LLaMA)
    # resid_pdrop / embd_pdrop have no LLaMA equivalent (no residual/embedding
    # dropout in the architecture) and were silently ignored; dropped.
    rms_norm_eps=1e-05,  # was 'layer_norm_epsilon'; LLaMA uses RMSNorm
    initializer_range=0.02,
    pad_token_id=4096,
    # Custom token ids below are not LlamaConfig fields; PretrainedConfig
    # keeps them as extra attributes for downstream code to read.
    bos_token_id_text=4097,
    eos_token_id_text=4098,
    bos_token_id_mel=4099,
    eos_token_id_mel=4100,
)
utils_file.logging_print("得到config完毕")
timer.stop_halfway()
utils_file.logging_print("开始得到模型")
model = LlamaModel(config=config)
utils_file.logging_print("得到模型完毕")
timer.stop_halfway()

utils_file.do_print_param_num_all(model)

# Sample inputs for a forward pass: (batch=17, seq=20, dim=1024) embeddings
# and (batch=17, seq=20) token ids in the codec id range [0, 4096).
input_embeds = torch.randn(17, 20, 1024)

input_idx = torch.randint(0, 4096, (17, 20))

