"""Smoke test: symbolically trace a small LLaMA model with torch.fx.

Builds a 2-layer ``LlamaForCausalLM`` from an otherwise-default config and
traces it with the Hugging Face fx tracer, driving the model through
``inputs_embeds`` (pre-computed embeddings) instead of ``input_ids``.
Prints the eager module and the traced ``GraphModule`` for inspection.
"""
import torch
from transformers import LlamaConfig, LlamaForCausalLM
from transformers.utils.fx import symbolic_trace


def main() -> None:
    """Build a tiny Llama model, fx-trace it, and print both graphs."""
    # Shrink to 2 decoder layers so construction and tracing stay fast;
    # every other LlamaConfig default (hidden_size, heads, ...) is kept.
    cfg = LlamaConfig(num_hidden_layers=2)
    model = LlamaForCausalLM(cfg)
    print(model)

    batch_size = 1
    sequence_length = 10
    hidden_dim = cfg.hidden_size  # embeddings must match the model width

    # Feed pre-computed embeddings rather than token ids; the HF tracer
    # specializes the traced graph on exactly the input names it is given.
    dummy_input_embeds = {
        "inputs_embeds": torch.rand(batch_size, sequence_length, hidden_dim),
    }

    traced = symbolic_trace(model, input_names=list(dummy_input_embeds.keys()))
    print(traced)


if __name__ == "__main__":
    main()