"""List every named submodule of a locally stored Qwen causal-LM checkpoint."""
import os

# Pin this process to GPU 0. This must be set before torch/transformers
# initialize CUDA, which is why it precedes the heavyweight imports below.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer  # noqa: F401 (AutoTokenizer kept for parity with original file)

# Path to the local Qwen checkpoint to inspect.
MODEL_DIR = './xindaoyuce/qwen'


def load_model(model_dir):
    """Load a causal-LM checkpoint from *model_dir* and return it.

    Uses device_map="auto" so accelerate places layers on whatever devices
    are visible, and bfloat16 to halve memory versus fp32.

    NOTE(review): trust_remote_code=True executes Python code shipped inside
    the checkpoint directory -- only use with checkpoints you trust.
    """
    print(f"Loading model from {model_dir}...")
    model = AutoModelForCausalLM.from_pretrained(
        model_dir,
        device_map="auto",
        torch_dtype=torch.bfloat16,
        trust_remote_code=True
    )
    print("Model loaded successfully.")
    return model


def print_module_names(model):
    """Print the dotted name of every submodule of *model*, one per line."""
    print("\n" + "="*50)
    print("Available modules in the Qwen model:")
    print("="*50)
    for name, _module in model.named_modules():
        if name:  # the root module has an empty name; skip it
            print(name)
    print("="*50)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer triggers a
    # multi-gigabyte model load as a side effect.
    print_module_names(load_model(MODEL_DIR))