import torch.nn as nn
from transformers import AutoModelForCausalLM

model_name_or_path = "Qwen3-0.6B"  # or a local path to the model
# Make sure trust_remote_code=True, since Qwen models typically require it.
base_model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    trust_remote_code=True
    # If GPU memory is insufficient for the full model, load it on CPU first,
    # or explore the structure with a smaller model instead.
    # device_map="cpu"
)

# Print every named submodule so the full model hierarchy can be inspected.
print("All named modules in the model:")
for name, module in base_model.named_modules():
    print(name)

# Go one step further: narrow the listing down to modules that are typical
# LoRA targets. Usually LoRA is applied to nn.Linear layers or specific
# attention-related layers.
print("\nPotential target modules (e.g., nn.Linear layers):")
for name, module in base_model.named_modules():
    if isinstance(module, nn.Linear):
        print(f"Layer Name: {name}, Type: {type(module)}")
    # Alternatively, filter by keywords in the module name, such as
    # 'attn', 'mlp', 'query', 'key', 'value', 'dense', 'proj', etc.
    # if 'attn' in name or 'mlp' in name or 'Wqkv' in name or 'fc' in name or 'proj' in name:
    #     print(f"Layer Name: {name}, Type: {type(module)}")

