#!/usr/bin/env python
"""
list_modules.py
────────────────────────────────────────────────────────────
Print (and optionally save) the dotted names of **all** sub-modules
inside a PyTorch model. Handy for locating the correct layer name
for Grad-CAM, feature hooks, etc.

USAGE
-----
edit MODEL_SOURCE and MODEL_TYPE below, then:

    python list_modules.py

Outputs:
    • console — first `LIMIT` names (to keep logs short)
    • file    — full list written to `modules_<model>.txt`
"""
from __future__ import annotations

import argparse
import pathlib
import sys

import torch
from transformers import AutoModel
# ───────────── CONFIG ───────────────────────────────────────────────
# Hugging Face hub id (MODEL_TYPE == "huggingface") or a local
# TorchScript file path (MODEL_TYPE == "torchscript").
MODEL_SOURCE = "haywoodsloan/ai-image-detector-deploy"
MODEL_TYPE = "huggingface"
LIMIT = 2000  # how many lines to print to stdout (None = all)
# ─────────────────────────────────────────────────────────────────────
def load_model(src: str, src_type: str) -> torch.nn.Module:
    """Load a model from the Hugging Face hub or a TorchScript file.

    Parameters
    ----------
    src : hub id (e.g. ``"org/name"``) or a local TorchScript path.
    src_type : ``"huggingface"`` or ``"torchscript"``.

    Returns
    -------
    The loaded model, switched to ``eval()`` mode.

    Raises
    ------
    ValueError
        If ``src_type`` is not one of the two supported values.
    """
    if src_type == "huggingface":
        model = AutoModel.from_pretrained(src)
    elif src_type == "torchscript":
        model = torch.jit.load(src)
    else:
        raise ValueError("MODEL_TYPE must be 'huggingface' or 'torchscript'")
    model.eval()  # inference mode: freezes dropout / batch-norm behavior
    return model
def dump_module_names(model: torch.nn.Module, | |
out_file: pathlib.Path, | |
limit: int | None = None): | |
names = [n for n, _ in model.named_modules()] # includes root '' at idx 0 | |
total = len(names) | |
print(f"\nβΆ total {total} subβmodules found\n") | |
for idx, name in enumerate(names): | |
if limit is None or idx < limit: | |
print(f"{idx:4d}: {name}") | |
out_file.write_text("\n".join(names), encoding="utfβ8") | |
print(f"\nβΆ wrote full list to {out_file}") | |
def main() -> None:
    """Load the configured model and dump its sub-module names."""
    model = load_model(MODEL_SOURCE, MODEL_TYPE)
    # Derive the output filename from the last path component of the
    # source id, e.g. "org/name.v2" -> modules_name_v2.txt
    stem = MODEL_SOURCE.split("/")[-1].replace(".", "_")
    dump_module_names(model, pathlib.Path(f"modules_{stem}.txt"), LIMIT)


if __name__ == "__main__":
    main()