from transformers import AutoModelForImageClassification, BlipImageProcessor
from huggingface_hub import hf_hub_download
from safetensors import safe_open
from PIL import Image
import sys
import torch

# Load the watermark detector and its image preprocessor.
image_processor = BlipImageProcessor.from_pretrained("imatag/stable-signature-bzh-detector-resnet18")
model = AutoModelForImageClassification.from_pretrained("imatag/stable-signature-bzh-detector-resnet18")

# Download the calibration logits (a sorted tensor, as torch.searchsorted
# requires) used to convert the detector's raw logit into an approximate p-value.
calibration = hf_hub_download("imatag/stable-signature-bzh-detector-resnet18", filename="calibration.safetensors")
with safe_open(calibration, framework="pt") as f:
    calibration_logits = f.get_tensor("logits")

# Preprocess the image passed on the command line and run the detector.
img = Image.open(sys.argv[1]).convert("RGB")
inputs = image_processor(img, return_tensors="pt")
with torch.no_grad():
    p = model(**inputs).logits[..., 0]
    # The (1-based) rank of the logit among the calibration logits, divided
    # by their count, gives the approximate p-value.
    p = (1 + torch.searchsorted(calibration_logits, p)) / calibration_logits.shape[0]
    p = p.item()

print(f"approximate p-value: {p}")