---
language: en
license: mit
arxiv: 2403.14852
---
<div align="center">
<h1>
CVLFace Pretrained Model (ADAFACE VIT BASE KPRPE WEBFACE4M)
</h1>
</div>
<p align="center">
🌎 <a href="https://github.com/mk-minchul/CVLface" target="_blank">GitHub</a> • 🤗 <a href="https://huggingface.co/minchul" target="_blank">Hugging Face</a>
</p>
-----
## 1. Introduction
Model Name: ADAFACE VIT BASE KPRPE WEBFACE4M
Related Paper: KeyPoint Relative Position Encoding for Face Recognition (https://arxiv.org/abs/2403.14852)
Please cite the original paper and follow the license of the training dataset.
## 2. Quick Start
```python
from transformers import AutoModel
from huggingface_hub import hf_hub_download
import shutil
import os
import torch
import sys


# helper function to download a huggingface repo to a local directory
def download(repo_id, path, HF_TOKEN=None):
    os.makedirs(path, exist_ok=True)
    files_path = os.path.join(path, 'files.txt')
    if not os.path.exists(files_path):
        hf_hub_download(repo_id, 'files.txt', token=HF_TOKEN, local_dir=path, local_dir_use_symlinks=False)
    with open(files_path, 'r') as f:
        files = f.read().split('\n')
    for file in [f for f in files if f] + ['config.json', 'wrapper.py', 'model.safetensors']:
        full_path = os.path.join(path, file)
        if not os.path.exists(full_path):
            hf_hub_download(repo_id, file, token=HF_TOKEN, local_dir=path, local_dir_use_symlinks=False)


# helper function to load a model from a local directory with trust_remote_code
def load_model_from_local_path(path, HF_TOKEN=None):
    cwd = os.getcwd()
    os.chdir(path)
    sys.path.insert(0, path)
    model = AutoModel.from_pretrained(path, trust_remote_code=True, token=HF_TOKEN)
    os.chdir(cwd)
    sys.path.pop(0)
    return model


# helper function to download a huggingface repo and load the model from it
def load_model_by_repo_id(repo_id, save_path, HF_TOKEN=None, force_download=False):
    if force_download:
        if os.path.exists(save_path):
            shutil.rmtree(save_path)
    download(repo_id, save_path, HF_TOKEN)
    return load_model_from_local_path(save_path, HF_TOKEN)


if __name__ == '__main__':
    HF_TOKEN = 'YOUR_HUGGINGFACE_TOKEN'
    path = os.path.expanduser('~/.cvlface_cache/minchul/cvlface_adaface_vit_base_kprpe_webface4m')
    repo_id = 'minchul/cvlface_adaface_vit_base_kprpe_webface4m'
    model = load_model_by_repo_id(repo_id, path, HF_TOKEN)

    # input is an RGB image normalized to [-1, 1]
    from torchvision.transforms import Compose, ToTensor, Normalize
    from PIL import Image
    img = Image.open('path/to/image.jpg')
    trans = Compose([ToTensor(), Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    input = trans(img).unsqueeze(0)  # shape: (1, 3, 112, 112)

    # KPRPE also takes keypoint locations as input, so run the face aligner first
    # (use a separate cache directory so the aligner repo does not collide with the model repo)
    aligner_path = os.path.expanduser('~/.cvlface_cache/minchul/cvlface_DFA_mobilenet')
    aligner = load_model_by_repo_id('minchul/cvlface_DFA_mobilenet', aligner_path, HF_TOKEN)
    aligned_x, orig_ldmks, aligned_ldmks, score, thetas, bbox = aligner(input)
    keypoints = orig_ldmks  # shape: (1, 5, 2), five facial landmark coordinates
    out = model(input, keypoints)
```
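The forward pass returns an identity embedding for the input face. As a minimal sketch of how such an embedding is typically used, the snippet below continues the quick-start script above (it reuses `model`, `aligner`, and `trans`; the image paths are placeholders) and compares two faces with cosine similarity:

```python
import torch.nn.functional as F
from PIL import Image

def get_embedding(image_path):
    # reuse model, aligner, and trans from the quick-start script above
    x = trans(Image.open(image_path)).unsqueeze(0)  # shape: (1, 3, 112, 112)
    aligned_x, orig_ldmks, *_ = aligner(x)          # detect the five facial keypoints
    return model(x, orig_ldmks)                     # identity embedding

emb1 = get_embedding('path/to/face1.jpg')  # placeholder paths
emb2 = get_embedding('path/to/face2.jpg')
similarity = F.cosine_similarity(emb1, emb2).item()  # higher means more likely the same person
print(f'cosine similarity: {similarity:.4f}')
```

A verification threshold on this similarity is dataset-dependent; it is usually chosen on a held-out validation set rather than fixed in advance.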