import json
import math
from dataclasses import dataclass, field
from os import PathLike, cpu_count
from pathlib import Path
from typing import Any, Optional, TypeAlias
import colorcet as cc
import cv2
import numpy as np
import pandas as pd
import timm
import torch
from matplotlib.colors import LinearSegmentedColormap
from PIL import Image
from timm.data import create_transform, resolve_data_config
from timm.models import VisionTransformer
from torch import Tensor, nn
from torch.nn import functional as F
from torchvision import transforms as T
from .common import Heatmap, ImageLabels, LabelData, load_labels_hf, pil_ensure_rgb, pil_make_grid
# working dir, either file parent dir or cwd if interactive
work_dir = (Path(__file__).parent if "__file__" in locals() else Path.cwd()).resolve()
temp_dir = work_dir.joinpath("temp")
temp_dir.mkdir(exist_ok=True, parents=True)
# model cache
model_cache: dict[str, VisionTransformer] = {}
transform_cache: dict[str, T.Compose] = {}
# device to use
torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class RGBtoBGR(nn.Module):
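    """Swap channel order from RGB to BGR; the tagger checkpoints used here expect BGR
    input, hence this module is appended to the transform in load_model_and_transform."""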
def forward(self, x: Tensor) -> Tensor:
if x.ndim == 4:
return x[:, [2, 1, 0], :, :]
return x[[2, 1, 0], :, :]
def model_device(model: nn.Module) -> torch.device:
return next(model.parameters()).device
def load_model(repo_id: str) -> VisionTransformer:
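    """Load the tagger model from the HF Hub via timm, caching it per repo id."""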
global model_cache
if model_cache.get(repo_id, None) is None:
# save model to cache
model_cache[repo_id] = timm.create_model("hf-hub:" + repo_id, pretrained=True).eval().to(torch_device)
return model_cache[repo_id]
def load_model_and_transform(repo_id: str) -> tuple[VisionTransformer, T.Compose]:
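    """Like load_model, but also build (and cache) the matching preprocessing transform."""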
    global transform_cache
    # reuse load_model so caching and device placement stay consistent
    model = load_model(repo_id)
    if transform_cache.get(repo_id, None) is None:
        transforms = create_transform(**resolve_data_config(model.pretrained_cfg, model=model))
        # hack in the RGBtoBGR transform, save to cache
        transform_cache[repo_id] = T.Compose(transforms.transforms + [RGBtoBGR()])
    transform = transform_cache[repo_id]
    return model, transform
def get_tags(
probs: Tensor,
labels: LabelData,
gen_threshold: float,
char_threshold: float,
):
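    """Split per-tag probabilities into rating/general/character groups and build caption strings."""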
    # Convert indices+probs to (name, confidence) pairs
    label_probs = list(zip(labels.names, probs.numpy()))
    # First 4 labels are actually ratings
    rating_labels = dict([label_probs[i] for i in labels.rating])
    # General labels, pick any where prediction confidence > threshold
    gen_labels = [label_probs[i] for i in labels.general]
    gen_labels = dict([x for x in gen_labels if x[1] > gen_threshold])
    gen_labels = dict(sorted(gen_labels.items(), key=lambda item: item[1], reverse=True))
    # Character labels, pick any where prediction confidence > threshold
    char_labels = [label_probs[i] for i in labels.character]
    char_labels = dict([x for x in char_labels if x[1] > char_threshold])
    char_labels = dict(sorted(char_labels.items(), key=lambda item: item[1], reverse=True))
    # Combine general and character labels (each already sorted by confidence)
    combined_names = [*gen_labels, *char_labels]
    # Convert to a string suitable for use as a training caption
    caption = ", ".join(combined_names).replace("(", "\\(").replace(")", "\\)")
    booru = caption.replace("_", " ")
return caption, booru, rating_labels, char_labels, gen_labels
@torch.no_grad()
def render_heatmap(
image: Tensor,
gradients: Tensor,
image_feats: Tensor,
image_probs: Tensor,
image_labels: list[str],
cmap: LinearSegmentedColormap = cc.m_linear_bmy_10_95_c71,
pos_embed_dim: int = 784,
image_size: tuple[int, int] = (448, 448),
    font_args: Optional[dict[str, Any]] = None,
partial_rows: bool = True,
) -> tuple[list[Heatmap], Image.Image]:
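    """Render a Grad-CAM-style heatmap for each tag and composite it over the input image."""
    # avoid a mutable default argument: fall back to the original OpenCV text style when unset
    if font_args is None:
        font_args = {
            "fontFace": cv2.FONT_HERSHEY_SIMPLEX,
            "fontScale": 1,
            "color": (255, 255, 255),
            "thickness": 2,
            "lineType": cv2.LINE_AA,
        }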
hmap_dim = int(math.sqrt(pos_embed_dim))
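    # Grad-CAM-style attribution: weight the patch features by their token-averaged
    # gradients, then average over channels to get one hmap_dim x hmap_dim map per tag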
image_hmaps = gradients.mean(2, keepdim=True).mul(image_feats.unsqueeze(0)).squeeze()
image_hmaps = image_hmaps.mean(-1).reshape(len(image_labels), hmap_dim, hmap_dim)
    image_hmaps = image_hmaps.clamp(min=0)
image_hmaps /= image_hmaps.reshape(image_hmaps.shape[0], -1).max(-1)[0].unsqueeze(-1).unsqueeze(-1)
# normalize to 0-1
image_hmaps = torch.stack([(x - x.min()) / (x.max() - x.min()) for x in image_hmaps]).unsqueeze(1)
# interpolate to input image size
image_hmaps = F.interpolate(image_hmaps, size=image_size, mode="bilinear").squeeze(1)
hmap_imgs: list[Heatmap] = []
    # undo the [-1, 1] input normalization once; the result is loop-invariant
    image_pixels = image.add(1).mul(127.5).squeeze().permute(1, 2, 0).cpu().numpy().astype(np.uint8)
    for tag, hmap, score in zip(image_labels, image_hmaps, image_probs.cpu()):
hmap_pixels = cmap(hmap.cpu().numpy(), bytes=True)[:, :, :3]
hmap_cv2 = cv2.cvtColor(hmap_pixels, cv2.COLOR_RGB2BGR)
hmap_image = cv2.addWeighted(image_pixels, 0.5, hmap_cv2, 0.5, 0)
if tag is not None:
cv2.putText(hmap_image, tag, (10, 30), **font_args)
cv2.putText(hmap_image, f"{score:.3f}", org=(10, 60), **font_args)
hmap_pil = Image.fromarray(cv2.cvtColor(hmap_image, cv2.COLOR_BGR2RGB))
hmap_imgs.append(Heatmap(tag, score.item(), hmap_pil))
hmap_imgs = sorted(hmap_imgs, key=lambda x: x.score, reverse=True)
hmap_grid = pil_make_grid([x.image for x in hmap_imgs], partial_rows=partial_rows)
return hmap_imgs, hmap_grid
def process_heatmap(
model: VisionTransformer,
image: Tensor,
labels: LabelData,
threshold: float = 0.5,
partial_rows: bool = True,
) -> tuple[list[Heatmap], Image.Image, ImageLabels]:
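    """Forward the image, backprop each above-threshold tag score, and render heatmaps plus tag labels."""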
torch_device = model_device(model)
with torch.set_grad_enabled(True):
features = model.forward_features(image.to(torch_device))
probs = model.forward_head(features)
        probs = torch.sigmoid(probs).squeeze(0)
probs_mask = probs > threshold
heatmap_probs = probs[probs_mask]
label_indices = torch.nonzero(probs_mask, as_tuple=False).squeeze(1)
        image_labels = [labels.names[i] for i in label_indices.tolist()]
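        # batched backward pass: each row of the identity matrix selects one tag's score,
        # yielding per-tag gradients w.r.t. the features in a single autograd call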
eye = torch.eye(heatmap_probs.shape[0], device=torch_device)
grads = torch.autograd.grad(
outputs=heatmap_probs,
inputs=features,
grad_outputs=eye,
is_grads_batched=True,
retain_graph=True,
)
grads = grads[0].detach().requires_grad_(False)[:, 0, :, :].unsqueeze(1)
with torch.set_grad_enabled(False):
hmap_imgs, hmap_grid = render_heatmap(
image=image,
gradients=grads,
image_feats=features,
image_probs=heatmap_probs,
image_labels=image_labels,
partial_rows=partial_rows,
)
caption, booru, ratings, character, general = get_tags(
probs=probs.cpu(),
labels=labels,
gen_threshold=threshold,
char_threshold=threshold,
)
    final_labels = ImageLabels(caption, booru, ratings, general, character)
    return hmap_imgs, hmap_grid, final_labels
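# Minimal usage sketch (assumptions: the repo id and image path are placeholders, and
# load_labels_hf takes the repo id; adjust to the actual label-loading helper in .common):
#
#   model, transform = load_model_and_transform("SmilingWolf/wd-vit-tagger-v3")
#   labels = load_labels_hf("SmilingWolf/wd-vit-tagger-v3")
#   image = transform(pil_ensure_rgb(Image.open("input.png"))).unsqueeze(0)
#   heatmaps, grid, image_labels = process_heatmap(model, image, labels, threshold=0.35)
#   grid.save(temp_dir / "heatmap_grid.png")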