from functools import partial

import numpy as np
import torch
from timm.models.efficientnet import tf_efficientnet_b4_ns, tf_efficientnet_b3_ns, \
    tf_efficientnet_b5_ns, tf_efficientnet_b2_ns, tf_efficientnet_b6_ns, tf_efficientnet_b7_ns
from torch import nn
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.pooling import AdaptiveAvgPool2d

encoder_params = {
    "tf_efficientnet_b3_ns": {
        "features": 1536,
        "init_op": partial(tf_efficientnet_b3_ns, pretrained=True, drop_path_rate=0.2)
    },
    "tf_efficientnet_b2_ns": {
        "features": 1408,
        "init_op": partial(tf_efficientnet_b2_ns, pretrained=False, drop_path_rate=0.2)
    },
    "tf_efficientnet_b4_ns": {
        "features": 1792,
        "init_op": partial(tf_efficientnet_b4_ns, pretrained=True, drop_path_rate=0.5)
    },
    "tf_efficientnet_b5_ns": {
        "features": 2048,
        "init_op": partial(tf_efficientnet_b5_ns, pretrained=True, drop_path_rate=0.2)
    },
    "tf_efficientnet_b4_ns_03d": {
        "features": 1792,
        "init_op": partial(tf_efficientnet_b4_ns, pretrained=True, drop_path_rate=0.3)
    },
    "tf_efficientnet_b5_ns_03d": {
        "features": 2048,
        "init_op": partial(tf_efficientnet_b5_ns, pretrained=True, drop_path_rate=0.3)
    },
    "tf_efficientnet_b5_ns_04d": {
        "features": 2048,
        "init_op": partial(tf_efficientnet_b5_ns, pretrained=True, drop_path_rate=0.4)
    },
    "tf_efficientnet_b6_ns": {
        "features": 2304,
        "init_op": partial(tf_efficientnet_b6_ns, pretrained=True, drop_path_rate=0.2)
    },
    "tf_efficientnet_b7_ns": {
        "features": 2560,
        "init_op": partial(tf_efficientnet_b7_ns, pretrained=False, drop_path_rate=0.2)
    },
    "tf_efficientnet_b6_ns_04d": {
        "features": 2304,
        "init_op": partial(tf_efficientnet_b6_ns, pretrained=True, drop_path_rate=0.4)
    },
}
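

# Illustrative sketch, not part of the original module: how an ``encoder_params``
# entry is consumed. ``init_op`` builds the backbone and ``features`` is the
# channel count of its final feature map, which the classifier heads below feed
# into a pooled Linear layer. The b7 entry is used here only because it is
# configured with pretrained=False above, so no weight download is needed.
def _demo_encoder_params(name: str = "tf_efficientnet_b7_ns") -> None:
    params = encoder_params[name]
    encoder = params["init_op"]()
    with torch.no_grad():
        feature_map = encoder.forward_features(torch.zeros(1, 3, 224, 224))
    # The channel dimension of the feature map matches params["features"].
    assert feature_map.shape[1] == params["features"]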


def setup_srm_weights(input_channels: int = 3) -> torch.Tensor:
    """Creates the SRM kernels for noise analysis."""
    # note: values taken from Zhou et al., "Learning Rich Features for Image Manipulation Detection", CVPR2018
    srm_kernel = torch.from_numpy(np.array([
        [  # srm 1/2 horiz
            [0., 0., 0., 0., 0.],  # noqa: E241,E201
            [0., 0., 0., 0., 0.],  # noqa: E241,E201
            [0., 1., -2., 1., 0.],  # noqa: E241,E201
            [0., 0., 0., 0., 0.],  # noqa: E241,E201
            [0., 0., 0., 0., 0.],  # noqa: E241,E201
        ], [  # srm 1/4
            [0., 0., 0., 0., 0.],  # noqa: E241,E201
            [0., -1., 2., -1., 0.],  # noqa: E241,E201
            [0., 2., -4., 2., 0.],  # noqa: E241,E201
            [0., -1., 2., -1., 0.],  # noqa: E241,E201
            [0., 0., 0., 0., 0.],  # noqa: E241,E201
        ], [  # srm 1/12
            [-1., 2., -2., 2., -1.],  # noqa: E241,E201
            [2., -6., 8., -6., 2.],  # noqa: E241,E201
            [-2., 8., -12., 8., -2.],  # noqa: E241,E201
            [2., -6., 8., -6., 2.],  # noqa: E241,E201
            [-1., 2., -2., 2., -1.],  # noqa: E241,E201
        ]
    ])).float()
    srm_kernel[0] /= 2
    srm_kernel[1] /= 4
    srm_kernel[2] /= 12
    return srm_kernel.view(3, 1, 5, 5).repeat(1, input_channels, 1, 1)


def setup_srm_layer(input_channels: int = 3) -> torch.nn.Module:
    """Creates an SRM convolution layer for noise analysis."""
    weights = setup_srm_weights(input_channels)
    conv = torch.nn.Conv2d(input_channels, out_channels=3, kernel_size=5, stride=1, padding=2, bias=False)
    with torch.no_grad():
        conv.weight = torch.nn.Parameter(weights, requires_grad=False)
    return conv
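

# Illustrative sketch, not part of the original module: the SRM layer is a fixed,
# non-trainable 5x5 convolution that maps an RGB image to three noise-residual
# maps. Because each high-pass kernel sums to zero, flat image regions produce
# (near-)zero responses, leaving mostly the local noise pattern.
def _demo_srm_layer() -> None:
    srm = setup_srm_layer(input_channels=3)
    image = torch.rand(1, 3, 64, 64)
    noise = srm(image)
    # padding=2 with a 5x5 kernel keeps the spatial size unchanged
    assert noise.shape == (1, 3, 64, 64)
    # the SRM kernels stay frozen during training
    assert not srm.weight.requires_grad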


class DeepFakeClassifierSRM(nn.Module):
    def __init__(self, encoder, dropout_rate=0.5) -> None:
        super().__init__()
        self.encoder = encoder_params[encoder]["init_op"]()
        self.avg_pool = AdaptiveAvgPool2d((1, 1))
        self.srm_conv = setup_srm_layer(3)
        self.dropout = Dropout(dropout_rate)
        self.fc = Linear(encoder_params[encoder]["features"], 1)

    def forward(self, x):
        # fixed SRM filters turn the RGB input into noise residuals before the encoder
        noise = self.srm_conv(x)
        x = self.encoder.forward_features(noise)
        x = self.avg_pool(x).flatten(1)
        x = self.dropout(x)
        x = self.fc(x)
        return x


class GlobalWeightedAvgPool2d(nn.Module):
    """
    Global Weighted Average Pooling from the paper "Global Weighted Average
    Pooling Bridges Pixel-level Localization and Image-level Classification"
    """

    def __init__(self, features: int, flatten=False):
        super().__init__()
        self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True)
        self.flatten = flatten

    def fscore(self, x):
        # per-pixel score map produced by a 1x1 convolution
        m = self.conv(x)
        m = m.sigmoid().exp()
        return m

    def norm(self, x: torch.Tensor):
        # normalise the score map so the weights sum to 1 over each image
        return x / x.sum(dim=[2, 3], keepdim=True)

    def forward(self, x):
        input_x = x
        x = self.fscore(x)
        x = self.norm(x)
        x = x * input_x
        x = x.sum(dim=[2, 3], keepdim=not self.flatten)
        return x
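

# Illustrative sketch, not part of the original module: GlobalWeightedAvgPool2d
# learns a per-pixel score, normalises it over the spatial dimensions, and uses
# it as attention weights for pooling, so the output has the same
# (batch, channels) shape as ordinary global average pooling.
def _demo_gwap() -> None:
    pool = GlobalWeightedAvgPool2d(features=8, flatten=True)
    feature_map = torch.rand(2, 8, 7, 7)
    weights = pool.norm(pool.fscore(feature_map))
    # one spatial weight map per image, normalised to sum to 1
    assert torch.allclose(weights.sum(dim=[2, 3]), torch.ones(2, 1))
    pooled = pool(feature_map)
    assert pooled.shape == (2, 8)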


class DeepFakeClassifier(nn.Module):
    def __init__(self, encoder, dropout_rate=0.0) -> None:
        super().__init__()
        self.encoder = encoder_params[encoder]["init_op"]()
        self.avg_pool = AdaptiveAvgPool2d((1, 1))
        self.dropout = Dropout(dropout_rate)
        self.fc = Linear(encoder_params[encoder]["features"], 1)

    def forward(self, x):
        x = self.encoder.forward_features(x)
        x = self.avg_pool(x).flatten(1)
        x = self.dropout(x)
        x = self.fc(x)
        return x
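

# Illustrative usage sketch, not part of the original module: the classifier
# heads emit a single logit per image, so a sigmoid turns the output into a
# fake-probability. The b7 entry is used only because it is configured with
# pretrained=False above and runs without a weight download; the 380x380 input
# size is an arbitrary choice for this demo, not a requirement, and a trained
# checkpoint would be loaded before real inference.
def _demo_classifier() -> None:
    model = DeepFakeClassifier(encoder="tf_efficientnet_b7_ns").eval()
    batch = torch.rand(2, 3, 380, 380)
    with torch.no_grad():
        logits = model(batch)  # shape (batch, 1)
        fake_probability = torch.sigmoid(logits).squeeze(1)
    assert fake_probability.shape == (2,)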


class DeepFakeClassifierGWAP(nn.Module):
    def __init__(self, encoder, dropout_rate=0.5) -> None:
        super().__init__()
        self.encoder = encoder_params[encoder]["init_op"]()
        self.avg_pool = GlobalWeightedAvgPool2d(encoder_params[encoder]["features"])
        self.dropout = Dropout(dropout_rate)
        self.fc = Linear(encoder_params[encoder]["features"], 1)

    def forward(self, x):
        x = self.encoder.forward_features(x)
        x = self.avg_pool(x).flatten(1)
        x = self.dropout(x)
        x = self.fc(x)
        return x