import torch
import torch.nn.functional as F
import transformers
from torch import nn
from transformers.modeling_outputs import SemanticSegmenterOutput


def encode_down(c_in: int, c_out: int):
    """Double 3x3 conv + BatchNorm + ReLU block, as in the U-Net contracting path."""
    return nn.Sequential(
        nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, padding=1),
        nn.BatchNorm2d(num_features=c_out),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=3, padding=1),
        nn.BatchNorm2d(num_features=c_out),
        nn.ReLU(inplace=True),
    )


def decode_up(c: int):
    """2x2 transposed conv that doubles the spatial size and halves the channels."""
    return nn.ConvTranspose2d(
        in_channels=c,
        out_channels=c // 2,
        kernel_size=2,
        stride=2,
    )


class FaceUNet(nn.Module):
    """A U-Net-style encoder-decoder for dense per-pixel classification."""

    def __init__(self, num_classes: int):
        super().__init__()
        self.num_classes = num_classes
        # Contracting path. The stem is a single conv (not a double conv
        # block); every later stage uses encode_down.
        self.down_1 = nn.Conv2d(
            in_channels=3,
            out_channels=64,
            kernel_size=3,
            padding=1,
        )
        self.down_2 = encode_down(64, 128)
        self.down_3 = encode_down(128, 256)
        self.down_4 = encode_down(256, 512)
        self.down_5 = encode_down(512, 1024)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Expanding path: each up-conv halves the channels, then the matching
        # skip connection is concatenated and fused by a double conv block.
        self.up_1 = decode_up(1024)
        self.up_c1 = encode_down(1024, 512)
        self.up_2 = decode_up(512)
        self.up_c2 = encode_down(512, 256)
        self.up_3 = decode_up(256)
        self.up_c3 = encode_down(256, 128)
        self.up_4 = decode_up(128)
        self.up_c4 = encode_down(128, 64)
        # Final 3x3 conv maps the 64 features to per-class logits.
        self.segment = nn.Conv2d(
            in_channels=64,
            out_channels=self.num_classes,
            kernel_size=3,
            padding=1,
        )

    def forward(self, x):
        # Encoder: keep the pre-pool activations (d1, d3, d5, d7) as skips.
        d1 = self.down_1(x)
        d2 = self.pool(d1)
        d3 = self.down_2(d2)
        d4 = self.pool(d3)
        d5 = self.down_3(d4)
        d6 = self.pool(d5)
        d7 = self.down_4(d6)
        d8 = self.pool(d7)
        d9 = self.down_5(d8)
        # Decoder: upsample, concatenate the skip along the channel dim, fuse.
        u1 = self.up_1(d9)
        x = self.up_c1(torch.cat([d7, u1], 1))
        u2 = self.up_2(x)
        x = self.up_c2(torch.cat([d5, u2], 1))
        u3 = self.up_3(x)
        x = self.up_c3(torch.cat([d3, u3], 1))
        u4 = self.up_4(x)
        x = self.up_c4(torch.cat([d1, u4], 1))
        x = self.segment(x)
        return x
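

# A minimal sketch of a shape check (assumes input height/width divisible by
# 16 so the four pool/up-conv stages line up; 224x224 is just an example):
#
#     net = FaceUNet(num_classes=19)
#     logits = net(torch.randn(1, 3, 224, 224))
#     assert logits.shape == (1, 19, 224, 224)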


class Segformer(transformers.PreTrainedModel):
    config_class = transformers.SegformerConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # num_classes must be set on the config; it is not a standard
        # SegformerConfig field.
        self.model = FaceUNet(num_classes=config.num_classes)

    def forward(self, tensor):
        # FaceUNet defines no forward_features method; call the module directly.
        return self.model(tensor)


class SegformerForSemanticSegmentation(transformers.PreTrainedModel):
    config_class = transformers.SegformerConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.model = FaceUNet(num_classes=config.num_classes)

    def forward(self, pixel_values, labels=None):
        logits = self.model(pixel_values)
        values = {"logits": logits}
        if labels is not None:
            # torch.nn exposes no cross_entropy function; use the functional
            # API, which expects logits of shape (N, C, H, W) and integer
            # labels of shape (N, H, W).
            loss = F.cross_entropy(logits, labels)
            values["loss"] = loss
        return SemanticSegmenterOutput(**values)
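

if __name__ == "__main__":
    # A minimal end-to-end sketch, assuming num_classes is supplied as a
    # custom config attribute (SegformerConfig keeps unknown kwargs).
    config = transformers.SegformerConfig(num_classes=19)
    model = SegformerForSemanticSegmentation(config)
    pixel_values = torch.randn(2, 3, 224, 224)
    labels = torch.randint(0, 19, (2, 224, 224))
    out = model(pixel_values, labels=labels)
    print(out.logits.shape)  # torch.Size([2, 19, 224, 224])
    print(out.loss.item())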