import torch
import torch.nn as nn
import torchvision.transforms as transforms
import gradio as gr
from PIL import Image


class VGG19(nn.Module):
    """VGG19 convolutional backbone with a fully connected classification head."""

    def __init__(self):
        super(VGG19, self).__init__()

        self.features = nn.Sequential(
            # Block 1: 2 x conv(64) + max pool
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 2: 2 x conv(128) + max pool
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 3: 4 x conv(256) + max pool
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 4: 4 x conv(512) + max pool
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 5: 4 x conv(512) + max pool -> 512 x 7 x 7 for 224x224 input
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            # Binary head: two outputs (NORMAL, PNEUMONIA) instead of the
            # 1000-way ImageNet head of the original VGG19, to match the
            # class list and the saved fine-tuned checkpoint.
            nn.Linear(4096, 2),
        )

    def forward(self, x):
        x = self.features(x)
        # Flatten the 512 x 7 x 7 feature map before the fully connected head.
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

model = VGG19()
# The checkpoint must have been saved from this exact architecture
# (including the 2-class head) for load_state_dict to succeed.
model.load_state_dict(torch.load("vgg_model.pth", map_location="cpu"))
model.eval()
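
# Optional sanity check (a minimal sketch, not required by the app): push a
# random 224x224 RGB batch through the network to confirm the CPU forward
# pass works and yields one logit per class before the UI is launched.
with torch.inference_mode():
    _dummy_logits = model(torch.randn(1, 3, 224, 224))
    assert _dummy_logits.shape == (1, 2)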

class_names = ['NORMAL', 'PNEUMONIA']

# Same preprocessing as for ImageNet-trained VGG: resize to 224x224 and
# normalize with the standard ImageNet channel means and standard deviations.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def predict(img):
    # Chest X-rays are often single-channel; force RGB so the 3-channel
    # convolution stem always receives the input shape it expects.
    img = transform(img.convert("RGB")).unsqueeze(0)
    with torch.inference_mode():
        pred_probs = torch.softmax(model(img), dim=1)

    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
    return pred_labels_and_probs

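# Hypothetical local usage (not needed for the Gradio UI below); assumes a
# chest X-ray file named sample_xray.jpeg sits next to this script:
#
#   probs = predict(Image.open("sample_xray.jpeg"))
#   print(probs)  # -> {'NORMAL': ..., 'PNEUMONIA': ...}
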
title = "Zatürre Bulucu"
|
|
description = "Gönderilen fotoğrafa göre Sağlıklı mı yoksa Zatürre mi olduğunu tahmin eder."
|
|
|
|

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(num_top_classes=2, label="Predictions")],
    title=title,
    description=description,
)

demo.launch(debug=False, share=True)