# FastAPI service that classifies retinal fundus images with TensorFlow SavedModel classifiers.
import cv2
import tensorflow as tf
import numpy as np
from fastapi import FastAPI, UploadFile, File
from pydantic import BaseModel
import uvicorn

app = FastAPI()

def crop_image_from_gray(img, tol=7):
    # Crop away the dark background around the retina: keep only rows/columns
    # whose gray value exceeds the tolerance `tol`.
    if img.ndim == 2:
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    elif img.ndim == 3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img > tol
        check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
        if check_shape == 0:
            # The whole image is darker than the tolerance; return it unchanged.
            return img
        else:
            # Crop each channel separately, then restack into an RGB image.
            img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
            img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
            img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
            img = np.stack([img1, img2, img3], axis=-1)
        return img


def load_ben_color(image, sigmaX=10):
    # "Ben Graham" preprocessing: convert to RGB, crop the background,
    # resize, then sharpen by subtracting a Gaussian-blurred copy.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = crop_image_from_gray(image)
    image = cv2.resize(image, (224, 224))
    image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128)
    return image


def clahe(image):
    # Contrast Limited Adaptive Histogram Equalization, applied per channel.
    clahe_op = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    r, g, b = cv2.split(image)
    r = clahe_op.apply(r)
    g = clahe_op.apply(g)
    b = clahe_op.apply(b)
    return cv2.merge((r, g, b))


def filter1(image):
    # Preprocessing pipeline 1: Ben Graham color filtering.
    return load_ben_color(image)


def filter2(image):
    # Preprocessing pipeline 2: CLAHE followed by resizing.
    image = clahe(image)
    image = cv2.resize(image, (224, 224))
    return image


def predict(image, model, filter_fn):
    # Preprocess the image, scale it to [0, 1], and run it through the
    # SavedModel's default serving signature.
    model_image = filter_fn(image)
    model_image = np.array([model_image], dtype=np.float32) / 255.0
    infer = model.signatures["serving_default"]
    output_key = next(iter(infer.structured_outputs.keys()))
    predictions = infer(tf.constant(model_image))[output_key].numpy()
    return predictions


def result(predictions):
    # Map the highest-probability output to its class label.
    class_labels = ["Age related Macular Degeneration", "Cataract", "Diabetes", "Glaucoma", "Hypertension", "Normal", "Others", "Pathological Myopia"]
    predictions = np.array(predictions).tolist()[0]
    predictions_index = int(np.argmax(predictions))

    result_json = {
        "class": class_labels[predictions_index],
        "probability": predictions[predictions_index]
    }

    return result_json

# Model definitions
models_names = ["ODIR-B-2K-5Class-LastTrain-Xception"]

models_paths = [
    "ODIR-B-2K-5Class-LastTrain-Xception"
]

# Preprocessing filter for each model; entries beyond the number of loaded
# models are ignored.
filters = [filter1, filter1, filter1, filter1]

models = []

# Load every SavedModel listed above once at startup.
for model_path in models_paths:
    model = tf.saved_model.load(model_path)
    models.append(model)


@app.post("/predict")
async def predict_endpoint(file: UploadFile = File(...)):
    # Decode the uploaded bytes into a BGR OpenCV image.
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    result_json = {}

    # Run every loaded model with its matching preprocessing filter.
    for i in range(len(models)):
        model = models[i]
        prediction = predict(image, model, filters[i])
        result_json[models_names[i]] = result(prediction)

    return result_json
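

# A minimal sketch of a local entry point, assuming the app should be runnable
# directly; the host and port below are assumptions, not part of the original service.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request against the /predict endpoint (hypothetical file name):
#   curl -X POST -F "file=@fundus.jpg" http://localhost:8000/predict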