import json

import gradio as gr
import openai
import torch
from PIL import Image
from torch import nn
from torchvision import transforms
from torchvision.models import mobilenet_v2

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

torch.manual_seed(0)

class CustomMobileNetv2(nn.Module):
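    """MobileNetV2 backbone with ImageNet weights: the feature extractor is frozen
    and a small Linear -> LogSoftmax head is attached (8 cake classes in this app)."""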
    def __init__(self, output_size):
        super().__init__()
        self.mnet = mobilenet_v2(pretrained=True)
        self.freeze()

        self.mnet.classifier = nn.Sequential(
            nn.Linear(1280, output_size),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        return self.mnet(x)

    def freeze(self):
        for param in self.mnet.parameters():
            param.requires_grad = False

    def unfreeze(self):
        for param in self.mnet.parameters():
            param.requires_grad = True

# The checkpoint stores the whole pickled model object, so CustomMobileNetv2 must be
# defined above before torch.load() can unpickle it.
kue_lokal_model = torch.load(
    'rickyig_mobilenetv2_kue_lokal_classifier_entire_model.pth',
    map_location=torch.device('cpu'),
)
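# Mapping from class index to cake name; it has to follow the label order used
# during training for the predictions to be correct.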

dict_for_inference = {0: 'kue dadar gulung',
                      1: 'kue kastengel',
                      2: 'kue klepon',
                      3: 'kue lapis',
                      4: 'kue lumpur',
                      5: 'kue putri salju',
                      6: 'kue risoles',
                      7: 'kue serabi'}

def get_completion(prompt, model="gpt-3.5-turbo"):
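    # Query the chat model through the legacy (pre-1.0) openai SDK, which reads the
    # API key from the OPENAI_API_KEY environment variable.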
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,
    )
    return response.choices[0].message["content"]

def get_response(classify_result):
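    # Indonesian prompt: "What is <cake> and give the recipe for <cake>."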
    prompt = "Apa itu {} dan sebutkan resep dari {}.".format(classify_result, classify_result)
    response = get_completion(prompt)
    return response

def classify_image(input_image):
    kue_lokal_model.eval()

    # Gradio passes the upload as a file path; force RGB so grayscale/RGBA images
    # also match the 3-channel input MobileNetV2 expects.
    img = Image.open(input_image).convert("RGB")

    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])

    input_data = transform(img).unsqueeze(0).to(device='cpu')

    class_to_label = dict_for_inference

    with torch.no_grad():
        output = kue_lokal_model(input_data)
        # The classifier head ends in LogSoftmax, so exponentiating the
        # log-probabilities recovers the class probabilities.
        probs = torch.exp(output)
    conf, predicted_class = torch.max(probs, 1)

    # Create a dictionary of class labels and their probabilities
    output_dict = {"predicted_label": class_to_label[predicted_class.item()], "probability": conf.item()}

    # Convert the dictionary to JSON format
    output_json = json.dumps(output_dict)
    output_bentuk_text = "Hasil Klasifikasi Gambar \nKue : {} \nProbability: {:.2f}%".format(
        class_to_label[predicted_class.item()], conf.item() * 100
    )
    output_response = get_response(class_to_label[predicted_class.item()])

    return output_json, output_bentuk_text, output_response

# Create a Gradio interface
input_image = gr.Image(label="Input Image", type="filepath")
output_json = gr.JSON(label="Output (JSON)")
output_bentuk_text = gr.Textbox(label="Hasil Output")
output_response = gr.Textbox(label="Resep Kue")
example_input_image = "3.jpg"
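# NOTE: "3.jpg" has to sit next to this script for the Gradio example to load.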

interface = gr.Interface(
    fn=classify_image,
    inputs=input_image,
    outputs=[output_json, output_bentuk_text, output_response],  # Use JSON output
    title="Resep Kue Lokal",
    examples=[
        [example_input_image]
    ],
    description="Unggah foto kue lokal dan dapatkan hasil klasifikasi gambar beserta resep kue.<br>Kue yang tersedia: kue dadar gulung, kue kastengel, kue klepon, kue lapis, kue lumpur, kue putri salju, kue risoles, kue serabi.",
)

# Start the Gradio app
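# share=True creates a temporary public link; debug=True keeps the launch blocking
# so errors show up in the console.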
interface.launch(share=True, debug=True)
