RickyIG committed on
Commit cd748e0
1 Parent(s): 8cdd2f6

Create app.py

Files changed (1)
  1. app.py +122 -33
app.py CHANGED
@@ -1,44 +1,133 @@
- import os
import openai
- import sys

- import gradio as gr
- from langchain import OpenAI, PromptTemplate
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.chains.summarize import load_summarize_chain
- from langchain.document_loaders import PyPDFLoader
- from langchain.chains import RetrievalQA

- sys.path.append('../..')

- from dotenv import load_dotenv, find_dotenv
- _ = load_dotenv(find_dotenv()) # read local .env file

- openai.api_key = os.environ['OPENAI_API_KEY']

- llm = OpenAI(temperature=0)
- text_splitter = CharacterTextSplitter()

- def summarize_pdf(pdf_file):
-     pdf_file = pdf_file.name
-     loader = PyPDFLoader(pdf_file)
-     docs = loader.load_and_split()
-     chain = load_summarize_chain(llm, chain_type="map_reduce")
-     summary = chain.run(docs)
-     return summary

- def main():
-     output_summary = gr.Textbox(label="Summary")

-     iface = gr.Interface(
-         fn=summarize_pdf,
-         inputs=["file"],
-         outputs=[output_summary],
-         title="PDF Summarizer",
-         description="Langchain based summarization application that's given a PDF file, then create a summary of the text content. <br> Enter the PDF file and get its summary.",
-     )

-     iface.launch(share=True)

- if __name__ == "__main__":
-     main()
+ import gradio as gr
+ import json
+ import requests
+
+ import torch
+ import torchvision
+ import torchvision.models as models
+
+ from torchvision import datasets, transforms
+ from torchvision.models import mobilenet_v2
+ from torch import nn, optim
+ from torch.utils.data import DataLoader, TensorDataset
+
+ from tqdm.auto import tqdm
+ from jcopdl.callback import Callback, set_config
+
+ import pandas as pd
+ import numpy as np
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ device
+
+
import openai
+ import os
+ import time
+ from pathlib import Path

+ from PIL import Image
+ import io
+ import cv2
+
+ torch.manual_seed(0)
+
+ class CustomMobileNetv2(nn.Module):
+     def __init__(self, output_size):
+         super().__init__()
+         self.mnet = mobilenet_v2(pretrained=True)
+         self.freeze()
+
+         self.mnet.classifier = nn.Sequential(
+             nn.Linear(1280, output_size),
+             nn.LogSoftmax()
+         )
+
+     def forward(self, x):
+         return self.mnet(x)
+
+     def freeze(self):
+         for param in self.mnet.parameters():
+             param.requires_grad = False
+
+     def unfreeze(self):
+         for param in self.mnet.parameters():
+             param.requires_grad = True
+
+ kue_lokal_model = torch.load('rickyig_mobilenetv2_kue_lokal_classifier_entire_model.pth', map_location=torch.device('cpu'))
+
+ dict_for_inference = {0: 'kue dadar gulung',
+                       1: 'kue kastengel',
+                       2: 'kue klepon',
+                       3: 'kue lapis',
+                       4: 'kue lumpur',
+                       5: 'kue putri salju',
+                       6: 'kue risoles',
+                       7: 'kue serabi'}
+
+ def get_completion(prompt, model="gpt-3.5-turbo"):
+     messages = [{"role": "user", "content": prompt}]
+     response = openai.ChatCompletion.create(
+         model=model,
+         messages=messages,
+         temperature=0,
+     )
+     return response.choices[0].message["content"]
+
+ def get_response(classify_result):
+     prompt = "Apa itu {} dan sebutkan resep dari {}.".format(classify_result, classify_result)
+     response = get_completion(prompt)
+     return response
+
+ def classify_image(input_image):
+
+     kue_lokal_model.eval()
+
+     image_for_testing = input_image
+     img = Image.open(image_for_testing)
+
+     transform = transforms.Compose([
+         transforms.Resize((224, 224)),
+         transforms.ToTensor(),
+     ])
+
+     input_data = transform(img).unsqueeze(0).to(device='cpu')

+     class_to_label = dict_for_inference

+     with torch.no_grad():
+         output = kue_lokal_model(input_data)
+         probs = torch.nn.functional.softmax(output, dim=1)
+         conf, predicted_class = torch.max(probs, 1)

+     # Create a dictionary of class labels and their probabilities
+     output_dict = {"predicted_label": class_to_label[predicted_class.item()], "probability": conf.item()}

+     # Convert the dictionary to JSON format
+     output_json = json.dumps(output_dict)
+     output_bentuk_text = "Hasil Klasifikasi Gambar \nKue : {} \nProbability: {:.2f}%".format(class_to_label[predicted_class.item()], conf.item()*100)
+     output_response = get_response(class_to_label[predicted_class.item()])

+     return output_json, output_bentuk_text, output_response

+ # Create a Gradio interface
+ input_image = gr.Image(label="input_image", type="filepath")
+ output_json = gr.JSON(label="Output (JSON)")
+ output_bentuk_text = gr.Textbox(label="Hasil Output")
+ output_response = gr.Textbox(label="Resep Kue")
+ example_input_image = "3.jpg"

+ interface = gr.Interface(
+     fn=classify_image,
+     inputs=input_image,
+     outputs=[output_json, output_bentuk_text, output_response], # Use JSON output
+     title="Resep Kue Lokal",
+     examples=[
+         [example_input_image]
+     ],
+     description="Unggah foto kue lokal dan dapatkan hasil klasifikasi gambar beserta resep kue.<br>Kue yang tersedia: kue dadar gulung, kue kastengel, kue klepon, kue lapis, kue lumpur, kue putri salju, kue risoles, kue serabi.",
+ )

+ # Start the Gradio app
+ interface.launch(share=True, debug=True)

+ # ---- End of your code ----
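Note on the added file: CustomMobileNetv2 is never instantiated in app.py; its definition only needs to be in scope so that torch.load can unpickle the full saved model, and the pre-1.0 openai client is expected to pick up OPENAI_API_KEY from the environment (e.g. a Space secret). A minimal sketch of running just the classification step outside the Gradio UI, assuming the committed weights file is on disk and using a hypothetical local image test.jpg:

# sketch: classification step only, no Gradio and no GPT call
import torch
from PIL import Image
from torch import nn
from torchvision import transforms
from torchvision.models import mobilenet_v2

# The class must be defined with the same name so torch.load can unpickle the model
# (on torch >= 2.6 the call may also need weights_only=False).
class CustomMobileNetv2(nn.Module):
    def __init__(self, output_size):
        super().__init__()
        self.mnet = mobilenet_v2(pretrained=True)
        self.freeze()
        self.mnet.classifier = nn.Sequential(
            nn.Linear(1280, output_size),
            nn.LogSoftmax()
        )

    def forward(self, x):
        return self.mnet(x)

    def freeze(self):
        for param in self.mnet.parameters():
            param.requires_grad = False

    def unfreeze(self):
        for param in self.mnet.parameters():
            param.requires_grad = True

labels = {0: 'kue dadar gulung', 1: 'kue kastengel', 2: 'kue klepon', 3: 'kue lapis',
          4: 'kue lumpur', 5: 'kue putri salju', 6: 'kue risoles', 7: 'kue serabi'}

model = torch.load('rickyig_mobilenetv2_kue_lokal_classifier_entire_model.pth',
                   map_location='cpu')
model.eval()

# Same preprocessing as classify_image in app.py: resize to 224x224, convert to tensor
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])
img = preprocess(Image.open('test.jpg')).unsqueeze(0)  # shape [1, 3, 224, 224]

with torch.no_grad():
    probs = torch.softmax(model(img), dim=1)  # model emits log-probs; softmax keeps the ranking
    conf, idx = probs.max(dim=1)

print(labels[idx.item()], f"{conf.item():.2%}")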