Upload 10 files
Browse files- app/__init__.py +0 -0
- app/__pycache__/__init__.cpython-310.pyc +0 -0
- app/__pycache__/main.cpython-310.pyc +0 -0
- app/main.py +50 -0
- front/app.py +26 -0
- requirements.txt +7 -0
- utils/__init__.py +0 -0
- utils/__pycache__/__init__.cpython-310.pyc +0 -0
- utils/__pycache__/model_func.cpython-310.pyc +0 -0
- utils/model_func.py +27 -0
app/__init__.py
ADDED
File without changes
|
app/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (151 Bytes). View file
|
|
app/__pycache__/main.cpython-310.pyc
ADDED
Binary file (1.54 kB). View file
|
|
app/main.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Import the submodule explicitly: `import PIL` alone does not guarantee that
# the `PIL.Image` attribute exists (it previously worked only because
# utils.model_func transitively imported PIL.Image).
import PIL.Image

from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from pydantic import BaseModel

from utils.model_func import class_id_to_label, load_model, transform_image

# Global model handle; populated once by the startup hook below.
model = None
app = FastAPI()
class ImageClass(BaseModel):
    """Response payload for /classify: the predicted ImageNet label."""

    prediction: str
class TextClass(BaseModel):
    """Request payload for /clf_text: the raw text to classify."""

    text: str
@app.on_event("startup")
async def startup_event():
    """Load the classification model once when the application starts."""
    global model
    # Here, use the function from utils.model_func to load the model.
    # NOTE(review): `on_event` is deprecated in recent FastAPI in favour of
    # lifespan handlers — fine for the pinned 0.105.0, but worth migrating.
    model = load_model()
+
|
25 |
+
# @app.post('/classify')
|
26 |
+
# async def classify_image(file: UploadFile = File(...)):
|
27 |
+
# # Здесь используйте функцию из utils.model_func для классификации изображения
|
28 |
+
# image_bytes = await file.read()
|
29 |
+
# prediction = transform_image(image_bytes, model)
|
30 |
+
# return {"prediction": prediction}
|
31 |
+
|
32 |
+
@app.post('/classify')
|
33 |
+
async def classify_image(file: UploadFile = File(...)):
|
34 |
+
# Use the function from utils.model_func to classify the image
|
35 |
+
image = PIL.Image.open(file.file)
|
36 |
+
adapted_image = transform_image(image)
|
37 |
+
pred_index = model(adapted_image.unsqeeze(0).detach().cpu().numpy().argmax())
|
38 |
+
imagenet_class = class_id_to_label(pred_index)
|
39 |
+
response = ImageClass(prediction=imagenet_class)
|
40 |
+
return response
|
41 |
+
|
42 |
+
|
43 |
+
|
44 |
+
|
45 |
+
@app.post('/clf_text')
|
46 |
+
async def classify_text(text_data: TextClass):
|
47 |
+
# Здесь используйте функцию из utils.model_func для классификации текста
|
48 |
+
text = text_data.text
|
49 |
+
prediction = class_id_to_label(text, model)
|
50 |
+
return {"prediction": prediction}
|
front/app.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import requests
|
3 |
+
import json
|
4 |
+
|
def main():
    """Streamlit front-end for the FastAPI classification service."""
    # Single place for the back-end address (was duplicated per request).
    api_base = "http://127.0.0.1:8000"

    st.title("FastAPI - Streamlit Integration")

    # Text-classification endpoint
    text_input = st.text_input("Enter text for classification:")
    if st.button("Classify Text"):
        response = requests.post(f"{api_base}/clf_text", json={"text": text_input})
        # Check the status before parsing so a back-end failure shows as an
        # error message instead of a JSON-decode crash.
        if response.ok:
            result = response.json()
            st.success(f"Classification result: {result['prediction']}")
        else:
            st.error(f"Request failed with status {response.status_code}")

    # Image-classification endpoint
    image_path = st.file_uploader("Upload an image for classification:", type=["jpg", "png"])
    if image_path is not None:
        if st.button("Classify Image"):
            files = {"file": image_path.read()}
            response = requests.post(f"{api_base}/classify", files=files)
            if response.ok:
                result = response.json()
                st.success(f"Classification result: {result['prediction']}")
            else:
                st.error(f"Request failed with status {response.status_code}")
if __name__ == '__main__':
    # Render the UI when executed directly (e.g. `streamlit run front/app.py`).
    main()
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi==0.105.0
|
2 |
+
uvicorn==0.24.0.post1
|
3 |
+
streamlit==1.29.0
|
4 |
+
torch==2.1.2
|
5 |
+
torchvision==0.16.2
|
6 |
+
Pillow==10.1.0
|
7 |
+
python-multipart==0.0.6
|
utils/__init__.py
ADDED
File without changes
|
utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (153 Bytes). View file
|
|
utils/__pycache__/model_func.cpython-310.pyc
ADDED
Binary file (1.34 kB). View file
|
|
utils/model_func.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from torchvision import models, transforms
|
3 |
+
from PIL import Image
|
4 |
+
import json
|
5 |
+
|
def load_classes():
    """Load the human-readable ImageNet class labels from disk.

    Returns:
        The parsed JSON content — presumably a list of label strings
        indexed by class id (verify against the bundled JSON file).

    Note: the path is relative to the working directory, so the app must
    be launched from the repository root.
    """
    # Pin the encoding: the platform default is not UTF-8 everywhere, and
    # the label file may contain non-ASCII characters.
    with open('utils/imagenet-simple-labels.json', encoding='utf-8') as f:
        labels = json.load(f)
    return labels
def class_id_to_label(i):
    """Map an ImageNet class index to its human-readable label."""
    # Re-reads the label file on every call, exactly like the helper does.
    return load_classes()[i]
def load_model():
    """Return a MobileNetV2 pre-trained on ImageNet, in inference mode.

    eval() disables dropout and freezes batch-norm statistics so that
    predictions are deterministic.
    """
    # `pretrained=True` is deprecated since torchvision 0.13 and warns on
    # the pinned 0.16.2; the weights enum below selects the same
    # ImageNet-1k weights the legacy flag used.
    model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
    model.eval()
    return model
def transform_image(img):
    """Preprocess a PIL image into a normalized tensor for MobileNetV2.

    Resizes to 224x224, converts to a float tensor, and applies the
    standard ImageNet channel normalization.
    """
    steps = [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ]
    pipeline = transforms.Compose(steps)
    return pipeline(img)