import streamlit as st
import requests
from PIL import Image
from io import BytesIO
import torch
from transformers import ViTFeatureExtractor, ViTForImageClassification
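# Note: newer transformers releases deprecate ViTFeatureExtractor in favor of
# ViTImageProcessor; the older class is kept here to match the original code and
# still works, though it may emit a deprecation warning.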
# # Init model, transforms
# model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
# transforms = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')
@st.cache_resource
def get_model_transforms():
    model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
    transforms = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')
    return model, transforms

model, transforms = get_model_transforms()
st.title("Let's guess the age.")

file_name = st.file_uploader('Upload an image of the person whose age you want to predict.', type=['png', 'jpg', 'jpeg'])
if file_name is not None:
    # Convert to RGB so RGBA or grayscale uploads don't break the 3-channel model input
    image = Image.open(file_name).convert('RGB')
    st.image(image, use_column_width=True)
    # Transform our image and pass it through the model
    inputs = transforms(image, return_tensors='pt')
    output = model(**inputs)

    # Predicted class probabilities
    proba = output.logits.softmax(1)

    # Predicted classes
    preds = proba.argmax(1)

    # Top-5 labels and probabilities, sorted from most to least likely
    values, indices = torch.topk(proba, k=5)
    result_dict = {model.config.id2label[i.item()]: v.item() for i, v in zip(indices.numpy()[0], values.detach().numpy()[0])}
    first_result = list(result_dict.keys())[0]
    print(f'predicted result: {result_dict}')
    print(f'1st: {first_result}')
    st.header('Result')
    st.subheader(f'Predicted age: {first_result}')
    for k, v in result_dict.items():
        st.write(f'{k}: {v * 100:.2f}%')
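# To try the app locally (assuming this file is saved as app.py, with streamlit,
# torch, transformers, and Pillow installed):
#   streamlit run app.py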