yonicho commited on
Commit
1913931
ยท
1 Parent(s): fd0ed6d
Files changed (4) hide show
  1. .gitignore +1 -0
  2. app.py +36 -0
  3. requirements.txt +6 -0
  4. sample.py +29 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .venv*
app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit app: predict a person's age bucket from an uploaded photo.

Uses the `nateraw/vit-age-classifier` ViT model from the Hugging Face hub.
"""
import torch
import streamlit as st
from PIL import Image

from transformers import ViTFeatureExtractor, ViTForImageClassification


@st.cache_resource
def _load_model():
    """Load the age classifier and its preprocessor once per server process.

    Without caching, Streamlit re-downloads/re-instantiates the model on
    every widget interaction (each script rerun).
    """
    model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
    transforms = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')
    return model, transforms


st.title('나이를 예측')  # "Predict age"
uploaded_file = st.file_uploader("나이 예측 용 파일 업로드")  # "Upload a file for age prediction"
if uploaded_file is not None:
    # Force 3-channel RGB: grayscale or RGBA uploads would otherwise not
    # match the ViT feature extractor's expected input.
    im = Image.open(uploaded_file).convert('RGB')

    model, transforms = _load_model()

    # Preprocess the image and run inference (no gradients needed).
    inputs = transforms(im, return_tensors='pt')
    with torch.no_grad():
        output = model(**inputs)

    # Class probabilities over the model's age buckets.
    proba = output.logits.softmax(1)

    # Top-5 labels mapped to their probabilities, best first.
    values, indices = torch.topk(proba, k=5)
    result_dict = {model.config.id2label[i.item()]: v.item()
                   for i, v in zip(indices[0], values[0])}
    first_result = next(iter(result_dict))

    print(f'predicted result:{result_dict}')
    print(f'first_result: {first_result}')

    st.write(f'예측나이 : {first_result}')  # "Predicted age"
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ transformers
2
+ pillow
3
+ streamlit
4
+ torch
5
+ torchvision
6
+ torchaudio
7
+ requests
sample.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Standalone demo: fetch a sample face image and predict its age bucket.

Same pipeline as app.py, but pulls a fixed example image from the
FairFace repository instead of a user upload.
"""
import requests
import torch
from io import BytesIO
from PIL import Image

from transformers import ViTFeatureExtractor, ViTForImageClassification

# Get example image from official fairface repo + read it in as an image
r = requests.get('https://github.com/dchen236/FairFace/blob/master/detected_faces/race_Asian_face0.jpg?raw=true')
# Fail fast on HTTP errors instead of handing an error page to PIL.
r.raise_for_status()
im = Image.open(BytesIO(r.content))

# Init model, transforms
model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
transforms = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')

# Transform our image and pass it through the model (inference only —
# skip autograd bookkeeping).
inputs = transforms(im, return_tensors='pt')
with torch.no_grad():
    output = model(**inputs)

# Predicted class probabilities over the age buckets.
proba = output.logits.softmax(1)

# Top-5 labels mapped to their probabilities, best first.
values, indices = torch.topk(proba, k=5)
result_dict = {model.config.id2label[i.item()]: v.item()
               for i, v in zip(indices[0], values[0])}
first_result = next(iter(result_dict))

print(f'predicted result:{result_dict}')
print(f'first_result: {first_result}')