Upload 2 files
Browse files- mini-app.py +87 -0
- requirements.txt +7 -0
mini-app.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import io
|
3 |
+
from PIL import Image
|
4 |
+
import numpy as np
|
5 |
+
import cv2
|
6 |
+
# from transformers import AutoImageProcessor, AutoModelForImageClassification
|
7 |
+
from PIL import Image
|
8 |
+
|
9 |
+
import requests
|
10 |
+
from transformers import pipeline
|
11 |
+
|
12 |
+
from torchvision import transforms
|
13 |
+
import torch
|
14 |
+
|
15 |
+
|
16 |
+
def _load_pipelines():
    """Build the three Hugging Face pipelines once and memoize them.

    The tuple is cached on the function object because constructing a
    transformers pipeline loads (and possibly downloads) model weights —
    far too expensive to repeat for every uploaded photo.
    """
    cached = getattr(_load_pipelines, "_cache", None)
    if cached is None:
        cached = (
            pipeline("object-detection", model='hustvl/yolos-small'),
            pipeline(model='nateraw/vit-age-classifier'),
            pipeline(model="openai/clip-vit-large-patch14"),
        )
        _load_pipelines._cache = cached
    return cached


def kwg(photo):
    """Analyse an uploaded photo and write the findings to the Streamlit page.

    Steps: count the people detected on *photo*, classify each person's age
    bracket, and — only when a child is present — zero-shot-classify whether
    the kid is holding something unsafe.

    Parameters
    ----------
    photo : PIL.Image.Image
        The uploaded picture (expected to be RGB).

    Returns nothing; all output goes through ``st.write``.
    """
    obj_detect, age_detect, classifier = _load_pipelines()

    # Keep only the bounding boxes that the detector labelled as a person.
    person_box_list = [obj['box'] for obj in obj_detect(photo)
                       if obj['label'] == 'person']
    if not person_box_list:
        st.write('На фото нет людей')
        return  # no people -> nothing to age-classify, skip the rest
    st.write(f'на фото {len(person_box_list)} персон(а)')

    # Crop every detected person and classify the age bracket of each crop.
    img = np.array(photo)
    ages = []
    for box in person_box_list:
        # Clamp the top-left corner to the image so a box that sticks out
        # past the border cannot produce an empty (or wrapped) slice.
        ymin, xmin = max(0, box['ymin']), max(0, box['xmin'])
        crop = Image.fromarray(img[ymin:box['ymax'], xmin:box['xmax']])
        ages.append(age_detect(crop)[0]['label'])

    if any(bracket in ages for bracket in ('0-2', '3-9', '10-19')):
        st.write('На фото есть дети')
    else:
        st.write('Здесь только взрослые')
        return  # the "kid with ..." classifier is meaningless without kids

    # Zero-shot check on the whole photo: what is the child holding?
    res = classifier(photo, candidate_labels=["kid with gun", "kid with toy", "kid with alcohol drink"])
    if res[0]['label'] == "kid with gun":
        st.write('ОБОЖЕМОЙ у РЕБЕнкА ОРУЖИЕ СДЕЛАЙТЕ ЧТО-НИБУДЬ')
    elif res[0]['label'] == "kid with alcohol drink":
        st.write('ОТДАЙ ПИВО')
    else:
        st.write('какой милый ребеночек :3')
|
54 |
+
|
55 |
+
|
56 |
+
# --- Page setup -------------------------------------------------------------
st.set_page_config(
    page_title="Emotion App!",
    page_icon="😎",
    layout="wide"
)

st.markdown("### Привет!")

# Accept a single image upload; Streamlit reruns this script on every upload.
file = st.file_uploader("Загрузите своё фото:", type=['png','jpeg','jpg'])
if file:
    image_data = file.getvalue()
    # Decode the raw bytes into a PIL image. Force RGB so that PNGs with an
    # alpha channel (RGBA) or palette images do not reach the HF pipelines
    # as 4-channel / indexed arrays.
    image = Image.open(io.BytesIO(image_data)).convert('RGB')
    st.image(image)  # echo the upload back to the user
    kwg(image)
|
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
opencv-python==4.7.0.72
sacremoses==0.0.53
torch==2.0.0
torchvision==0.15.1
transformers
sentencepiece
streamlit
Pillow
numpy
requests
|
7 |
+
|