Commit: test

Changed files:
- app.py +21 -2
- config.py +6 -0
- model_processing.py +158 -0
- requirements.txt +256 -0
- video_input.py +27 -0
app.py
CHANGED
@@ -1,4 +1,23 @@
-import streamlit as st
-
-
-st.
+import streamlit as st
+
+from model_processing import *
+from video_input import video_input
+
+st.set_page_config(
+    page_title=r"Analysis TikTok",
+    page_icon=":dizzy:",
+    layout="wide"
+)
+
+model = TikTokAnalytics()
+video_path, video_container, message_container = video_input()
+st.session_state['video_path'] = video_path
+
+if 'video_path' in st.session_state:
+    with st.spinner('Wait. We analyze your video'):
+        final_metric = [0, 0.1]  # model(video_path)[0]
+        message_container.write(final_metric)
+
+        message_container.write(
+            'People will like your video' if final_metric[1] > final_metric[0] else "People won't like your video"
+        )
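
Note on the placeholder above: final_metric = [0, 0.1] stands in for the commented-out call model(video_path)[0]. Below is a minimal sketch of the intended decision rule, using a dummy score tensor in place of the real model output (an assumption on my part; per model_processing.py, VideoModel ends in a sigmoid over two outputs, so the call would return a (1, 2) tensor of scores).

import torch

# Dummy stand-in for model(video_path); index 0 is read as "won't like",
# index 1 as "will like", matching the comparison in app.py.
scores = torch.tensor([[0.21, 0.64]])
final_metric = scores[0]  # what model(video_path)[0] would yield

verdict = (
    'People will like your video'
    if final_metric[1] > final_metric[0]
    else "People won't like your video"
)
print(final_metric.tolist(), verdict)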
config.py
ADDED
@@ -0,0 +1,6 @@
+FPS_DIV = 3  # Factor by which we downsample the fps. All TikTok videos are 30 fps, so effectively we analyze at fps = 10
+MAX_LENGTH = 90  # Fixed number of frames, i.e. each video is trimmed to max_length / 30 * fps_mult seconds
+
+BATCH_SIZE = 4  # Number of frames processed in one batch
+
+MODEL_PATH = './src/cash/best-model.th'
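
To spell out the arithmetic behind these constants, a small worked sketch (the 30 fps source rate is taken from the comments above; the values mirror config.py):

# Worked example of the frame-sampling arithmetic in config.py.
FPS_DIV = 3        # keep every 3rd frame
MAX_LENGTH = 90    # keep at most 90 of the retained frames
SOURCE_FPS = 30    # assumption stated in the config comments

effective_fps = SOURCE_FPS / FPS_DIV         # 30 / 3 = 10 frames analyzed per second
clip_seconds = MAX_LENGTH / effective_fps    # 90 / 10 = 9 seconds of video covered
print(effective_fps, clip_seconds)           # 10.0 9.0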
model_processing.py
ADDED
@@ -0,0 +1,158 @@
+import ffmpegio
+import gc
+import torch
+from transformers import MobileViTImageProcessor, MobileViTForSemanticSegmentation
+from config import FPS_DIV, MAX_LENGTH, BATCH_SIZE, MODEL_PATH
+
+
+class PreprocessModel(torch.nn.Module):
+    device = 'cpu'
+
+    def __init__(self):
+        super().__init__()
+        self.feature_extractor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
+        self.mobile_vit = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
+        self.convs = torch.nn.Sequential(
+            torch.nn.MaxPool2d(2, 2)
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.mobile_vit(x).logits
+        x = self.convs(x)
+        return x
+
+    def read_video(self, path: str) -> torch.Tensor:
+        """
+        Reads a video and returns a tensor of features
+        """
+
+        _, video = ffmpegio.video.read(path, t=1.0)
+        video = video[::FPS_DIV][:MAX_LENGTH]
+
+        out_seg_video = []
+
+        for i in range(0, video.shape[0], BATCH_SIZE):
+            frames = [video[j] for j in range(i, min(i + BATCH_SIZE, video.shape[0]))]
+            frames = self.feature_extractor(images=frames, return_tensors='pt')['pixel_values']
+
+            out = self.forward(frames.to(self.device)).detach().to('cpu')
+            out_seg_video.append(out)
+
+            del frames, out
+            gc.collect()
+            if self.device == 'cuda':
+                torch.cuda.empty_cache()
+
+        return torch.cat(out_seg_video)
+
+
+class VideoModel(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        p = 0.5
+        self.pic_cnn = torch.nn.Sequential(
+            torch.nn.Conv2d(21, 128, (2, 2), stride=2),
+            torch.nn.BatchNorm2d(128),
+            torch.nn.LeakyReLU(),
+            torch.nn.Conv2d(128, 256, (2, 2), stride=2),
+            torch.nn.BatchNorm2d(256),
+            torch.nn.Dropout2d(p),
+            torch.nn.LeakyReLU(),
+            torch.nn.Conv2d(256, 256, (4, 4), stride=2),
+            torch.nn.BatchNorm2d(256),
+            torch.nn.Dropout2d(p),
+            torch.nn.Flatten()
+        )
+
+        self.vid_cnn = torch.nn.Sequential(
+            torch.nn.Conv2d(21, 128, (2, 2), stride=2),
+            torch.nn.BatchNorm2d(128),
+            torch.nn.Tanh(),
+            torch.nn.Conv2d(128, 256, (2, 2), stride=2),
+            torch.nn.BatchNorm2d(256),
+            torch.nn.Dropout2d(p),
+            torch.nn.LeakyReLU(),
+            torch.nn.Conv2d(256, 512, (2, 2), stride=2),
+            torch.nn.BatchNorm2d(512),
+            torch.nn.Dropout2d(p),
+            torch.nn.Flatten()
+        )
+
+        self.lstm = torch.nn.LSTM(2048, 256, 1, batch_first=True, bidirectional=True)
+        self.fc1 = torch.nn.Linear(256 * 2, 1024)
+        self.fc_norm = torch.nn.BatchNorm1d(256 * 2)
+        self.tanh = torch.nn.Tanh()
+        self.fc2 = torch.nn.Linear(1024, 2)
+        self.sigmoid = torch.nn.Sigmoid()
+        self.dropout = torch.nn.Dropout(p)
+
+        # xavier init
+        for m in self.modules():
+            if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv3d):
+                torch.nn.init.xavier_uniform_(m.weight)
+                if m.bias is not None:
+                    torch.nn.init.zeros_(m.bias)
+
+            elif isinstance(m, torch.nn.Linear):
+                torch.nn.init.xavier_uniform_(m.weight)
+                if m.bias is not None:
+                    torch.nn.init.zeros_(m.bias)
+
+    def forward(self, video: torch.Tensor) -> torch.Tensor:
+        """
+        Uses the preview as the initial hidden state and the video frames as the sequence.
+        video[0] is the preview, video[1] is the video
+
+        :param video: torch.Tensor, shape = (batch_size, frames + 1, 1344)
+        """
+        frames = video.shape[0]
+        video = torch.nn.functional.pad(video, (0, 0, 0, 0, 0, 0, MAX_LENGTH + 1 - frames, 0))
+        video = video.unsqueeze(0)
+        _batch_size = video.shape[0]
+
+        _preview = video[:, 0, :, :]
+        _video = video[:, 1:, :, :]
+
+        h0 = self.pic_cnn(_preview).unsqueeze(0)
+        h0 = torch.nn.functional.pad(h0, (0, 0, 0, 0, 0, 1))
+        c0 = torch.zeros_like(h0)
+
+        _video = self.vid_cnn(_video.reshape(-1, 21, 16, 16))
+        _video = _video.reshape(_batch_size, 90, -1)
+
+        context, _ = self.lstm(_video, (h0, c0))
+        out = self.fc_norm(context[:, -1])
+        out = self.tanh(self.fc1(out))
+        out = self.dropout(out)
+        out = self.sigmoid(self.fc2(out))
+        return out
+
+
+# @st.cache_resource
+class TikTokAnalytics(torch.nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.preprocessing_model = PreprocessModel()
+        self.predict_model = torch.load(MODEL_PATH, map_location=self.preprocessing_model.device)
+
+        self.preprocessing_model.eval()
+        self.predict_model.eval()
+
+    def forward(self, path: str) -> torch.Tensor:
+        """
+        Runs preprocessing, then prediction
+        :param path:
+        :return:
+        """
+        tensor = self.preprocessing_model.read_video(path)
+        predict = self.predict_model(tensor)
+
+        return predict
+
+
+# if __name__ == '__main__':
+#     model = TikTokAnalytics()
+#     model = model(
+#         '/Users/victorbarbarich/PycharmProjects/nueramic/vktrbr-video-tiktok/data/videos/video-6930454291186502917.mp4')
+#     print(model)
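
As a shape reference for VideoModel.forward, here is a minimal sketch with dummy data (torch only; it assumes, consistent with the MaxPool2d(2, 2) over the 21-channel MobileViT logits, that read_video yields one (21, 16, 16) feature map per frame):

import torch

MAX_LENGTH = 90  # same value as config.MAX_LENGTH

# Dummy stand-in for read_video output: preview plus 40 frames of (21, 16, 16) features.
features = torch.randn(41, 21, 16, 16)

# The padding step in VideoModel.forward: zeros are prepended along the frame
# dimension until there are MAX_LENGTH + 1 entries, then a batch axis is added.
frames = features.shape[0]
padded = torch.nn.functional.pad(features, (0, 0, 0, 0, 0, 0, MAX_LENGTH + 1 - frames, 0))
batched = padded.unsqueeze(0)

print(padded.shape)   # torch.Size([91, 21, 16, 16])
print(batched.shape)  # torch.Size([1, 91, 21, 16, 16])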
requirements.txt
ADDED
@@ -0,0 +1,256 @@
+# This file may be used to create an environment using:
+# $ conda create --name <env> --file <this file>
+# platform: osx-arm64
+aiohttp=3.8.3=pypi_0
+aiomoex=2.0.0=pypi_0
+aiosignal=1.2.0=pypi_0
+alembic=1.8.1=pypi_0
+altair=4.2.0=pypi_0
+ansi2html=1.8.0=pypi_0
+anyio=3.6.1=pypi_0
+appnope=0.1.3=pyhd8ed1ab_0
+apscheduler=3.9.1=pypi_0
+arch=5.2.0=pypi_0
+argon2-cffi=21.3.0=pyhd8ed1ab_0
+argon2-cffi-bindings=21.2.0=py39h02fc5c5_3
+asttokens=2.1.0=pyhd8ed1ab_0
+async-generator=1.10=pypi_0
+async-timeout=4.0.2=pypi_0
+attrs=22.1.0=pyh71513ae_1
+backcall=0.2.0=pyh9f0ad1d_0
+backports=1.0=py_2
+backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0
+beautifulsoup4=4.11.1=pyha770c72_0
+bleach=5.0.1=pyhd8ed1ab_0
+blinker=1.5=pypi_0
+brotli=1.0.9=pypi_0
+brotlipy=0.7.0=py39h02fc5c5_1005
+build=0.8.0=pypi_0
+bzip2=1.0.8=h3422bc3_4
+ca-certificates=2022.12.7=h4653dfc_0
+cachetools=5.2.0=pypi_0
+catboost=1.0.6=pypi_0
+certifi=2022.12.7=pyhd8ed1ab_0
+certipy=0.1.3=pypi_0
+cffi=1.15.1=py39h7e6b969_1
+charset-normalizer=2.1.1=pyhd8ed1ab_0
+click=8.1.3=pypi_0
+colorama=0.4.6=pyhd8ed1ab_0
+commonmark=0.9.1=pypi_0
+conda=23.1.0=py39h2804cbe_0
+conda-package-handling=1.9.0=py39h02fc5c5_1
+cryptography=38.0.3=py39he2a39a8_0
+cycler=0.11.0=pypi_0
+dash=2.6.1=pypi_0
+dash-core-components=2.0.0=pypi_0
+dash-html-components=2.0.0=pypi_0
+dash-table=5.0.0=pypi_0
+dataclasses=0.6=pypi_0
+debugpy=1.6.3=py39h23fbdae_1
+decorator=5.1.1=pyhd8ed1ab_0
+defusedxml=0.7.1=pyhd8ed1ab_0
+docutils=0.18.1=pypi_0
+entrypoints=0.4=pyhd8ed1ab_0
+esgscraper=1.0.0=pypi_0
+et-xmlfile=1.1.0=pypi_0
+exceptiongroup=1.0.4=pypi_0
+executing=1.2.0=pyhd8ed1ab_0
+ffmpegio=0.7.0=pypi_0
+ffmpegio-core=0.8.0=pypi_0
+flask=2.2.2=pypi_0
+flask-compress=1.12=pypi_0
+flit-core=3.8.0=pyhd8ed1ab_0
+fonttools=4.33.3=pypi_0
+frozenlist=1.3.1=pypi_0
+gh=2.22.1=h75b854d_0
+gitdb=4.0.9=pypi_0
+gitpython=3.1.27=pypi_0
+greenlet=2.0.1=pypi_0
+h11=0.12.0=pypi_0
+httpcore=0.15.0=pypi_0
+httpx=0.23.0=pypi_0
+huggingface-hub=0.13.1=pypi_0
+idna=3.4=pyhd8ed1ab_0
+importlib-metadata=5.0.0=pyha770c72_1
+importlib_resources=5.10.0=pyhd8ed1ab_0
+iniconfig=1.1.1=pypi_0
+ipykernel=6.13.0=py39h32adebf_0
+ipython=8.6.0=pyhd1c38e8_1
+ipython_genutils=0.2.0=py_1
+ipywidgets=7.7.1=pypi_0
+itsdangerous=2.1.2=pypi_0
+jedi=0.18.1=pyhd8ed1ab_2
+jinja2=3.1.2=pyhd8ed1ab_1
+joblib=1.1.0=pypi_0
+jsonschema=4.17.0=pyhd8ed1ab_0
+jupyter=1.0.0=py39h2804cbe_7
+jupyter-dash=0.4.2=pypi_0
+jupyter-telemetry=0.1.0=pypi_0
+jupyter_client=7.3.1=pyhd8ed1ab_0
+jupyter_console=6.4.3=pyhd8ed1ab_0
+jupyter_core=5.0.0=py39h2804cbe_0
+jupyterhub=3.1.0=pypi_0
+jupyterlab_pygments=0.2.2=pyhd8ed1ab_0
+jupyterlab_widgets=1.1.1=pyhd8ed1ab_0
+kaggle=1.5.12=pypi_0
+kaleido=0.2.1=pypi_0
+keyring=23.5.1=pypi_0
+kiwisolver=1.4.2=pypi_0
+libblas=3.9.0=16_osxarm64_openblas
+libcblas=3.9.0=16_osxarm64_openblas
+libcxx=14.0.6=h2692d47_0
+libffi=3.4.2=h3422bc3_5
+libgfortran=5.0.0=11_3_0_hd922786_26
+libgfortran5=11.3.0=hdaf2cc0_26
+liblapack=3.9.0=16_osxarm64_openblas
+libopenblas=0.3.21=openmp_hc731615_3
+libsodium=1.0.18=h27ca646_1
+libsqlite=3.40.0=h76d750c_0
+libzlib=1.2.13=h03a7124_4
+lightgbm=3.3.2=py39h8685b90_0
+llvm-openmp=15.0.5=h7cfbb63_0
+llvmlite=0.38.1=pypi_0
+lxml=4.9.0=pypi_0
+mako=1.2.4=pypi_0
+markupsafe=2.1.1=py39h02fc5c5_2
+matplotlib=3.5.2=pypi_0
+matplotlib-inline=0.1.6=pyhd8ed1ab_0
+mistune=0.8.4=py39h5161555_1005
+mpmath=1.2.1=pypi_0
+multidict=6.0.2=pypi_0
+multitasking=0.0.10=pypi_0
+nbclient=0.5.13=pyhd8ed1ab_0
+nbconvert=6.4.5=py39h2804cbe_0
+nbformat=5.7.0=pyhd8ed1ab_0
+ncurses=6.3=h07bb92c_1
+nest-asyncio=1.5.6=pyhd8ed1ab_0
+networkx=2.8.4=pypi_0
+notebook=6.4.11=pyha770c72_0
+nueramic-mathml=0.75.2=pypi_0
+numpy=1.22.4=pypi_0
+oauthlib=3.2.2=pypi_0
+openpyxl=3.0.10=pypi_0
+openssl=3.0.8=h03a7124_0
+outcome=1.2.0=pypi_0
+packaging=21.3=pyhd8ed1ab_0
+pamela=1.0.0=pypi_0
+pandas=1.4.2=pypi_0
+pandoc=2.19.2=hce30654_1
+pandocfilters=1.5.0=pyhd8ed1ab_0
+parso=0.8.3=pyhd8ed1ab_0
+patsy=0.5.2=pypi_0
+pep517=0.12.0=pypi_0
+pexpect=4.8.0=pyh1a96a4e_2
+pickleshare=0.7.5=py_1003
+pillow=9.2.0=pypi_0
+pip=22.3.1=pypi_0
+pkginfo=1.8.2=pypi_0
+pkgutil-resolve-name=1.3.10=pyhd8ed1ab_0
+platformdirs=2.5.2=pyhd8ed1ab_1
+playwright=1.28.0=pypi_0
+plotly=5.8.2=pypi_0
+pluggy=1.0.0=pyhd8ed1ab_5
+prometheus_client=0.15.0=pyhd8ed1ab_0
+prompt-toolkit=3.0.30=pypi_0
+prompt_toolkit=3.0.32=hd8ed1ab_0
+property-cached=1.6.4=pypi_0
+protobuf=3.20.3=pypi_0
+psutil=5.9.4=py39h02fc5c5_0
+ptyprocess=0.7.0=pyhd3deb0d_0
+pure_eval=0.2.2=pyhd8ed1ab_0
+pyarrow=9.0.0=pypi_0
+pycosat=0.6.4=py39h02fc5c5_1
+pycparser=2.21=pyhd8ed1ab_0
+pydeck=0.8.0b3=pypi_0
+pyee=9.0.4=pypi_0
+pygments=2.13.0=pyhd8ed1ab_0
+pympler=1.0.1=pypi_0
+pyopenssl=22.1.0=pyhd8ed1ab_0
+pyparsing=3.0.9=pyhd8ed1ab_0
+pyrsistent=0.19.2=py39h02fc5c5_0
+pysocks=1.7.1=pyha2e5f31_6
+pytest=7.2.0=pypi_0
+python=3.9.13=h96fcbfb_0_cpython
+python-dateutil=2.8.2=pyhd8ed1ab_0
+python-fastjsonschema=2.16.2=pyhd8ed1ab_0
+python-graphviz=0.20=pypi_0
+python-json-logger=2.0.4=pypi_0
+python-slugify=6.1.2=pypi_0
+python-telegram-bot=20.0a4=pypi_0
+python_abi=3.9=2_cp39
+pytz=2022.1=pypi_0
+pytz-deprecation-shim=0.1.0.post0=pypi_0
+pyyaml=6.0=pypi_0
+pyzmq=24.0.1=py39h0553236_1
+readline=8.1.2=h46ed386_0
+readme-renderer=35.0=pypi_0
+regex=2022.10.31=pypi_0
+requests=2.28.1=pyhd8ed1ab_1
+requests-toolbelt=0.9.1=pypi_0
+retrying=1.3.3=pypi_0
+rfc3986=1.5.0=pypi_0
+rich=12.4.4=pypi_0
+ruamel.yaml=0.17.21=py39h02fc5c5_2
+ruamel.yaml.clib=0.2.7=py39h02fc5c5_1
+ruamel_yaml=0.15.80=py39h02fc5c5_1008
+scikit-learn=1.1.1=pypi_0
+scipy=1.8.1=pypi_0
+selenium=4.6.1=pypi_0
+semver=2.13.0=pypi_0
+send2trash=1.8.0=pyhd8ed1ab_0
+setuptools=62.6.0=pypi_0
+six=1.16.0=pyh6c4a22f_0
+slicer=0.0.7=pypi_0
+smmap=5.0.0=pypi_0
+sniffio=1.3.0=pypi_0
+sortedcontainers=2.4.0=pypi_0
+soupsieve=2.3.2.post1=pyhd8ed1ab_0
+split-folders=0.5.1=pypi_0
+sqlalchemy=1.4.41=pypi_0
+sqldf=0.4.2=pypi_0
+sqlite=3.40.0=h2229b38_0
+stack_data=0.6.1=pyhd8ed1ab_0
+statsmodels=0.13.2=pypi_0
+streamlit=1.20.0=pypi_0
+sympy=1.10.1=pypi_0
+telegram=0.0.1=pypi_0
+tenacity=8.0.1=pypi_0
+terminado=0.15.0=py39h2804cbe_0
+testpath=0.6.0=pyhd8ed1ab_0
+text-unidecode=1.3=pypi_0
+threadpoolctl=3.1.0=pyh8a188c0_0
+tiktokapi=5.2.2=pypi_0
+tk=8.6.12=he1e0b03_0
+tokenizers=0.13.2=pypi_0
+toml=0.10.2=pypi_0
+tomli=2.0.1=pypi_0
+toolz=0.12.0=pyhd8ed1ab_0
+torch=1.13.0.dev20220529=pypi_0
+torchaudio=0.11.0=pypi_0
+torchvision=0.12.0=pypi_0
+tornado=6.2=py39h02fc5c5_1
+tqdm=4.64.0=pypi_0
+traitlets=5.3.0=pypi_0
+transformers=4.26.1=pypi_0
+trio=0.21.0=pypi_0
+trio-websocket=0.9.2=pypi_0
+twine=4.0.1=pypi_0
+typing-extensions=4.2.0=pypi_0
+typing_extensions=4.4.0=pyha770c72_0
+tzdata=2022.4=pypi_0
+tzlocal=4.2=pypi_0
+urllib3=1.26.11=pyhd8ed1ab_0
+validators=0.20.0=pypi_0
+wcwidth=0.2.5=pyh9f0ad1d_2
+webencodings=0.5.1=py_1
+werkzeug=2.2.2=pypi_0
+wheel=0.38.4=pyhd8ed1ab_0
+widgetsnbextension=3.6.0=py39h2804cbe_0
+wsproto=1.2.0=pypi_0
+xz=5.2.6=h57fd34a_0
+yaml=0.2.5=h3422bc3_2
+yarl=1.8.1=pypi_0
+yfinance=0.1.70=pypi_0
+zeromq=4.3.4=hbdafb3b_1
+zipp=3.10.0=pyhd8ed1ab_0
+zlib=1.2.13=h03a7124_4
video_input.py
ADDED
@@ -0,0 +1,27 @@
+import streamlit as st
+
+
+def video_input() -> [str, st.container, st.container]:
+    """
+
+    :return: path to the uploaded video file
+    """
+
+    st.title("Video Input")
+    video_file = st.file_uploader("Upload a video", type=["mp4"])
+
+    if video_file is None:
+        st.stop()
+
+    _, video_container, message_container, _ = st.columns([1, 2, 2, 1])
+    video_container = video_container.empty()
+
+    if video_file is not None:
+        video_container.video(video_file)
+
+    file_path = f'./{abs(hash(str(video_file)))}.mp4'
+
+    with open(file_path, 'wb') as file:
+        file.write(video_file.read())
+
+    return file_path, video_container, message_container
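
A side note on the saved upload: video_input writes the file next to the app under a hash-derived name so that ffmpegio can read it by path. A hypothetical alternative using the standard-library tempfile module (a sketch, not what this Space does) could look like:

import tempfile

def save_upload_to_temp(uploaded_file) -> str:
    """Persist a Streamlit upload to a named temporary .mp4 and return its path.

    Sketch only: delete=False keeps the file on disk so it can be read later
    by path; cleanup would be the caller's responsibility.
    """
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp:
        tmp.write(uploaded_file.read())
        return tmp.name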