Charig Yang committed
Commit 03d46ba
1 Parent(s): b3c0fe5
test

Files changed:
- app.py +35 -0
- full+++++.pth +3 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,35 @@
+import gradio as gr
+import cv2
+import numpy as np
+import torch
+import torch.nn as nn
+import torchvision.models as models
+import einops
+
+def predict(img):
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    model = models.resnet50()
+    model.fc = nn.Linear(2048, 720)
+    resume_path = 'full+++++.pth'
+    model.load_state_dict(torch.load(resume_path))
+    model.to(device)
+    with torch.no_grad():
+        model.eval()
+        img = cv2.resize(img, (224, 224))/255.
+        img = np.stack([einops.rearrange(img, 'h w c -> c h w')], 0)
+        img = torch.Tensor(img).float().to(device)
+        pred = model(img)
+        max_pred = torch.argsort(pred, dim=1, descending=True)
+        max_h = (max_pred[0][0] // 60).item()
+        max_m = (max_pred[0][0] % 60).item()
+        return '{}:{}'.format(str(max_h), str(max_m).zfill(2))
+
+inputs = gr.inputs.Image()
+
+io = gr.Interface(
+    fn=predict,
+    inputs=inputs,
+    outputs="text",
+)
+
+io.launch(share=True)
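In this app.py, the network is a ResNet-50 whose final layer is replaced by a 720-way classifier: each class index c encodes a clock reading as c // 60 hours and c % 60 minutes, which is how predict() turns the top logit into an "H:MM" string. As written, predict() also rebuilds the model and reloads the ~100 MB checkpoint on every call, and torch.load() without map_location can fail on a CPU-only machine if the checkpoint holds CUDA tensors. A minimal sketch of loading the weights once at import time instead (decode_time is a hypothetical helper name; the architecture and checkpoint path are taken from the diff above):

import torch
import torch.nn as nn
import torchvision.models as models

# Same architecture as in app.py: ResNet-50 with a 720-way head
# (12 hours x 60 minutes), weights stored in full+++++.pth.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = models.resnet50()
model.fc = nn.Linear(2048, 720)
# map_location keeps CPU-only deployments working even if the
# checkpoint was saved from a GPU run.
model.load_state_dict(torch.load('full+++++.pth', map_location=device))
model.to(device)
model.eval()

def decode_time(class_idx: int) -> str:
    # Class index c encodes the reading as c // 60 hours and c % 60 minutes.
    return '{}:{}'.format(class_idx // 60, str(class_idx % 60).zfill(2))

With the model held at module level, predict() only has to do the resize, normalisation and forward pass per request.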
full+++++.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f709d84fc468882e84007a810253c02933154988009016fe0cb76a5cc99e5b6
+size 100250049
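The blob committed as full+++++.pth is a Git LFS pointer rather than the weights themselves: it records the pointer spec version, the sha256 of the real checkpoint, and its size (100,250,049 bytes). A small sketch, assuming the actual file has been fetched (e.g. with git lfs pull), that checks a local copy against the values in this pointer:

import hashlib
import os

# Values copied from the LFS pointer above.
EXPECTED_SHA256 = '7f709d84fc468882e84007a810253c02933154988009016fe0cb76a5cc99e5b6'
EXPECTED_SIZE = 100250049

def verify_lfs_object(path='full+++++.pth'):
    # A pointer file is only ~130 bytes, so a size check quickly catches
    # the common case of the LFS object not having been pulled.
    assert os.path.getsize(path) == EXPECTED_SIZE, 'size mismatch (still an LFS pointer?)'
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    assert digest.hexdigest() == EXPECTED_SHA256, 'sha256 mismatch'

verify_lfs_object()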
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+opencv-python
+numpy
+torch
+torchvision
+einops
+gradio