nanushio committed
Commit 90e9a46 · 1 Parent(s): b3881ed

- [MINOR] [SOURCE] [UPDATE] 1. update app.py
app.py CHANGED
@@ -10,6 +10,7 @@ from decord import VideoReader
 import numpy as np
 import yaml
 import matplotlib.pyplot as plt
+import matplotlib.patches as patches
 
 from cover.datasets import UnifiedFrameSampler, spatial_temporal_view_decomposition
 from cover.models import COVER
@@ -60,18 +61,26 @@ def compare_score(score, score_list):
     return f"Better than {percentage:.0f}% videos in YT-UGC" if percentage > 50 else f"Worse than {100-percentage:.0f}% videos in YT-UGC"
 
 def create_bar_chart(scores, comparisons):
-    labels = ['Semantic', '…
-    …
+    labels = ['Semantic', 'Aesthetic', 'Technical', 'Overall']
+    base_colors = ['#d62728', '#ff7f0e', '#1f77b4', '#bcbd22']
 
     fig, ax = plt.subplots(figsize=(10, 5))
 
-    for i, (label, score, comparison, …
-        …
-        …
-        ax.…
+    for i, (label, score, comparison, base_color) in enumerate(zip(labels, scores, comparisons, base_colors)):
+        color = plt.cm.ScalarMappable(cmap=plt.cm.RdYlBu).to_rgba((score / 5))
+        gradient = patches.Rectangle((0, i), 5, 1, color=base_color, alpha=0.5)
+        ax.add_patch(gradient)
+
+        # Add the actual score line
+        ax.plot([score, score], [i, i+0.9], color='black', linewidth=2)
+
+        ax.text(score + 0.1, i + 0.5, f'{score:.1f}', va='center', ha='left', color=base_color)
+        ax.text(5.1, i + 0.5, comparison, va='center', ha='left', color=base_color)
 
     ax.set_yticks(range(len(labels)))
-    ax.set_yticklabels(labels)
+    ax.set_yticklabels(labels, color=base_colors)
+    ax.set_xticks([0, 1, 2, 3, 4, 5])
+    ax.set_xticklabels([0, 1, 2, 3, 4, 5])
     ax.set_xlim(0, 5)
     ax.set_xlabel('Score')
 
@@ -104,7 +113,6 @@ def inference_one_video(input_video):
         sopt["clip_len"] = clip_len * 2
         if stype == 'technical':
            sopt["aligned"] = sopt["clip_len"]
-        print(sopt["clip_len"], sopt["t_frag"])
         temporal_samplers[stype] = UnifiedFrameSampler(
            sopt["clip_len"] // sopt["t_frag"],
            sopt["t_frag"],
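For reference, the following is a minimal, self-contained sketch (not part of the commit) of the gradient-bar chart that the updated create_bar_chart draws. The scores and comparison strings are hypothetical placeholder values, the Agg backend and saved PNG are my own choices for an offline check, and the y-tick centering plus per-label coloring loop are small deviations from the committed code.

# Hypothetical sketch of the chart produced by the new create_bar_chart();
# example scores/comparisons are made up and the figure is saved rather than returned.
import matplotlib
matplotlib.use("Agg")  # off-screen rendering
import matplotlib.pyplot as plt
import matplotlib.patches as patches

labels = ['Semantic', 'Aesthetic', 'Technical', 'Overall']
base_colors = ['#d62728', '#ff7f0e', '#1f77b4', '#bcbd22']
scores = [3.2, 2.8, 3.9, 3.3]  # hypothetical 0-5 scores
comparisons = [f"Better than {p}% videos in YT-UGC" for p in (62, 55, 78, 64)]  # hypothetical

fig, ax = plt.subplots(figsize=(10, 5))
for i, (score, comparison, base_color) in enumerate(zip(scores, comparisons, base_colors)):
    # translucent full-width band as the bar background
    ax.add_patch(patches.Rectangle((0, i), 5, 1, color=base_color, alpha=0.5))
    # black marker at the actual score
    ax.plot([score, score], [i, i + 0.9], color='black', linewidth=2)
    ax.text(score + 0.1, i + 0.5, f'{score:.1f}', va='center', ha='left', color=base_color)
    ax.text(5.1, i + 0.5, comparison, va='center', ha='left', color=base_color)

ax.set_yticks([i + 0.5 for i in range(len(labels))])  # centered on each band (deviation from the commit)
ax.set_yticklabels(labels)
for tick, c in zip(ax.get_yticklabels(), base_colors):
    tick.set_color(c)  # per-label color instead of passing a list to set_yticklabels
ax.set_xlim(0, 5)
ax.set_ylim(0, len(labels))
ax.set_xlabel('Score')
fig.savefig('cover_scores.png', bbox_inches='tight')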