gospacex committed
Commit
77c0f59
1 Parent(s): 23e0154

Add result

Files changed (2)
  1. results.json +2274 -0
  2. run_vtimellm.txt +9 -0
results.json ADDED
@@ -0,0 +1,2274 @@
1
+ [
2
+ {
3
+ "conversations": [
4
+ {
5
+ "from": "human",
6
+ "value": "What does the adult man in black hold?"
7
+ },
8
+ {
9
+ "from": "gpt",
10
+ "value": "The adult man in black is holding a glass cup. The cup appears in the video from frame 20 at position [0.378, 0.529, 0.428, 0.658] and stops appearing at position [0.287, 0.504, 0.338, 0.658] in frame 99."
11
+ }
12
+ ],
13
+ "meta": {
14
+ "vid": "2727682922",
15
+ "frame_count": 100,
16
+ "start_frame": 20,
17
+ "end_frame": 99,
18
+ "start_bbox": [
19
+ 0.378,
20
+ 0.529,
21
+ 0.428,
22
+ 0.658
23
+ ],
24
+ "end_bbox": [
25
+ 0.287,
26
+ 0.504,
27
+ 0.338,
28
+ 0.658
29
+ ],
30
+ "width": 320,
31
+ "height": 240
32
+ }
33
+ },
34
+ {
35
+ "conversations": [
36
+ {
37
+ "from": "human",
38
+ "value": "Is there a white horse beneath the adult man in black?"
39
+ },
40
+ {
41
+ "from": "gpt",
42
+ "value": "In the video, the white horse is initially located at position [0.82, 0.379, 0.998, 0.721] in frame 10 and stops appearing at position [0.502, 0.35, 0.895, 0.767] in frame 99. The adult man in black is above the white horse in the video."
43
+ }
44
+ ],
45
+ "meta": {
46
+ "vid": "2769996915",
47
+ "frame_count": 100,
48
+ "start_frame": 10,
49
+ "end_frame": 99,
50
+ "start_bbox": [
51
+ 0.82,
52
+ 0.379,
53
+ 0.998,
54
+ 0.721
55
+ ],
56
+ "end_bbox": [
57
+ 0.502,
58
+ 0.35,
59
+ 0.895,
60
+ 0.767
61
+ ],
62
+ "width": 640,
63
+ "height": 480
64
+ }
65
+ },
66
+ {
67
+ "conversations": [
68
+ {
69
+ "from": "human",
70
+ "value": "Who is the child wearing blue pants towards to?"
71
+ },
72
+ {
73
+ "from": "gpt",
74
+ "value": "In the video, a child wearing blue pants is towards an adult wearing a mask. The adult can be seen initially at position [0.156, 0.075, 0.361, 0.714] in frame 23 and stops appearing at position [0.169, 0.067, 0.378, 0.731] in frame 42."
75
+ }
76
+ ],
77
+ "meta": {
78
+ "vid": "4655213495",
79
+ "frame_count": 100,
80
+ "start_frame": 23,
81
+ "end_frame": 42,
82
+ "start_bbox": [
83
+ 0.156,
84
+ 0.075,
85
+ 0.361,
86
+ 0.714
87
+ ],
88
+ "end_bbox": [
89
+ 0.169,
90
+ 0.067,
91
+ 0.378,
92
+ 0.731
93
+ ],
94
+ "width": 640,
95
+ "height": 360
96
+ }
97
+ },
98
+ {
99
+ "conversations": [
100
+ {
101
+ "from": "human",
102
+ "value": "What is the child next to on the ground?"
103
+ },
104
+ {
105
+ "from": "gpt",
106
+ "value": "In frame 39, the child is next to a toy at position [0.225, 0.628, 0.35, 0.833] on the ground. However, in frame 48, the toy stops appearing at position [0.178, 0.578, 0.289, 0.742]. Therefore, the answer is that the child was next to a toy on the ground."
107
+ }
108
+ ],
109
+ "meta": {
110
+ "vid": "4598213889",
111
+ "frame_count": 100,
112
+ "start_frame": 39,
113
+ "end_frame": 48,
114
+ "start_bbox": [
115
+ 0.225,
116
+ 0.628,
117
+ 0.35,
118
+ 0.833
119
+ ],
120
+ "end_bbox": [
121
+ 0.178,
122
+ 0.578,
123
+ 0.289,
124
+ 0.742
125
+ ],
126
+ "width": 640,
127
+ "height": 360
128
+ }
129
+ },
130
+ {
131
+ "conversations": [
132
+ {
133
+ "from": "human",
134
+ "value": "Who hugs the other child in black in the room?"
135
+ },
136
+ {
137
+ "from": "gpt",
138
+ "value": "In frame 65, there is a child in white at position [0.398, 0.121, 0.867, 0.867] hugging another child in black in the room. The child in white stops appearing in frame 92 at position [0.531, 0.281, 0.767, 0.827]. So, the child in white hugs the other child in black in the room."
139
+ }
140
+ ],
141
+ "meta": {
142
+ "vid": "4284560730",
143
+ "frame_count": 100,
144
+ "start_frame": 65,
145
+ "end_frame": 92,
146
+ "start_bbox": [
147
+ 0.398,
148
+ 0.121,
149
+ 0.867,
150
+ 0.867
151
+ ],
152
+ "end_bbox": [
153
+ 0.531,
154
+ 0.281,
155
+ 0.767,
156
+ 0.827
157
+ ],
158
+ "width": 640,
159
+ "height": 480
160
+ }
161
+ },
162
+ {
163
+ "conversations": [
164
+ {
165
+ "from": "human",
166
+ "value": "Is the child wearing black pants away from the adult in the video?"
167
+ },
168
+ {
169
+ "from": "gpt",
170
+ "value": "Yes, in frame 69, the child wearing black pants [0.713, 0.519, 0.852, 0.997] is away from the adult. The child stops appearing at position [0.709, 0.422, 0.792, 0.736] in frame 93."
171
+ }
172
+ ],
173
+ "meta": {
174
+ "vid": "6391532457",
175
+ "frame_count": 100,
176
+ "start_frame": 69,
177
+ "end_frame": 93,
178
+ "start_bbox": [
179
+ 0.713,
180
+ 0.519,
181
+ 0.852,
182
+ 0.997
183
+ ],
184
+ "end_bbox": [
185
+ 0.709,
186
+ 0.422,
187
+ 0.792,
188
+ 0.736
189
+ ],
190
+ "width": 640,
191
+ "height": 360
192
+ }
193
+ },
194
+ {
195
+ "conversations": [
196
+ {
197
+ "from": "human",
198
+ "value": "Who hugs the toy in the video?"
199
+ },
200
+ {
201
+ "from": "gpt",
202
+ "value": "In the video, a child in a hat is seen hugging a toy. The child is initially located at position [0.247, 0.25, 0.359, 0.738] in frame 46 and stops appearing at position [0.312, 0.225, 0.553, 0.996] in frame 99."
203
+ }
204
+ ],
205
+ "meta": {
206
+ "vid": "2440057130",
207
+ "frame_count": 100,
208
+ "start_frame": 46,
209
+ "end_frame": 99,
210
+ "start_bbox": [
211
+ 0.247,
212
+ 0.25,
213
+ 0.359,
214
+ 0.738
215
+ ],
216
+ "end_bbox": [
217
+ 0.312,
218
+ 0.225,
219
+ 0.553,
220
+ 0.996
221
+ ],
222
+ "width": 320,
223
+ "height": 240
224
+ }
225
+ },
226
+ {
227
+ "conversations": [
228
+ {
229
+ "from": "human",
230
+ "value": "Who opens the bus outdoors in the video?"
231
+ },
232
+ {
233
+ "from": "gpt",
234
+ "value": "In the video, an adult in green opens a bus outdoors. The target adult appears at position [0.388, 0.317, 0.453, 0.592] in frame 31 and stops appearing at position [0.389, 0.319, 0.458, 0.597] in frame 38."
235
+ }
236
+ ],
237
+ "meta": {
238
+ "vid": "7791533260",
239
+ "frame_count": 100,
240
+ "start_frame": 31,
241
+ "end_frame": 38,
242
+ "start_bbox": [
243
+ 0.388,
244
+ 0.317,
245
+ 0.453,
246
+ 0.592
247
+ ],
248
+ "end_bbox": [
249
+ 0.389,
250
+ 0.319,
251
+ 0.458,
252
+ 0.597
253
+ ],
254
+ "width": 640,
255
+ "height": 360
256
+ }
257
+ },
258
+ {
259
+ "conversations": [
260
+ {
261
+ "from": "human",
262
+ "value": "Who is in front of the black sofa on the blanket?"
263
+ },
264
+ {
265
+ "from": "gpt",
266
+ "value": "In the video, there is a child [0.573, 0.006, 0.752, 0.808] in gray and white on the blanket. The child appears in front of the black sofa on the blanket from frame 85 to frame 90. Therefore, the answer is a child."
267
+ }
268
+ ],
269
+ "meta": {
270
+ "vid": "8212773822",
271
+ "frame_count": 100,
272
+ "start_frame": 85,
273
+ "end_frame": 90,
274
+ "start_bbox": [
275
+ 0.573,
276
+ 0.006,
277
+ 0.752,
278
+ 0.808
279
+ ],
280
+ "end_bbox": [
281
+ 0.598,
282
+ 0.006,
283
+ 0.817,
284
+ 0.478
285
+ ],
286
+ "width": 640,
287
+ "height": 360
288
+ }
289
+ },
290
+ {
291
+ "conversations": [
292
+ {
293
+ "from": "human",
294
+ "value": "Where is the dog away from another dog in the room?"
295
+ },
296
+ {
297
+ "from": "gpt",
298
+ "value": "In frame 88, the dog is located at position [0.372, 0.282, 0.556, 0.527] away from another dog in the room. However, by frame 91, the dog has moved to position [0.481, 0.277, 0.662, 0.452], no longer being away from the other dog. Therefore, the dog initially positioned at [0.372, 0.282, 0.556, 0.527] is now away from the other dog in the room."
299
+ }
300
+ ],
301
+ "meta": {
302
+ "vid": "6341769390",
303
+ "frame_count": 100,
304
+ "start_frame": 88,
305
+ "end_frame": 91,
306
+ "start_bbox": [
307
+ 0.372,
308
+ 0.282,
309
+ 0.556,
310
+ 0.527
311
+ ],
312
+ "end_bbox": [
313
+ 0.481,
314
+ 0.277,
315
+ 0.662,
316
+ 0.452
317
+ ],
318
+ "width": 640,
319
+ "height": 1138
320
+ }
321
+ },
322
+ {
323
+ "conversations": [
324
+ {
325
+ "from": "human",
326
+ "value": "Can you describe the position of the child next to the adult in white on the sofa?"
327
+ },
328
+ {
329
+ "from": "gpt",
330
+ "value": "The child is initially located at position [0.423, 0.311, 0.517, 0.622] in frame 85 and stops appearing at position [0.072, 0.003, 0.191, 0.689] in frame 98 next to the adult in white on the sofa."
331
+ }
332
+ ],
333
+ "meta": {
334
+ "vid": "8890945814",
335
+ "frame_count": 100,
336
+ "start_frame": 85,
337
+ "end_frame": 98,
338
+ "start_bbox": [
339
+ 0.423,
340
+ 0.311,
341
+ 0.517,
342
+ 0.622
343
+ ],
344
+ "end_bbox": [
345
+ 0.072,
346
+ 0.003,
347
+ 0.191,
348
+ 0.689
349
+ ],
350
+ "width": 640,
351
+ "height": 360
352
+ }
353
+ },
354
+ {
355
+ "conversations": [
356
+ {
357
+ "from": "human",
358
+ "value": "Who hugs the red toy in the video?"
359
+ },
360
+ {
361
+ "from": "gpt",
362
+ "value": "In the video, a baby with blond hair is seen hugging the red toy. The baby is initially located at position [0.272, 0.0, 0.802, 0.875] in frame 32 and stops appearing at position [0.384, 0.181, 0.808, 0.965] in frame 36."
363
+ }
364
+ ],
365
+ "meta": {
366
+ "vid": "6314288870",
367
+ "frame_count": 100,
368
+ "start_frame": 32,
369
+ "end_frame": 36,
370
+ "start_bbox": [
371
+ 0.272,
372
+ 0.0,
373
+ 0.802,
374
+ 0.875
375
+ ],
376
+ "end_bbox": [
377
+ 0.384,
378
+ 0.181,
379
+ 0.808,
380
+ 0.965
381
+ ],
382
+ "width": 640,
383
+ "height": 480
384
+ }
385
+ },
386
+ {
387
+ "conversations": [
388
+ {
389
+ "from": "human",
390
+ "value": "What does the baby with blond-hair hug?"
391
+ },
392
+ {
393
+ "from": "gpt",
394
+ "value": "The baby with blond-hair is hugging a red toy. The toy is initially located at [0.269, 0.092, 0.558, 0.569] in frame 32 and stops appearing at position [0.269, 0.338, 0.497, 0.779] in frame 36."
395
+ }
396
+ ],
397
+ "meta": {
398
+ "vid": "6314288870",
399
+ "frame_count": 100,
400
+ "start_frame": 32,
401
+ "end_frame": 36,
402
+ "start_bbox": [
403
+ 0.269,
404
+ 0.092,
405
+ 0.558,
406
+ 0.569
407
+ ],
408
+ "end_bbox": [
409
+ 0.269,
410
+ 0.338,
411
+ 0.497,
412
+ 0.779
413
+ ],
414
+ "width": 640,
415
+ "height": 480
416
+ }
417
+ },
418
+ {
419
+ "conversations": [
420
+ {
421
+ "from": "human",
422
+ "value": "Who is hugging the toy in red on the bed?"
423
+ },
424
+ {
425
+ "from": "gpt",
426
+ "value": "In the video, a baby is hugging a toy in red on the bed. The baby is initially located at position [0.155, 0.191, 0.616, 0.997] in frame 96 and stops appearing at position [0.181, 0.036, 0.652, 0.981] in frame 99."
427
+ }
428
+ ],
429
+ "meta": {
430
+ "vid": "8109383060",
431
+ "frame_count": 100,
432
+ "start_frame": 96,
433
+ "end_frame": 99,
434
+ "start_bbox": [
435
+ 0.155,
436
+ 0.191,
437
+ 0.616,
438
+ 0.997
439
+ ],
440
+ "end_bbox": [
441
+ 0.181,
442
+ 0.036,
443
+ 0.652,
444
+ 0.981
445
+ ],
446
+ "width": 640,
447
+ "height": 362
448
+ }
449
+ },
450
+ {
451
+ "conversations": [
452
+ {
453
+ "from": "human",
454
+ "value": "Who is biting the yellow bread in the video?"
455
+ },
456
+ {
457
+ "from": "gpt",
458
+ "value": "In the video, an adult woman in black is biting a yellow bread. The person initially appears at position [0.175, 0.0, 0.917, 0.998] in frame 81 and stops appearing at position [0.13, 0.002, 0.998, 0.998] in frame 85."
459
+ }
460
+ ],
461
+ "meta": {
462
+ "vid": "3127391012",
463
+ "frame_count": 100,
464
+ "start_frame": 81,
465
+ "end_frame": 85,
466
+ "start_bbox": [
467
+ 0.175,
468
+ 0.0,
469
+ 0.917,
470
+ 0.998
471
+ ],
472
+ "end_bbox": [
473
+ 0.13,
474
+ 0.002,
475
+ 0.998,
476
+ 0.998
477
+ ],
478
+ "width": 640,
479
+ "height": 480
480
+ }
481
+ },
482
+ {
483
+ "conversations": [
484
+ {
485
+ "from": "human",
486
+ "value": "Who holds the toy in the video?"
487
+ },
488
+ {
489
+ "from": "gpt",
490
+ "value": "In frame 34, a child in green clothes is holding a toy at position [0.4, 0.37, 0.667, 0.652]. However, the child stops appearing at position [0.006, 0.113, 0.909, 0.999] in frame 53."
491
+ }
492
+ ],
493
+ "meta": {
494
+ "vid": "13497317534",
495
+ "frame_count": 100,
496
+ "start_frame": 34,
497
+ "end_frame": 53,
498
+ "start_bbox": [
499
+ 0.4,
500
+ 0.37,
501
+ 0.667,
502
+ 0.652
503
+ ],
504
+ "end_bbox": [
505
+ 0.006,
506
+ 0.113,
507
+ 0.909,
508
+ 0.999
509
+ ],
510
+ "width": 640,
511
+ "height": 1138
512
+ }
513
+ },
514
+ {
515
+ "conversations": [
516
+ {
517
+ "from": "human",
518
+ "value": "What does the child in green clothes hold?"
519
+ },
520
+ {
521
+ "from": "gpt",
522
+ "value": "The child in green clothes is holding a toy. The toy is initially located at position [0.419, 0.533, 0.469, 0.568] in frame 34 and stops appearing at position [0.078, 0.74, 0.283, 0.919] in frame 53."
523
+ }
524
+ ],
525
+ "meta": {
526
+ "vid": "13497317534",
527
+ "frame_count": 100,
528
+ "start_frame": 34,
529
+ "end_frame": 53,
530
+ "start_bbox": [
531
+ 0.419,
532
+ 0.533,
533
+ 0.469,
534
+ 0.568
535
+ ],
536
+ "end_bbox": [
537
+ 0.078,
538
+ 0.74,
539
+ 0.283,
540
+ 0.919
541
+ ],
542
+ "width": 640,
543
+ "height": 1138
544
+ }
545
+ },
546
+ {
547
+ "conversations": [
548
+ {
549
+ "from": "human",
550
+ "value": "Who is next to a handbag in the video?"
551
+ },
552
+ {
553
+ "from": "gpt",
554
+ "value": "In the video, the adult is next to a handbag on a motorcycle. The adult is initially located at position [0.172, 0.028, 0.309, 0.842] in frame 11 and stops appearing at position [0.391, 0.0, 0.5, 0.519] in frame 19."
555
+ }
556
+ ],
557
+ "meta": {
558
+ "vid": "7499375636",
559
+ "frame_count": 100,
560
+ "start_frame": 11,
561
+ "end_frame": 19,
562
+ "start_bbox": [
563
+ 0.172,
564
+ 0.028,
565
+ 0.309,
566
+ 0.842
567
+ ],
568
+ "end_bbox": [
569
+ 0.391,
570
+ 0.0,
571
+ 0.5,
572
+ 0.519
573
+ ],
574
+ "width": 640,
575
+ "height": 360
576
+ }
577
+ },
578
+ {
579
+ "conversations": [
580
+ {
581
+ "from": "human",
582
+ "value": "Who pushes the toy outside the house?"
583
+ },
584
+ {
585
+ "from": "gpt",
586
+ "value": "In the video, a baby pushes a toy outside the house. The baby is initially located at position [0.319, 0.017, 0.981, 0.996] in frame 68 and stops appearing at position [0.378, 0.092, 0.716, 0.821] in frame 78."
587
+ }
588
+ ],
589
+ "meta": {
590
+ "vid": "3837521252",
591
+ "frame_count": 100,
592
+ "start_frame": 68,
593
+ "end_frame": 78,
594
+ "start_bbox": [
595
+ 0.319,
596
+ 0.017,
597
+ 0.981,
598
+ 0.996
599
+ ],
600
+ "end_bbox": [
601
+ 0.378,
602
+ 0.092,
603
+ 0.716,
604
+ 0.821
605
+ ],
606
+ "width": 320,
607
+ "height": 240
608
+ }
609
+ },
610
+ {
611
+ "conversations": [
612
+ {
613
+ "from": "human",
614
+ "value": "What does the baby push outside the house in the video?"
615
+ },
616
+ {
617
+ "from": "gpt",
618
+ "value": "In the video, the baby is pushing a toy outside the house. The toy is initially located at [0.022, 0.192, 0.519, 0.996] in frame 68 and stops appearing at position [0.022, 0.342, 0.769, 0.996] in frame 78."
619
+ }
620
+ ],
621
+ "meta": {
622
+ "vid": "3837521252",
623
+ "frame_count": 100,
624
+ "start_frame": 68,
625
+ "end_frame": 78,
626
+ "start_bbox": [
627
+ 0.022,
628
+ 0.192,
629
+ 0.519,
630
+ 0.996
631
+ ],
632
+ "end_bbox": [
633
+ 0.022,
634
+ 0.342,
635
+ 0.769,
636
+ 0.996
637
+ ],
638
+ "width": 320,
639
+ "height": 240
640
+ }
641
+ },
642
+ {
643
+ "conversations": [
644
+ {
645
+ "from": "human",
646
+ "value": "Who pushes the toy outside the house?"
647
+ },
648
+ {
649
+ "from": "gpt",
650
+ "value": "In the video, a baby pushes a toy outside the house. The baby is initially located at position [0.287, 0.071, 0.463, 0.492] in frame 52 and stops appearing at position [0.138, 0.271, 0.516, 0.787] in frame 77."
651
+ }
652
+ ],
653
+ "meta": {
654
+ "vid": "3837521252",
655
+ "frame_count": 100,
656
+ "start_frame": 52,
657
+ "end_frame": 77,
658
+ "start_bbox": [
659
+ 0.287,
660
+ 0.071,
661
+ 0.463,
662
+ 0.492
663
+ ],
664
+ "end_bbox": [
665
+ 0.138,
666
+ 0.271,
667
+ 0.516,
668
+ 0.787
669
+ ],
670
+ "width": 320,
671
+ "height": 240
672
+ }
673
+ },
674
+ {
675
+ "conversations": [
676
+ {
677
+ "from": "human",
678
+ "value": "What does the baby push outside the house?"
679
+ },
680
+ {
681
+ "from": "gpt",
682
+ "value": "The baby pushes a toy outside the house. The toy is initially at position [0.144, 0.225, 0.984, 0.946] in frame 52 and stops appearing at position [0.412, 0.396, 0.997, 0.996] in frame 77."
683
+ }
684
+ ],
685
+ "meta": {
686
+ "vid": "3837521252",
687
+ "frame_count": 100,
688
+ "start_frame": 52,
689
+ "end_frame": 77,
690
+ "start_bbox": [
691
+ 0.144,
692
+ 0.225,
693
+ 0.984,
694
+ 0.946
695
+ ],
696
+ "end_bbox": [
697
+ 0.412,
698
+ 0.396,
699
+ 0.997,
700
+ 0.996
701
+ ],
702
+ "width": 320,
703
+ "height": 240
704
+ }
705
+ },
706
+ {
707
+ "conversations": [
708
+ {
709
+ "from": "human",
710
+ "value": "Who pushes the toy outside the house?"
711
+ },
712
+ {
713
+ "from": "gpt",
714
+ "value": "In the video, a baby is pushing the toy outside the house. The baby is initially located at position [0.047, 0.312, 0.6, 0.912] in frame 8 and stops appearing at position [0.384, 0.317, 0.688, 0.996] in frame 35."
715
+ }
716
+ ],
717
+ "meta": {
718
+ "vid": "3837521252",
719
+ "frame_count": 100,
720
+ "start_frame": 8,
721
+ "end_frame": 35,
722
+ "start_bbox": [
723
+ 0.047,
724
+ 0.312,
725
+ 0.6,
726
+ 0.912
727
+ ],
728
+ "end_bbox": [
729
+ 0.384,
730
+ 0.317,
731
+ 0.688,
732
+ 0.996
733
+ ],
734
+ "width": 320,
735
+ "height": 240
736
+ }
737
+ },
738
+ {
739
+ "conversations": [
740
+ {
741
+ "from": "human",
742
+ "value": "What does the baby push outside the house?"
743
+ },
744
+ {
745
+ "from": "gpt",
746
+ "value": "In the video, a baby is pushing a toy outside the house. The toy is initially located at position [0.491, 0.442, 0.994, 0.996] in frame 8 and stops appearing at position [0.463, 0.267, 0.769, 0.646] in frame 35."
747
+ }
748
+ ],
749
+ "meta": {
750
+ "vid": "3837521252",
751
+ "frame_count": 100,
752
+ "start_frame": 8,
753
+ "end_frame": 35,
754
+ "start_bbox": [
755
+ 0.491,
756
+ 0.442,
757
+ 0.994,
758
+ 0.996
759
+ ],
760
+ "end_bbox": [
761
+ 0.463,
762
+ 0.267,
763
+ 0.769,
764
+ 0.646
765
+ ],
766
+ "width": 320,
767
+ "height": 240
768
+ }
769
+ },
770
+ {
771
+ "conversations": [
772
+ {
773
+ "from": "human",
774
+ "value": "What is away from the child in the swimming pool in the video?"
775
+ },
776
+ {
777
+ "from": "gpt",
778
+ "value": "In the video, there is a ball that is away from the child in the swimming pool. The ball is initially located at position [0.545, 0.456, 0.583, 0.502] in frame 5 and stops appearing at position [0.97, 0.554, 0.998, 0.61] in frame 30."
779
+ }
780
+ ],
781
+ "meta": {
782
+ "vid": "3557607884",
783
+ "frame_count": 100,
784
+ "start_frame": 5,
785
+ "end_frame": 30,
786
+ "start_bbox": [
787
+ 0.545,
788
+ 0.456,
789
+ 0.583,
790
+ 0.502
791
+ ],
792
+ "end_bbox": [
793
+ 0.97,
794
+ 0.554,
795
+ 0.998,
796
+ 0.61
797
+ ],
798
+ "width": 640,
799
+ "height": 480
800
+ }
801
+ },
802
+ {
803
+ "conversations": [
804
+ {
805
+ "from": "human",
806
+ "value": "Is there a child above the watercraft in the river in the video?"
807
+ },
808
+ {
809
+ "from": "gpt",
810
+ "value": "In the video, there is a child [0.378, 0.5, 0.494, 0.622] above a watercraft in the river. The child is initially seen at position [0.378, 0.5, 0.494, 0.622] in frame 71 and stops appearing at position [0.283, 0.475, 0.486, 0.622] in frame 99. Therefore, the answer is yes."
811
+ }
812
+ ],
813
+ "meta": {
814
+ "vid": "7896372830",
815
+ "frame_count": 100,
816
+ "start_frame": 71,
817
+ "end_frame": 99,
818
+ "start_bbox": [
819
+ 0.378,
820
+ 0.5,
821
+ 0.494,
822
+ 0.622
823
+ ],
824
+ "end_bbox": [
825
+ 0.283,
826
+ 0.475,
827
+ 0.486,
828
+ 0.622
829
+ ],
830
+ "width": 640,
831
+ "height": 360
832
+ }
833
+ },
834
+ {
835
+ "conversations": [
836
+ {
837
+ "from": "human",
838
+ "value": "Who touches the toy on the watercraft?"
839
+ },
840
+ {
841
+ "from": "gpt",
842
+ "value": "In the video, there is an adult with green pants touching a toy on the watercraft. The adult initially appears at position [0.0, 0.0, 0.256, 0.996] in frame 86 and stops appearing at position [0.008, 0.013, 0.912, 0.85] in frame 95."
843
+ }
844
+ ],
845
+ "meta": {
846
+ "vid": "4855999801",
847
+ "frame_count": 100,
848
+ "start_frame": 86,
849
+ "end_frame": 95,
850
+ "start_bbox": [
851
+ 0.0,
852
+ 0.0,
853
+ 0.256,
854
+ 0.996
855
+ ],
856
+ "end_bbox": [
857
+ 0.008,
858
+ 0.013,
859
+ 0.912,
860
+ 0.85
861
+ ],
862
+ "width": 640,
863
+ "height": 480
864
+ }
865
+ },
866
+ {
867
+ "conversations": [
868
+ {
869
+ "from": "human",
870
+ "value": "Who touches the toy on the watercraft?"
871
+ },
872
+ {
873
+ "from": "gpt",
874
+ "value": "In the video, an adult with green pants is touching a toy on the watercraft. The adult is initially located at [0.02, 0.006, 0.694, 0.754] in frame 4, and stops appearing at position [0.0, 0.21, 0.511, 0.998] in frame 28."
875
+ }
876
+ ],
877
+ "meta": {
878
+ "vid": "4855999801",
879
+ "frame_count": 100,
880
+ "start_frame": 4,
881
+ "end_frame": 28,
882
+ "start_bbox": [
883
+ 0.02,
884
+ 0.006,
885
+ 0.694,
886
+ 0.754
887
+ ],
888
+ "end_bbox": [
889
+ 0.0,
890
+ 0.21,
891
+ 0.511,
892
+ 0.998
893
+ ],
894
+ "width": 640,
895
+ "height": 480
896
+ }
897
+ },
898
+ {
899
+ "conversations": [
900
+ {
901
+ "from": "human",
902
+ "value": "What is beneath the bottle in the room?"
903
+ },
904
+ {
905
+ "from": "gpt",
906
+ "value": "In the video, there is a bottle above a table in a room. The table is initially located at position [0.016, 0.508, 0.995, 0.794] in frame 6 and stops appearing at position [0.031, 0.504, 0.998, 0.583] in frame 99. Therefore, the object beneath the bottle in the room is a table."
907
+ }
908
+ ],
909
+ "meta": {
910
+ "vid": "8614946329",
911
+ "frame_count": 100,
912
+ "start_frame": 6,
913
+ "end_frame": 99,
914
+ "start_bbox": [
915
+ 0.016,
916
+ 0.508,
917
+ 0.995,
918
+ 0.794
919
+ ],
920
+ "end_bbox": [
921
+ 0.031,
922
+ 0.504,
923
+ 0.998,
924
+ 0.583
925
+ ],
926
+ "width": 640,
927
+ "height": 480
928
+ }
929
+ },
930
+ {
931
+ "conversations": [
932
+ {
933
+ "from": "human",
934
+ "value": "Who does the adult in red hold in the video?"
935
+ },
936
+ {
937
+ "from": "gpt",
938
+ "value": "In the video, the adult in red is holding a baby with few hairs. The baby is initially located at position [0.573, 0.083, 0.744, 0.922] in frame 36 and stops appearing at position [0.6, 0.089, 0.784, 0.958] in frame 86."
939
+ }
940
+ ],
941
+ "meta": {
942
+ "vid": "3898784753",
943
+ "frame_count": 100,
944
+ "start_frame": 36,
945
+ "end_frame": 86,
946
+ "start_bbox": [
947
+ 0.573,
948
+ 0.083,
949
+ 0.744,
950
+ 0.922
951
+ ],
952
+ "end_bbox": [
953
+ 0.6,
954
+ 0.089,
955
+ 0.784,
956
+ 0.958
957
+ ],
958
+ "width": 640,
959
+ "height": 360
960
+ }
961
+ },
962
+ {
963
+ "conversations": [
964
+ {
965
+ "from": "human",
966
+ "value": "Is there an adult in red holding a baby with few hair in the video?"
967
+ },
968
+ {
969
+ "from": "gpt",
970
+ "value": "In the video, there is an adult in red holding a baby with few hair. The baby is initially located at position [0.63, 0.089, 0.814, 0.958] in frame 3 and stops appearing at position [0.505, 0.078, 0.752, 0.997] in frame 93. Therefore, the answer is yes."
971
+ }
972
+ ],
973
+ "meta": {
974
+ "vid": "3898784753",
975
+ "frame_count": 100,
976
+ "start_frame": 3,
977
+ "end_frame": 93,
978
+ "start_bbox": [
979
+ 0.63,
980
+ 0.089,
981
+ 0.814,
982
+ 0.958
983
+ ],
984
+ "end_bbox": [
985
+ 0.505,
986
+ 0.078,
987
+ 0.752,
988
+ 0.997
989
+ ],
990
+ "width": 640,
991
+ "height": 360
992
+ }
993
+ },
994
+ {
995
+ "conversations": [
996
+ {
997
+ "from": "human",
998
+ "value": "Who does the adult in red hold in the video?"
999
+ },
1000
+ {
1001
+ "from": "gpt",
1002
+ "value": "In the video, there is an adult in red who holds a baby with few hair. The baby is initially at position [0.417, 0.008, 0.697, 0.997] in frame 14 and stops appearing at position [0.417, 0.022, 0.753, 0.997] in frame 83."
1003
+ }
1004
+ ],
1005
+ "meta": {
1006
+ "vid": "3898784753",
1007
+ "frame_count": 100,
1008
+ "start_frame": 14,
1009
+ "end_frame": 83,
1010
+ "start_bbox": [
1011
+ 0.417,
1012
+ 0.008,
1013
+ 0.697,
1014
+ 0.997
1015
+ ],
1016
+ "end_bbox": [
1017
+ 0.417,
1018
+ 0.022,
1019
+ 0.753,
1020
+ 0.997
1021
+ ],
1022
+ "width": 640,
1023
+ "height": 360
1024
+ }
1025
+ },
1026
+ {
1027
+ "conversations": [
1028
+ {
1029
+ "from": "human",
1030
+ "value": "Who does the adult in red hold in the video?"
1031
+ },
1032
+ {
1033
+ "from": "gpt",
1034
+ "value": "In the video, there is an adult in red holding a baby with few hair. The baby is initially located at position [0.4, 0.058, 0.723, 0.997] in frame 12 and stops appearing at position [0.327, 0.05, 0.683, 0.997] in frame 84. Therefore, the adult in red is holding the baby with few hair in the video."
1035
+ }
1036
+ ],
1037
+ "meta": {
1038
+ "vid": "3898784753",
1039
+ "frame_count": 100,
1040
+ "start_frame": 12,
1041
+ "end_frame": 84,
1042
+ "start_bbox": [
1043
+ 0.4,
1044
+ 0.058,
1045
+ 0.723,
1046
+ 0.997
1047
+ ],
1048
+ "end_bbox": [
1049
+ 0.327,
1050
+ 0.05,
1051
+ 0.683,
1052
+ 0.997
1053
+ ],
1054
+ "width": 640,
1055
+ "height": 360
1056
+ }
1057
+ },
1058
+ {
1059
+ "conversations": [
1060
+ {
1061
+ "from": "human",
1062
+ "value": "Who does the adult in red hold in the video?"
1063
+ },
1064
+ {
1065
+ "from": "gpt",
1066
+ "value": "In the video, there is an adult in red holding a baby with few hair. The baby appears at position [0.333, 0.042, 0.688, 0.964] in frame 32 and stops appearing at position [0.342, 0.075, 0.697, 0.997] in frame 59."
1067
+ }
1068
+ ],
1069
+ "meta": {
1070
+ "vid": "3898784753",
1071
+ "frame_count": 100,
1072
+ "start_frame": 32,
1073
+ "end_frame": 59,
1074
+ "start_bbox": [
1075
+ 0.333,
1076
+ 0.042,
1077
+ 0.688,
1078
+ 0.964
1079
+ ],
1080
+ "end_bbox": [
1081
+ 0.342,
1082
+ 0.075,
1083
+ 0.697,
1084
+ 0.997
1085
+ ],
1086
+ "width": 640,
1087
+ "height": 360
1088
+ }
1089
+ },
1090
+ {
1091
+ "conversations": [
1092
+ {
1093
+ "from": "human",
1094
+ "value": "Is there a table beneath the cup in the room?"
1095
+ },
1096
+ {
1097
+ "from": "gpt",
1098
+ "value": "In frame 46, the table is located at position [0.002, 0.146, 0.489, 0.998] beneath the cup in the room. However, the table stops appearing at position [0.297, 0.854, 0.705, 0.998] in frame 57."
1099
+ }
1100
+ ],
1101
+ "meta": {
1102
+ "vid": "2521681557",
1103
+ "frame_count": 100,
1104
+ "start_frame": 46,
1105
+ "end_frame": 57,
1106
+ "start_bbox": [
1107
+ 0.002,
1108
+ 0.146,
1109
+ 0.489,
1110
+ 0.998
1111
+ ],
1112
+ "end_bbox": [
1113
+ 0.297,
1114
+ 0.854,
1115
+ 0.705,
1116
+ 0.998
1117
+ ],
1118
+ "width": 640,
1119
+ "height": 480
1120
+ }
1121
+ },
1122
+ {
1123
+ "conversations": [
1124
+ {
1125
+ "from": "human",
1126
+ "value": "What does the child in striped clothes watch?"
1127
+ },
1128
+ {
1129
+ "from": "gpt",
1130
+ "value": "In the video, the child in striped clothes is watching a red and purple toy. The toy is initially located at [0.116, 0.557, 0.638, 0.997] in frame 4 and stops appearing at position [0.386, 0.144, 0.592, 0.997] in frame 99."
1131
+ }
1132
+ ],
1133
+ "meta": {
1134
+ "vid": "2677153721",
1135
+ "frame_count": 100,
1136
+ "start_frame": 4,
1137
+ "end_frame": 99,
1138
+ "start_bbox": [
1139
+ 0.116,
1140
+ 0.557,
1141
+ 0.638,
1142
+ 0.997
1143
+ ],
1144
+ "end_bbox": [
1145
+ 0.386,
1146
+ 0.144,
1147
+ 0.592,
1148
+ 0.997
1149
+ ],
1150
+ "width": 500,
1151
+ "height": 375
1152
+ }
1153
+ },
1154
+ {
1155
+ "conversations": [
1156
+ {
1157
+ "from": "human",
1158
+ "value": "Who grabs the cup in the classroom?"
1159
+ },
1160
+ {
1161
+ "from": "gpt",
1162
+ "value": "In the video, an adult [0.503, 0.317, 0.684, 0.558] grabs a cup in the classroom from frame 52 to frame 56."
1163
+ }
1164
+ ],
1165
+ "meta": {
1166
+ "vid": "3113067503",
1167
+ "frame_count": 100,
1168
+ "start_frame": 52,
1169
+ "end_frame": 56,
1170
+ "start_bbox": [
1171
+ 0.503,
1172
+ 0.317,
1173
+ 0.684,
1174
+ 0.558
1175
+ ],
1176
+ "end_bbox": [
1177
+ 0.509,
1178
+ 0.321,
1179
+ 0.681,
1180
+ 0.562
1181
+ ],
1182
+ "width": 320,
1183
+ "height": 240
1184
+ }
1185
+ },
1186
+ {
1187
+ "conversations": [
1188
+ {
1189
+ "from": "human",
1190
+ "value": "Who carries the green toy at home?"
1191
+ },
1192
+ {
1193
+ "from": "gpt",
1194
+ "value": "In the video, a child with black hair is carrying a green toy at home. The child is initially located at position [0.833, 0.579, 0.98, 0.998] in frame 59, and stops appearing at position [0.744, 0.458, 0.911, 0.925] in frame 77."
1195
+ }
1196
+ ],
1197
+ "meta": {
1198
+ "vid": "3487539480",
1199
+ "frame_count": 100,
1200
+ "start_frame": 59,
1201
+ "end_frame": 77,
1202
+ "start_bbox": [
1203
+ 0.833,
1204
+ 0.579,
1205
+ 0.98,
1206
+ 0.998
1207
+ ],
1208
+ "end_bbox": [
1209
+ 0.744,
1210
+ 0.458,
1211
+ 0.911,
1212
+ 0.925
1213
+ ],
1214
+ "width": 640,
1215
+ "height": 480
1216
+ }
1217
+ },
1218
+ {
1219
+ "conversations": [
1220
+ {
1221
+ "from": "human",
1222
+ "value": "Who is leaning on the sofa at home in the video?"
1223
+ },
1224
+ {
1225
+ "from": "gpt",
1226
+ "value": "In the video, an adult in grey is leaning on a sofa at home. The target adult initially appears at position [0.004, 0.0, 0.998, 0.995] in frame 73 and stops appearing at position [0.0, 0.0, 0.998, 0.998] in frame 91."
1227
+ }
1228
+ ],
1229
+ "meta": {
1230
+ "vid": "5773250639",
1231
+ "frame_count": 100,
1232
+ "start_frame": 73,
1233
+ "end_frame": 91,
1234
+ "start_bbox": [
1235
+ 0.004,
1236
+ 0.0,
1237
+ 0.998,
1238
+ 0.995
1239
+ ],
1240
+ "end_bbox": [
1241
+ 0.0,
1242
+ 0.0,
1243
+ 0.998,
1244
+ 0.998
1245
+ ],
1246
+ "width": 480,
1247
+ "height": 640
1248
+ }
1249
+ },
1250
+ {
1251
+ "conversations": [
1252
+ {
1253
+ "from": "human",
1254
+ "value": "Who is leaning on the sofa at home?"
1255
+ },
1256
+ {
1257
+ "from": "gpt",
1258
+ "value": "In the video, the target object 'adult' is leaning on the sofa at home. The adult is initially located at position [0.0, 0.292, 0.996, 0.998] in frame 4 and stops appearing at position [0.0, 0.0, 0.61, 0.998] in frame 60."
1259
+ }
1260
+ ],
1261
+ "meta": {
1262
+ "vid": "5773250639",
1263
+ "frame_count": 100,
1264
+ "start_frame": 4,
1265
+ "end_frame": 60,
1266
+ "start_bbox": [
1267
+ 0.0,
1268
+ 0.292,
1269
+ 0.996,
1270
+ 0.998
1271
+ ],
1272
+ "end_bbox": [
1273
+ 0.0,
1274
+ 0.0,
1275
+ 0.61,
1276
+ 0.998
1277
+ ],
1278
+ "width": 480,
1279
+ "height": 640
1280
+ }
1281
+ },
1282
+ {
1283
+ "conversations": [
1284
+ {
1285
+ "from": "human",
1286
+ "value": "Is there a guitar above the child with blond hair in the video?"
1287
+ },
1288
+ {
1289
+ "from": "gpt",
1290
+ "value": "In the video, there is a guitar located above a child with blond hair. The guitar is initially positioned at [0.122, 0.527, 0.864, 0.956] in frame 36 and it stops appearing at position [0.302, 0.121, 0.902, 0.444] in frame 42. Therefore, the answer to your question is yes."
1291
+ }
1292
+ ],
1293
+ "meta": {
1294
+ "vid": "6591879935",
1295
+ "frame_count": 100,
1296
+ "start_frame": 36,
1297
+ "end_frame": 42,
1298
+ "start_bbox": [
1299
+ 0.122,
1300
+ 0.527,
1301
+ 0.864,
1302
+ 0.956
1303
+ ],
1304
+ "end_bbox": [
1305
+ 0.302,
1306
+ 0.121,
1307
+ 0.902,
1308
+ 0.444
1309
+ ],
1310
+ "width": 640,
1311
+ "height": 480
1312
+ }
1313
+ },
1314
+ {
1315
+ "conversations": [
1316
+ {
1317
+ "from": "human",
1318
+ "value": "Who does the adult chase on the grass?"
1319
+ },
1320
+ {
1321
+ "from": "gpt",
1322
+ "value": "In the video, an adult [0.897, 0.217, 0.998, 0.75] chases another adult on the grass. The adult stops appearing at position [0.244, 0.353, 0.358, 0.817] in frame 59."
1323
+ }
1324
+ ],
1325
+ "meta": {
1326
+ "vid": "6859274622",
1327
+ "frame_count": 100,
1328
+ "start_frame": 48,
1329
+ "end_frame": 59,
1330
+ "start_bbox": [
1331
+ 0.897,
1332
+ 0.217,
1333
+ 0.998,
1334
+ 0.75
1335
+ ],
1336
+ "end_bbox": [
1337
+ 0.244,
1338
+ 0.353,
1339
+ 0.358,
1340
+ 0.817
1341
+ ],
1342
+ "width": 640,
1343
+ "height": 360
1344
+ }
1345
+ },
1346
+ {
1347
+ "conversations": [
1348
+ {
1349
+ "from": "human",
1350
+ "value": "Who is towards the other child in the yard?"
1351
+ },
1352
+ {
1353
+ "from": "gpt",
1354
+ "value": "In the video, there is a child initially located at [0.673, 0.244, 0.867, 0.972] in frame 59 and moves towards another child in the yard. The child stops appearing at position [0.433, 0.2, 0.559, 0.767] in frame 99. Therefore, the answer is the child who is initially at [0.673, 0.244, 0.867, 0.972] in frame 59."
1355
+ }
1356
+ ],
1357
+ "meta": {
1358
+ "vid": "7971444414",
1359
+ "frame_count": 100,
1360
+ "start_frame": 59,
1361
+ "end_frame": 99,
1362
+ "start_bbox": [
1363
+ 0.673,
1364
+ 0.244,
1365
+ 0.867,
1366
+ 0.972
1367
+ ],
1368
+ "end_bbox": [
1369
+ 0.433,
1370
+ 0.2,
1371
+ 0.559,
1372
+ 0.767
1373
+ ],
1374
+ "width": 640,
1375
+ "height": 360
1376
+ }
1377
+ },
1378
+ {
1379
+ "conversations": [
1380
+ {
1381
+ "from": "human",
1382
+ "value": "Who is the child in white towards in the yard?"
1383
+ },
1384
+ {
1385
+ "from": "gpt",
1386
+ "value": "In the video, there is a child [0.361, 0.444, 0.419, 0.706] in white towards another child in a yard. The child stops appearing at position [0.362, 0.344, 0.473, 0.681] in frame 99. Therefore, the child towards in the yard is the target child."
1387
+ }
1388
+ ],
1389
+ "meta": {
1390
+ "vid": "7971444414",
1391
+ "frame_count": 100,
1392
+ "start_frame": 59,
1393
+ "end_frame": 99,
1394
+ "start_bbox": [
1395
+ 0.361,
1396
+ 0.444,
1397
+ 0.419,
1398
+ 0.706
1399
+ ],
1400
+ "end_bbox": [
1401
+ 0.362,
1402
+ 0.344,
1403
+ 0.473,
1404
+ 0.681
1405
+ ],
1406
+ "width": 640,
1407
+ "height": 360
1408
+ }
1409
+ },
1410
+ {
1411
+ "conversations": [
1412
+ {
1413
+ "from": "human",
1414
+ "value": "Can you tell me who is playing the guitar in the video?"
1415
+ },
1416
+ {
1417
+ "from": "gpt",
1418
+ "value": "In the video, an adult in a black hat is playing the guitar. The adult is initially located at position [0.659, 0.256, 0.77, 0.753] in frame 6 and stops appearing at position [0.628, 0.253, 0.747, 0.769] in frame 99."
1419
+ }
1420
+ ],
1421
+ "meta": {
1422
+ "vid": "5776386760",
1423
+ "frame_count": 100,
1424
+ "start_frame": 6,
1425
+ "end_frame": 99,
1426
+ "start_bbox": [
1427
+ 0.659,
1428
+ 0.256,
1429
+ 0.77,
1430
+ 0.753
1431
+ ],
1432
+ "end_bbox": [
1433
+ 0.628,
1434
+ 0.253,
1435
+ 0.747,
1436
+ 0.769
1437
+ ],
1438
+ "width": 640,
1439
+ "height": 360
1440
+ }
1441
+ },
1442
+ {
1443
+ "conversations": [
1444
+ {
1445
+ "from": "human",
1446
+ "value": "What is above the white baby seat at home?"
1447
+ },
1448
+ {
1449
+ "from": "gpt",
1450
+ "value": "In the video, there is a green toy above a white baby seat at home. The toy is initially located at [0.483, 0.325, 0.558, 0.411] in frame 3 and stops appearing at [0.292, 0.736, 0.508, 0.867] in frame 99."
1451
+ }
1452
+ ],
1453
+ "meta": {
1454
+ "vid": "14040889372",
1455
+ "frame_count": 100,
1456
+ "start_frame": 3,
1457
+ "end_frame": 99,
1458
+ "start_bbox": [
1459
+ 0.483,
1460
+ 0.325,
1461
+ 0.558,
1462
+ 0.411
1463
+ ],
1464
+ "end_bbox": [
1465
+ 0.292,
1466
+ 0.736,
1467
+ 0.508,
1468
+ 0.867
1469
+ ],
1470
+ "width": 640,
1471
+ "height": 360
1472
+ }
1473
+ },
1474
+ {
1475
+ "conversations": [
1476
+ {
1477
+ "from": "human",
1478
+ "value": "Who pushes the stool on the floor?"
1479
+ },
1480
+ {
1481
+ "from": "gpt",
1482
+ "value": "In the video, a child in green clothes is pushing a stool on the floor. The child starts appearing at position [0.305, 0.039, 0.711, 0.997] in frame 76 and stops appearing at position [0.241, 0.175, 0.591, 0.997] in frame 89."
1483
+ }
1484
+ ],
1485
+ "meta": {
1486
+ "vid": "5907883532",
1487
+ "frame_count": 100,
1488
+ "start_frame": 76,
1489
+ "end_frame": 89,
1490
+ "start_bbox": [
1491
+ 0.305,
1492
+ 0.039,
1493
+ 0.711,
1494
+ 0.997
1495
+ ],
1496
+ "end_bbox": [
1497
+ 0.241,
1498
+ 0.175,
1499
+ 0.591,
1500
+ 0.997
1501
+ ],
1502
+ "width": 640,
1503
+ "height": 360
1504
+ }
1505
+ },
1506
+ {
1507
+ "conversations": [
1508
+ {
1509
+ "from": "human",
1510
+ "value": "Who pushes the stool on the floor in the video?"
1511
+ },
1512
+ {
1513
+ "from": "gpt",
1514
+ "value": "In the video, a child in green clothes is pushing a stool on the floor. The child appears at position [0.322, 0.211, 0.577, 0.969] in frame 28 and stops appearing at position [0.273, 0.117, 0.503, 0.922] in frame 71."
1515
+ }
1516
+ ],
1517
+ "meta": {
1518
+ "vid": "5907883532",
1519
+ "frame_count": 100,
1520
+ "start_frame": 28,
1521
+ "end_frame": 71,
1522
+ "start_bbox": [
1523
+ 0.322,
1524
+ 0.211,
1525
+ 0.577,
1526
+ 0.969
1527
+ ],
1528
+ "end_bbox": [
1529
+ 0.273,
1530
+ 0.117,
1531
+ 0.503,
1532
+ 0.922
1533
+ ],
1534
+ "width": 640,
1535
+ "height": 360
1536
+ }
1537
+ },
1538
+ {
1539
+ "conversations": [
1540
+ {
1541
+ "from": "human",
1542
+ "value": "Who pushes the stool on the floor in the video?"
1543
+ },
1544
+ {
1545
+ "from": "gpt",
1546
+ "value": "In the video, a child in green clothes is pushing a stool on the floor. The child can be seen at position [0.425, 0.272, 0.652, 0.969] in frame 19 and stops appearing at position [0.273, 0.175, 0.527, 0.994] in frame 64."
1547
+ }
1548
+ ],
1549
+ "meta": {
1550
+ "vid": "5907883532",
1551
+ "frame_count": 100,
1552
+ "start_frame": 19,
1553
+ "end_frame": 64,
1554
+ "start_bbox": [
1555
+ 0.425,
1556
+ 0.272,
1557
+ 0.652,
1558
+ 0.969
1559
+ ],
1560
+ "end_bbox": [
1561
+ 0.273,
1562
+ 0.175,
1563
+ 0.527,
1564
+ 0.994
1565
+ ],
1566
+ "width": 640,
1567
+ "height": 360
1568
+ }
1569
+ },
1570
+ {
1571
+ "conversations": [
1572
+ {
1573
+ "from": "human",
1574
+ "value": "Who pushes the stool on the floor in the video?"
1575
+ },
1576
+ {
1577
+ "from": "gpt",
1578
+ "value": "In the video, a child in green clothes pushes a stool on the floor. The child can be seen at position [0.303, 0.0, 0.653, 0.997] in frame 47, and stops appearing at position [0.325, 0.131, 0.748, 0.997] in frame 69."
1579
+ }
1580
+ ],
1581
+ "meta": {
1582
+ "vid": "5907883532",
1583
+ "frame_count": 100,
1584
+ "start_frame": 47,
1585
+ "end_frame": 69,
1586
+ "start_bbox": [
1587
+ 0.303,
1588
+ 0.0,
1589
+ 0.653,
1590
+ 0.997
1591
+ ],
1592
+ "end_bbox": [
1593
+ 0.325,
1594
+ 0.131,
1595
+ 0.748,
1596
+ 0.997
1597
+ ],
1598
+ "width": 640,
1599
+ "height": 360
1600
+ }
1601
+ },
1602
+ {
1603
+ "conversations": [
1604
+ {
1605
+ "from": "human",
1606
+ "value": "Who pushes the stool on the floor?"
1607
+ },
1608
+ {
1609
+ "from": "gpt",
1610
+ "value": "In the video, a child in green clothes is pushing a stool on the floor. The child appears at position [0.258, 0.003, 0.706, 0.997] in frame 34 and stops appearing at position [0.227, 0.011, 0.675, 0.992] in frame 55."
1611
+ }
1612
+ ],
1613
+ "meta": {
1614
+ "vid": "5907883532",
1615
+ "frame_count": 100,
1616
+ "start_frame": 34,
1617
+ "end_frame": 55,
1618
+ "start_bbox": [
1619
+ 0.258,
1620
+ 0.003,
1621
+ 0.706,
1622
+ 0.997
1623
+ ],
1624
+ "end_bbox": [
1625
+ 0.227,
1626
+ 0.011,
1627
+ 0.675,
1628
+ 0.992
1629
+ ],
1630
+ "width": 640,
1631
+ "height": 360
1632
+ }
1633
+ },
1634
+ {
1635
+ "conversations": [
1636
+ {
1637
+ "from": "human",
1638
+ "value": "Who is pushing the stool on the floor in the video?"
1639
+ },
1640
+ {
1641
+ "from": "gpt",
1642
+ "value": "In the video, a child in green clothes is pushing the stool on the floor. The child is initially located at position [0.247, 0.147, 0.695, 0.997] in frame 20 and stops appearing at position [0.555, 0.114, 0.864, 0.972] in frame 69."
1643
+ }
1644
+ ],
1645
+ "meta": {
1646
+ "vid": "5907883532",
1647
+ "frame_count": 100,
1648
+ "start_frame": 20,
1649
+ "end_frame": 69,
1650
+ "start_bbox": [
1651
+ 0.247,
1652
+ 0.147,
1653
+ 0.695,
1654
+ 0.997
1655
+ ],
1656
+ "end_bbox": [
1657
+ 0.555,
1658
+ 0.114,
1659
+ 0.864,
1660
+ 0.972
1661
+ ],
1662
+ "width": 640,
1663
+ "height": 360
1664
+ }
1665
+ },
1666
+ {
1667
+ "conversations": [
1668
+ {
1669
+ "from": "human",
1670
+ "value": "Who pushes the stool on the floor?"
1671
+ },
1672
+ {
1673
+ "from": "gpt",
1674
+ "value": "In the video, a child in green clothes is pushing a stool on the floor. The child appears at position [0.022, 0.378, 0.509, 0.997] in frame 44 and stops appearing at position [0.052, 0.044, 0.675, 0.997] in frame 78."
1675
+ }
1676
+ ],
1677
+ "meta": {
1678
+ "vid": "5907883532",
1679
+ "frame_count": 100,
1680
+ "start_frame": 44,
1681
+ "end_frame": 78,
1682
+ "start_bbox": [
1683
+ 0.022,
1684
+ 0.378,
1685
+ 0.509,
1686
+ 0.997
1687
+ ],
1688
+ "end_bbox": [
1689
+ 0.052,
1690
+ 0.044,
1691
+ 0.675,
1692
+ 0.997
1693
+ ],
1694
+ "width": 640,
1695
+ "height": 360
1696
+ }
1697
+ },
1698
+ {
1699
+ "conversations": [
1700
+ {
1701
+ "from": "human",
1702
+ "value": "Who pushes the stool on the floor?"
1703
+ },
1704
+ {
1705
+ "from": "gpt",
1706
+ "value": "In the video, a child in green clothes is seen pushing a stool on the floor. The child is initially at position [0.095, 0.0, 0.694, 0.983] in frame 4 and stops appearing at position [0.127, 0.0, 0.586, 0.997] in frame 23."
1707
+ }
1708
+ ],
1709
+ "meta": {
1710
+ "vid": "5907883532",
1711
+ "frame_count": 100,
1712
+ "start_frame": 4,
1713
+ "end_frame": 23,
1714
+ "start_bbox": [
1715
+ 0.095,
1716
+ 0.0,
1717
+ 0.694,
1718
+ 0.983
1719
+ ],
1720
+ "end_bbox": [
1721
+ 0.127,
1722
+ 0.0,
1723
+ 0.586,
1724
+ 0.997
1725
+ ],
1726
+ "width": 640,
1727
+ "height": 360
1728
+ }
1729
+ },
1730
+ {
1731
+ "conversations": [
1732
+ {
1733
+ "from": "human",
1734
+ "value": "Who is the adult towards on the ground?"
1735
+ },
1736
+ {
1737
+ "from": "gpt",
1738
+ "value": "In frame 63, there is an adult at position [0.411, 0.177, 0.502, 0.812] who is positioned towards another adult on the ground. The adult stops appearing at position [0.164, 0.171, 0.334, 0.848] in frame 67."
1739
+ }
1740
+ ],
1741
+ "meta": {
1742
+ "vid": "4747203885",
1743
+ "frame_count": 100,
1744
+ "start_frame": 63,
1745
+ "end_frame": 67,
1746
+ "start_bbox": [
1747
+ 0.411,
1748
+ 0.177,
1749
+ 0.502,
1750
+ 0.812
1751
+ ],
1752
+ "end_bbox": [
1753
+ 0.164,
1754
+ 0.171,
1755
+ 0.334,
1756
+ 0.848
1757
+ ],
1758
+ "width": 640,
1759
+ "height": 480
1760
+ }
1761
+ },
1762
+ {
1763
+ "conversations": [
1764
+ {
1765
+ "from": "human",
1766
+ "value": "What does the child watch inside the car?"
1767
+ },
1768
+ {
1769
+ "from": "gpt",
1770
+ "value": "In the video, there is a child watching a toy inside the car. The toy is initially located at [0.267, 0.327, 0.578, 0.725] in frame 54 and stops appearing at position [0.281, 0.4, 0.58, 0.731] in frame 78."
1771
+ }
1772
+ ],
1773
+ "meta": {
1774
+ "vid": "6783572295",
1775
+ "frame_count": 100,
1776
+ "start_frame": 54,
1777
+ "end_frame": 78,
1778
+ "start_bbox": [
1779
+ 0.267,
1780
+ 0.327,
1781
+ 0.578,
1782
+ 0.725
1783
+ ],
1784
+ "end_bbox": [
1785
+ 0.281,
1786
+ 0.4,
1787
+ 0.58,
1788
+ 0.731
1789
+ ],
1790
+ "width": 640,
1791
+ "height": 480
1792
+ }
1793
+ },
1794
+ {
1795
+ "conversations": [
1796
+ {
1797
+ "from": "human",
1798
+ "value": "Who is the adult in white away the child in the room?"
1799
+ },
1800
+ {
1801
+ "from": "gpt",
1802
+ "value": "The adult in white is away the child in the room. The adult is initially located at [0.069, 0.0, 0.317, 0.794] in frame 17 and stops appearing at position [0.084, 0.027, 0.308, 0.804] in frame 22."
1803
+ }
1804
+ ],
1805
+ "meta": {
1806
+ "vid": "4242713789",
1807
+ "frame_count": 100,
1808
+ "start_frame": 17,
1809
+ "end_frame": 22,
1810
+ "start_bbox": [
1811
+ 0.069,
1812
+ 0.0,
1813
+ 0.317,
1814
+ 0.794
1815
+ ],
1816
+ "end_bbox": [
1817
+ 0.084,
1818
+ 0.027,
1819
+ 0.308,
1820
+ 0.804
1821
+ ],
1822
+ "width": 640,
1823
+ "height": 480
1824
+ }
1825
+ },
1826
+ {
1827
+ "conversations": [
1828
+ {
1829
+ "from": "human",
1830
+ "value": "What is behind the adult man in white inside the subway station?"
1831
+ },
1832
+ {
1833
+ "from": "gpt",
1834
+ "value": "In frame 62, an adult man in white is in front of a red chair inside the subway station, with the chair located at position [0.059, 0.654, 0.103, 0.783]. Therefore, behind the adult man in white, at that moment, is a red chair located at that position. Is there anything else you want to know?"
1835
+ }
1836
+ ],
1837
+ "meta": {
1838
+ "vid": "2420718744",
1839
+ "frame_count": 100,
1840
+ "start_frame": 62,
1841
+ "end_frame": 69,
1842
+ "start_bbox": [
1843
+ 0.059,
1844
+ 0.654,
1845
+ 0.103,
1846
+ 0.783
1847
+ ],
1848
+ "end_bbox": [
1849
+ 0.059,
1850
+ 0.65,
1851
+ 0.103,
1852
+ 0.783
1853
+ ],
1854
+ "width": 320,
1855
+ "height": 240
1856
+ }
1857
+ },
1858
+ {
1859
+ "conversations": [
1860
+ {
1861
+ "from": "human",
1862
+ "value": "Who is the adult away from in the room?"
1863
+ },
1864
+ {
1865
+ "from": "gpt",
1866
+ "value": "In the video, there is an adult at position [0.644, 0.3, 0.761, 0.792] in frame 73. The adult is away from another adult in the room at position [0.831, 0.244, 0.95, 0.689] in frame 99."
1867
+ }
1868
+ ],
1869
+ "meta": {
1870
+ "vid": "7162680320",
1871
+ "frame_count": 100,
1872
+ "start_frame": 73,
1873
+ "end_frame": 99,
1874
+ "start_bbox": [
1875
+ 0.644,
1876
+ 0.3,
1877
+ 0.761,
1878
+ 0.792
1879
+ ],
1880
+ "end_bbox": [
1881
+ 0.831,
1882
+ 0.244,
1883
+ 0.95,
1884
+ 0.689
1885
+ ],
1886
+ "width": 640,
1887
+ "height": 360
1888
+ }
1889
+ },
1890
+ {
1891
+ "conversations": [
1892
+ {
1893
+ "from": "human",
1894
+ "value": "Who is the adult that is away in the video?"
1895
+ },
1896
+ {
1897
+ "from": "gpt",
1898
+ "value": "In the video, there is an adult in white who is away from a squatting adult. The target adult object can be seen initially at position [0.377, 0.085, 0.695, 0.998] in frame 22 and stops appearing at position [0.006, 0.475, 0.2, 0.998] in frame 36."
1899
+ }
1900
+ ],
1901
+ "meta": {
1902
+ "vid": "3347531832",
1903
+ "frame_count": 100,
1904
+ "start_frame": 22,
1905
+ "end_frame": 36,
1906
+ "start_bbox": [
1907
+ 0.377,
1908
+ 0.085,
1909
+ 0.695,
1910
+ 0.998
1911
+ ],
1912
+ "end_bbox": [
1913
+ 0.006,
1914
+ 0.475,
1915
+ 0.2,
1916
+ 0.998
1917
+ ],
1918
+ "width": 640,
1919
+ "height": 480
1920
+ }
1921
+ },
1922
+ {
1923
+ "conversations": [
1924
+ {
1925
+ "from": "human",
1926
+ "value": "Who is leaning on the chair in the video?"
1927
+ },
1928
+ {
1929
+ "from": "gpt",
1930
+ "value": "In the video, an adult in black clothes is leaning on a chair. The adult is initially located at position [0.17, 0.427, 0.514, 0.998] in frame 27 and stops appearing at position [0.136, 0.0, 0.516, 0.9] in frame 99."
1931
+ }
1932
+ ],
1933
+ "meta": {
1934
+ "vid": "6140084750",
1935
+ "frame_count": 100,
1936
+ "start_frame": 27,
1937
+ "end_frame": 99,
1938
+ "start_bbox": [
1939
+ 0.17,
1940
+ 0.427,
1941
+ 0.514,
1942
+ 0.998
1943
+ ],
1944
+ "end_bbox": [
1945
+ 0.136,
1946
+ 0.0,
1947
+ 0.516,
1948
+ 0.9
1949
+ ],
1950
+ "width": 640,
1951
+ "height": 480
1952
+ }
1953
+ },
1954
+ {
1955
+ "conversations": [
1956
+ {
1957
+ "from": "human",
1958
+ "value": "Who is in front of the adult in gray in the video?"
1959
+ },
1960
+ {
1961
+ "from": "gpt",
1962
+ "value": "In the video, the child with dress is in front of the adult in gray. The child is initially located at position [0.747, 0.414, 0.967, 0.972] in frame 23 and stops appearing at position [0.719, 0.397, 0.917, 0.994] in frame 43. Therefore, the answer is the child with dress."
1963
+ }
1964
+ ],
1965
+ "meta": {
1966
+ "vid": "7604112162",
1967
+ "frame_count": 100,
1968
+ "start_frame": 23,
1969
+ "end_frame": 43,
1970
+ "start_bbox": [
1971
+ 0.747,
1972
+ 0.414,
1973
+ 0.967,
1974
+ 0.972
1975
+ ],
1976
+ "end_bbox": [
1977
+ 0.719,
1978
+ 0.397,
1979
+ 0.917,
1980
+ 0.994
1981
+ ],
1982
+ "width": 640,
1983
+ "height": 360
1984
+ }
1985
+ },
1986
+ {
1987
+ "conversations": [
1988
+ {
1989
+ "from": "human",
1990
+ "value": "Who is riding the ski in the snow?"
1991
+ },
1992
+ {
1993
+ "from": "gpt",
1994
+ "value": "An adult in black clothes is riding a ski in the snow. The adult appears in frame 4 at position [0.391, 0.338, 0.475, 0.654] and stops appearing in frame 73 at position [0.102, 0.0, 0.492, 0.998]."
1995
+ }
1996
+ ],
1997
+ "meta": {
1998
+ "vid": "8728461458",
1999
+ "frame_count": 100,
2000
+ "start_frame": 4,
2001
+ "end_frame": 73,
2002
+ "start_bbox": [
2003
+ 0.391,
2004
+ 0.338,
2005
+ 0.475,
2006
+ 0.654
2007
+ ],
2008
+ "end_bbox": [
2009
+ 0.102,
2010
+ 0.0,
2011
+ 0.492,
2012
+ 0.998
2013
+ ],
2014
+ "width": 640,
2015
+ "height": 480
2016
+ }
2017
+ },
2018
+ {
2019
+ "conversations": [
2020
+ {
2021
+ "from": "human",
2022
+ "value": "What is beneath the child wearing the blue hat?"
2023
+ },
2024
+ {
2025
+ "from": "gpt",
2026
+ "value": "In frame 12, the child wearing the blue hat is above a skateboard at position [0.449, 0.656, 0.504, 0.791]. However, by frame 97, the skateboard is no longer present beneath the child, as it stops appearing at position [0.408, 0.816, 0.546, 0.997]."
2027
+ }
2028
+ ],
2029
+ "meta": {
2030
+ "vid": "4805511103",
2031
+ "frame_count": 100,
2032
+ "start_frame": 12,
2033
+ "end_frame": 97,
2034
+ "start_bbox": [
2035
+ 0.449,
2036
+ 0.656,
2037
+ 0.504,
2038
+ 0.791
2039
+ ],
2040
+ "end_bbox": [
2041
+ 0.408,
2042
+ 0.816,
2043
+ 0.546,
2044
+ 0.997
2045
+ ],
2046
+ "width": 568,
2047
+ "height": 320
2048
+ }
2049
+ },
2050
+ {
2051
+ "conversations": [
2052
+ {
2053
+ "from": "human",
2054
+ "value": "Is there a black dog away from a car in the video?"
2055
+ },
2056
+ {
2057
+ "from": "gpt",
2058
+ "value": "In the video, there is a black dog positioned away from a car. The dog is initially at position [0.425, 0.422, 0.466, 0.486] in frame 58 and stops appearing at position [0.409, 0.425, 0.469, 0.553] in frame 86. Therefore, the answer to your question is yes."
2059
+ }
2060
+ ],
2061
+ "meta": {
2062
+ "vid": "3190710669",
2063
+ "frame_count": 100,
2064
+ "start_frame": 58,
2065
+ "end_frame": 86,
2066
+ "start_bbox": [
2067
+ 0.425,
2068
+ 0.422,
2069
+ 0.466,
2070
+ 0.486
2071
+ ],
2072
+ "end_bbox": [
2073
+ 0.409,
2074
+ 0.425,
2075
+ 0.469,
2076
+ 0.553
2077
+ ],
2078
+ "width": 640,
2079
+ "height": 360
2080
+ }
2081
+ },
2082
+ {
2083
+ "conversations": [
2084
+ {
2085
+ "from": "human",
2086
+ "value": "What is the black dog away from in the video?"
2087
+ },
2088
+ {
2089
+ "from": "gpt",
2090
+ "value": "In the video, there is a black dog [0.375, 0.358, 0.464, 0.447] away from a car. The car is located at position [0.312, 0.206, 0.402, 0.306] in frame 86. Therefore, the black dog is away from the car in the video."
2091
+ }
2092
+ ],
2093
+ "meta": {
2094
+ "vid": "3190710669",
2095
+ "frame_count": 100,
2096
+ "start_frame": 58,
2097
+ "end_frame": 86,
2098
+ "start_bbox": [
2099
+ 0.375,
2100
+ 0.358,
2101
+ 0.464,
2102
+ 0.447
2103
+ ],
2104
+ "end_bbox": [
2105
+ 0.312,
2106
+ 0.206,
2107
+ 0.402,
2108
+ 0.306
2109
+ ],
2110
+ "width": 640,
2111
+ "height": 360
2112
+ }
2113
+ },
2114
+ {
2115
+ "conversations": [
2116
+ {
2117
+ "from": "human",
2118
+ "value": "What is the sofa towards to in the room?"
2119
+ },
2120
+ {
2121
+ "from": "gpt",
2122
+ "value": "In the room, there is a sofa towards a screen/monitor. The screen/monitor is initially located at position [0.567, 0.204, 0.847, 0.483] in frame 25 and stops appearing at position [0.594, 0.194, 0.866, 0.463] in frame 29."
2123
+ }
2124
+ ],
2125
+ "meta": {
2126
+ "vid": "5220256786",
2127
+ "frame_count": 100,
2128
+ "start_frame": 25,
2129
+ "end_frame": 29,
2130
+ "start_bbox": [
2131
+ 0.567,
2132
+ 0.204,
2133
+ 0.847,
2134
+ 0.483
2135
+ ],
2136
+ "end_bbox": [
2137
+ 0.594,
2138
+ 0.194,
2139
+ 0.866,
2140
+ 0.463
2141
+ ],
2142
+ "width": 640,
2143
+ "height": 480
2144
+ }
2145
+ },
2146
+ {
2147
+ "conversations": [
2148
+ {
2149
+ "from": "human",
2150
+ "value": "Who is towards the adult outdoors?"
2151
+ },
2152
+ {
2153
+ "from": "gpt",
2154
+ "value": "In the video, there is a child in black towards an adult outdoors. The child is initially located at [0.803, 0.794, 0.895, 0.975] in frame 4 and stops appearing at position [0.564, 0.814, 0.633, 0.997] in frame 7."
2155
+ }
2156
+ ],
2157
+ "meta": {
2158
+ "vid": "8577770314",
2159
+ "frame_count": 100,
2160
+ "start_frame": 4,
2161
+ "end_frame": 7,
2162
+ "start_bbox": [
2163
+ 0.803,
2164
+ 0.794,
2165
+ 0.895,
2166
+ 0.975
2167
+ ],
2168
+ "end_bbox": [
2169
+ 0.564,
2170
+ 0.814,
2171
+ 0.633,
2172
+ 0.997
2173
+ ],
2174
+ "width": 640,
2175
+ "height": 360
2176
+ }
2177
+ },
2178
+ {
2179
+ "conversations": [
2180
+ {
2181
+ "from": "human",
2182
+ "value": "Who is the child in black towards outdoors in the video?"
2183
+ },
2184
+ {
2185
+ "from": "gpt",
2186
+ "value": "In the video, there is a child in black towards an adult outdoors. The adult is initially located at position [0.489, 0.475, 0.6, 0.994] in frame 4 and stops appearing at position [0.409, 0.611, 0.517, 0.994] in frame 7. Therefore, the answer to your question is that the child in black is towards an adult outdoors."
2187
+ }
2188
+ ],
2189
+ "meta": {
2190
+ "vid": "8577770314",
2191
+ "frame_count": 100,
2192
+ "start_frame": 4,
2193
+ "end_frame": 7,
2194
+ "start_bbox": [
2195
+ 0.489,
2196
+ 0.475,
2197
+ 0.6,
2198
+ 0.994
2199
+ ],
2200
+ "end_bbox": [
2201
+ 0.409,
2202
+ 0.611,
2203
+ 0.517,
2204
+ 0.994
2205
+ ],
2206
+ "width": 640,
2207
+ "height": 360
2208
+ }
2209
+ },
2210
+ {
2211
+ "conversations": [
2212
+ {
2213
+ "from": "human",
2214
+ "value": "What does the child in gray hold at home?"
2215
+ },
2216
+ {
2217
+ "from": "gpt",
2218
+ "value": "In the video, a child in gray holds a silver cellphone at home. The cellphone can be initially seen at position [0.353, 0.486, 0.455, 0.575] in frame 27 and stops appearing at position [0.289, 0.922, 0.434, 0.989] in frame 89."
2219
+ }
2220
+ ],
2221
+ "meta": {
2222
+ "vid": "3152504882",
2223
+ "frame_count": 100,
2224
+ "start_frame": 27,
2225
+ "end_frame": 89,
2226
+ "start_bbox": [
2227
+ 0.353,
2228
+ 0.486,
2229
+ 0.455,
2230
+ 0.575
2231
+ ],
2232
+ "end_bbox": [
2233
+ 0.289,
2234
+ 0.922,
2235
+ 0.434,
2236
+ 0.989
2237
+ ],
2238
+ "width": 640,
2239
+ "height": 360
2240
+ }
2241
+ },
2242
+ {
2243
+ "conversations": [
2244
+ {
2245
+ "from": "human",
2246
+ "value": "Who caresses the black cat in the video?"
2247
+ },
2248
+ {
2249
+ "from": "gpt",
2250
+ "value": "In the video, an adult in striped clothes is caressing the black cat. The adult is initially located at position [0.406, 0.05, 0.931, 0.997] in frame 8 and stops appearing at position [0.502, 0.058, 0.992, 0.997] in frame 15."
2251
+ }
2252
+ ],
2253
+ "meta": {
2254
+ "vid": "7499914046",
2255
+ "frame_count": 100,
2256
+ "start_frame": 8,
2257
+ "end_frame": 15,
2258
+ "start_bbox": [
2259
+ 0.406,
2260
+ 0.05,
2261
+ 0.931,
2262
+ 0.997
2263
+ ],
2264
+ "end_bbox": [
2265
+ 0.502,
2266
+ 0.058,
2267
+ 0.992,
2268
+ 0.997
2269
+ ],
2270
+ "width": 640,
2271
+ "height": 360
2272
+ }
2273
+ }
2274
+ ]
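
Every entry in results.json above follows the same layout: a question/answer pair under "conversations" and a "meta" block with the video id, the frame span, the normalized start/end bounding boxes, and the frame width/height. Below is a minimal sketch (not part of this commit) of reading the file and converting those normalized boxes back to pixel coordinates; the file path and the helper name are illustrative assumptions.

import json

def to_pixels(bbox, width, height):
    # Normalized [x1, y1, x2, y2] -> pixel coordinates, using "width"/"height" from "meta".
    x1, y1, x2, y2 = bbox
    return [x1 * width, y1 * height, x2 * width, y2 * height]

with open("results.json") as f:  # path assumed; adjust to where the file lives
    results = json.load(f)

for entry in results:
    meta = entry["meta"]
    question = entry["conversations"][0]["value"]  # "from": "human"
    answer = entry["conversations"][1]["value"]    # "from": "gpt"
    start_px = to_pixels(meta["start_bbox"], meta["width"], meta["height"])
    end_px = to_pixels(meta["end_bbox"], meta["width"], meta["height"])
    print(meta["vid"], meta["start_frame"], meta["end_frame"], start_px, end_px)
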
run_vtimellm.txt CHANGED
@@ -2,3 +2,12 @@ python -m vtimellm.inference --model_base lmsys/vicuna-7b-v1.5 \
   --pretrain_mm_mlp_adapter checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage1/mm_projector.bin \
   --stage2 checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage2 \
   --stage3 checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage3
+
+
+ python demo_gradio.py --model_base lmsys/vicuna-7b-v1.5 \
+ --pretrain_mm_mlp_adapter ../checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage1/mm_projector.bin \
+ --stage2 ../checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage2 \
+ --stage3 ../checkpoints/vicuna-7b-v1.5/vtimellm-vicuna-v1-5-7b/vtimellm-vicuna-v1-5-7b-stage3
+
+
+ ssh -t -t -i /home/datasets/xitong_id_rsa xitong@newton.ist.ucf.edu -L 7860:localhost:7860 ssh evc23 -L 7860:localhost:7860