output.py
import os
import subprocess
import tempfile
from pathlib import Path
from typing import List, Optional

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from scipy.ndimage import gaussian_filter
from tqdm import tqdm

from metavideo import get_metagrid
from object_tracking_operations import (extract_masked_object_clips,
                                        extract_obj_gifs_parallel,
                                        extract_object_thumbs,
                                        interpolate_missing_data,
                                        merge_with_chromakey)
from utils import ensure_coords, ensure_dir, find_video_by_id, uniquify

def select_shots_by_entity(annotation_data: pd.DataFrame,
                           key: List[str], threshold=0.5,
                           search_categories=False,
                           padding_before=0,
                           padding_after=0):
    """
    Select shots by key entity
    :param annotation_data: dataframe with annotation data
    :param key: key entity (or list of entities) to filter by
    :param threshold: minimum confidence score to consider
    :param search_categories: search in categories as well
    :param padding_before: seconds to subtract from each start time
    :param padding_after: seconds to add to each end time
    :return: dataframe with selected shots
    """
    if isinstance(key, str):
        key = [key]
    key = [k.lower() for k in key]
    if search_categories and 'category' not in annotation_data.columns:
        raise KeyError("The dataframe has no 'category' column")
    if 'entity' not in annotation_data.columns:
        raise KeyError("The dataframe has no 'entity' column")
    if all(k not in annotation_data['entity'].str.lower().unique() for k in key):
        raise ValueError(f"Key entity {key} not found in dataframe")
    entity_shots = annotation_data[annotation_data['entity'].str.lower().isin(key)].reset_index(drop=True)
    if search_categories:
        category_shots = annotation_data[annotation_data['category'].str.lower().isin(key)].reset_index(drop=True)
        selected_shots = pd.concat([entity_shots, category_shots]).drop_duplicates(keep='first')
    else:
        selected_shots = entity_shots
    selected_shots = selected_shots[selected_shots['confidence'] >= threshold].reset_index(drop=True)
    if padding_before:
        selected_shots['start_sec'] -= padding_before
        # clamp padded start times at the beginning of the video
        selected_shots['start_sec'] = selected_shots['start_sec'].apply(lambda x: max(x, 0))
    if padding_after:
        # clamp padded end times at the last annotated second of the video
        end_time = annotation_data['end_sec'].max()
        selected_shots['end_sec'] += padding_after
        selected_shots['end_sec'] = selected_shots['end_sec'].apply(lambda x: min(x, end_time))
    return selected_shots

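# A minimal usage sketch (the CSV path and its column layout are assumptions,
# not part of this repository):
#   annotations = pd.read_csv('label_annotations.csv')
#   shots = select_shots_by_entity(annotations, ['cat', 'dog'], threshold=0.7,
#                                  padding_before=1, padding_after=1)
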
def select_shots_by_keyword(annotation_data: pd.DataFrame,
                            key: List[str],
                            threshold=0.5,
                            padding_before=5,
                            padding_after=3):
    """
    Select shots whose transcribed word matches a keyword
    :param annotation_data: dataframe with speech transcription data
    :param key: keyword (or list of keywords) to filter by
    :param threshold: minimum confidence score to consider
    :param padding_before: number of preceding rows (words) to consider as padding
    :param padding_after: number of following rows (words) to consider as padding
    :return: dataframe with selected shots
    """
    key = [key.lower()] if isinstance(key, str) else [k.lower() for k in key]
    if 'word' not in annotation_data.columns:
        raise KeyError("The dataframe has no 'word' column")
    # normalise words: lowercase and strip punctuation before matching
    words = annotation_data['word'].str.lower().str.replace(r'[^\w\s]+', '', regex=True)
    if all(k not in words.unique() for k in key):
        raise ValueError(f"Keyword {key} not found in dataframe")
    selected_shots = annotation_data[words.isin(key)]
    selected_shots = selected_shots[selected_shots['confidence'] >= threshold]
    selected_shots = add_padding_shots(annotation_data, padding_after, padding_before, selected_shots)
    selected_shots = selected_shots.drop_duplicates(keep='first')
    selected_shots = selected_shots.sort_index()
    selected_shots = selected_shots.reset_index(drop=True)
    selected_shots = merge_consecutive(selected_shots)
    return selected_shots

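# A minimal usage sketch (the transcript CSV and its columns are assumptions):
#   transcript = pd.read_csv('speech_annotations.csv')
#   shots = select_shots_by_keyword(transcript, 'hello', threshold=0.8)
#   # neighbouring words within 2 s are pulled in as context, then merged by
#   # merge_consecutive below
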
def select_shots_by_consecutive_words(annotation_data: pd.DataFrame,
                                      key: List[str]):
    """
    Select shots where the given words are spoken consecutively
    :param annotation_data: dataframe with speech transcription data
    :param key: list of words (or a comma-separated string) to match in order
    :return: dataframe with selected shots
    """
    key = [k.lower().strip() for k in key] if isinstance(key, list) else [k.lower().strip() for k in key.split(',')]
    annotation_data['word'] = annotation_data['word'].str.lower().str.replace(r'[^\w\s]+', '', regex=True).str.strip()
    rows = []
    # candidate start positions: every row whose word appears anywhere in the key
    indexes = annotation_data[annotation_data['word'].isin(key)].index.values
    for i in indexes:
        # the len(key) rows starting at i must spell out the key in order
        following_indexes = [i + j for j in range(len(key))]
        if following_indexes[-1] >= len(annotation_data):
            continue
        if all(annotation_data.iloc[m]['word'] == key[n] for n, m in enumerate(following_indexes)):
            # record one shot spanning from the first to the last matched word
            rows.append({
                'id': annotation_data.iloc[following_indexes[0]]['id'],
                'word': ' '.join(key),
                'start_sec': annotation_data.iloc[following_indexes[0]]['start_sec'],
                'end_sec': annotation_data.iloc[following_indexes[-1]]['end_sec'],
                'id0': following_indexes[0],
                'id1': following_indexes[-1]
            })
    # DataFrame.append was removed in pandas 2.0, so the rows are collected first
    selected_shots = pd.DataFrame(rows)
    # remove duplicates and sort by index
    selected_shots = selected_shots.drop_duplicates(keep='first')
    selected_shots = selected_shots.sort_index()
    return selected_shots

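# A minimal usage sketch (the file name is an assumption):
#   transcript = pd.read_csv('speech_annotations.csv')
#   shots = select_shots_by_consecutive_words(transcript, ['happy', 'new', 'year'])
#   # each returned row spans from the first to the last word of the phrase
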
def merge_consecutive(selected_shots):
    """
    Merge shots that are at most two seconds apart into a single shot,
    concatenating their words. Repeats until no more merges are possible.
    """
    selected_shots = selected_shots.reset_index(drop=True)
    merged = True
    while merged:
        merged = False
        for i in range(len(selected_shots) - 1):
            current_end = float(selected_shots.iloc[i]['end_sec'])
            next_start = float(selected_shots.iloc[i + 1]['start_sec'])
            if 0 <= next_start - current_end <= 2:
                selected_shots.loc[i, 'end_sec'] = selected_shots.iloc[i + 1]['end_sec']
                selected_shots.loc[i, 'word'] = f"{selected_shots.iloc[i]['word']} {selected_shots.iloc[i + 1]['word']}"
                selected_shots = selected_shots.drop(i + 1).reset_index(drop=True)
                # restart the scan on the modified frame rather than iterating a stale index
                merged = True
                break
    return selected_shots

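# A toy example of the merge (values invented for illustration):
#   df = pd.DataFrame({'word': ['hello', 'world'],
#                      'start_sec': [1.0, 3.5], 'end_sec': [2.0, 4.0]})
#   merge_consecutive(df)
#   # -> one row: word='hello world', start_sec=1.0, end_sec=4.0 (gap 1.5 s <= 2 s)
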
def add_padding_shots(annotation_data, padding_after, padding_before, selected_shots):
    """
    Pad each selected shot with up to `padding_before` preceding and
    `padding_after` following rows, as long as each neighbour is within
    two seconds of the shot.
    """
    padding_rows = []
    for n, shot in selected_shots.iterrows():
        if n == 0:
            # a match on the very first row is left unpadded
            continue
        for i in range(1, padding_before + 1):
            if n - i < 0:
                break
            current_start = int(shot['start_sec'])
            previous_end = int(annotation_data.iloc[n - i]['end_sec'])
            if current_start - previous_end <= 2:
                padding_rows.append(annotation_data.iloc[n - i])
            else:
                break
        for i in range(1, padding_after + 1):
            if n + i >= len(annotation_data):
                break
            current_end = int(shot['end_sec'])
            next_start = int(annotation_data.iloc[n + i]['start_sec'])
            if next_start - current_end <= 2:
                padding_rows.append(annotation_data.iloc[n + i])
            else:
                break
    # DataFrame.append was removed in pandas 2.0, so the rows are collected first
    if padding_rows:
        selected_shots = pd.concat([selected_shots, pd.DataFrame(padding_rows)])
    return selected_shots

def add_padding_to_consecutive_keywords(annotation_data, padding_after, padding_before, selected_shots):
    """
    Like add_padding_shots, but for shots produced by
    select_shots_by_consecutive_words, which store the positions of their
    first and last word in the 'id0' and 'id1' columns.
    """
    padding_rows = []
    for n, shot in selected_shots.iterrows():
        if n == 0:
            continue
        first, last = int(shot['id0']), int(shot['id1'])
        for i in range(1, padding_before + 1):
            if first - i < 0:
                break
            current_start = int(shot['start_sec'])
            previous_end = int(annotation_data.iloc[first - i]['end_sec'])
            if current_start - previous_end <= 2:
                padding_rows.append(annotation_data.iloc[first - i])
            else:
                break
        for i in range(1, padding_after + 1):
            if last + i >= len(annotation_data):
                break
            current_end = int(shot['end_sec'])
            next_start = int(annotation_data.iloc[last + i]['start_sec'])
            if next_start - current_end <= 2:
                padding_rows.append(annotation_data.iloc[last + i])
            else:
                break
    # DataFrame.append was removed in pandas 2.0, so the rows are collected first
    if padding_rows:
        selected_shots = pd.concat([selected_shots, pd.DataFrame(padding_rows)])
    return selected_shots

def extract_shots(_df: pd.DataFrame, in_dir: Path, out_dir: Path, text: Optional[str] = None):
    """
    Cut the selected shots out of their source videos with ffmpeg
    :param _df: DataFrame with data on which shots to extract
    :param in_dir: directory where the source videos are stored
    :param out_dir: directory where to save the extracted shots
    :param text: optional name of the column whose value is drawn on each shot
    :return: None
    """
    df = _df.sort_values(by=['id', 'start_sec'])
    out_dir.mkdir(parents=True, exist_ok=True)
    for index, row in tqdm(df.iterrows(), total=df.shape[0], desc='Extracting shots'):
        entity = row[text] if text else row.iloc[0]
        video_id = row['id']
        filename = Path(in_dir, find_video_by_id(row['id'], in_dir))
        start = "%.2f" % row['start_sec']
        end = "%.2f" % row['end_sec']
        if start == end:
            continue
        in_path = filename.resolve().as_posix()
        out_path = uniquify(os.path.join(out_dir.as_posix(), f"{video_id}.mp4"))
        command = ["ffmpeg"]
        options = ["-i", in_path, "-ss", start, "-to", end, "-y", "-movflags",
                   "faststart", "-avoid_negative_ts", "1", "-acodec", "copy", out_path]
        if text:
            # the boxcolor value was mangled into an e-mail placeholder in the
            # hosted source; 'black@0.5' (50% opacity) is assumed here
            text_filter = ["drawtext=", "fontfile=Inter-Regular.ttf:", f"text='{entity}':",
                           "fontcolor=white:", "fontsize=24:", "box=1:", "boxcolor=black@0.5:",
                           "boxborderw=5:", "x=(w-text_w)/2:", "y=24"]
            filter_args = "".join(text_filter)
            options.insert(6, "-vf")
            options.insert(7, filter_args)
        args = command + options
        # stderr is captured so it can be shown if a cut fails
        operation = subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
        if operation.returncode != 0:
            print(operation.stderr.decode('utf-8'))
            raise RuntimeError("ffmpeg failed while cutting the video; see the stderr output above")
    # TODO add threading and hardware acceleration because this can get pretty long and boring

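# A minimal usage sketch (directory names are assumptions):
#   shots = select_shots_by_entity(annotations, ['cat'])
#   extract_shots(shots, Path('videos'), Path('shots'), text='entity')
#   # each row becomes a <video_id>.mp4 in 'shots', with the entity name drawn on top
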
def merge_shots(in_dir: Path, out_dir: Path):
    """
    Merge shots into one video
    :param in_dir: directory containing the shot clips to merge
    :param out_dir: directory where to save the merged video
    :return: None
    """
    def has_stream(file, stream_type):
        """Return True if the file has at least one stream of the given type ('a' or 'v')."""
        probe = subprocess.run(
            ["ffprobe", "-v", "error", "-select_streams", f"{stream_type}:0", "-show_entries",
             "stream=codec_type", "-of", "default=noprint_wrappers=1:nokey=1", file],
            stdout=subprocess.PIPE)
        return len(probe.stdout.decode('utf-8').splitlines()) > 0

    out_dir.mkdir(parents=True, exist_ok=True)
    files = [file.as_posix() for file in in_dir.glob('*.mp4')]
    # select only files that have both an audio and a video stream
    files = [file for file in files if has_stream(file, 'a') and has_stream(file, 'v')]
    files.sort()
    out_path = uniquify(Path(out_dir, 'merged.mp4').as_posix())
    command = ["ffmpeg"]
    input_files = [["-i", file] for file in files]
    input_files = [item for sublist in input_files for item in sublist]
    streams = [f"[{i}:v][{i}:a]" for i in range(len(files))]
    concat = [f"concat=n={len(files)}:v=1:a=1[v][a]"]
    mapper = ['-map', '[v]', '-map', '[a]']
    sync = ["-vsync", "2", '-threads', '0']
    options = input_files + ["-filter_complex"] + [''.join(streams + concat)] + mapper + sync + [out_path]
    args = command + options
    subprocess.run(args, stdout=subprocess.DEVNULL)

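# A minimal usage sketch (directory names are assumptions):
#   merge_shots(Path('shots'), Path('merged'))
#   # concatenates every valid clip in 'shots' into merged/merged.mp4
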
def agnostic_merge(video_dir, output_dir):
    """
    Merge videos of arbitrary resolutions into one 1280x720 video,
    letterboxing each input to preserve its aspect ratio.
    :param video_dir: directory containing the videos to merge
    :param output_dir: directory where to save the merged video
    :return: None
    """
    output_directory = ensure_dir(output_dir)
    files = [os.path.join(video_dir, file) for file in os.listdir(video_dir) if file.endswith('.mp4')]
    out_path = uniquify(os.path.join(output_directory, 'merged.mp4'))
    command = ["ffmpeg"]
    input_files = [["-i", file] for file in files]
    input_files = [item for sublist in input_files for item in sublist]
    # scale each input into a 1280x720 canvas, pad the borders and normalise the SAR
    aspect_ratio_handler = [
        f'[{i}]scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:(ow-iw)/2:(oh-ih)/2,setsar=1[v{i}];'
        for i in range(len(files))]
    streams = [f'[v{i}][{i}:a:0]' for i in range(len(files))]
    concat = [f"concat=n={len(files)}:v=1:a=1[v][a]"]
    mapper = ['-map', '[v]', '-map', '[a]']
    sync = ["-vsync", "2"]
    options = input_files + ["-filter_complex"] + [
        ''.join(aspect_ratio_handler + streams + concat)] + mapper + sync + [out_path]
    args = command + options
    subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

def render_heatmap(out_dir: Path, data: pd.DataFrame, key: list, **kwargs) -> None:
    """
    Render a presence heatmap of the selected objects
    :param out_dir: folder where to save the heatmap
    :param data: DataFrame with object tracking data to plot
    :param key: names of the objects to include
    :keyword resolution: tuple(width, height), defaults to (1920, 1080)
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    res = kwargs.get('resolution', (1920, 1080))
    data = data.fillna(0)
    # accumulate a 2D presence count: each bounding box votes for the pixels it
    # covers (a 2D map, rather than an RGBA image, lets imsave apply the colormap)
    counts = np.zeros((res[1], res[0]), dtype=np.float64)
    for _, row in data.iterrows():
        left, top, right, bottom = ensure_coords(row['left'], row['top'], row['right'], row['bottom'])
        counts[int(top * res[1]):int(bottom * res[1]), int(left * res[0]):int(right * res[0])] += 1
    counts = gaussian_filter(counts, sigma=5)
    counts = counts / counts.max()
    cmap = LinearSegmentedColormap.from_list("", ["blue", "red"])
    out_dir.mkdir(parents=True, exist_ok=True)
    video_id, object_name = data['id'].iat[0], data['object_name'].iat[0]
    out_path = out_dir / f"{video_id}_{object_name}.png"
    plt.imsave(uniquify(out_path.as_posix()), counts, cmap=cmap)

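# A minimal usage sketch (file and column names are assumptions):
#   tracking = pd.read_csv('object_tracking.csv')
#   render_heatmap(Path('heatmaps'), tracking, ['person'], resolution=(1280, 720))
#   # redder pixels were covered by more 'person' bounding boxes
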
def bresenham(x1, y1, x2, y2):
    """Yield the integer points of the line from (x1, y1) to (x2, y2),
    using Bresenham's line algorithm."""
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    sx = 1 if x1 < x2 else -1
    sy = 1 if y1 < y2 else -1
    err = dx - dy
    while True:
        yield x1, y1
        if x1 == x2 and y1 == y2:
            break
        e2 = 2 * err
        if e2 > -dy:
            err -= dy
            x1 += sx
        if e2 < dx:
            err += dx
            y1 += sy

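# For example, list(bresenham(0, 0, 3, 2)) yields the pixels
# [(0, 0), (1, 1), (2, 1), (3, 2)], a discrete approximation of the segment.
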
def render_traces(out_dir: Path, data: pd.DataFrame, key: list, **kwargs) -> None:
    """
    Render the movement traces of the selected objects
    :param out_dir: folder where to save the traces
    :param data: DataFrame with object tracking data to plot
    :param key: names of the objects to include
    :keyword resolution: tuple(width, height), defaults to (1920, 1080)
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    res = kwargs.get('resolution', (1920, 1080))
    data = data.fillna(0)
    img = np.zeros((res[1], res[0], 4), dtype=np.uint)
    # follow each object throughout the video by sorting its detections by time
    data = data.sort_values(['object_id', 'time_seconds'])
    # since centroid positions are not guaranteed to be continuous, a line is drawn
    # from point n to point n+1 using Bresenham's line algorithm
    for _, group in data.groupby('object_id'):
        for i in range(len(group) - 1):
            left, top, right, bottom = ensure_coords(group['left'].iloc[i], group['top'].iloc[i],
                                                     group['right'].iloc[i], group['bottom'].iloc[i])
            centroid = (int((left + right) / 2 * res[0]), int((top + bottom) / 2 * res[1]))
            n_left, n_top, n_right, n_bottom = ensure_coords(group['left'].iloc[i + 1], group['top'].iloc[i + 1],
                                                             group['right'].iloc[i + 1], group['bottom'].iloc[i + 1])
            next_centroid = (int((n_left + n_right) / 2 * res[0]), int((n_top + n_bottom) / 2 * res[1]))
            for x, y in bresenham(centroid[0], centroid[1], next_centroid[0], next_centroid[1]):
                # stamp a 10x10 dot on each line point, clamped to the image bounds
                img[max(y - 5, 0):y + 5, max(x - 5, 0):x + 5, 0:3] += 1
    img = img / img.max()
    img[0:res[1], 0:res[0], 3] = 1
    out_dir.mkdir(parents=True, exist_ok=True)
    video_id, object_name = data['id'].iat[0], data['object_name'].iat[0]
    out_path = out_dir / f"{video_id}_{object_name}.png"
    plt.imsave(uniquify(out_path.as_posix()), img)
    print(f"Rendered trace for {video_id} {object_name}")

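# A minimal usage sketch (the file name is an assumption):
#   tracking = pd.read_csv('object_tracking.csv')
#   render_traces(Path('traces'), tracking, ['car'])
#   # draws each tracked 'car' centroid path as a grayscale line
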
def extract_object_thumbnails(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Given a video and a dataframe with object tracking annotations, extracts thumbnails of the selected objects
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the thumbnails
    :param data: dataframe with object tracking annotations
    :param key: names of the objects to extract
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    extract_object_thumbs(in_dir, out_dir, data)

def extract_object_gifs(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Given a video and a dataframe with object tracking annotations, extracts gifs of the selected objects
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the gifs
    :param data: dataframe with object tracking annotations
    :param key: names of the objects to extract
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    extract_obj_gifs_parallel(in_dir, out_dir, data)

def extract_masked_clips(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Given a video and a dataframe with object tracking annotations, extracts masked clips of the selected objects
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the clips
    :param data: dataframe with object tracking annotations
    :param key: names of the objects to extract
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    extract_masked_object_clips(in_dir, out_dir, data)

def extract_object_metavideo(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Given a video and a dataframe with object tracking annotations, builds a metavideo of the selected objects
    by chroma-keying their masked clips on top of one another
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the metavideo
    :param data: dataframe with object tracking annotations
    :param key: names of the objects to extract
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    with tempfile.TemporaryDirectory() as temp_dir:
        # masked clips are rendered on a green background, then keyed together
        extract_masked_object_clips(in_dir, Path(temp_dir), data, color=(0, 255, 0, 0))
        merge_with_chromakey(Path(temp_dir), out_dir)

def extract_object_metagrid(in_dir: Path, out_dir: Path, data: pd.DataFrame, key: list) -> None:
    """
    Given a video and a dataframe with object tracking annotations, builds a grid of thumbnails of the selected objects
    :param in_dir: directory where the source video is stored
    :param out_dir: directory where to save the metagrid
    :param data: dataframe with object tracking annotations
    :param key: names of the objects to extract
    :return: None
    """
    data = data[data['object_name'].isin(key)]
    data = interpolate_missing_data(data)
    with tempfile.TemporaryDirectory() as temp_dir:
        extract_object_thumbs(in_dir, Path(temp_dir), data)
        get_metagrid(Path(temp_dir), out_dir)

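# A minimal end-to-end sketch of the object pipeline (file and directory names
# are assumptions):
#   tracking = pd.read_csv('object_tracking.csv')
#   extract_object_thumbnails(Path('videos'), Path('thumbs'), tracking, ['dog'])
#   extract_object_metagrid(Path('videos'), Path('grids'), tracking, ['dog'])
#   extract_object_metavideo(Path('videos'), Path('metavideos'), tracking, ['dog'])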