in stack/lambdas/rekopoc-apply-faces-to-video-docker/video_processor.py [0:0]
def apply_faces_to_video(final_timestamps, local_path_to_video, local_output, video_metadata, color=(255,0,0), thickness=2):
    """Pixelate detected faces in a video and write the result to a new file.

    Args:
        final_timestamps: dict mapping a millisecond timestamp (str or int) to a
            list of face bounding boxes; each box is a dict with relative
            'Left', 'Top', 'Width', 'Height' values in [0, 1]
            (Rekognition-style coordinates -- TODO confirm against caller).
        local_path_to_video: path of the input video file.
        local_output: path the processed video is written to (MJPG codec).
        video_metadata: dict with 'FrameRate', 'FrameHeight', 'FrameWidth'.
        color: unused; kept for backward compatibility with existing callers.
        thickness: unused; kept for backward compatibility with existing callers.
    """
    # Extract video info
    frame_rate = video_metadata["FrameRate"]
    frame_height = video_metadata["FrameHeight"]
    frame_width = video_metadata["FrameWidth"]
    # Padding added around each detected box so the blur fully covers the face.
    width_delta = int(frame_width / 250)
    height_delta = int(frame_height / 100)
    # Set up support for OpenCV
    frame_counter = 0
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # Create the file pointers
    v = cv2.VideoCapture(local_path_to_video)
    print("VideoCapture - local path to video")
    out = cv2.VideoWriter(
        filename=local_output,
        fourcc=fourcc,
        fps=int(frame_rate),
        frameSize=(frame_width, frame_height)
    )
    try:
        # Walk the video frame by frame, blurring any face whose detection
        # window covers the current frame index.
        while v.isOpened():
            has_frame, frame = v.read()
            if not has_frame:
                break
            for t, faces in final_timestamps.items():
                # Timestamps are in milliseconds; a detection at time t is
                # applied to the frames from t up to half a second later.
                lower_bound = int(int(t) / 1000 * frame_rate)
                upper_bound = int(int(t) / 1000 * frame_rate + frame_rate / 2) + 1
                if lower_bound <= frame_counter <= upper_bound:
                    for f in faces:
                        # Convert relative box coords to padded pixel coords.
                        x = int(f['Left'] * frame_width) - width_delta
                        y = int(f['Top'] * frame_height) - height_delta
                        w = int(f['Width'] * frame_width) + 2 * width_delta
                        h = int(f['Height'] * frame_height) + 2 * height_delta
                        # BUGFIX: clamp to the frame. The padding can push x/y
                        # negative, and NumPy treats negative slice indices as
                        # offsets from the end -- the blur would silently land
                        # on the wrong region (or an empty slice) for faces
                        # near the top/left edge.
                        x1, y1 = max(0, x), max(0, y)
                        x2 = min(frame_width, x + w)
                        y2 = min(frame_height, y + h)
                        if x2 <= x1 or y2 <= y1:
                            continue  # box entirely outside the frame
                        to_blur = frame[y1:y2, x1:x2]
                        blurred = anonymize_face_pixelate(to_blur, blocks=10)
                        frame[y1:y2, x1:x2] = blurred
                        # frame = cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 3)
            out.write(frame)
            frame_counter += 1
    finally:
        # Release the capture/writer handles even if a frame fails mid-stream,
        # so a partial output file is still flushed and closed.
        out.release()
        v.release()
        cv2.destroyAllWindows()
    print(f"Complete. {frame_counter} frames were written.")