in src/open_vp_cal/framework/identify_separation.py [0:0]
def _find_first_red_and_green_frames(self) -> None:
    """Locate the first red and the first green frame in the sequence.

    Iterates over every frame in the sequence loader, white balances each
    frame's ROI against the slate (first) frame, and classifies the mean
    colour of the bright region. The first frame classified as red and the
    first classified as green are stored on ``self.separation_results``.

    As a fallback — and as a guard against locking onto the *second*
    red/green patch in the sequence — peaks in the frame-to-frame colour
    distance are analysed incrementally: if a colour check missed a patch,
    or the detected frame is more than 3 frames away from the expected
    distance peak, the detection is replaced by the peak frame.

    Results are written to ``self.separation_results.first_red_frame`` and
    ``self.separation_results.first_green_frame``; nothing is returned.
    """
    frame_numbers: list[int] = []
    distances: list[float] = []
    previous_mean_frame = None

    # Derive a white balance matrix from the slate frame so the red/green
    # classification is not skewed by the plate's overall colour cast.
    slate_frame_plate_gamut = self.led_wall.sequence_loader.get_frame(
        self.led_wall.sequence_loader.start_frame
    )
    # Ensure the slate frame is in reference gamut
    slate_frame = imaging_utils.apply_color_conversion(
        slate_frame_plate_gamut.image_buf,
        str(self.led_wall.input_plate_gamut),
        str(self.led_wall.project_settings.reference_gamut)
    )
    white_balance_matrix = imaging_utils.calculate_white_balance_matrix_from_img_buf(
        slate_frame)

    for frame in self.led_wall.sequence_loader:
        # Load the ROI from the frame and ensure it is in reference gamut
        image_plate_gamut = frame.extract_roi(self.led_wall.roi)
        image = imaging_utils.apply_color_conversion(
            image_plate_gamut,
            str(self.led_wall.input_plate_gamut),
            str(self.led_wall.project_settings.reference_gamut)
        )
        # Apply the white balance matrix to the frame
        image = imaging_utils.apply_matrix_to_img_buf(
            image, white_balance_matrix
        )
        # Compute the average of all the values which are above the
        # initial average (i.e. the mean colour of the bright patch area)
        mean_color, _ = imaging_utils.get_average_value_above_average(image)

        # Distance from the previous frame's mean colour; 0 on the first
        # frame. Explicit None check: relying on truthiness would raise
        # for numpy-array colours and mis-handle falsy values.
        distance = 0
        if previous_mean_frame is not None:
            distance = imaging_utils.calculate_distance(
                mean_color, previous_mean_frame)

        # Store the frame number and distance for the peak analysis below
        frame_numbers.append(frame.frame_num)
        distances.append(distance)
        previous_mean_frame = mean_color

        # Check if the image is red; record only the first occurrence
        if self.check_red(mean_color):
            if self.separation_results.first_red_frame is None:
                self.separation_results.first_red_frame = frame
            continue

        # Check if the image is green; record only the first occurrence
        if self.check_green(mean_color):
            if self.separation_results.first_green_frame is None:
                self.separation_results.first_green_frame = frame
            continue

        # Incrementally look for significant jumps in the mean-colour
        # distance — each patch transition should register as a peak.
        distances_array = np.array(distances)
        peaks, _ = find_peaks(distances_array, height=1)
        # NOTE(review): only peaks[0] and peaks[1] are consumed below;
        # waiting for >= 4 peaks presumably ensures both patch pairs have
        # fully registered before correcting — confirm against the
        # calibration patch layout.
        if len(peaks) >= 4:
            first_peak_frame_num = frame_numbers[peaks[0]]
            first_peak_frame = self.led_wall.sequence_loader.get_frame(
                first_peak_frame_num
            )
            # If we didn't find a red frame, set the first red frame to
            # the first peak
            if self.separation_results.first_red_frame is None:
                self.separation_results.first_red_frame = first_peak_frame

            # If the detected red frame is not within 3 frames of the
            # first peak, we detected the second red patch, so we should
            # use the first peak as the first red frame
            if not imaging_utils.is_within_range(
                    self.separation_results.first_red_frame.frame_num,
                    first_peak_frame_num, 3):
                self.separation_results.first_red_frame = first_peak_frame

            second_peak_frame_num = frame_numbers[peaks[1]]
            second_peak_frame = self.led_wall.sequence_loader.get_frame(
                second_peak_frame_num
            )
            # If we didn't find a green frame, set the first green frame
            # to the second peak
            if self.separation_results.first_green_frame is None:
                self.separation_results.first_green_frame = second_peak_frame

            # If the detected green frame is not within 3 frames of the
            # second peak, we detected the second green patch, so we
            # should use the second peak as the first green frame
            if not imaging_utils.is_within_range(
                    self.separation_results.first_green_frame.frame_num,
                    second_peak_frame_num, 3):
                self.separation_results.first_green_frame = second_peak_frame

        # Stop scanning once both detections are in place
        if (self.separation_results.first_red_frame is not None
                and self.separation_results.first_green_frame is not None):
            break