in src/open_vp_cal/widgets/swatch_analysis_widget.py
def update_exposure(self) -> None:
"""Update exposure based on slider value."""
if not self.led_walls:
return
display_transform = self.display_transform_combo_box.currentText()
view_transform = self.view_combo_box.currentText()
if not display_transform or not view_transform:
return
self.clear()
width = 0
exposure_slider_value = self.slider.value()
preview_calibration = self.preview_calibration_checkbox.isChecked()
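    # Render a swatch image per wall, reusing cached renders where possible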
for led_wall in self.led_walls:
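        # White balance only applies to walls that derive a white point from
        # auto WB, a reference wall match, or an explicit white point offset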
if (not led_wall.auto_wb_source and not led_wall.match_reference_wall
and not led_wall.use_white_point_offset):
apply_white_balance_checked = False
else:
apply_white_balance_checked = self.apply_white_balance_checkbox.isChecked()
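        # Key the cache on every input that affects the rendered swatch image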
cache_key = (f"{display_transform}_{view_transform}_{exposure_slider_value}_"
f"{apply_white_balance_checked}_{preview_calibration}_{led_wall.name}")
if cache_key not in self._image_cache:
sample_buffers_processed = []
reference_buffers_processed = []
# Take the image buffers in their input space (ACES2065-1) and apply the calibration if we have one
for count, sample in enumerate(led_wall.processing_results.sample_buffers):
sample_buffers_stitched = sample
sample_reference_buffers_stitched = led_wall.processing_results.sample_reference_buffers[count]
sp_np = imaging_utils.image_buf_to_np_array(sample_buffers_stitched)
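                # Normalise the samples by the exposure scaling factor recorded
                # in the calibration (or pre-calibration) results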
                exposure_scaling_factor = None
                if led_wall.processing_results.calibration_results:
                    exposure_scaling_factor = led_wall.processing_results.calibration_results[
                        constants.Results.EXPOSURE_SCALING_FACTOR]
                if not exposure_scaling_factor and led_wall.processing_results.pre_calibration_results:
                    exposure_scaling_factor = led_wall.processing_results.pre_calibration_results[
                        constants.Results.EXPOSURE_SCALING_FACTOR]
                # Guard against walls that have not been analysed and have no scaling factor yet
                if exposure_scaling_factor:
                    sp_np = sp_np / exposure_scaling_factor
if apply_white_balance_checked and led_wall.processing_results:
                    white_balance_matrix = None
                    if led_wall.processing_results.calibration_results:
                        white_balance_matrix = led_wall.processing_results.calibration_results[
                            constants.Results.WHITE_BALANCE_MATRIX]
                    if not white_balance_matrix and led_wall.processing_results.pre_calibration_results:
                        white_balance_matrix = led_wall.processing_results.pre_calibration_results[
                            constants.Results.WHITE_BALANCE_MATRIX]
if white_balance_matrix:
working_cs = colour.RGB_COLOURSPACES[self.project_settings.reference_gamut]
native_camera_gamut_cs = core_utils.get_native_camera_colourspace_for_led_wall(led_wall)
camera_conversion_cat = constants.CAT.CAT_CAT02
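                        # RED Wide Gamut conversions use a Bradford CAT rather than CAT02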
if native_camera_gamut_cs.name == constants.CameraColourSpace.RED_WIDE_GAMUT:
camera_conversion_cat = constants.CAT.CAT_BRADFORD
# Convert the samples from working to camera native gamut
sp_np = colour.RGB_to_RGB(
sp_np, working_cs, native_camera_gamut_cs, camera_conversion_cat
)
                        # Apply the white balance matrix to every pixel row; wrap in np.array
                        # so the float32 cast below operates on an ndarray rather than a list
                        sp_np = np.array([ca.vector_dot(white_balance_matrix, m) for m in sp_np])
# Convert the samples from camera native gamut to working
sp_np = colour.RGB_to_RGB(
sp_np, native_camera_gamut_cs, working_cs, camera_conversion_cat
)
sp_np = sp_np.astype(np.float32)
                # Apply the calibration preview through the generated OCIO config when requested
if led_wall.processing_results:
if led_wall.processing_results.ocio_config_output_file and preview_calibration:
calibration_cs_metadata = OcioConfigWriter.get_calibration_preview_space_metadata(led_wall)
imaging_utils.apply_color_converstion_to_np_array(
sp_np,
self.project_settings.reference_gamut,
calibration_cs_metadata[0],
color_config=led_wall.processing_results.ocio_config_output_file
)
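                # The reference buffer holds the expected patch values for side-by-side comparison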
rf_np = imaging_utils.image_buf_to_np_array(sample_reference_buffers_stitched)
                # The final 18 buffers are the Macbeth samples; scale them down to the 100-nit range
if count >= len(led_wall.processing_results.sample_buffers) - 18:
sp_np /= (led_wall.target_max_lum_nits * 0.01)
rf_np /= (led_wall.target_max_lum_nits * 0.01)
                # Apply the exposure offset linearly; each slider step is one stop
sp_np_exposed = sp_np * (2.0 ** exposure_slider_value)
rf_np_exposed = rf_np * (2.0 ** exposure_slider_value)
                # Convert the exposed arrays back to image buffers; display conversion happens later
exposed_sp_buffer = imaging_utils.img_buf_from_numpy_array(sp_np_exposed)
exposed_rf_buffer = imaging_utils.img_buf_from_numpy_array(rf_np_exposed)
sample_buffers_processed.append(exposed_sp_buffer)
reference_buffers_processed.append(exposed_rf_buffer)
            # Stitch the processed sample and reference strips together
exposed_sp_buffer, exposed_rf_buffer = imaging_utils.create_and_stitch_analysis_strips(
reference_buffers_processed, sample_buffers_processed)
            # Nest the sample and reference swatches into a single image
sample_swatch_nested = imaging_utils.nest_analysis_swatches(
exposed_sp_buffer,
exposed_rf_buffer
)
            # Convert to the selected display and view transform
exposed_display_buffer = imaging_utils.apply_display_conversion(
sample_swatch_nested, display_transform, view_transform
)
            # Add a text label with the wall name above each strip
header_height = int(exposed_display_buffer.spec().height * 0.1)
text_size = int(exposed_display_buffer.spec().height * 0.05)
text_buffer = imaging_utils.new_image(exposed_display_buffer.spec().width, header_height)
name = led_wall.name
text_color = [1, 1, 1]
imaging_utils.add_text_to_image_buffer(name, text_buffer, text_color, text_size)
stitched_value = imaging_utils.stitch_images_vertically([text_buffer, exposed_display_buffer])
# Convert back to a numpy array and create a QImage from it
image = utils.create_qimage_rgb8_from_numpy_array(
imaging_utils.image_buf_to_np_array(stitched_value)
)
self._image_cache[cache_key] = (
image, stitched_value.spec().width, stitched_value.spec().height)
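        # Fetch the cached image and lay the walls out left to right in the scene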
image, im_width, _ = self._image_cache[cache_key]
item = self.scene.addPixmap(QPixmap.fromImage(image))
item.setPos(width, 0)
width += im_width
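

# A minimal standalone sketch (not part of the widget) of the exposure arithmetic
# used above, assuming a linear-light float RGB array. The function name, defaults
# and the 1500-nit example value are illustrative assumptions, not taken from the
# codebase; numpy is assumed to be imported as np, as the method above relies on.
def _exposure_preview_sketch(rgb, slider_value, target_max_lum_nits=1500.0):
    """Scale Macbeth-style samples into the 100-nit range, then expose by stops."""
    rgb = np.asarray(rgb, dtype=np.float32)
    rgb = rgb / (target_max_lum_nits * 0.01)  # 100 nits maps to 1.0
    return rgb * (2.0 ** slider_value)        # one stop per slider step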