in sam2/sam2_video_predictor.py
@torch.inference_mode()
def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
    """
    Remove an object id from the tracking state. If `strict` is True, a
    RuntimeError is raised when the object id doesn't exist; otherwise removing
    a missing id is a no-op. Returns the remaining object ids and a list of
    (frame_idx, video_res_masks) updates (empty unless `need_output` is True).
    """
    old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
    updated_frames = []
    # Check whether this object_id to remove actually exists and possibly raise an error.
    if old_obj_idx_to_rm is None:
        if not strict:
            return inference_state["obj_ids"], updated_frames
        raise RuntimeError(
            f"Cannot remove object id {obj_id} as it doesn't exist. "
            f"All existing object ids: {inference_state['obj_ids']}."
        )

    # If this is the only remaining object id, we simply reset the state.
    if len(inference_state["obj_id_to_idx"]) == 1:
        self.reset_state(inference_state)
        return inference_state["obj_ids"], updated_frames
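    # (reset_state clears all object ids from the state, so this branch returns ([], []))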

    # There are still remaining objects after removing this object id. In this case,
    # we need to delete the object storage from inference state tensors.
    # Step 0: clear the input on those frames where this object id has point or mask input
    # (note that this step is required as it might downgrade conditioning frames to
    # non-conditioning ones)
    obj_input_frames_inds = set()
    obj_input_frames_inds.update(
        inference_state["point_inputs_per_obj"][old_obj_idx_to_rm]
    )
    obj_input_frames_inds.update(
        inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm]
    )
    for frame_idx in obj_input_frames_inds:
        self.clear_all_prompts_in_frame(
            inference_state, frame_idx, obj_id, need_output=False
        )
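    # For example, if a frame became a conditioning frame only because this object
    # was prompted there, clearing that prompt reverts it to a non-conditioning
    # frame for the remaining objects.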

    # Step 1: Update the object id mapping (note that it must be done after Step 0,
    # since Step 0 still requires the old object id mappings in inference_state)
    old_obj_ids = inference_state["obj_ids"]
    old_obj_inds = list(range(len(old_obj_ids)))
    remain_old_obj_inds = old_obj_inds.copy()
    remain_old_obj_inds.remove(old_obj_idx_to_rm)
    new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
    new_obj_inds = list(range(len(new_obj_ids)))
    # build new mappings
    old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
    inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
    inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
    inference_state["obj_ids"] = new_obj_ids
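    # For example, removing obj_idx 1 from obj_ids ["a", "b", "c"] gives
    # remain_old_obj_inds = [0, 2], new_obj_ids = ["a", "c"], and
    # old_idx_to_new_idx = {0: 0, 2: 1} (indices above the removed one shift down).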

    # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
    # (note that "consolidated_frame_inds" doesn't need to be updated in this step as
    # it's already handled in Step 0)
    def _map_keys(container):
        new_kvs = []
        for k in old_obj_inds:
            v = container.pop(k)
            if k in old_idx_to_new_idx:
                new_kvs.append((old_idx_to_new_idx[k], v))
        container.update(new_kvs)

    _map_keys(inference_state["point_inputs_per_obj"])
    _map_keys(inference_state["mask_inputs_per_obj"])
    _map_keys(inference_state["output_dict_per_obj"])
    _map_keys(inference_state["temp_output_dict_per_obj"])
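    # Continuing the example above, a container {0: v0, 1: v1, 2: v2} becomes
    # {0: v0, 1: v2}: the removed index is popped and dropped, and higher keys
    # are renumbered to stay contiguous.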

    # Step 3: For packed tensor storage, we index the remaining ids and rebuild the per-object slices.
    def _slice_state(output_dict, storage_key):
        for frame_idx, out in output_dict[storage_key].items():
            out["maskmem_features"] = out["maskmem_features"][remain_old_obj_inds]
            out["maskmem_pos_enc"] = [
                x[remain_old_obj_inds] for x in out["maskmem_pos_enc"]
            ]
            # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
            out["maskmem_pos_enc"] = self._get_maskmem_pos_enc(inference_state, out)
            out["pred_masks"] = out["pred_masks"][remain_old_obj_inds]
            out["obj_ptr"] = out["obj_ptr"][remain_old_obj_inds]
            out["object_score_logits"] = out["object_score_logits"][
                remain_old_obj_inds
            ]
            # also update the per-object slices
            self._add_output_per_object(
                inference_state, frame_idx, out, storage_key
            )

    _slice_state(inference_state["output_dict"], "cond_frame_outputs")
    _slice_state(inference_state["output_dict"], "non_cond_frame_outputs")
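    # These tensors are batched over objects along dim 0, so indexing with
    # remain_old_obj_inds drops the removed object's row (e.g. pred_masks of
    # shape (num_objs, 1, H, W) becomes (num_objs - 1, 1, H, W)).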

    # Step 4: Further collect the outputs on those frames in `obj_input_frames_inds`, which
    # could show an updated mask for objects previously occluded by the object being removed
    if need_output:
        temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
        for frame_idx in obj_input_frames_inds:
            is_cond = any(
                frame_idx in obj_temp_output_dict["cond_frame_outputs"]
                for obj_temp_output_dict in temp_output_dict_per_obj.values()
            )
            consolidated_out = self._consolidate_temp_output_across_obj(
                inference_state,
                frame_idx,
                is_cond=is_cond,
                run_mem_encoder=False,
                consolidate_at_video_res=True,
            )
            _, video_res_masks = self._get_orig_video_res_output(
                inference_state, consolidated_out["pred_masks_video_res"]
            )
            updated_frames.append((frame_idx, video_res_masks))

    return inference_state["obj_ids"], updated_frames
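
# Example usage (a minimal sketch, not part of this file; the checkpoint id,
# frame directory, and click coordinates below are illustrative placeholders,
# assuming the public SAM-2 video-predictor API):
#
#     from sam2.sam2_video_predictor import SAM2VideoPredictor
#
#     predictor = SAM2VideoPredictor.from_pretrained("facebook/sam2-hiera-large")
#     state = predictor.init_state(video_path="videos/example_frames")
#     # prompt two objects with one positive click each on frame 0
#     predictor.add_new_points_or_box(
#         state, frame_idx=0, obj_id=1, points=[[210.0, 350.0]], labels=[1]
#     )
#     predictor.add_new_points_or_box(
#         state, frame_idx=0, obj_id=2, points=[[400.0, 120.0]], labels=[1]
#     )
#     # drop object 2; strict=True raises if the id is unknown
#     remaining_ids, updated = predictor.remove_object(state, obj_id=2, strict=True)
#     # `updated` holds (frame_idx, video_res_masks) pairs re-rendered without object 2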