in theseus/core/objective.py [0:0]
def __init__(self, dtype: Optional[torch.dtype] = None):
# maps variable names to the variable objects
self.optim_vars: OrderedDict[str, Manifold] = OrderedDict()
# maps variable names to variable objects, for optimization variables
# that were registered when adding cost weights.
self.cost_weight_optim_vars: OrderedDict[str, Manifold] = OrderedDict()
# maps aux. variable names to the variable objects
self.aux_vars: OrderedDict[str, Variable] = OrderedDict()
# maps variable names to variables, for any kind of variable added
self._all_variables: OrderedDict[str, Variable] = OrderedDict()
# maps cost function names to the cost function objects
self.cost_functions: OrderedDict[str, CostFunction] = OrderedDict()
# maps cost weights to the cost functions that use them
# this is used when deleting a cost function to check if the cost weight's
# variables can be deleted as well (when no other cost function uses them)
self.cost_functions_for_weights: Dict[CostWeight, List[CostFunction]] = {}
# ---- The following two maps are used just to get info from
# ---- the objective; they don't affect the optimization logic.
# a map from each optimization variable to the list of Theseus functions
# it is connected to
self.functions_for_optim_vars: Dict[Manifold, List[TheseusFunction]] = {}
# a map from each aux. variable to the list of Theseus functions it is connected to
self.functions_for_aux_vars: Dict[Variable, List[TheseusFunction]] = {}
self._batch_size: Optional[int] = None
self.device: torch.device = torch.device("cpu")
self.dtype: torch.dtype = dtype or torch.get_default_dtype()
# this increases after every add/erase operation, and it's used to prevent
# an optimizer from running on a stale version of the objective (since changing
# the objective structure might break optimizer initialization).
self.current_version = 0
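# --- Illustrative sketch, not part of this file ---
# The snippet below shows how the containers above are typically populated through
# the public API (`th.Objective.add`). It assumes the standard theseus constructors
# (`th.Vector`, `th.Variable`, `th.ScaleCostWeight`, `th.AutoDiffCostFunction`) and
# the `.tensor` accessor of recent releases; names and shapes are made up for
# illustration.
import torch
import theseus as th

x = th.Vector(2, name="x")  # optimization variable
target = th.Variable(torch.zeros(1, 2), name="target")  # auxiliary variable

def error_fn(optim_vars, aux_vars):
    # residual between the optimization variable and its target, shape (batch, 2)
    return optim_vars[0].tensor - aux_vars[0].tensor

cost_fn = th.AutoDiffCostFunction(
    [x], error_fn, 2, cost_weight=th.ScaleCostWeight(1.0), aux_vars=[target], name="diff"
)

objective = th.Objective()
objective.add(cost_fn)
# After `add()`, roughly: "x" is a key of objective.optim_vars, "target" a key of
# objective.aux_vars, "diff" a key of objective.cost_functions, the cost weight maps
# to the "diff" cost function in objective.cost_functions_for_weights, and
# objective.current_version has been incremented, so a previously set-up optimizer
# can detect that the objective's structure changed.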