def _pack_job()

in src/hpc/autoscale/job/demandcalculator.py [0:0]


    def _pack_job(self, job: Job) -> Result:
        """
        Allocate nodes for *job*, packing its tasks onto available capacity.

        1) will it ever fit? - check num nodes with any capacity
        2) does it have the proper resources? bucket.match(job.resources)
        3) order them
        4) tell the bucket to allocate X nodes - let the bucket figure out
           what is new and what is not.

        Returns:
            AllocationResult: "success" with the allocated nodes when at
            least one node was allocated, otherwise "Failed" carrying the
            reasons reported by _handle_allocate.
        """
        # TODO break non-exclusive
        # Precondition: a job with nothing left to run should never reach here.
        assert job.iterations_remaining > 0

        allocated_nodes: List[Node] = []
        # Snapshot the requested slot count up front; _handle_allocate may
        # mutate job.iterations_remaining as it places tasks.
        slots_to_allocate = job.iterations_remaining

        failure_reasons = self._handle_allocate(
            job, allocated_nodes, all_or_nothing=False
        )

        # We allocated at least some tasks. NOTE(review): this reports the
        # full original request as slots_allocated even when the placement
        # was only partial (all_or_nothing=False) — confirm that is intended.
        if allocated_nodes:
            return AllocationResult(
                "success", nodes=allocated_nodes, slots_allocated=slots_to_allocate
            )

        return AllocationResult("Failed", reasons=failure_reasons)