in azure-slurm/slurmcc/cli.py [0:0]
from math import ceil
from typing import List, TextIO

# Module-local aliases assumed from the call sites below; the actual import
# names in slurmcc may differ.
from . import partition as partitionlib
from . import util as slutil


def _generate_gres_conf(partitions: List[partitionlib.Partition], writer: TextIO):
    """Write gres.conf entries describing each partition's GPU devices."""
    for partition in partitions:
        if partition.node_list is None:
            raise RuntimeError(
                "No nodes found for nodearray %s. Please run 'azslurm create_nodes' first!"
                % partition.nodearray
            )
        # Each placement group (VM scale set) holds at most max_scaleset_size
        # nodes, so the partition's nodes are emitted one group at a time.
        num_placement_groups = int(
            ceil(float(partition.max_vm_count) / partition.max_scaleset_size)
        )
        all_nodes = sorted(
            slutil.from_hostlist(partition.node_list),
            key=slutil.get_sort_key_func(partition.is_hpc),
        )
        for pg_index in range(num_placement_groups):
            start = pg_index * partition.max_scaleset_size
            end = min(
                partition.max_vm_count, (pg_index + 1) * partition.max_scaleset_size
            )
            subset_of_nodes = all_nodes[start:end]
            if not subset_of_nodes:
                continue
            # Collapse the expanded node names back into a Slurm hostlist
            # expression (e.g. hpc-1,hpc-2,hpc-3 -> hpc-[1-3]).
            node_list = slutil.to_hostlist(",".join(subset_of_nodes))
            # Emit one gres.conf line per placement group so Slurm can map
            # each node's GPUs to their device files.
            if partition.gpu_count:
                gpu_devices = _generate_gpu_devices(partition)
                writer.write(
                    "Nodename={} Name=gpu Count={} File={}".format(
                        node_list, partition.gpu_count, gpu_devices
                    )
                )
                writer.write("\n")