def build_model()

in deploy/data_placement/src/model/data_placement.py [0:0]
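
In outline: this method builds a Pyomo MILP that places each of num_groups groups on exactly group_size disks, limits (or pins, when all_targets_used) the number of targets used per disk to num_targets_per_disk, and bounds how many groups any pair of disks shares (the peer recovery traffic), optionally enforcing an exact balanced incomplete block design. The quadratic pair terms can be linearized via qlinearize; the returned model carries a constant objective, so solving it is a feasibility search.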


  def build_model(self):
    logger.info(f"{self.num_nodes=} {self.num_targets_per_disk=} {self.group_size=} {self.num_groups=} {self.qlinearize=} {self.relax_lb=} {self.relax_ub=}")
    # v >= k
    assert self.num_nodes >= self.group_size, f"{self.num_nodes=} < {self.group_size=}"
    # Fisher's inequality
    if self.balanced_incomplete_block_design:
      # b >= v
      assert self.num_groups >= self.num_nodes, f"{self.num_groups=} < {self.num_nodes=}"
      # r >= k
      assert self.num_targets_per_disk >= self.group_size, f"{self.num_targets_per_disk=} < {self.group_size=}"

    logger.info(f"{self.sum_recovery_traffic_per_failure=} {self.max_recovery_traffic_on_peer=}")
    if self.sum_recovery_traffic_per_failure < self.num_nodes - 1:
      logger.warning(f"some disks do not share recovery traffic: {self.sum_recovery_traffic_per_failure=} < {self.num_nodes=} - 1")

    logger.info(f"{self.all_targets_used=} {self.balanced_peer_traffic=}")
    logger.info(f"{self.num_targets_used=} {self.num_targets_total=}")
    if self.num_targets_used < self.num_targets_total:
      logger.warning(f"some disks have unused targets: {self.num_targets_used=} < {self.num_targets_total=}")
    else:
      assert self.num_targets_used == self.num_targets_total, f"{self.num_targets_used=} > {self.num_targets_total=}"

    model = po.ConcreteModel()
    # index sets
    model.disks = po.RangeSet(1, self.num_nodes)
    model.target_idxs = po.RangeSet(1, self.num_targets_per_disk)
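    # cross product: one (disk, target_idx) index per physical target slot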
    model.targets = model.disks * model.target_idxs
    model.groups = po.RangeSet(1, self.num_groups)

    def disk_pairs_init(model):
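      # yield each unordered pair of distinct disks exactly once (disk < peer)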
      for disk in model.disks:
        for peer in model.disks:
          if peer > disk:
            yield (disk, peer)
    model.disk_pairs = po.Set(dimen=2, initialize=disk_pairs_init)

    # variables
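    # disk_used_by_group[d, g] = 1 iff disk d hosts a target of group g.
    # disk_in_same_group[d, p, g] is an auxiliary binary standing in for the product
    # disk_used_by_group[d, g] * disk_used_by_group[p, g]; it is only created when
    # qlinearize replaces the quadratic peer-traffic terms with linear constraints.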

    model.disk_used_by_group = po.Var(model.disks, model.groups, domain=po.Binary)
    if self.qlinearize:
      model.disk_in_same_group = po.Var(model.disk_pairs, model.groups, domain=po.Binary)

    # constraints
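    # For binary x and y, the product z = x * y is modelled exactly by the linear
    # constraints z <= x, z <= y and z >= x + y - 1; the three
    # define_disk_in_same_group_* rules below encode exactly that when qlinearize is
    # enabled, otherwise calc_disk_in_same_group keeps the quadratic product.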

    def calc_disk_in_same_group(model, disk, peer, group):
      return model.disk_used_by_group[disk,group] * model.disk_used_by_group[peer,group]

    def define_disk_in_same_group_lower_bound(model, disk, peer, group):
      return model.disk_used_by_group[disk,group] + model.disk_used_by_group[peer,group] <= model.disk_in_same_group[disk,peer,group] + 1

    def define_disk_in_same_group_upper_bound1(model, disk, peer, group):
      return model.disk_in_same_group[disk,peer,group] <= model.disk_used_by_group[disk,group]

    def define_disk_in_same_group_upper_bound2(model, disk, peer, group):
      return model.disk_in_same_group[disk,peer,group] <= model.disk_used_by_group[peer,group]

    if self.qlinearize:
      model.define_disk_in_same_group_lower_bound_eqn = po.Constraint(model.disk_pairs, model.groups, rule=define_disk_in_same_group_lower_bound)
      model.define_disk_in_same_group_upper_bound1_eqn = po.Constraint(model.disk_pairs, model.groups, rule=define_disk_in_same_group_upper_bound1)
      model.define_disk_in_same_group_upper_bound2_eqn = po.Constraint(model.disk_pairs, model.groups, rule=define_disk_in_same_group_upper_bound2)

    def each_disk_has_limited_capcity(model, disk):
      if self.all_targets_used:
        return po.quicksum(model.disk_used_by_group[disk,group] for group in model.groups) == self.num_targets_per_disk
      else:
        return po.quicksum(model.disk_used_by_group[disk,group] for group in model.groups) <= self.num_targets_per_disk
    model.each_disk_has_limited_capcity_eqn = po.Constraint(model.disks, rule=each_disk_has_limited_capcity)

    def enough_disks_assigned_to_each_group(model, group):
      return po.quicksum(model.disk_used_by_group[disk,group] for disk in model.disks) == self.group_size
    model.enough_disks_assigned_to_each_group_eqn = po.Constraint(model.groups, rule=enough_disks_assigned_to_each_group)

    def calc_peer_recovery_traffic(model, disk, peer):
      if self.qlinearize:
        return po.quicksum(model.disk_in_same_group[disk,peer,group] for group in model.groups)
      else:
        return po.quicksum(calc_disk_in_same_group(model, disk, peer, group) for group in model.groups)
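    # A pair's peer recovery traffic is the number of groups the two disks share.
    # For a BIBD every pair must share exactly max_recovery_traffic_on_peer groups;
    # otherwise the count is capped at max_recovery_traffic_on_peer + relax_ub, and
    # (only when all_targets_used) kept above max_recovery_traffic_on_peer - relax_lb.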

    def peer_recovery_traffic_upper_bound(model, disk, peer):
      if self.balanced_incomplete_block_design:
        return calc_peer_recovery_traffic(model, disk, peer) == self.max_recovery_traffic_on_peer
      else:
        return calc_peer_recovery_traffic(model, disk, peer) <= self.max_recovery_traffic_on_peer + self.relax_ub
    model.peer_recovery_traffic_upper_bound_eqn = po.Constraint(model.disk_pairs, rule=peer_recovery_traffic_upper_bound)

    def peer_recovery_traffic_lower_bound(model, disk, peer):
      return calc_peer_recovery_traffic(model, disk, peer) >= max(0, self.max_recovery_traffic_on_peer - self.relax_lb)

    if self.balanced_incomplete_block_design:
      logger.info(f"lower bound not needed for balanced incomplete block design (BIBD)")
    elif self.all_targets_used:
      logger.info(f"lower bound imposed on peer traffic: {self.relax_lb=} {self.qlinearize=} {self.all_targets_used=}")
      model.peer_recovery_traffic_lower_bound_eqn = po.Constraint(model.disk_pairs, rule=peer_recovery_traffic_lower_bound)
    else:
      logger.info(f"lower bound not imposed on peer traffic: {self.relax_lb=} {self.qlinearize=} {self.all_targets_used=}")

    def total_recovery_traffic(model):
      return po.summation(model.disk_in_same_group) * 2
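    # total_recovery_traffic sums the auxiliary disk_in_same_group variables, so it can
    # only serve as an objective when qlinearize is enabled; the constant objective used
    # below makes this a pure feasibility model.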

    # model.obj = po.Objective(rule=total_recovery_traffic, sense=po.minimize)
    model.obj = po.Objective(expr=1)  # dummy objective
    return model
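
A minimal usage sketch (not part of the source): assuming dp is a configured placement object exposing this build_model method and a MILP solver such as CBC is installed locally, the model can be solved and the disk-to-group assignment read back from disk_used_by_group. The name dp and the solver choice are assumptions made here for illustration.

import pyomo.environ as po

model = dp.build_model()            # dp: configured placement object (assumed)
solver = po.SolverFactory("cbc")    # any locally available MILP solver (assumed)
solver.solve(model, tee=True)

# read the placement: which disks back each group
assignment = {
    group: [disk for disk in model.disks
            if po.value(model.disk_used_by_group[disk, group]) > 0.5]
    for group in model.groups
}
print(assignment)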