in grolp/gen/ff_planner/relax.c [1962:2266]
/* Achieve the goals set at level time >= 1 of the relaxed planning graph.
 *
 * Steps:
 *   1. translate artificial numeric goals into real ones;
 *   2. (at time == 1) snapshot the numeric goals for helpful-actions;
 *   3. for each numeric goal: either propagate it one level down unchanged,
 *      satisfy it with a single strong-enough assign effect, or apply
 *      increase effects until the residual requirement holds at time-1,
 *      then propagate that residual requirement down;
 *   4. for each remaining logic goal at this level, select a supporting
 *      effect -- minimizing RPG cost, with level-based tie-breaking in the
 *      cost-minimizing-rplans setting.
 *
 * Selected effects are recorded in the relaxed plan (gef_conn[].in_plan,
 * lin_plan_E), their operators selected via select_op(), and their
 * preconditions introduced as new subgoals via introduce_pc_goals().
 */
void achieve_goals( int time )

{

  int i, j, k, ft, min_e, ef;
  float val;
  float preRPGcost, min_preRPGcost;
  int preRPGlevel, min_preRPGlevel, min_ownlevel;

  if ( gcmd_line.display_info == 127 ) {
    printf("\nselecting at step %3d: ", time-1);
  }

  /* before we start, we must translate the artificial goals
   * into real goals.
   */
  for ( i = gnum_real_fl_conn; i < gnum_fl_conn; i++ ) {
    if ( lf_goals_comp_at[time][i] == IGUAL ) {
      /* this one isn't needed
       */
      continue;
    }
    enforce_artificial_goal( i, time );
  }

  /* for helpful actions:
   * remember at time 1 what the goals were.
   */
  if ( time == 1 ) {
    for ( i = 0; i < gnum_real_fl_conn; i++ ) {
      lHcomp[i] = lf_goals_comp_at[time][i];
      lHc[i] = lf_goals_c_at[time][i];
    }
  }

  /* first, push the numeric goals at this level so far down that
   * the requirement for each of them can be fulfilled in the previous
   * level.
   */
  for ( i = 0; i < gnum_real_fl_conn; i++ ) {
    if ( lf_goals_comp_at[time][i] == IGUAL ) {
      continue;
    }
    if ( gfl_conn[i].def[time-1] &&
	 number_comparison_holds( lf_goals_comp_at[time][i],
				  gfl_conn[i].level[time-1],
				  lf_goals_c_at[time][i] ) ) {
      /* this can be solved one step earlier.
       * propagate it downwards and mark as OK.
       */
      update_f_goal( i, time-1, lf_goals_comp_at[time][i], lf_goals_c_at[time][i] );
      lf_goals_comp_at[time][i] = IGUAL;
      continue;
    }
    /* if there is a good assigner, then take it.
     */
    for ( j = 0; j < gfl_conn[i].num_AS; j++ ) {
      ef = gfl_conn[i].AS[j];
      if ( !LESS( gef_conn[ef].level, time ) ) {
	/* we allow any effect that's already there
	 */
	continue;
      }
      if ( gfl_conn[i].AS_fl_[j] != -1 &&
	   !gfl_conn[gfl_conn[i].AS_fl_[j]].def[time-1] ) {
	/* accesses an undefined value.
	 */
	continue;
      }
      /* value the assigner would produce at time-1
       */
      if ( gfl_conn[i].AS_fl_[j] != -1 ) {
	val = gfl_conn[gfl_conn[i].AS_fl_[j]].level[time-1] + gfl_conn[i].AS_c[j];
      } else {
	val = gfl_conn[i].AS_c[j];
      }
      if ( !number_comparison_holds( lf_goals_comp_at[time][i],
				     val,
				     lf_goals_c_at[time][i] ) ) {
	/* that one is not strong enough.
	 */
	continue;
      }
      break;
    }
    if ( j < gfl_conn[i].num_AS ) {
      /* ef is an assigner that is strong enough and already there.
       */
      if ( gef_conn[ef].in_plan == time - 1 ) {
	printf("\n\nassigner already selected, nevertheless goal still there\n\n");
	exit( 1 );
      } else {
	if ( gef_conn[ef].in_plan == INFINITY ) {
	  lin_plan_E[lnum_in_plan_E++] = ef;
	}
	gef_conn[ef].in_plan = time - 1;
      }
      /* now select the resp. op at this level, if necessary
       */
      select_op( time, gef_conn[ef].op );
      /* now mark the benefits of that effect, introducing
       * also the fl_ level enforcement goals for each effect
       * that is useful for solving a goal at time: in particular,
       * this will be the one we have just selected.
       */
      introduce_benefits_and_enforcements( time, ef );
      /* now introduce the new goals
       */
      introduce_pc_goals( time, ef );
      /* care about next fluent
       */
      continue;
    }
    /* debug...
     */
    if ( !gfl_conn[i].def[time-1] ) {
      printf("\n\nall assigners applied yet goal not fulfilled - undefined below.\n\n");
      exit( 1 );
    }
    /* no good assigner available. thus, push the goal at this level so far
     * down that its requirement can be fulfilled in the previous level.
     */
    for ( j = 0; j < gfl_conn[i].num_IN; j++ ) {
      /* go through increasers in constant quantity order top to
       * bottom (see inst_final.c);
       */
      ef = gfl_conn[i].IN[j];
      if ( !LESS( gef_conn[ef].level, time ) ) {
	continue;
      }
      if ( gfl_conn[i].IN_fl_[j] != -1 &&
	   !gfl_conn[gfl_conn[i].IN_fl_[j]].def[time-1] ) {
	/* accesses an undefined fluent.
	 */
	continue;
      }
      /* amount this increaser adds at time-1
       */
      if ( gfl_conn[i].IN_fl_[j] != -1 ) {
	val = gfl_conn[gfl_conn[i].IN_fl_[j]].level[time-1] + gfl_conn[i].IN_c[j];
      } else {
	val = gfl_conn[i].IN_c[j];
      }
      if ( val <= 0 ) {
	/* that one does not help us at all.
	 */
	continue;
      }
      /* if ef is already selected here, we can not use it anymore;
       * else, record it as selected.
       */
      if ( gef_conn[ef].in_plan == time - 1 ) {
	continue;
      } else {
	if ( gef_conn[ef].in_plan == INFINITY ) {
	  lin_plan_E[lnum_in_plan_E++] = ef;
	}
	gef_conn[ef].in_plan = time - 1;
      }
      /* do the usual stuff...
       */
      select_op( time, gef_conn[ef].op );
      introduce_benefits_and_enforcements( time, ef );
      introduce_pc_goals( time, ef );
      /* stop as soon as
       * goal can be fulfilled one step below.
       */
      if ( number_comparison_holds( lf_goals_comp_at[time][i],
				    gfl_conn[i].level[time-1],
				    lf_goals_c_at[time][i] ) ) {
	break;
      }
    }
    /* now propagate the revised goal downward, and say we are finished with
     * this one.
     */
    update_f_goal( i, time-1, lf_goals_comp_at[time][i], lf_goals_c_at[time][i] );
    lf_goals_comp_at[time][i] = IGUAL;
    /* debug...
     */
    if ( !number_comparison_holds( lf_goals_comp_at[time-1][i],
				   gfl_conn[i].level[time-1],
				   lf_goals_c_at[time-1][i] ) ) {
      printf("\n\nall increasers applied yet goal not fulfilled.\n\n");
      exit( 1 );
    }
  }/* fluents at level time */

  /* now achieve also the remaining logic goals here.
   */
  for ( i = 0; i < lnum_goals_at[time]; i++ ) {
    ft = lgoals_at[time][i];
    if ( gft_conn[ft].is_true == time ) {
      /* fact already added by prev now selected op
       */
      continue;
    }
    if ( gcmd_line.debug ) {
      printf("\ngoal at level %d: ", time);
      print_ft_name(ft);
    }
    if ( !gcost_rplans ) {
      /* even in the non-cost-minimizing-rplans setting,
       * choose the actions by their cost!
       * ... can only make the plan better...
       */
      min_preRPGcost = -1;
      min_e = -1;
      for ( j = 0; j < gft_conn[ft].num_A; j++ ) {
	ef = gft_conn[ft].A[j];
	if ( gef_conn[ef].level != time - 1 ) continue;
	preRPGcost = gef_conn[ef].RPGcost;
	if ( min_preRPGcost == -1 || preRPGcost < min_preRPGcost ) {
	  min_preRPGcost = preRPGcost;
	  min_e = ef;
	}
      }
    } else {
      min_preRPGcost = -1;
      min_preRPGlevel = -1;
      min_ownlevel = -1;
      min_e = -1;
      for ( j = 0; j < gft_conn[ft].num_A; j++ ) {
	ef = gft_conn[ft].A[j];
	/* in the cost setting, the level of the first supporter
	 * may actually be less than level-1... !
	 * (may have several smaller cost levels in between)
	 */
	if ( gef_conn[ef].level == -1 || gef_conn[ef].level >= time ) continue;
	preRPGcost = gop_conn[gef_conn[ef].op].cost;
	preRPGlevel = 0;
	for ( k = 0; k < gef_conn[ef].num_PC; k++ ) {
	  preRPGcost += gft_conn[gef_conn[ef].PC[k]].RPGcost;
	  /* not sure if this is important once 0-cost actions are
	   * derived preds... anyway: with 0-cost acts,
	   * a fact may be achieved in ini state and still have same cost
	   * as one that must be achieved still... hence prefer lower
	   * facts (also lower actions, see below...)
	   */
	  preRPGlevel += gft_conn[gef_conn[ef].PC[k]].level;
	}
	if ( gcmd_line.debug ) {
	  printf("\npossible achiever: ");
	  print_op_name(gef_conn[ef].op);
	  printf(" RPGcost: %f, own op-cost: %f, own level: %d, RPGlevel: %d",
		 preRPGcost, gop_conn[gef_conn[ef].op].cost, gef_conn[ef].level, preRPGlevel);
	}
	if ( min_preRPGcost == -1 || preRPGcost < min_preRPGcost ) {
	  if ( gcmd_line.debug ) {
	    printf("\nTake-first!");
	  }
	  min_preRPGcost = preRPGcost;
	  min_preRPGlevel = preRPGlevel;
	  min_ownlevel = gef_conn[ef].level;
	  min_e = ef;
	  continue;
	}
	/* cost tie: prefer the achiever with lower summed precondition
	 * levels, or with lower own level.
	 * BUGFIX: this condition used "=" (assignment) instead of "==",
	 * which both corrupted preRPGcost and made the branch fire on
	 * the wrong condition.
	 */
	if ( preRPGcost == min_preRPGcost &&
	     (preRPGlevel < min_preRPGlevel || min_ownlevel > gef_conn[ef].level) ) {
	  if ( gcmd_line.debug ) {
	    printf("\nTake-later!");
	  }
	  min_preRPGcost = preRPGcost;
	  min_preRPGlevel = preRPGlevel;
	  /* BUGFIX: keep the own-level tie-break state consistent with
	   * the newly selected effect; it was left stale before.
	   */
	  min_ownlevel = gef_conn[ef].level;
	  min_e = ef;
	}
      }
    }
    ef = min_e;
    if ( min_e == -1 ) {
      printf("\nDEBUG ME: no supporting effect found\n");
      exit( 1 );
    }
    /* if ef is already selected, we can not use it anymore;
     * else, record it as selected.
     *
     * actually it can't happen here that the ef
     * is already selected as then the goal is true already.
     * nevermind.
     */
    if ( gef_conn[ef].in_plan == time - 1 ) {
      continue;
    } else {
      if ( gef_conn[ef].in_plan == INFINITY ) {
	lin_plan_E[lnum_in_plan_E++] = ef;
      }
      gef_conn[ef].in_plan = time - 1;
    }
    select_op( time, gef_conn[ef].op );
    introduce_benefits_and_enforcements( time, ef );
    introduce_pc_goals( time, ef );
  }

}