// in sql/iterators/window_iterators.cc [652:1513]
/**
  Try to prepare the next buffered row of the current partition as an output
  row, evaluating all window functions (framing and non-framing) for it.

  The function picks the cheapest applicable evaluation strategy, in order of
  preference:
  - static aggregates: the frame equals the whole partition, so framing WFs
    are computed once (for row 1) and reused for all later rows;
  - optimized ROW / RANGE frames ("inversion"): the previous row's aggregate
    state is adjusted by removing rows that left the frame and adding rows
    that entered it;
  - the general method: re-scan every row of the frame for each output row.

  @param      thd                  Current session.
  @param      param                Temp table parameters; param->m_window is
                                   the window being evaluated and
                                   param->items_to_copy the functions to
                                   compute.
  @param      new_partition_or_eof True if all rows of the current partition
                                   have been buffered (the next partition's
                                   first row was seen, or end of input).
  @param[out] output_row_ready     Set to true iff an output row was
                                   prepared; false if more rows must be
                                   buffered first.

  @returns true on error, false on success (including "no row ready yet").
*/
bool process_buffered_windowing_record(THD *thd, Temp_table_param *param,
                                       const bool new_partition_or_eof,
                                       bool *output_row_ready) {
  DBUG_TRACE;
  /**
    The current window
  */
  Window &w = *param->m_window;
  /**
    The frame
  */
  const PT_frame *f = w.frame();

  *output_row_ready = false;

  /**
    This is the row we are currently considering for processing and getting
    ready for output, cf. output_row_ready.
  */
  const int64 current_row = w.last_row_output() + 1;

  /**
    This is the row number of the last row we have buffered so far.
  */
  const int64 last_rowno_in_cache = w.last_rowno_in_cache();

  if (current_row > last_rowno_in_cache)  // already sent all buffered rows
    return false;

  /**
    If true, use code path for static aggregates
  */
  const bool static_aggregate = w.static_aggregates();

  /**
    If true, use code path for ROW bounds with optimized strategy
  */
  const bool row_optimizable = w.optimizable_row_aggregates();

  /**
    If true, use code path for RANGE bounds with optimized strategy
  */
  const bool range_optimizable = w.optimizable_range_aggregates();

  // These three strategies are mutually exclusive:
  assert((static_aggregate + row_optimizable + range_optimizable) <= 1);

  /**
    We need to evaluate FIRST_VALUE, or optimized MIN/MAX
  */
  const bool have_first_value = w.opt_first_row();

  /**
    We need to evaluate LAST_VALUE, or optimized MIN/MAX
  */
  const bool have_last_value = w.opt_last_row();

  /**
    We need to evaluate NTH_VALUE
  */
  const Window::st_nth &have_nth_value = w.opt_nth_row();

  /**
    We need to evaluate LEAD/LAG rows
  */
  const Window::st_lead_lag &have_lead_lag = w.opt_lead_lag();

  /**
    True if an inversion optimization strategy is used. For common
    code paths.
  */
  const bool optimizable = (row_optimizable || range_optimizable);

  /**
    RANGE was specified as the bounds unit for the frame
  */
  const bool range_frame = f->m_query_expression == WFU_RANGE;

  const bool range_to_current_row =
      range_frame && f->m_to->m_border_type == WBT_CURRENT_ROW;

  const bool range_from_first_to_current_row =
      range_to_current_row &&
      f->m_from->m_border_type == WBT_UNBOUNDED_PRECEDING;

  /**
    UNBOUNDED FOLLOWING was specified for the frame
  */
  bool unbounded_following = false;

  /**
    Row_number of the first row in the frame. Invariant: lower_limit >= 1
    after initialization.
  */
  int64 lower_limit = 1;

  /**
    Row_number of the logically last row to be computed in the frame, may be
    higher than the number of rows in the partition. The actual highest row
    number is computed later, see upper below.
  */
  int64 upper_limit = 0;

  /**
    needs peerset of current row to evaluate a wf for the current row.
  */
  bool needs_peerset = w.needs_peerset();

  /**
    needs the last peer of the current row within a frame.
  */
  const bool needs_last_peer_in_frame = w.needs_last_peer_in_frame();

  DBUG_PRINT("enter", ("current_row: %" PRId64 ", new_partition_or_eof: %d",
                       current_row, new_partition_or_eof));

  /* Compute lower_limit, upper_limit and possibly unbounded_following */
  if (f->m_query_expression == WFU_RANGE) {
    lower_limit = w.first_rowno_in_range_frame();
    /*
      For RANGE frame, we first buffer all the rows in the partition due to the
      need to find last peer before first can be processed. This can be
      optimized,
      FIXME.
    */
    upper_limit = INT64_MAX;
  } else {
    assert(f->m_query_expression == WFU_ROWS);
    // Tracks whether the lower border overflowed int64 (see
    // WBT_VALUE_FOLLOWING below): if both borders overflow, the frame is
    // treated as empty.
    bool lower_within_limits = true;
    // Determine lower border, handle wraparound for unsigned value:
    int64 border =
        f->m_from->border() != nullptr ? f->m_from->border()->val_int() : 0;
    if (border < 0) {
      // A negative val_int() here is the wrapped-around representation of a
      // huge unsigned offset; clamp to "infinitely far".
      border = INT64_MAX;
    }
    switch (f->m_from->m_border_type) {
      case WBT_CURRENT_ROW:
        lower_limit = current_row;
        break;
      case WBT_VALUE_PRECEDING:
        /*
          Example: 1 PRECEDING and current row== 2 => 1
                                   current row== 1 => 1
                                   current row== 3 => 2
        */
        lower_limit = std::max<int64>(current_row - border, 1);
        break;
      case WBT_VALUE_FOLLOWING:
        /*
          Example: 1 FOLLOWING and current row== 2 => 3
                                   current row== 1 => 2
                                   current row== 3 => 4
        */
        if (border <= (std::numeric_limits<int64>::max() - current_row))
          lower_limit = current_row + border;
        else {
          lower_within_limits = false;
          lower_limit = INT64_MAX;
        }
        break;
      case WBT_UNBOUNDED_PRECEDING:
        lower_limit = 1;
        break;
      case WBT_UNBOUNDED_FOLLOWING:
        assert(false);
        break;
    }

    // Determine upper border, handle wraparound for unsigned value:
    border = f->m_to->border() != nullptr ? f->m_to->border()->val_int() : 0;
    if (border < 0) {
      border = INT64_MAX;
    }
    {
      switch (f->m_to->m_border_type) {
        case WBT_CURRENT_ROW:
          // we always have enough cache
          upper_limit = current_row;
          break;
        case WBT_VALUE_PRECEDING:
          upper_limit = current_row - border;
          break;
        case WBT_VALUE_FOLLOWING:
          if (border <= (std::numeric_limits<longlong>::max() - current_row))
            upper_limit = current_row + border;
          else {
            upper_limit = INT64_MAX;
            /*
              If both the border specifications are beyond numeric limits,
              the window frame is empty.
            */
            if (f->m_from->m_border_type == WBT_VALUE_FOLLOWING &&
                !lower_within_limits) {
              // lower > upper ensures no row is ever considered in frame.
              lower_limit = INT64_MAX;
              upper_limit = INT64_MAX - 1;
            }
          }
          break;
        case WBT_UNBOUNDED_FOLLOWING:
          unbounded_following = true;
          upper_limit = INT64_MAX;  // need whole partition
          break;
        case WBT_UNBOUNDED_PRECEDING:
          assert(false);
          break;
      }
    }
  }

  /*
    Determine if, given our current read and buffering state, we have enough
    buffered rows to compute an output row.

    Example: ROWS BETWEEN 1 PRECEDING and 3 FOLLOWING

    State:
    +---+-------------------------------+
    |   | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
    +---+-------------------------------+
    ^    1?         ^
    lower      last_rowno_in_cache
    (0)             (4)

    This state means:

    We have read 4 rows, cf. value of last_rowno_in_cache.
    We can now process row 1 since both lower (1-1=0) and upper (1+3=4) are less
    than or equal to 4, the last row in the cache so far.

    We can not process row 2 since: !(4 >= 2 + 3) and we haven't seen the last
    row in partition which means that the frame may not be full yet.

    If we have a window function that needs to know the partition cardinality,
    we also must buffer all records of the partition before processing.
  */
  if (!((lower_limit <= last_rowno_in_cache &&
         upper_limit <= last_rowno_in_cache &&
         !w.needs_partition_cardinality()) || /* we have cached enough rows */
        new_partition_or_eof /* we have cached all rows */))
    return false;  // We haven't read enough rows yet, so return

  w.set_rowno_in_partition(current_row);

  /*
    By default, we must:
    - if we are the first row of a partition, reset values for both
    non-framing and framing WFs
    - reset values for framing WFs (new current row = new frame = new
    values for WFs).

    Both resettings require restoring the row from the FB. And, as we have
    restored this row, we use this opportunity to compute non-framing
    does-not-need-partition-cardinality functions.

    The meaning of if statements below is that in some cases, we can avoid
    this default behaviour.

    For example, if we have static framing WFs, and this is not the
    partition's first row: the previous row's framing-WF values should be
    reused without change, so all the above resetting must be skipped;
    so row restoration isn't immediately needed; that and the computation of
    non-framing functions is then done in another later block of code.
    Likewise, if we have framing WFs with inversion, and it's not the
    first row of the partition, we must skip the resetting of framing WFs.
  */
  if (!static_aggregate || current_row == 1) {
    /*
      We need to reset functions. As part of it, their comparators need to
      update themselves to use the new row as base line. So, restore it:
    */
    if (bring_back_frame_row(thd, &w, current_row,
                             Window_retrieve_cached_row_reason::CURRENT))
      return true;

    if (current_row == 1)  // new partition
      reset_non_framing_wf_state(param->items_to_copy);
    if (!optimizable || current_row == 1)  // new frame
    {
      reset_framing_wf_states(param->items_to_copy);
    }  // else we remember state and update it for row 2..N

    /* E.g. ROW_NUMBER, RANK, DENSE_RANK */
    if (copy_funcs(param, thd, CFT_WF_NON_FRAMING)) return true;
    if (!optimizable || current_row == 1) {
      /*
        So far frame is empty; set up a flag which makes framing WFs set
        themselves to NULL in OUT.
      */
      w.set_do_copy_null(true);
      if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;
      w.set_do_copy_null(false);
    }  // else aggregates keep value of previous row, and we'll do inversion
  }

  if (range_frame) {
    /* establish current row as base-line for RANGE computation */
    w.reset_order_by_peer_set();
  }

  bool first_row_in_range_frame_seen = false;

  /**
    For optimized strategy we want to save away the previous aggregate result
    and reuse in later round by inversion. This keeps track of whether we
    managed to compute results for this current row (result are "primed"), so we
    can use inversion in later rows. Cf Window::m_aggregates_primed.
  */
  bool optimizable_primed = false;

  /**
    Possible adjustment of the logical upper_limit: no rows exist beyond
    last_rowno_in_cache.
  */
  const int64 upper = min(upper_limit, last_rowno_in_cache);

  /*
    Optimization: we evaluate the peer set of the current row potentially
    several times. Window functions like CUME_DIST sets needs_peerset and is
    evaluated last, so if any other wf evaluation led to finding the peer set
    of the current row, make a note of it, so we can skip doing it twice.
  */
  bool have_peers_current_row = false;

  if ((static_aggregate && current_row == 1) ||   // skip for row > 1
      (optimizable && !w.aggregates_primed()) ||  // skip for 2..N in frame
      (!static_aggregate && !optimizable))        // normal: no skip
  {
    // Compute and output current_row.
    int64 rowno;        ///< iterates over rows in a frame
    int64 skipped = 0;  ///< RANGE: # of visited rows seen before the frame

    if (!range_frame) {
      w.set_first_rowno_in_rows_frame(lower_limit);
    }

    for (rowno = lower_limit; rowno <= upper; rowno++) {
      if (optimizable) optimizable_primed = true;

      /*
        Set window frame state before computing framing window function.
        'n' is the number of row #rowno relative to the beginning of the
        frame, 1-based.
      */
      const int64 n = rowno - lower_limit + 1 - skipped;

      w.set_rowno_in_frame(n);

      const Window_retrieve_cached_row_reason reason =
          (n == 1 ? Window_retrieve_cached_row_reason::FIRST_IN_FRAME
                  : Window_retrieve_cached_row_reason::LAST_IN_FRAME);
      /*
        Hint maintenance: we will normally read past last row in frame, so
        prepare to resurrect that hint once we do.
      */
      w.save_pos(reason);

      /* Set up the non-wf fields for aggregating to the output row. */
      if (bring_back_frame_row(thd, &w, rowno, reason)) return true;

      if (range_frame) {
        if (w.before_frame()) {
          // Row is below the frame's lower RANGE bound: not part of frame.
          skipped++;
          continue;
        }
        if (w.after_frame()) {
          w.set_last_rowno_in_range_frame(rowno - 1);

          if (!first_row_in_range_frame_seen)
            // empty frame, optimize starting point for next row
            w.set_first_rowno_in_range_frame(rowno);
          w.restore_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME);
          break;
        }  // else: row is within range, process

        if (!first_row_in_range_frame_seen) {
          /*
            Optimize starting point for next row: monotonic increase in frame
            bounds
          */
          first_row_in_range_frame_seen = true;
          w.set_first_rowno_in_range_frame(rowno);
        }
      }

      /*
        Compute framing WFs. For ROWS frame, "upper" is exactly the frame's
        last row; but for the case of RANGE
        we can't be sure that this is indeed the last row, but we must make a
        pessimistic assumption. If it is not the last, the final row
        calculation, if any, as for AVG, will be repeated for the next peer
        row(s).
        For optimized MIN/MAX [1], we do this to make sure we have a non-NULL
        last value (if one exists) for the initial frame.
      */
      const bool setstate =
          (rowno == upper || range_frame || have_last_value /* [1] */);
      if (setstate)
        w.set_is_last_row_in_frame(true);  // temporary state for next call

      // Accumulate frame's row into WF's value for current_row:
      if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;

      if (setstate) w.set_is_last_row_in_frame(false);  // undo temporary state
    }

    if (range_frame || rowno > upper)  // no more rows in partition
    {
      if (range_frame) {
        if (!first_row_in_range_frame_seen) {
          /*
            Empty frame: optimize starting point for next row: monotonic
            increase in frame bounds
          */
          w.set_first_rowno_in_range_frame(rowno);
        }
      }
      w.set_last_rowno_in_range_frame(rowno - 1);
      if (range_to_current_row) {
        // The frame's last row is also the last peer of the current row.
        w.set_last_rowno_in_peerset(w.last_rowno_in_range_frame());
        have_peers_current_row = true;
      }
    }  // else: we already set it before breaking out of loop
  }

  /*
    While the block above was for the default execution method, below we have
    alternative blocks for optimized methods: static framing WFs and
    inversion, when current_row isn't first; i.e. we can use the previous
    row's value of framing WFs as a base.
    In the row buffer of OUT, after the previous row was emitted, these values
    of framing WFs are still present, as no copy_funcs(CFT_WF_FRAMING) was run
    for our new row yet.
  */
  if (static_aggregate && current_row != 1) {
    /* Set up the correct non-wf fields for copying to the output row */
    if (bring_back_frame_row(thd, &w, current_row,
                             Window_retrieve_cached_row_reason::CURRENT))
      return true;

    /* E.g. ROW_NUMBER, RANK, DENSE_RANK */
    if (copy_funcs(param, thd, CFT_WF_NON_FRAMING)) return true;
  } else if (row_optimizable && w.aggregates_primed()) {
    /*
      Rows 2..N in partition: we still have state from previous current row's
      frame computation, now adjust by subtracting row 1 in frame (lower_limit)
      and adding new, if any, final frame row
    */
    const bool remove_previous_first_row =
        (lower_limit > 1 && lower_limit - 1 <= last_rowno_in_cache);
    const bool new_last_row =
        (upper_limit <= upper &&
         !unbounded_following /* all added when primed */);
    const int64 rows_in_frame = upper - lower_limit + 1;

    w.set_first_rowno_in_rows_frame(lower_limit);

    /* possibly subtract: early in partition there may not be any */
    if (remove_previous_first_row) {
      /*
        Check if the row leaving the frame is the last row in the peerset
        within a frame. If true, set is_last_row_in_peerset_within_frame
        to true.
        Used by JSON_OBJECTAGG to remove the key/value pair only
        when it is the last row having that key value.
      */
      if (needs_last_peer_in_frame) {
        int64 rowno = lower_limit - 1;
        bool is_last_row_in_peerset = true;
        if (rowno < upper) {
          if (bring_back_frame_row(
                  thd, &w, rowno,
                  Window_retrieve_cached_row_reason::LAST_IN_PEERSET))
            return true;
          // Establish current row as base-line for peer set.
          w.reset_order_by_peer_set();
          /*
            Check if the next row is a peer to this row. If not
            set current row as the last row in peerset within
            frame.
          */
          rowno++;
          if (rowno < upper) {
            if (bring_back_frame_row(
                    thd, &w, rowno,
                    Window_retrieve_cached_row_reason::LAST_IN_PEERSET))
              return true;
            // Compare only the first order by item.
            if (!w.in_new_order_by_peer_set(false))
              is_last_row_in_peerset = false;
          }
        }
        if (is_last_row_in_peerset)
          w.set_is_last_row_in_peerset_within_frame(true);
      }

      if (bring_back_frame_row(
              thd, &w, lower_limit - 1,
              Window_retrieve_cached_row_reason::FIRST_IN_FRAME))
        return true;

      // Subtract the leaving row's contribution from the aggregates.
      w.set_inverse(true);
      if (!new_last_row) {
        w.set_rowno_in_frame(current_row - lower_limit + 1);
        if (rows_in_frame > 0)
          w.set_is_last_row_in_frame(true);  // do final comp., e.g. div in AVG

        if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;
        w.set_is_last_row_in_frame(false);  // undo temporary states
      } else {
        if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;
      }
      w.set_is_last_row_in_peerset_within_frame(false);
      w.set_inverse(false);
    }

    if (have_first_value && (lower_limit <= last_rowno_in_cache)) {
      // We have seen first row of frame, FIRST_VALUE can be computed:
      if (bring_back_frame_row(
              thd, &w, lower_limit,
              Window_retrieve_cached_row_reason::FIRST_IN_FRAME))
        return true;

      w.set_rowno_in_frame(1);

      /*
        Framing WFs which accumulate (SUM, COUNT, AVG) shouldn't accumulate
        this row again as they have done so already. Evaluate only
        X_VALUE/MIN/MAX.
      */
      if (copy_funcs(param, thd, CFT_WF_USES_ONLY_ONE_ROW)) return true;
    }

    if (have_last_value && !new_last_row) {
      // We have seen last row of frame, LAST_VALUE can be computed:
      if (bring_back_frame_row(
              thd, &w, upper, Window_retrieve_cached_row_reason::LAST_IN_FRAME))
        return true;

      w.set_rowno_in_frame(current_row - lower_limit + 1);

      if (rows_in_frame > 0) w.set_is_last_row_in_frame(true);

      if (copy_funcs(param, thd, CFT_WF_USES_ONLY_ONE_ROW)) return true;

      w.set_is_last_row_in_frame(false);
    }

    if (!have_nth_value.m_offsets.empty()) {
      int fno = 0;
      for (Window::st_offset nth : have_nth_value.m_offsets) {
        if (lower_limit + nth.m_rowno - 1 <= upper) {
          if (bring_back_frame_row(
                  thd, &w, lower_limit + nth.m_rowno - 1,
                  Window_retrieve_cached_row_reason::MISC_POSITIONS, fno++))
            return true;

          w.set_rowno_in_frame(nth.m_rowno);

          if (copy_funcs(param, thd, CFT_WF_USES_ONLY_ONE_ROW)) return true;
        }
      }
    }

    if (new_last_row)  // Add new last row to framing WF's value
    {
      if (bring_back_frame_row(
              thd, &w, upper, Window_retrieve_cached_row_reason::LAST_IN_FRAME))
        return true;

      w.set_rowno_in_frame(upper - lower_limit + 1)
          .set_is_last_row_in_frame(true);  // temporary states for next copy
      if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;

      w.set_is_last_row_in_frame(false);  // undo temporary states
    }
  } else if (range_optimizable && w.aggregates_primed()) {
    /*
      Peer sets 2..N in partition: we still have state from previous current
      row's frame computation, now adjust by possibly subtracting rows no
      longer in frame and possibly adding new rows now within range.
    */
    const int64 prev_last_rowno_in_frame = w.last_rowno_in_range_frame();
    const int64 prev_first_rowno_in_frame = w.first_rowno_in_range_frame();

    /*
      As an optimization, if:
      - RANGE frame specification ends at CURRENT ROW and
      - current_row belongs to frame of previous row,
      then both rows are peers, so have the same frame: nothing changes.
    */
    if (range_to_current_row && current_row >= prev_first_rowno_in_frame &&
        current_row <= prev_last_rowno_in_frame) {
      // Peer set should already have been determined:
      assert(w.last_rowno_in_peerset() >= current_row);
      have_peers_current_row = true;
    } else {
      /**
        Whether we know the start of the frame yet. The a priori setting is
        inherited from the previous current row.
      */
      bool found_first =
          (prev_first_rowno_in_frame <= prev_last_rowno_in_frame);
      int64 new_first_rowno_in_frame = prev_first_rowno_in_frame;  // a priori

      int64 inverted = 0;  // Number of rows inverted when moving frame
      int64 rowno;         // Partition relative, loop counter

      if (range_from_first_to_current_row) {
        /*
          No need to locate frame's start, it's first row of partition. No
          need to recompute FIRST_VALUE, it's same as for previous row.
          So we just have to accumulate new rows.
        */
        assert(current_row > prev_last_rowno_in_frame && lower_limit == 1 &&
               prev_first_rowno_in_frame == 1 && found_first);
      } else {
        for (rowno = lower_limit;
             (rowno <= upper &&
              prev_first_rowno_in_frame <= prev_last_rowno_in_frame);
             rowno++) {
          /* Set up the non-wf fields for aggregating to the output row. */
          if (bring_back_frame_row(
                  thd, &w, rowno,
                  Window_retrieve_cached_row_reason::FIRST_IN_FRAME))
            return true;

          if (w.before_frame()) {
            w.set_inverse(true)
                .
                /*
                  The next setting sets the logical last row number in the frame
                  after inversion, so that final actions can do the right thing,
                  e.g.  AVG needs to know the updated cardinality. The
                  aggregates consults m_rowno_in_frame for that, so set it
                  accordingly.
                */
                set_rowno_in_frame(prev_last_rowno_in_frame -
                                   prev_first_rowno_in_frame + 1 - ++inverted)
                .set_is_last_row_in_frame(true);  // pessimistic assumption

            // Set the current row as the last row in the peerset.
            w.set_is_last_row_in_peerset_within_frame(true);

            /*
              It may be that rowno is not in previous frame; for example if
              column id contains 1, 3, 4 and 5 and frame is RANGE BETWEEN 2
              FOLLOWING AND 2 FOLLOWING: we process id=1, frame of id=1 is
              id=3; then we process id=3: id=3 is before frame (and was in
              previous frame), id=4 is before frame too (and was not in
              previous frame); so id=3 only should be inverted:
            */
            if (rowno >= prev_first_rowno_in_frame &&
                rowno <= prev_last_rowno_in_frame) {
              if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;
            }

            w.set_inverse(false).set_is_last_row_in_frame(false);
            w.set_is_last_row_in_peerset_within_frame(false);
            found_first = false;
          } else {
            if (w.after_frame()) {
              found_first = false;
            } else {
              w.set_first_rowno_in_range_frame(rowno);
              found_first = true;
              new_first_rowno_in_frame = rowno;
              w.set_rowno_in_frame(1);
            }

            break;
          }
        }

        // Empty frame
        if (rowno > upper && !found_first) {
          w.set_first_rowno_in_range_frame(rowno);
          w.set_last_rowno_in_range_frame(rowno - 1);
        }

        if ((have_first_value || have_last_value) &&
            (rowno <= last_rowno_in_cache) && found_first) {
          /*
             We have FIRST_VALUE or LAST_VALUE and have a new first row; make it
             last also until we find something better.
          */
          w.set_is_last_row_in_frame(true);

          if (copy_funcs(param, thd, CFT_WF_USES_ONLY_ONE_ROW)) return true;
          w.set_is_last_row_in_frame(false);

          if (have_last_value && w.last_rowno_in_range_frame() > rowno) {
            /* Set up the non-wf fields for aggregating to the output row. */
            if (bring_back_frame_row(
                    thd, &w, w.last_rowno_in_range_frame(),
                    Window_retrieve_cached_row_reason::LAST_IN_FRAME))
              return true;

            w.set_rowno_in_frame(w.last_rowno_in_range_frame() -
                                 w.first_rowno_in_range_frame() + 1)
                .set_is_last_row_in_frame(true);
            if (copy_funcs(param, thd, CFT_WF_USES_ONLY_ONE_ROW)) return true;
            w.set_is_last_row_in_frame(false);
          }
        }
      }

      /*
        We last evaluated last_rowno_in_range_frame for the previous current
        row. Now evaluate over any new rows within range of the current row.
      */
      const int64 first = w.last_rowno_in_range_frame() + 1;
      const bool empty =
          w.last_rowno_in_range_frame() < w.first_rowno_in_range_frame();
      bool row_added = false;

      for (rowno = first; rowno <= upper; rowno++) {
        w.save_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME);
        if (bring_back_frame_row(
                thd, &w, rowno,
                Window_retrieve_cached_row_reason::LAST_IN_FRAME))
          return true;

        if (rowno == first && !found_first)
          w.copy_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME,
                     Window_retrieve_cached_row_reason::FIRST_IN_FRAME);

        if (w.before_frame()) {
          if (!found_first) new_first_rowno_in_frame++;
          continue;
        } else if (w.after_frame()) {
          w.set_last_rowno_in_range_frame(rowno - 1);
          if (!found_first) {
            w.set_first_rowno_in_range_frame(rowno);
            if (rowno > first)  // if equal, we just copied hint above
              w.copy_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME,
                         Window_retrieve_cached_row_reason::FIRST_IN_FRAME);
          }
          /*
            We read one row too far, so reinstate previous hint for last in
            frame. We will likely be reading the last row in frame
            again in for next current row, and then we will need the hint.
          */
          w.restore_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME);
          break;
        }  // else: row is within range, process

        const int64 rowno_in_frame = rowno - new_first_rowno_in_frame + 1;

        if (rowno_in_frame == 1 && !found_first) {
          found_first = true;
          w.set_first_rowno_in_range_frame(rowno);
          // Found the first row in this range frame. Make a note in the hint.
          w.copy_pos(Window_retrieve_cached_row_reason::LAST_IN_FRAME,
                     Window_retrieve_cached_row_reason::FIRST_IN_FRAME);
        }
        w.set_rowno_in_frame(rowno_in_frame)
            .set_is_last_row_in_frame(true);  // pessimistic assumption

        if (copy_funcs(param, thd, CFT_WF_FRAMING)) return true;

        w.set_is_last_row_in_frame(false);  // undo temporary states
        row_added = true;
      }

      if (w.before_frame() && empty) {
        assert(!row_added && !found_first);
        // This row's value is too low to fit in frame. We already had an empty
        // set of frame rows when evaluating for the previous row, and the set
        // is still empty.  So, we can move the possible boundaries for the
        // set of frame rows for the next row to be evaluated one row ahead.
        // We need only update last_rowno_in_range_frame here, first_row
        // no_in_range_frame will be adjusted below to be one higher, cf.
        // "maintain invariant" comment.
        w.set_last_rowno_in_range_frame(
            min(w.last_rowno_in_range_frame() + 1, upper));
      }

      if (rowno > upper && row_added)
        w.set_last_rowno_in_range_frame(rowno - 1);

      if (range_to_current_row) {
        w.set_last_rowno_in_peerset(w.last_rowno_in_range_frame());
        have_peers_current_row = true;
      }

      if (found_first && !have_nth_value.m_offsets.empty()) {
        // frame is non-empty, so we might find NTH_VALUE
        assert(w.first_rowno_in_range_frame() <= w.last_rowno_in_range_frame());
        int fno = 0;
        for (Window::st_offset nth : have_nth_value.m_offsets) {
          const int64 row_to_get =
              w.first_rowno_in_range_frame() + nth.m_rowno - 1;
          if (row_to_get <= w.last_rowno_in_range_frame()) {
            if (bring_back_frame_row(
                    thd, &w, row_to_get,
                    Window_retrieve_cached_row_reason::MISC_POSITIONS, fno++))
              return true;

            w.set_rowno_in_frame(nth.m_rowno);

            if (copy_funcs(param, thd, CFT_WF_USES_ONLY_ONE_ROW)) return true;
          }
        }
      }

      // We have empty frame, maintain invariant
      if (!found_first) {
        assert(!row_added);
        w.set_first_rowno_in_range_frame(w.last_rowno_in_range_frame() + 1);
      }
    }
  }

  /* We need the peer of the current row to evaluate the row. */
  if (needs_peerset && !have_peers_current_row) {
    int64 first = current_row;

    if (current_row != 1) first = w.last_rowno_in_peerset() + 1;

    if (current_row >= first) {
      int64 rowno;

      for (rowno = current_row; rowno <= last_rowno_in_cache; rowno++) {
        if (bring_back_frame_row(
                thd, &w, rowno,
                Window_retrieve_cached_row_reason::LAST_IN_PEERSET))
          return true;

        if (rowno == current_row) {
          /* establish current row as base-line for peer set */
          w.reset_order_by_peer_set();
          w.set_last_rowno_in_peerset(current_row);
        } else if (w.in_new_order_by_peer_set()) {
          w.set_last_rowno_in_peerset(rowno - 1);
          break;  // we have accumulated all rows in the peer set
        }
      }
      if (rowno > last_rowno_in_cache)
        // All remaining buffered rows were peers of the current row.
        w.set_last_rowno_in_peerset(last_rowno_in_cache);
    }
  }

  if (optimizable && optimizable_primed) w.set_aggregates_primed(true);

  // Restore the current row so the output row's non-wf fields are correct.
  if (bring_back_frame_row(thd, &w, current_row,
                           Window_retrieve_cached_row_reason::CURRENT))
    return true;

  /* NTILE and other non-framing wfs */
  if (w.needs_partition_cardinality()) {
    /* Set up the non-wf fields for aggregating to the output row. */
    if (process_wfs_needing_partition_cardinality(
            thd, param, have_nth_value, have_lead_lag, current_row, &w,
            Window_retrieve_cached_row_reason::CURRENT))
      return true;
  }

  // NOTE(review): presumably only the query's last window materializes all
  // WF results into the output row (w.is_last()) — confirm against callers.
  if (w.is_last() && copy_funcs(param, thd, CFT_HAS_WF)) return true;
  *output_row_ready = true;
  w.set_last_row_output(current_row);
  DBUG_PRINT("info", ("sent row: %" PRId64, current_row));

  return false;
}