in turbonfs/src/readahead.cpp [424:491]
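/*
 * Return the file offset from which the next readahead should be
 * issued, or a negative code when no readahead must be issued now:
 * -1: readahead is disabled (def_ra_size is 0).
 * -2: readahead would extend beyond eof, or the file size is unknown.
 * -3: application read pattern is not sequential.
 * -4: readahead window is already full, i.e., we have issued enough
 *     readahead beyond the last byte read by the application.
 * -5: issuing this readahead would exceed the scaled readahead budget.
 * A length of 0 means "use def_ra_size".
 */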
int64_t ra_state::get_next_ra(uint64_t length)
{
    /*
     * length == 0 means "use the default readahead size".
     */
    if (length == 0) {
        length = def_ra_size;
    }

    /*
     * If def_ra_size is also 0, readahead is disabled.
     */
    if (length == 0) {
        return -1;
    }

    /*
     * Don't perform readahead beyond eof, i.e., the next readahead
     * window [last_byte_readahead+1, last_byte_readahead+length] must
     * lie entirely within the file.
     * If we don't have a file size estimate (probably because the attr
     * cache is too old), we also play it safe and skip readahead.
     */
    const int64_t filesize =
        inode ? inode->get_server_file_size() : AZNFSC_MAX_FILE_SIZE;
    assert(filesize >= 0 || filesize == -1);

    if ((filesize == -1) ||
        ((int64_t) (last_byte_readahead + 1 + length) > filesize)) {
        return -2;
    }

    /*
     * Is the application read pattern known to be non-sequential?
     */
    if (!is_sequential()) {
        return -3;
    }

    /*
     * ra_bytes scaled to account for global cache pressure. We use this
     * scaled value to decide how much to readahead.
     */
    const uint64_t ra_bytes_scaled = get_ra_bytes();

    /*
     * Don't let readahead run more than ra_bytes_scaled bytes ahead of
     * the last byte read by the application.
     */
    if ((last_byte_readahead + length) > (max_byte_read + ra_bytes_scaled)) {
        return -4;
    }

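    /*
     * Illustrative numbers (not from this code): with max_byte_read at
     * 10MiB and ra_bytes_scaled at 4MiB, readahead may be issued only
     * while last_byte_readahead + length stays at or below 14MiB.
     */
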
    /*
     * Keep total readahead bytes in flight within the scaled ra_bytes
     * budget. Tentatively charge this readahead to ra_ongoing and roll
     * it back if that overshoots the budget.
     */
    if ((ra_ongoing += length) > ra_bytes_scaled) {
        assert(ra_ongoing >= length);
        ra_ongoing -= length;
        return -5;
    }

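    /*
     * Note: on the success path ra_ongoing stays incremented; it is
     * presumably decremented once the issued readahead completes (that
     * code is outside this excerpt).
     */
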
    std::unique_lock<std::shared_mutex> _lock(ra_lock_40);

    /*
     * Update last_byte_readahead while holding the exclusive lock, so
     * that concurrent callers never get duplicate readahead offsets.
     */
    const uint64_t next_ra =
        std::atomic_exchange(&last_byte_readahead,
                             last_byte_readahead + length) + 1;
    assert((int64_t) next_ra > 0);

    return next_ra;
}
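
A minimal caller sketch follows, assuming a ra_state instance and a
hypothetical issue_readahead(offset, length) helper; neither the helper
nor the 1MiB chunk size below comes from this file.

// Hypothetical helper that actually sends the readahead RPC.
extern void issue_readahead(uint64_t offset, uint64_t length);

static void drive_readahead(ra_state& ra)
{
    // Hypothetical readahead unit; callers may also pass 0 to let
    // get_next_ra() fall back to def_ra_size.
    constexpr uint64_t ra_chunk = 1048576ULL;

    int64_t next_off;
    while ((next_off = ra.get_next_ra(ra_chunk)) > 0) {
        /*
         * get_next_ra() has already advanced last_byte_readahead, so
         * every iteration gets a distinct, non-overlapping window.
         */
        issue_readahead((uint64_t) next_off, ra_chunk);
    }

    /*
     * A negative return (-1..-5) means "don't readahead now"; the
     * value identifies which check stopped us.
     */
}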