in turbonfs/src/file_cache.cpp [2443:2610]
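/*
 * Prune chunks from this cache inline (in the caller's context) until the
 * inline prune goal returned by get_prune_goals() is met. Chunks whose
 * membuf lies in the readahead window, or is inuse, locked, dirty or
 * commit-pending, are skipped; everything else is released.
 */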
void bytes_chunk_cache::inline_prune()
{
    uint64_t inline_bytes = 0;
    uint64_t pruned_bytes = 0;

    /*
     * Update various client-level state that needs to be refreshed
     * periodically, e.g. the read/write scale factors and the last 5 secs
     * throughput.
     */
    nfs_client::get_instance().periodic_updater();

    get_prune_goals(&inline_bytes, nullptr);

    // Inline pruning not needed.
    if (inline_bytes == 0) {
        return;
    }

    const std::unique_lock<std::mutex> _lock(chunkmap_lock_43);

    /*
     * Multiple fuse threads may fetch the same prune goals and then each of
     * them would prune that much, resulting in over-pruning, so re-fetch the
     * prune goals after acquiring the lock.
     */
    get_prune_goals(&inline_bytes, nullptr);

    if (inline_bytes == 0) {
        return;
    }
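
    /*
     * (The unlocked check above is just a cheap fast path that lets most
     * callers bail out without contending on chunkmap_lock_43; this locked
     * re-check is the authoritative one.)
     */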

    AZLogDebug("[{}] inline_prune(): Inline prune goal of {:0.2f} MB",
               CACHE_TAG, inline_bytes / (1024 * 1024.0));
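
    /*
     * Per-reason skip counters (chunks and bytes), logged at the end to
     * explain why the prune goal was or wasn't met.
     */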
    uint32_t inuse = 0, dirty = 0, commit_pending = 0, locked = 0, inra = 0;
    uint64_t inuse_bytes = 0, dirty_bytes = 0, commit_pending_bytes = 0,
             locked_bytes = 0, inra_bytes = 0;
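
    /*
     * Iterate with a saved next iterator so that the current chunk can be
     * erased without invalidating the loop.
     */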
    for (auto it = chunkmap.cbegin(), next_it = it;
         (it != chunkmap.cend()) && (pruned_bytes < inline_bytes);
         it = next_it) {
        ++next_it;

        const struct bytes_chunk *bc = &(it->second);
        const struct membuf *mb = bc->get_membuf();

        /*
         * inode will be null only for testing.
         */
        assert(!inode || (inode->magic == NFS_INODE_MAGIC));
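
        /*
         * Don't prune membufs that lie in the readahead window; that data
         * is expected to be read soon and dropping it would defeat
         * readahead.
         */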
        if (inode && inode->in_ra_window(mb->offset.load(), mb->length.load())) {
            AZLogDebug("[{}] inline_prune(): skipping as membuf(offset={}, "
                       "length={}) lies in RA window",
                       CACHE_TAG, mb->offset.load(), mb->length.load());
            inra++;
            inra_bytes += mb->allocated_length;
            continue;
        }

        /*
         * Membuf has active users, so it's possibly under IO.
         * Cannot safely drop this from the cache.
         */
        if (mb->is_inuse()) {
            AZLogDebug("[{}] inline_prune(): skipping as membuf(offset={}, "
                       "length={}) is inuse (locked={}, dirty={}, flushing={}, "
                       "uptodate={})",
                       CACHE_TAG, mb->offset.load(), mb->length.load(),
                       mb->is_locked() ? "yes" : "no",
                       mb->is_dirty() ? "yes" : "no",
                       mb->is_flushing() ? "yes" : "no",
                       mb->is_uptodate() ? "yes" : "no");
            inuse++;
            inuse_bytes += mb->allocated_length;
            continue;
        }

        /*
         * Usually the inuse count is dropped only after the lock, so if the
         * inuse count is zero the membuf must not be locked. However, users
         * who want release() to free a chunk while they hold the lock may
         * drop their inuse count early to let release() release the
         * bytes_chunk, so check the lock separately.
         */
        if (mb->is_locked()) {
            AZLogDebug("[{}] inline_prune(): skipping as membuf(offset={}, "
                       "length={}) is locked (dirty={}, flushing={}, "
                       "uptodate={})",
                       CACHE_TAG, mb->offset.load(), mb->length.load(),
                       mb->is_dirty() ? "yes" : "no",
                       mb->is_flushing() ? "yes" : "no",
                       mb->is_uptodate() ? "yes" : "no");
            locked++;
            locked_bytes += mb->allocated_length;
            continue;
        }

        /*
         * Has data yet to be written to the Blob.
         * Cannot safely drop this from the cache.
         */
        if (mb->is_dirty()) {
            AZLogDebug("[{}] inline_prune(): skipping as membuf(offset={}, "
                       "length={}) is dirty (flushing={}, uptodate={})",
                       CACHE_TAG, mb->offset.load(), mb->length.load(),
                       mb->is_flushing() ? "yes" : "no",
                       mb->is_uptodate() ? "yes" : "no");
            dirty++;
            dirty_bytes += mb->allocated_length;
            continue;
        }

        /*
         * Data written to the Blob but not yet committed.
         * Cannot safely drop this from the cache.
         */
        if (mb->is_commit_pending()) {
            AZLogDebug("[{}] inline_prune(): skipping as membuf(offset={}, "
                       "length={}) is commit_pending (dirty={}, flushing={}, "
                       "uptodate={})",
                       CACHE_TAG, mb->offset.load(), mb->length.load(),
                       mb->is_dirty() ? "yes" : "no",
                       mb->is_flushing() ? "yes" : "no",
                       mb->is_uptodate() ? "yes" : "no");
            commit_pending++;
            commit_pending_bytes += mb->allocated_length;
            continue;
        }

        AZLogDebug("[{}] inline_prune(): deleting membuf(offset={}, length={})",
                   CACHE_TAG, mb->offset.load(), mb->length.load());

        /*
         * Release the chunk.
         * This will release the membuf (munmap() it in case of a file-backed
         * cache and delete it for a heap-backed cache). At this point the
         * membuf is guaranteed to be not in use since we checked the inuse
         * count above.
         */
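        // Update both the per-cache and global chunk/byte accounting.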
        assert(num_chunks > 0);
        num_chunks--;
        assert(num_chunks_g > 0);
        num_chunks_g--;

        assert(bytes_cached >= bc->length);
        assert(bytes_cached_g >= bc->length);
        bytes_cached -= bc->length;
        bytes_cached_g -= bc->length;
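
        /*
         * Note: prune progress counts the membuf's allocated length (the
         * memory actually freed), while bytes_cached tracks the chunk's
         * logical length.
         */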
        pruned_bytes += mb->allocated_length;

        chunkmap.erase(it);
    }

    if (pruned_bytes < inline_bytes) {
        AZLogDebug("Could not meet inline prune goal, pruned {} of {} bytes "
                   "[inuse={}/{}, dirty={}/{}, commit_pending={}/{}, "
                   "locked={}/{}, inra={}/{}]",
                   pruned_bytes, inline_bytes,
                   inuse, inuse_bytes,
                   dirty, dirty_bytes,
                   commit_pending, commit_pending_bytes,
                   locked, locked_bytes,
                   inra, inra_bytes);
    } else {
        AZLogDebug("Successfully pruned {} bytes [inuse={}/{}, dirty={}/{}, "
                   "commit_pending={}/{}, locked={}/{}, inra={}/{}]",
                   pruned_bytes,
                   inuse, inuse_bytes,
                   dirty, dirty_bytes,
                   commit_pending, commit_pending_bytes,
                   locked, locked_bytes,
                   inra, inra_bytes);
    }
}