in turbonfs/src/rpc_readdir.cpp [202:369]
bool readdirectory_cache::add(const std::shared_ptr<struct directory_entry>& entry,
const cookieverf3 *cookieverf,
bool acquire_lock)
{
// Maximum readdir cache size allowed in bytes.
static const uint64_t max_cache =
(aznfsc_cfg.cache.readdir.user.max_size_mb * 1024 * 1024ULL);
assert(max_cache != 0);
// Single cache must not be allowed to be more than half the global max.
const uint64_t max_single_cache = std::min((uint64_t) MAX_CACHE_SIZE_LIMIT, max_cache / 2);
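// That is, a single directory's cache is capped at MAX_CACHE_SIZE_LIMIT bytes
// or half of the configured global budget, whichever is smaller.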
assert(entry != nullptr);
assert(entry->name != nullptr);
// 0 is not a valid cookie.
assert(entry->cookie != 0);
/*
 * If this cache grows beyond the single cache limit, purge this cache.
 * This removes all cookies added so far, making space for newer cookies and
 * thus supporting unlimited directory enumerations. Since we remove the
 * older cookies, any re-enumeration attempt will not find the starting
 * entries cached and will cause fresh readdir calls to the server.
 * This should work fine, and is a non-issue when the kernel readdir cache is
 * in effect.
 */
if (cache_size >= max_single_cache) {
AZLogWarn("[{}] Readdir cache exceeded per-directory cache limit "
"({} > {}) while adding entry [name: {}, ino: {}], clearing "
"cache!",
dir_inode->get_fuse_ino(),
cache_size, max_single_cache, entry->name,
entry->nfs_inode ? entry->nfs_inode->get_fuse_ino() : -1);
clear(acquire_lock);
assert(cache_size < max_single_cache);
}
/*
 * If we run out of global readdir cache space, stop adding new directory
 * entries to the cache. We still return the requested directory entries to
 * fuse, so the ongoing enumeration continues to work fine.
 */
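/*
 * Note: bytes_allocated_g is presumably a process-wide byte counter for the
 * readdir cache, so this check throttles growth globally rather than per
 * directory (the per-directory limit is handled above).
 */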
if (bytes_allocated_g > max_cache) {
AZLogWarn("[{}] Readdir cache exceeded global cache limit "
"({} > {}) while adding entry [name: {}, ino: {}], not "
"adding to cache!",
dir_inode->get_fuse_ino(),
bytes_allocated_g.load(), max_cache, entry->name,
entry->nfs_inode ? entry->nfs_inode->get_fuse_ino() : -1);
return false;
}
{
/*
 * If acquire_lock is true, take an exclusive lock on the map before adding
 * the entry. We use a dummy_lock in the no-lock case to keep the code
 * changes minimal.
 * If you call this with acquire_lock=false, make sure readdircache_lock_2 is
 * already held in exclusive mode.
 */
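/*
 * The ternary below selects which mutex std::unique_lock wraps: when
 * acquire_lock is false we lock the function-local dummy_lock, which is
 * uncontended and hence effectively a no-op.
 */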
std::shared_mutex dummy_lock;
std::unique_lock<std::shared_mutex> lock(
acquire_lock ? readdircache_lock_2 : dummy_lock);
if (entry->nfs_inode) {
/*
* directory_entry constructor must have grabbed the
* dircachecnt ref.
*/
assert(entry->nfs_inode->dircachecnt > 0);
AZLogDebug("[{}] Adding {} \"{}\", ino {}, cookie {}, to readdir "
"cache (dircachecnt: {})",
dir_inode->get_fuse_ino(),
entry->nfs_inode->is_dir() ? "directory" : "file",
entry->name,
entry->nfs_inode->get_fuse_ino(),
entry->cookie,
entry->nfs_inode->dircachecnt.load());
} else {
AZLogDebug("[{}] Adding \"{}\", cookie {}, to readdir cache",
dir_inode->get_fuse_ino(),
entry->name,
entry->cookie);
}
assert(dir_entries.size() == dnlc_map.size());
/*
 * If entry->name already exists with a different cookie, remove that entry.
 * Note that the caller must have removed entry->cookie, but entry->name may
 * still be present under another cookie (e.g. added by lookup, and now we
 * are here for readdirplus).
 */
const cookie3 cookie = filename_to_cookie(entry->name);
if (cookie != 0) {
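/*
 * The last argument is presumably acquire_lock=false (mirroring this
 * function's convention), since the relevant lock is already held at this
 * point.
 */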
remove(cookie, nullptr, false);
}
AZLogDebug("[{}] Adding dir cache entry {} -> {} (dircachecnt: {}, "
"lookupcnt: {})",
dir_inode->get_fuse_ino(), entry->cookie,
entry->name,
entry->nfs_inode ? entry->nfs_inode->dircachecnt.load() : -1,
entry->nfs_inode ? entry->nfs_inode->lookupcnt.load() : -1);
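/*
 * emplace() does not overwrite: if an entry with this cookie is already
 * present (see the race note below) the insert fails and it.second is false.
 */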
const auto it = dir_entries.emplace(entry->cookie, entry);
/*
 * Now atomically update the readdirectory_cache's cookieverf, so that any
 * caller that looks up the readdirectory_cache and finds the directory entry
 * we stored above also finds a valid cookieverf, in case it wants to query
 * subsequent entries from the server.
 */
if (cookieverf) {
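// The _nolock variant is presumably used because the required lock is
// already held here (either taken above or by the caller).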
set_cookieverf_nolock(cookieverf);
}
/*
 * The caller only calls us after ensuring the cookie isn't already cached,
 * but since readdircache_lock_2 is not held across removing the old entry
 * and adding this one, we may race with some other thread.
 *
 * TODO: Move the code that removes the directory_entry with key
 * entry->cookie from readdir{plus}_callback() to here, inside the lock.
 */
if (it.second) {
AZLogDebug("[{}] Adding dnlc cache entry {} -> {} "
"(dircachecnt: {}, lookupcnt: {})",
dir_inode->get_fuse_ino(), entry->name,
entry->cookie,
entry->nfs_inode ? entry->nfs_inode->dircachecnt.load() : -1,
entry->nfs_inode ? entry->nfs_inode->lookupcnt.load() : -1);
cache_size += entry->get_cache_size();
/*
 * Also add to the DNLC cache.
 * In the common case the entry will not be present in the DNLC cache, but if
 * the directory has changed, the same filename may already be present with a
 * different cookie value. In either case, overwrite it.
 */
dnlc_map[entry->name] = entry->cookie;
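/*
 * dnlc_map holds the name -> cookie mapping, presumably the one consulted by
 * filename_to_cookie() above, so name lookups now resolve to the freshly
 * added cookie.
 */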
/*
 * Update seq_last_cookie as long as the sequence of cookies isn't broken.
 * Note that the Blob NFS server returns cookies that increment by one, hence
 * the following check works.
 *
 * For other NFS servers, which may return arbitrary cookie values, this
 * won't work. Ref: ENABLE_NON_AZURE_NFS.
 */
if (entry->cookie == (seq_last_cookie + 1)) {
seq_last_cookie = entry->cookie;
}
}
assert(dir_entries.size() == dnlc_map.size());
return it.second;
}
/*
 * TODO: Prune the map when space constrained.
 * For now we simply do not add the entry to the cache if it is full.
 */
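// Note: the scoped block above always returns, so this fallback is currently
// unreachable; it is kept as a safe default pending the TODO above.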
return false;
}