in turbonfs/src/file_cache.cpp [3260:4137]
int bytes_chunk_cache::unit_test()
{
assert(::sysconf(_SC_PAGESIZE) == PAGE_SIZE);
/*
* Choose file-backed or non file-backed cache for testing.
 * For the file-backed cache, make sure /tmp has sufficient space.
*/
#if 1
bytes_chunk_cache cache(nullptr);
#else
bytes_chunk_cache cache(nullptr, "/tmp/bytes_chunk_cache");
#endif
std::vector<bytes_chunk> v;
uint64_t l, r;
/*
* Sometimes we want to validate that a bytes_chunk returned at a later
* point refers to a chunk allocated earlier. We use these temp bytes_chunk
 * for that. Note that a bytes_chunk can be deleted by calls to release(),
 * and calls to dropall() may drop the buffer mappings, so we might need
 * to call load() before we can use the buffers.
*/
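/*
 * Note: dropall() drops the buffer mappings only for file-backed caches
 * (for memory-backed caches it's a no-op, see ASSERT_DROPALL below),
 * while load() re-maps a dropped buffer so that it can be safely
 * accessed again.
 */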
bytes_chunk bc, bc1, bc2, bc3;
[[maybe_unused]] uint8_t *buffer;
/*
 * Assertions common to every bytes_chunk returned by the cache, whether
 * newly allocated or existing. For a file-backed cache the buffer may lie
 * inside a larger page-aligned allocation, while for a memory-backed cache
 * buffer/length must exactly match allocated_buffer/allocated_length.
 */
#define ASSERT_CHUNK(chunk, start, end) \
do { \
    assert(chunk.offset == (start)); \
    assert(chunk.length == (end) - (start)); \
    if (cache.is_file_backed()) { \
        assert(chunk.get_membuf()->buffer >= \
               chunk.get_membuf()->allocated_buffer); \
        assert(chunk.get_membuf()->allocated_length >= \
               chunk.get_membuf()->length); \
    } else { \
        assert(chunk.get_membuf()->allocated_buffer == \
               chunk.get_membuf()->buffer); \
        assert(chunk.get_membuf()->allocated_length == \
               chunk.get_membuf()->length); \
    } \
    assert((uint64_t) (chunk.get_membuf()->buffer - \
                       chunk.get_membuf()->allocated_buffer) == \
           (chunk.get_membuf()->allocated_length - \
            chunk.get_membuf()->length)); \
    assert(chunk.bcc->bytes_cached >= chunk.length); \
    assert(chunk.bcc->bytes_cached_g >= chunk.bcc->bytes_cached); \
    /* All membufs MUST be returned with inuse incremented */ \
    assert(chunk.get_membuf()->is_inuse()); \
    chunk.get_membuf()->clear_inuse(); \
} while (0)
#define ASSERT_NEW(chunk, start, end) \
do { \
    assert(chunk.is_new); \
    assert(chunk.is_whole); \
    ASSERT_CHUNK(chunk, start, end); \
} while (0)
#define ASSERT_EXISTING(chunk, start, end) \
do { \
    assert(!(chunk.is_new)); \
    ASSERT_CHUNK(chunk, start, end); \
} while (0)
#define ASSERT_EXTENT(left, right) \
do { \
assert(l == left); \
assert(r == right); \
} while (0)
#define ASSERT_DROPALL() \
do { \
/* get all chunks and calculate total allocated bytes */ \
uint64_t total_allocated_bytes = 0; \
uint64_t total_bytes = 0; \
for ([[maybe_unused]] const auto& e : cache.chunkmap) { \
total_allocated_bytes += e.second.get_membuf()->allocated_length; \
total_bytes += e.second.get_membuf()->length; \
} \
[[maybe_unused]] const uint64_t total_dropped_bytes = cache.dropall(); \
if (cache.is_file_backed()) { \
/* For file-backed caches all allocated bytes must be dropped */ \
assert(total_dropped_bytes == total_allocated_bytes); \
} else { \
/* For memory-backed caches drop should be a no-op */ \
assert(total_dropped_bytes == 0); \
} \
    /* \
     * drop() should not change length and allocated_length, but it \
     * should set allocated_buffer and buffer to nullptr. \
     */ \
uint64_t total_allocated_bytes1 = 0; \
uint64_t total_bytes1 = 0; \
for ([[maybe_unused]] const auto& e : cache.chunkmap) { \
if (cache.is_file_backed()) { \
assert(e.second.get_membuf()->allocated_buffer == nullptr); \
assert(e.second.get_membuf()->buffer == nullptr); \
} else { \
assert(e.second.get_membuf()->allocated_buffer != nullptr); \
assert(e.second.get_membuf()->buffer != nullptr); \
} \
total_allocated_bytes1 += e.second.get_membuf()->allocated_length; \
total_bytes1 += e.second.get_membuf()->length; \
} \
assert(total_bytes1 == total_bytes); \
assert(total_allocated_bytes1 == total_allocated_bytes); \
} while (0)
#define PRINT_CHUNK(chunk) \
do { \
assert(chunk.length > 0); \
AZLogInfo("[{},{}){}{} <{}> use_count={}, flag=0x{:x}", chunk.offset,\
chunk.offset + chunk.length,\
chunk.is_new ? " [New]" : "", \
chunk.is_whole ? " [Whole]" : "", \
fmt::ptr(chunk.get_buffer()), \
chunk.get_membuf_usecount(), \
chunk.get_membuf()->get_flag()); \
} while (0)
#define PRINT_CHUNKMAP() \
do { \
    AZLogInfo("==== [{}] chunkmap start [a:{} c:{}] ====", \
              __LINE__, cache.bytes_allocated.load(), \
              cache.bytes_cached.load()); \
    for (auto& e : cache.chunkmap) { \
        /* mmap() just in case drop was called prior to this */ \
        e.second.load(); \
        PRINT_CHUNK(e.second); \
    } \
    AZLogInfo("==== chunkmap end ===="); \
} while (0)
/*
* Get cache chunks covering range [0, 300).
* Since the cache is empty, it'll add a new empty chunk and return that.
* The newly added chunk is also the largest contiguous block containing
* the chunk.
*/
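/*
 * Note: getx(offset, length, &l, &r) returns a vector of bytes_chunks
 * covering the byte range [offset, offset+length) and sets [l, r) to the
 * largest contiguous extent containing that range (subject to the
 * needs_flush() rules exercised later in this test).
 */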
AZLogInfo("========== [Get] --> (0, 300) ==========");
v = cache.getx(0, 300, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(0, 300);
ASSERT_NEW(v[0], 0, 300);
/*
 * This bytes_chunk later gets deleted by the call to release(200, 100),
 * so we store the buffer pointer for later address comparisons.
*/
buffer = v[0].get_buffer();
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Release data range [0, 100).
* After this the cache should have chunk [100, 300).
*/
AZLogInfo("========== [Release] --> (0, 100) ==========");
assert(cache.release(0, 100) == 100);
/*
* Release data range [200, 300).
* After this the cache should have chunk [100, 200).
*/
AZLogInfo("========== [Release] --> (200, 100) ==========");
assert(cache.release(200, 100) == 100);
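/*
 * Note how release() trims a chunk instead of deleting it when the
 * released range covers it only partially: the two calls above trimmed
 * the single [0, 300) chunk down to [100, 200) without reallocating, as
 * the buffer address checks below confirm.
 */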
/*
* Get cache chunks covering range [100, 200).
* This will return the (only) existing chunk.
 * This existing chunk is also the largest contiguous block containing
 * the requested range.
*/
AZLogInfo("========== [Get] --> (100, 100) ==========");
v = cache.getx(100, 100, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(100, 200);
ASSERT_EXISTING(v[0], 100, 200);
assert(v[0].get_buffer() == (buffer + 100));
assert(v[0].is_whole);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [50, 150).
* This should return 2 chunks:
* 1. Newly allocated chunk [50, 100).
 * 2. Existing chunk [100, 150) (a slice of the [100, 200) chunk).
*
* The largest contiguous block containing the requested chunk is [50, 200).
*/
AZLogInfo("========== [Get] --> (50, 100) ==========");
v = cache.getx(50, 100, &l, &r);
assert(v.size() == 2);
ASSERT_EXTENT(50, 200);
ASSERT_NEW(v[0], 50, 100);
ASSERT_EXISTING(v[1], 100, 150);
assert(v[1].get_buffer() == (buffer + 100));
assert(!v[1].is_whole);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
 * We need to clear the vector before dropall(), else drop won't drop
 * anything as the bytes_chunks will have a use_count of more than 1.
*/
AZLogInfo("========== [Dropall] ==========");
v.clear();
ASSERT_DROPALL();
/*
* Get cache chunks covering range [250, 300).
* This should return 1 chunk:
* 1. Newly allocated chunk [250, 300).
*
* The largest contiguous block containing the requested chunk is [250, 300).
*/
AZLogInfo("========== [Get] --> (250, 50) ==========");
v = cache.getx(250, 50, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(250, 300);
ASSERT_NEW(v[0], 250, 300);
bc = v[0];
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [50, 200).
 * This should return 2 chunks:
* 1. Existing chunk [50, 100).
* 2. Existing chunk [100, 200).
*
* The largest contiguous block containing the requested chunk is [50, 200).
*/
AZLogInfo("========== [Get] --> (50, 150) ==========");
v = cache.getx(50, 150, &l, &r);
assert(v.size() == 2);
ASSERT_EXTENT(50, 200);
ASSERT_EXISTING(v[0], 50, 100);
ASSERT_EXISTING(v[1], 100, 200);
v[0].get_membuf()->set_inuse();
v[0].get_membuf()->set_locked();
v[0].get_membuf()->set_uptodate();
v[0].get_membuf()->set_dirty();
v[0].get_membuf()->clear_locked();
v[0].get_membuf()->clear_inuse();
assert(v[0].needs_flush());
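/*
 * The sequence above mimics what a writer does after copying data into a
 * membuf: grab inuse and the lock, mark the membuf uptodate and dirty,
 * then unlock. A dirty membuf is what makes needs_flush() return true.
 */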
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [0, 50).
* This should return 1 chunk:
* 1. Newly allocated chunk [0, 50).
*
* The largest contiguous block containing the requested chunk is [0, 100).
 * [0, 50) is included in the extent range since it contains the data
 * just written by the user.
 * [50, 100) is included in the extent range as the membuf is dirty
 * (marked above).
 * [100, 200), though contiguous, is not included in the extent range as
 * needs_flush() is not true for it.
*/
AZLogInfo("========== [Get] --> (0, 50) ==========");
v = cache.getx(0, 50, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(0, 100);
ASSERT_NEW(v[0], 0, 50);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [150, 275).
 * This should return the following chunks:
* 1. Existing chunk [150, 200).
* 2. Newly allocated chunk [200, 250).
* 3. Existing chunk [250, 275).
*
* The largest contiguous block containing the requested chunk is [50, 300).
 * [50, 100) is included in the extent range as the membuf is dirty
 * (marked above).
 * [100, 200) is included in the extent range since it partly contains
 * the data just written by the user.
 * [200, 250) is included in the extent range since it fully contains
 * the data just written by the user.
 * [250, 300) is included in the extent range since it partly contains
 * the data just written by the user.
*/
AZLogInfo("========== [Get] --> (150, 125) ==========");
v = cache.getx(150, 125, &l, &r);
assert(v.size() == 3);
ASSERT_EXTENT(50, 300);
ASSERT_EXISTING(v[0], 150, 200);
ASSERT_NEW(v[1], 200, 250);
ASSERT_EXISTING(v[2], 250, 275);
assert(v[2].get_buffer() == bc.get_buffer());
bc1 = v[0];
bc2 = v[1];
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Cannot call ASSERT_DROPALL() here as that asserts that we drop all
* chunks, but since we hold extra refs to chunks we won't drop all.
*/
AZLogInfo("========== [Dropall] ==========");
v.clear();
cache.dropall();
// Reload all the bytes_chunks after dropall().
bc.load();
bc1.load();
bc2.load();
/*
* Get cache chunks covering range [0, 300).
 * This covers all the cached chunks and should return the following:
* 1. Existing chunk [0, 50).
* 2. Existing chunk [50, 100).
* 3. Existing chunk [100, 200).
* 4. Existing chunk [200, 250).
* 5. Existing chunk [250, 300).
*
* Clear dirty flag from [50, 100) to allow the release() below to release
* it.
*/
AZLogInfo("========== [Get] --> (0, 300) ==========");
v = cache.getx(0, 300, &l, &r);
assert(v.size() == 5);
ASSERT_EXTENT(0, 300);
ASSERT_EXISTING(v[0], 0, 50);
ASSERT_EXISTING(v[1], 50, 100);
ASSERT_EXISTING(v[2], 100, 200);
ASSERT_EXISTING(v[3], 200, 250);
ASSERT_EXISTING(v[4], 250, 300);
PRINT_CHUNKMAP();
// Clear dirty.
v[1].get_membuf()->set_inuse();
v[1].get_membuf()->set_locked();
v[1].get_membuf()->set_flushing();
v[1].get_membuf()->clear_dirty();
v[1].get_membuf()->clear_flushing();
v[1].get_membuf()->clear_locked();
v[1].get_membuf()->clear_inuse();
assert(!v[1].needs_flush());
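/*
 * Note the protocol for clearing dirty, mirroring an actual flush: the
 * membuf is marked flushing while inuse and locked, and only then is
 * dirty cleared, after which needs_flush() must return false.
 */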
/*
* Release data range [0, 175).
 * After this the cache should have the following chunks:
* 1. [175, 200).
* 2. [200, 250).
* 3. [250, 300).
*/
AZLogInfo("========== [Release] --> (0, 175) ==========");
assert(cache.release(0, 175) == 175);
/*
* Get cache chunks covering range [100, 280).
 * This should return the following chunks:
* 1. Newly allocated chunk [100, 175).
* 2. Existing chunk [175, 200).
* 3. Existing chunk [200, 250).
* 4. Existing chunk [250, 280).
*
* The largest contiguous block containing the requested chunk is [100, 300).
*/
AZLogInfo("========== [Get] --> (100, 180) ==========");
v = cache.getx(100, 180, &l, &r);
assert(v.size() == 4);
ASSERT_EXTENT(100, 300);
ASSERT_NEW(v[0], 100, 175);
ASSERT_EXISTING(v[1], 175, 200);
assert(v[1].get_buffer() == (bc1.get_buffer() + 25));
ASSERT_EXISTING(v[2], 200, 250);
assert(v[2].get_buffer() == bc2.get_buffer());
ASSERT_EXISTING(v[3], 250, 280);
assert(v[3].get_buffer() == bc.get_buffer());
bc3 = v[0];
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [0, 350).
 * This should return the following chunks:
* 1. Newly allocated chunk [0, 100).
* 2. Existing chunk [100, 175).
* 3. Existing chunk [175, 200).
* 4. Existing chunk [200, 250).
* 5. Existing chunk [250, 300).
* 6. Newly allocated chunk [300, 350).
*
* The largest contiguous block containing the requested chunk is [0, 350).
*/
AZLogInfo("========== [Get] --> (0, 350) ==========");
v = cache.getx(0, 350, &l, &r);
assert(v.size() == 6);
ASSERT_EXTENT(0, 350);
ASSERT_NEW(v[0], 0, 100);
ASSERT_EXISTING(v[1], 100, 175);
assert(v[1].get_buffer() == bc3.get_buffer());
ASSERT_EXISTING(v[2], 175, 200);
assert(v[2].get_buffer() == (bc1.get_buffer() + 25));
ASSERT_EXISTING(v[3], 200, 250);
assert(v[3].get_buffer() == bc2.get_buffer());
ASSERT_EXISTING(v[4], 250, 300);
assert(v[4].get_buffer() == bc.get_buffer());
ASSERT_NEW(v[5], 300, 350);
bc1 = v[0];
bc3 = v[5];
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Release data range [50, 225).
* After this the cache should have the following chunks:
* 1. [0, 50).
* 2. [225, 250).
* 3. [250, 300).
* 4. [300, 350).
*/
AZLogInfo("========== [Release] --> (50, 175) ==========");
assert(cache.release(50, 175) == 175);
/*
* Get cache chunks covering range [0, 325).
 * This should return the following chunks:
* 1. Existing chunk [0, 50).
* 2. Newly allocated chunk [50, 225).
* 3. Existing chunk [225, 250).
* 4. Existing chunk [250, 300).
* 5. Existing chunk [300, 325).
*
* The largest contiguous block containing the requested chunk is [0, 350).
*/
AZLogInfo("========== [Get] --> (0, 325) ==========");
v = cache.getx(0, 325, &l, &r);
assert(v.size() == 5);
ASSERT_EXTENT(0, 350);
ASSERT_EXISTING(v[0], 0, 50);
assert(v[0].get_buffer() == bc1.get_buffer());
ASSERT_NEW(v[1], 50, 225);
ASSERT_EXISTING(v[2], 225, 250);
assert(v[2].get_buffer() == (bc2.get_buffer() + 25));
ASSERT_EXISTING(v[3], 250, 300);
assert(v[3].get_buffer() == bc.get_buffer());
ASSERT_EXISTING(v[4], 300, 325);
assert(v[4].get_buffer() == bc3.get_buffer());
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Release data range [0, 349).
 * After this the cache should have the following chunk:
* 1. [349, 350).
*/
AZLogInfo("========== [Release] --> (0, 349) ==========");
assert(cache.release(0, 349) == 349);
/*
* Get cache chunks covering range [349, 350).
 * This should return the following chunk:
* 1. Existing chunk [349, 350).
*
* The largest contiguous block containing the requested chunk is [349, 350).
*/
AZLogInfo("========== [Get] --> (349, 1) ==========");
v = cache.getx(349, 1, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(349, 350);
ASSERT_EXISTING(v[0], 349, 350);
assert(v[0].get_buffer() == (bc3.get_buffer() + 49));
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Release data range [349, 350).
* This should release the last chunk remaining and cache should be empty
* after this.
*/
AZLogInfo("========== [Release] --> (349, 1) ==========");
assert(cache.release(349, 1) == 1);
AZLogInfo("========== [Dropall] ==========");
cache.dropall();
// Reload all the bytes_chunks after dropall().
bc.load();
bc1.load();
bc2.load();
bc3.load();
/*
* Get cache chunks covering range [0, 131072).
 * This should return the following chunk:
* 1. Newly allocated chunk [0, 131072).
*
* The largest contiguous block containing the requested chunk is
* [0, 131072).
*/
AZLogInfo("========== [Get] --> (0, 131072) ==========");
v = cache.getx(0, 131072, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(0, 131072);
ASSERT_NEW(v[0], 0, 131072);
bc = v[0];
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
 * Release data range [6, 131072), emulating EOF after a short read.
* This should not release any buffer but should just reduce the length
* of the chunk.
*/
AZLogInfo("========== [Release] --> (6, 131066) ==========");
assert(cache.release(6, 131066) == 131066);
PRINT_CHUNKMAP();
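/*
 * Sanity check the trim described above, assuming release() leaves the
 * sole chunk as [0, 6) without deleting it.
 */
assert(cache.chunkmap.size() == 1);
assert(cache.chunkmap.begin()->second.offset == 0);
assert(cache.chunkmap.begin()->second.length == 6);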
/*
* Get cache chunks covering range [6, 20).
 * This should return the following chunk:
* 1. Newly allocated chunk [6, 20).
*
 * The largest contiguous block containing the requested chunk is
 * [6, 20): the preceding [0, 6) chunk is excluded as it contains none
 * of the requested data and does not need flushing.
*/
AZLogInfo("========== [Get] --> (6, 14) ==========");
v = cache.getx(6, 14, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(6, 20);
ASSERT_NEW(v[0], 6, 20);
#ifdef UTILIZE_TAILROOM_FROM_LAST_MEMBUF
// Must reuse the allocated_buffer of the last chunk.
assert(v[0].get_buffer() == (bc.get_buffer() + 6));
#else
assert(v[0].buffer_offset == 0);
#endif
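/*
 * With UTILIZE_TAILROOM_FROM_LAST_MEMBUF the new [6, 20) chunk is carved
 * out of the tailroom of the 128K membuf that backed the original
 * [0, 131072) chunk (which bc still references), hence it shares bc's
 * buffer; without it, a fresh membuf is allocated and buffer_offset
 * starts at 0.
 */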
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [5, 30).
 * This should return the following chunks:
* 1. Existing chunk [5, 6).
* 2. Existing chunk [6, 20).
* 3. Newly allocated chunk [20, 30).
*
* The largest contiguous block containing the requested chunk is
* [0, 30).
*/
AZLogInfo("========== [Get] --> (5, 25) ==========");
v = cache.getx(5, 25, &l, &r);
assert(v.size() == 3);
ASSERT_EXTENT(0, 30);
ASSERT_EXISTING(v[0], 5, 6);
assert(v[0].get_buffer() == (bc.get_buffer() + 5));
ASSERT_EXISTING(v[1], 6, 20);
#ifdef UTILIZE_TAILROOM_FROM_LAST_MEMBUF
assert(v[1].get_buffer() == (bc.get_buffer() + 6));
#else
assert(v[1].buffer_offset == 0);
#endif
ASSERT_NEW(v[2], 20, 30);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
 * Clear the entire cache.
 * cache.clear() asserts that bytes_allocated must drop to 0 once all
 * chunks are deleted. That will fail if we still hold references to
 * membuf(s), hence we must drop all the bytes_chunk references that we
 * have accumulated till now. We do that by assigning freshly
 * default-constructed bytes_chunks, which releases the old references
 * safely (explicitly calling the destructor would cause double
 * destruction when these objects go out of scope).
 */
AZLogInfo("========== [Clear] ==========");
v.clear();
bc = bytes_chunk();
bc1 = bytes_chunk();
bc2 = bytes_chunk();
bc3 = bytes_chunk();
cache.clear();
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [5, 30).
 * This should return the following chunk:
* 1. Newly allocated chunk [5, 30).
*
* The largest contiguous block containing the requested chunk is
* [5, 30).
*/
AZLogInfo("========== [Get] --> (5, 25) ==========");
v = cache.getx(5, 25, &l, &r);
assert(v.size() == 1);
ASSERT_EXTENT(5, 30);
ASSERT_NEW(v[0], 5, 30);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [5, 50).
 * This should return the following chunks:
* 1. Existing chunk [5, 30).
* 2. Newly allocated chunk [30, 50).
*
* The largest contiguous block containing the requested chunk is
* [5, 50).
*/
AZLogInfo("========== [Get] --> (5, 45) ==========");
v = cache.getx(5, 45, &l, &r);
assert(v.size() == 2);
ASSERT_EXTENT(5, 50);
ASSERT_EXISTING(v[0], 5, 30);
ASSERT_NEW(v[1], 30, 50);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Get cache chunks covering range [5, 100).
 * This should return the following chunks:
* 1. Existing chunk [5, 30).
* 2. Existing chunk [30, 50).
 * 3. Newly allocated chunk [50, 100).
*
* The largest contiguous block containing the requested chunk is
* [5, 100).
*/
AZLogInfo("========== [Get] --> (5, 95) ==========");
v = cache.getx(5, 95, &l, &r);
assert(v.size() == 3);
ASSERT_EXTENT(5, 100);
ASSERT_EXISTING(v[0], 5, 30);
ASSERT_EXISTING(v[1], 30, 50);
ASSERT_NEW(v[2], 50, 100);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
 * Release byte range [0, 200), but only after setting the following:
 * - [5, 30) as dirty, and
 * - [50, 100) as inuse and locked.
 * The range [0, 200) covers the entire cache, so release() will try to
 * release all the chunks, but it cannot release chunks v[0] and v[2] as
 * they are dirty and inuse/locked respectively, hence neither is
 * safe_to_release().
*/
AZLogInfo("========== [Release] --> (0, 200) ==========");
v[0].get_membuf()->set_inuse();
v[0].get_membuf()->set_locked();
v[0].get_membuf()->set_dirty();
v[0].get_membuf()->set_uptodate();
v[0].get_membuf()->clear_locked();
assert(!v[0].safe_to_release());
assert(v[1].safe_to_release());
v[2].get_membuf()->set_inuse();
// Keep the lock held across the release() call to ensure release()
// correctly handles locked membufs.
v[2].get_membuf()->set_locked();
assert(!v[2].safe_to_release());
// It should release just [30, 50), i.e., exactly 20 bytes.
assert(cache.release(0, 200) == 20);
v[0].get_membuf()->set_locked();
v[0].get_membuf()->set_flushing();
v[0].get_membuf()->clear_dirty();
v[0].get_membuf()->clear_flushing();
v[0].get_membuf()->clear_locked();
v[0].get_membuf()->clear_inuse();
v[2].get_membuf()->clear_locked();
v[2].get_membuf()->clear_inuse();
/*
* Get cache chunks covering range [5, 200).
 * This should return the following chunks:
 * 1. Existing chunk [5, 30).
 * 2. Newly allocated chunk [30, 50).
 * 3. Existing chunk [50, 100).
 * 4. Newly allocated chunk [100, 200).
*
* The largest contiguous block containing the requested chunk is
* [5, 200).
*/
AZLogInfo("========== [Get] --> (5, 195) ==========");
v = cache.getx(5, 195, &l, &r);
assert(v.size() == 4);
ASSERT_EXTENT(5, 200);
ASSERT_EXISTING(v[0], 5, 30);
ASSERT_NEW(v[1], 30, 50);
ASSERT_EXISTING(v[2], 50, 100);
ASSERT_NEW(v[3], 100, 200);
for ([[maybe_unused]] const auto& e : v) {
PRINT_CHUNK(e);
}
PRINT_CHUNKMAP();
/*
* Mark the entire range as dirty so that release() fails to release any
* byte.
*/
for (int i = 0; i < 4; i++) {
v[i].get_membuf()->set_inuse();
v[i].get_membuf()->set_locked();
v[i].get_membuf()->set_dirty();
v[i].get_membuf()->set_uptodate();
v[i].get_membuf()->clear_locked();
v[i].get_membuf()->clear_inuse();
assert(!v[i].safe_to_release());
}
v.clear();
AZLogInfo("========== [Release] --> (0, 500) ==========");
// All bytes are dirty, release() will return 0.
assert(cache.release(0, 500) == 0);
AZLogInfo("========== [Truncate] --> (75) ==========");
// truncate() should be able to release dirty bytes.
assert(cache.truncate(75) == 125);
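/*
 * The 125 bytes released by truncate(75) comprise [75, 100), trimmed off
 * the dirty chunk [50, 100), plus the whole of [100, 200), as the getx()
 * below confirms.
 */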
/*
* Get cache chunks covering range [5, 200).
 * This should return the following chunks:
* 1. Existing chunk [5, 30).
* 2. Existing chunk [30, 50).
* 3. Existing chunk [50, 75).
* 4. Newly allocated chunk [75, 200).
*
* The largest contiguous block containing the requested chunk is
* [5, 200).
*/
AZLogInfo("========== [Get] --> (5, 195) ==========");
v = cache.getx(5, 195, &l, &r);
assert(v.size() == 4);
ASSERT_EXTENT(5, 200);
ASSERT_EXISTING(v[0], 5, 30);
ASSERT_EXISTING(v[1], 30, 50);
ASSERT_EXISTING(v[2], 50, 75);
ASSERT_NEW(v[3], 75, 200);
for (int i = 0; i < 3; i++) {
v[i].get_membuf()->set_inuse();
v[i].get_membuf()->set_locked();
v[i].get_membuf()->set_uptodate();
v[i].get_membuf()->set_flushing();
v[i].get_membuf()->clear_dirty();
v[i].get_membuf()->clear_flushing();
v[i].get_membuf()->clear_locked();
v[i].get_membuf()->clear_inuse();
}
v.clear();
/*
 * Release of [0, 500) covers the entire cache and should release all
 * 195 bytes (25 + 20 + 25 + 125):
* [5, 30)
* [30, 50)
* [50, 75)
* [75, 200)
*/
AZLogInfo("========== [Release] --> (0, 500) ==========");
assert(cache.release(0, 500) == 195);
assert(cache.chunkmap.empty());
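/* Once the cache is empty, release() of any range must be a no-op. */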
assert(cache.release(0, 1) == 0);
assert(cache.release(10, 20) == 0);
assert(cache.release(2, 2000) == 0);
/*
* Now run some random cache get/release to stress test the cache.
*/
AZLogInfo("========== Starting cache stress ==========");
for (int i = 0; i < 10'000'000; i++) {
AZLogVerbose("\n\n ----[ {} ]----------\n", i);
const uint64_t offset = random_number(0, 100'000'000);
const uint64_t length = random_number(1, AZNFSC_MAX_CHUNK_SIZE);
const bool should_drop_all = random_number(0, 100) <= 1;
// Randomly drop caches for testing.
if (should_drop_all) {
cache.dropall();
}
if (is_read()) {
cache_read(cache, offset, length);
} else {
cache_write(cache, offset, length);
}
}
AZLogInfo("========== Cache stress successful! ==========");
return 0;
}