in cachelib/benchmarks/SpeedUpExistenceCheckBenchmark.cpp [284:344]
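// Measures batched existence-check lookups: numThreads reader threads each
// issue kLoops lookups against a hash table sized by htBucketPower and
// htLockPower, in batches of BATCH_SIZE keys per multiLookup call.
// doesPrefetchObject controls whether multiLookup prefetches the object
// memory; msg tags the timed run in the printed results.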
void testBatch(int numThreads,
               int htBucketPower,
               int htLockPower,
               uint64_t numObjects,
               bool doesPrefetchObject,
               const char* msg = "reg") {
  using HashTable = HashTableImpl<BucketT, LockT>;
  using Object = typename HashTable::Object;
  constexpr uint64_t kLoops = 10'000'000;
  std::vector<std::string> keys;
  std::vector<std::unique_ptr<Object>> objects;
  std::unique_ptr<HashTable> ht;
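  // Build the hash table and insert numObjects entries up front;
  // BENCHMARK_SUSPEND keeps this setup out of folly's benchmark accounting.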
  BENCHMARK_SUSPEND {
    ht = std::make_unique<HashTable>(htBucketPower, htLockPower);
    for (uint64_t i = 0; i < numObjects; i++) {
      auto key = folly::sformat("k_{:<8}", i);
      keys.push_back(key);
      objects.push_back(std::make_unique<Object>(key));
      ht->insert(objects.back().get());
    }
  }
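  // SeqPoints acts as a start barrier: every reader blocks on point 0 so no
  // lookups run before the timer below is in place.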
  navy::SeqPoints sp;
  auto readOps = [&] {
    sp.wait(0);
    std::mt19937 gen;
    std::uniform_int_distribution<uint64_t> dist(0, numObjects - 1);
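    // Each iteration picks BATCH_SIZE random keys (outside folly's benchmark
    // clock) and resolves them with a single multiLookup call. Note that the
    // default-seeded std::mt19937 means every reader walks the same key
    // sequence.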
    for (uint64_t loop = 0; loop < kLoops / BATCH_SIZE; loop++) {
      std::array<Object*, BATCH_SIZE> objects;
      std::array<std::string, BATCH_SIZE> batchedKeys;
      BENCHMARK_SUSPEND {
        for (auto& key : batchedKeys) {
          key = keys[dist(gen)];
        }
      }
      ht->template multiLookup<BATCH_SIZE>(batchedKeys, objects,
                                           doesPrefetchObject);
      folly::doNotOptimizeAway(objects);
    }
  };
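  // Launch the reader threads; each blocks on sp.wait(0) until released.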
  std::vector<std::thread> rs;
  for (int i = 0; i < numThreads; i++) {
    rs.push_back(std::thread{readOps});
  }
  {
    Timer t{folly::sformat("Prefetch{} - {: <4} B, {: <2} T, {: <2} HB, {: <2} "
                           "HL, {: <8} Objects",
                           msg, BATCH_SIZE, numThreads, htBucketPower,
                           htLockPower, numObjects),
            kLoops};
    sp.reached(0); // Start the operations
    for (auto& r : rs) {
      r.join();
    }
  }
}
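
// A minimal sketch of a caller (hypothetical values; BucketT/LockT and
// BATCH_SIZE are assumed to be bound by the enclosing template context, and
// the real file drives these runs from its own benchmark entry points):
//
//   testBatch(/* numThreads */ 16,
//             /* htBucketPower */ 25,
//             /* htLockPower */ 10,
//             /* numObjects */ 1'000'000,
//             /* doesPrefetchObject */ true,
//             "obj");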