typename NvmCache::ItemHandle NvmCache::find()

in cachelib/allocator/nvmcache/NvmCache-inl.h [102:197]


template <typename C>
typename NvmCache<C>::ItemHandle NvmCache<C>::find(folly::StringPiece key) {
  if (!isEnabled()) {
    return ItemHandle{};
  }

  util::LatencyTracker tracker(stats().nvmLookupLatency_);

  auto shard = getShardForKey(key);
  // invalidate the token for any in-flight put for the same key, since we
  // are filling from nvmcache.
  inflightPuts_[shard].invalidateToken(key);

  stats().numNvmGets.inc();

  GetCtx* ctx{nullptr};
  ItemHandle hdl{nullptr};
  {
    auto lock = getFillLockForShard(shard);
    // do not use the Cache::find() since that will call back into us.
    hdl = CacheAPIWrapperForNvm<C>::findInternal(cache_, key);
    if (UNLIKELY(hdl != nullptr)) {
      if (hdl->isExpired()) {
        hdl.reset();
        hdl.markExpired();
        stats().numNvmGetMissExpired.inc();
        stats().numNvmGetMissFast.inc();
      }
      return hdl;
    }

    auto& fillMap = getFillMapForShard(shard);
    auto it = fillMap.find(key);
    // we use async apis for nvmcache operations into navy. async apis for
    // lookups incur additional overheads and thread hops. However, navy can
    // quickly answer negative lookups through a synchronous api. So we try to
    // quickly validate this, if possible, before doing the heavier async
    // lookup.
    //
    // since this is a synchronous api, navy does not guarantee any
    // particular ordering with other concurrent requests for the same key,
    // so we must make sure no asynchronous request for the same key is
    // outstanding. If there is already a concurrent get request, we simply
    // add ourselves to its list of waiters below. If there are concurrent
    // put requests already enqueued, executing this synchronous api could
    // read partial state, so the result cannot be trusted. If there are
    // concurrent delete requests enqueued, we might get a false positive
    // that the key is present. That is okay since it costs only
    // performance, not correctness.
    //
    // For a concurrent put that is already enqueued, its put context
    // already exists and disables this fast path. If it is not enqueued yet
    // (in-flight), the invalidateToken call above prevents it from being
    // enqueued at all.
    if (config_.enableFastNegativeLookups && it == fillMap.end() &&
        !putContexts_[shard].hasContexts() &&
        !navyCache_->couldExist(makeBufferView(key))) {
      stats().numNvmGetMiss.inc();
      stats().numNvmGetMissFast.inc();
      return ItemHandle{};
    }

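    // not a fast miss: create a placeholder handle that will be filled
    // asynchronously when the nvm lookup completes, and mark it so the
    // caller can tell the lookup went to nvm.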
    hdl = CacheAPIWrapperForNvm<C>::createNvmCacheFillHandle(cache_);
    hdl.markWentToNvm();

    auto waitContext = CacheAPIWrapperForNvm<C>::getWaitContext(cache_, hdl);
    XDCHECK(waitContext);

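    // another lookup for the same key is already in flight; attach our wait
    // context to it instead of issuing a second navy lookup.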
    if (it != fillMap.end()) {
      ctx = it->second.get();
      ctx->addWaiter(std::move(waitContext));
      stats().numNvmGetCoalesced.inc();
      return hdl;
    }

    // create a context
    auto newCtx = std::make_unique<GetCtx>(*this, key, std::move(waitContext),
                                           std::move(tracker));
    auto res =
        fillMap.emplace(std::make_pair(newCtx->getKey(), std::move(newCtx)));
    XDCHECK(res.second);
    ctx = res.first->second.get();
  } // scope for fill lock

  XDCHECK(ctx);
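  // make sure the context is removed from the fill map if we fail to
  // enqueue the async lookup below.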
  auto guard = folly::makeGuard([ctx, this]() { removeFromFillMap(*ctx); });

  auto status = navyCache_->lookupAsync(
      makeBufferView(ctx->getKey()),
      [this, ctx](navy::Status s, navy::BufferView k, navy::Buffer v) {
        this->onGetComplete(*ctx, s, k, v.view());
      });

  XDCHECK_EQ(status, navy::Status::Ok);

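  // the lookup was successfully enqueued; the completion callback now owns
  // cleanup of the fill map entry, so the guard is no longer needed.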
  guard.dismiss();
  return hdl;
}
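
For reference, below is a minimal, standalone sketch of the get-coalescing pattern the fill map implements: the first caller for a key starts the backing-store lookup, later callers for the same key attach as waiters, and the completion path fans the single result out to everyone. The FillMap and GetCtx types here are simplified illustrations, not CacheLib's actual classes (the real GetCtx also owns the latency tracker and the wait contexts that fill item handles).

#include <functional>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

// One in-flight lookup for a key, plus every caller waiting on its result.
struct GetCtx {
  std::string key;
  std::vector<std::function<void(const std::string&)>> waiters;
};

class FillMap {
 public:
  // Returns true if this caller started a new lookup, false if it was
  // coalesced onto an existing one.
  bool addWaiter(const std::string& key,
                 std::function<void(const std::string&)> waiter) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map_.find(key);
    if (it != map_.end()) {
      it->second->waiters.push_back(std::move(waiter));
      return false;  // coalesced onto the existing in-flight lookup
    }
    auto ctx = std::make_unique<GetCtx>();
    ctx->key = key;
    ctx->waiters.push_back(std::move(waiter));
    map_.emplace(key, std::move(ctx));
    return true;  // caller should enqueue the single backing-store lookup
  }

  // Called from the completion path: notify all waiters and drop the ctx.
  void complete(const std::string& key, const std::string& value) {
    std::unique_ptr<GetCtx> ctx;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      auto it = map_.find(key);
      if (it == map_.end()) {
        return;
      }
      ctx = std::move(it->second);
      map_.erase(it);
    }
    for (auto& waiter : ctx->waiters) {
      waiter(value);
    }
  }

 private:
  std::mutex mutex_;
  std::unordered_map<std::string, std::unique_ptr<GetCtx>> map_;
};

int main() {
  FillMap fillMap;
  auto print = [](const std::string& v) { std::cout << v << "\n"; };

  // First caller starts the lookup; second caller is coalesced onto it.
  bool first = fillMap.addWaiter("k1", print);
  bool second = fillMap.addWaiter("k1", print);
  std::cout << first << " " << second << "\n";  // prints "1 0"

  // Completion fans the single result out to both waiters.
  fillMap.complete("k1", "value-from-nvm");
  return 0;
}

The real implementation additionally shards the fill map by key and guards each shard with its own fill lock, which is what the getShardForKey, getFillLockForShard, and getFillMapForShard calls above provide.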