in netwerk/cache2/CacheStorageService.cpp [1237:2425]
void CacheStorageService::OnMemoryConsumptionChange(
CacheMemoryConsumer* aConsumer, uint32_t aCurrentMemoryConsumption) {
LOG(("CacheStorageService::OnMemoryConsumptionChange [consumer=%p, size=%u]",
aConsumer, aCurrentMemoryConsumption));
uint32_t savedMemorySize = aConsumer->LoadReportedMemoryConsumption();
if (savedMemorySize == aCurrentMemoryConsumption) return;
// Exchange saved size with current one.
aConsumer->StoreReportedMemoryConsumption(aCurrentMemoryConsumption);
bool usingDisk = !(aConsumer->LoadFlags() & CacheMemoryConsumer::MEMORY_ONLY);
bool overLimit = Pool(usingDisk).OnMemoryConsumptionChange(
savedMemorySize, aCurrentMemoryConsumption);
if (!overLimit) return;
// The timer has likely already been set by the time we get here;
// check outside the lock to save resources.
#ifdef MOZ_TSAN
if (mPurgeTimerActive) {
#else
if (mPurgeTimer) {
#endif
return;
}
// We don't know whether this is called under the service lock or not,
// hence we dispatch instead.
RefPtr<nsIEventTarget> cacheIOTarget = Thread();
if (!cacheIOTarget) return;
// Dispatch as a priority task; we want to set the purge timer
// ASAP to prevent a pointless redispatch of this event.
nsCOMPtr<nsIRunnable> event = NewRunnableMethod(
"net::CacheStorageService::SchedulePurgeOverMemoryLimit", this,
&CacheStorageService::SchedulePurgeOverMemoryLimit);
cacheIOTarget->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
}
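// Per-pool bookkeeping for the consumer callback above: the running pool size
// is updated with the delta between the previously reported and the current
// consumption, and the return value tells the caller whether the pool is now
// over its limit. Note that shrinking consumption never triggers a purge.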
bool CacheStorageService::MemoryPool::OnMemoryConsumptionChange(
uint32_t aSavedMemorySize, uint32_t aCurrentMemoryConsumption) {
mMemorySize -= aSavedMemorySize;
mMemorySize += aCurrentMemoryConsumption;
LOG((" mMemorySize=%u (+%u,-%u)", uint32_t(mMemorySize),
aCurrentMemoryConsumption, aSavedMemorySize));
// Bypass purging when memory consumption has not grown significantly
if (aCurrentMemoryConsumption <= aSavedMemorySize) return false;
return mMemorySize > Limit();
}
void CacheStorageService::SchedulePurgeOverMemoryLimit() {
LOG(("CacheStorageService::SchedulePurgeOverMemoryLimit"));
mozilla::MutexAutoLock lock(mLock);
if (mShutdown) {
LOG((" past shutdown"));
return;
}
if (mPurgeTimer) {
LOG((" timer already up"));
return;
}
mPurgeTimer = NS_NewTimer();
if (mPurgeTimer) {
#ifdef MOZ_TSAN
mPurgeTimerActive = true;
#endif
nsresult rv;
rv = mPurgeTimer->InitWithCallback(this, 1000, nsITimer::TYPE_ONE_SHOT);
LOG((" timer init rv=0x%08" PRIx32, static_cast<uint32_t>(rv)));
}
}
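// nsITimerCallback implementation. Fired by the one-shot purge timer armed in
// SchedulePurgeOverMemoryLimit above; it clears the timer and dispatches the
// actual purge work (PurgeExpiredOrOverMemoryLimit) to the management thread.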
NS_IMETHODIMP
CacheStorageService::Notify(nsITimer* aTimer) {
LOG(("CacheStorageService::Notify"));
mozilla::MutexAutoLock lock(mLock);
if (aTimer == mPurgeTimer) {
#ifdef MOZ_TSAN
mPurgeTimerActive = false;
#endif
mPurgeTimer = nullptr;
if (!mShutdown) {
nsCOMPtr<nsIRunnable> event = NewRunnableMethod(
"net::CacheStorageService::PurgeExpiredOrOverMemoryLimit", this,
&CacheStorageService::PurgeExpiredOrOverMemoryLimit);
Dispatch(event);
}
}
return NS_OK;
}
NS_IMETHODIMP
CacheStorageService::GetName(nsACString& aName) {
aName.AssignLiteral("CacheStorageService");
return NS_OK;
}
void CacheStorageService::PurgeExpiredOrOverMemoryLimit() {
MOZ_ASSERT(IsOnManagementThread());
LOG(("CacheStorageService::PurgeExpiredOrOverMemoryLimit"));
if (mShutdown) return;
static TimeDuration const kFourSeconds = TimeDuration::FromSeconds(4);
TimeStamp now = TimeStamp::NowLoRes();
if (!mLastPurgeTime.IsNull() && now - mLastPurgeTime < kFourSeconds) {
LOG((" bypassed, too soon"));
return;
}
mLastPurgeTime = now;
// We start purging memory entries first because, in case we are interrupted,
// we care more about RAM being freed than disk space.
Pool(MemoryPool::EType::MEMORY).PurgeExpiredOrOverMemoryLimit();
Pool(MemoryPool::EType::DISK).PurgeExpiredOrOverMemoryLimit();
}
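// Per-pool purge strategy: expired entries are always purged first, then, if
// the pool is still over its limit, the least frecent entries are abandoned
// via PurgeByFrecency; if that fails (OOM while snapshotting), PurgeAll is
// used as an emergency fallback.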
void CacheStorageService::MemoryPool::PurgeExpiredOrOverMemoryLimit() {
TimeStamp start(TimeStamp::Now());
uint32_t const memoryLimit = Limit();
size_t minprogress =
(mType == EType::DISK)
? StaticPrefs::network_cache_purge_minprogress_disk()
: StaticPrefs::network_cache_purge_minprogress_memory();
// We always purge expired entries, even if under our limit.
size_t numExpired = PurgeExpired(minprogress);
if (numExpired > 0) {
LOG((" found and purged %zu expired entries", numExpired));
}
minprogress = (minprogress > numExpired) ? minprogress - numExpired : 0;
// If we are still under pressure, purge LFU entries until we aren't.
if (mMemorySize > memoryLimit) {
// Do not enter PurgeByFrecency if we already reached the minimum progress
// and the I/O thread asks us to yield so it can deliver entries.
if (minprogress == 0 && CacheIOThread::YieldAndRerun()) {
return;
}
if (mType == EType::DISK) {
mozilla::glean::networking::cache_purge_due_to_memory_limit
.Get("meta_data_file_size_limit"_ns)
.Add(1);
} else if (mType == EType::MEMORY) {
mozilla::glean::networking::cache_purge_due_to_memory_limit
.Get("cache_memory_limit"_ns)
.Add(1);
}
auto r = PurgeByFrecency(minprogress);
if (MOZ_LIKELY(r.isOk())) {
size_t numPurged = r.unwrap();
LOG((
" memory data consumption over the limit, abandoned %zu LFU entries",
numPurged));
} else {
// If we hit an error (OOM), do an emergency PurgeAll.
size_t numPurged = PurgeAll(CacheEntry::PURGE_WHOLE, minprogress);
LOG(
(" memory data consumption over the limit, emergency purged all %zu "
"entries",
numPurged));
}
}
LOG((" purging took %1.2fms", (TimeStamp::Now() - start).ToMilliseconds()));
}
// This function purges ALL expired entries.
size_t CacheStorageService::MemoryPool::PurgeExpired(size_t minprogress) {
MOZ_ASSERT(IsOnManagementThread());
uint32_t now = NowInSeconds();
size_t numPurged = 0;
// Scan for items to purge. mManagedEntries is not sorted, but comparing just
// one integer per entry should be faster than anything else, so a linear
// scan is fine.
RefPtr<CacheEntry> entry = mManagedEntries.getFirst();
while (entry) {
// Get the next entry before this one may be removed from the list.
RefPtr<CacheEntry> nextEntry = entry->getNext();
if (entry->GetExpirationTime() <= now) {
// Purge will modify our mManagedEntries list but we are prepared for it.
if (entry->Purge(CacheEntry::PURGE_WHOLE)) {
numPurged++;
LOG((" purged expired, entry=%p, exptime=%u (now=%u)", entry.get(),
entry->GetExpirationTime(), now));
}
}
entry = std::move(nextEntry);
// To make some progress even under load, we only consider yielding after
// purging at least minprogress items while still over the limit.
if ((numPurged >= minprogress || mMemorySize <= Limit()) &&
CacheIOThread::YieldAndRerun()) {
break;
}
}
return numPurged;
}
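// Purges the least "frecent" entries until the pool drops under ~90% of its
// limit. A snapshot of purgeable entries (unreferenced, with frecency > 0) is
// taken under the service lock, sorted ascending by frecency, and then purged
// from the lowest frecency up, yielding to the I/O thread once minimum
// progress has been made.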
Result<size_t, nsresult> CacheStorageService::MemoryPool::PurgeByFrecency(
size_t minprogress) {
MOZ_ASSERT(IsOnManagementThread());
// Pretend the limit is 10% lower so that we get rid of more entries in one
// pass and don't have to repeat the sorting below too soon.
uint32_t const memoryLimit = (uint32_t)(Limit() * 0.9);
if (mMemorySize <= memoryLimit) {
return 0;
}
LOG(("MemoryPool::PurgeByFrecency, len=%zu", mManagedEntries.length()));
// We want to have an array snapshot for sorting and iterating.
struct mayPurgeEntry {
RefPtr<CacheEntry> mEntry;
double mFrecency;
explicit mayPurgeEntry(CacheEntry* aEntry) {
mEntry = aEntry;
mFrecency = aEntry->GetFrecency();
}
bool operator<(const mayPurgeEntry& aOther) const {
return mFrecency < aOther.mFrecency;
}
};
nsTArray<mayPurgeEntry> mayPurgeSorted;
if (!mayPurgeSorted.SetCapacity(mManagedEntries.length(),
mozilla::fallible)) {
return Err(NS_ERROR_OUT_OF_MEMORY);
}
{
mozilla::MutexAutoLock lock(CacheStorageService::Self()->Lock());
for (const auto& entry : mManagedEntries) {
// Referenced items cannot be purged, and we deliberately skip entries with
// a frecency of 0; those are new entries and can be ignored.
if (!entry->IsReferenced() && entry->GetFrecency() > 0.0) {
mayPurgeEntry copy(entry);
mayPurgeSorted.AppendElement(std::move(copy));
}
}
}
if (mayPurgeSorted.Length() == 0) {
return 0;
}
mayPurgeSorted.Sort();
size_t numPurged = 0;
for (auto& checkPurge : mayPurgeSorted) {
if (mMemorySize <= memoryLimit) {
break;
}
RefPtr<CacheEntry> entry = checkPurge.mEntry;
if (entry->Purge(CacheEntry::PURGE_WHOLE)) {
numPurged++;
LOG((" abandoned (%d), entry=%p, frecency=%1.10f",
CacheEntry::PURGE_WHOLE, entry.get(), entry->GetFrecency()));
}
if (numPurged >= minprogress && CacheIOThread::YieldAndRerun()) {
LOG(("MemoryPool::PurgeByFrecency interrupted"));
return numPurged;
}
}
LOG(("MemoryPool::PurgeByFrecency done"));
return numPurged;
}
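// Walks the whole managed list and purges every entry (what gets dropped is
// controlled by aWhat), yielding to the I/O thread once minimum progress has
// been made. Used above as the emergency fallback when PurgeByFrecency cannot
// allocate its snapshot.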
size_t CacheStorageService::MemoryPool::PurgeAll(uint32_t aWhat,
size_t minprogress) {
LOG(("CacheStorageService::MemoryPool::PurgeAll aWhat=%d", aWhat));
MOZ_ASSERT(IsOnManagementThread());
size_t numPurged = 0;
RefPtr<CacheEntry> entry = mManagedEntries.getFirst();
while (entry) {
if (numPurged >= minprogress && CacheIOThread::YieldAndRerun()) break;
// Get the next entry before this one may be removed from the list.
RefPtr<CacheEntry> nextEntry = entry->getNext();
if (entry->Purge(aWhat)) {
numPurged++;
LOG((" abandoned entry=%p", entry.get()));
}
entry = std::move(nextEntry);
}
return numPurged;
}
// Methods exposed to and used by CacheStorage.
nsresult CacheStorageService::AddStorageEntry(CacheStorage const* aStorage,
const nsACString& aURI,
const nsACString& aIdExtension,
uint32_t aFlags,
CacheEntryHandle** aResult) {
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
NS_ENSURE_ARG(aStorage);
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
return AddStorageEntry(contextKey, aURI, aIdExtension,
aStorage->WriteToDisk(), aStorage->SkipSizeCheck(),
aStorage->Pinning(), aFlags, aResult);
}
nsresult CacheStorageService::AddStorageEntry(
const nsACString& aContextKey, const nsACString& aURI,
const nsACString& aIdExtension, bool aWriteToDisk, bool aSkipSizeCheck,
bool aPin, uint32_t aFlags, CacheEntryHandle** aResult) {
nsresult rv;
nsAutoCString entryKey;
rv = CacheEntry::HashingKey(""_ns, aIdExtension, aURI, entryKey);
NS_ENSURE_SUCCESS(rv, rv);
LOG(("CacheStorageService::AddStorageEntry [entryKey=%s, contextKey=%s]",
entryKey.get(), aContextKey.BeginReading()));
RefPtr<CacheEntry> entry;
RefPtr<CacheEntryHandle> handle;
{
mozilla::MutexAutoLock lock(mLock);
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
// Ensure storage table
CacheEntryTable* const entries =
sGlobalEntryTables
->LookupOrInsertWith(
aContextKey,
[&aContextKey] {
LOG((" new storage entries table for context '%s'",
aContextKey.BeginReading()));
return MakeUnique<CacheEntryTable>(
CacheEntryTable::ALL_ENTRIES);
})
.get();
bool entryExists = entries->Get(entryKey, getter_AddRefs(entry));
if (!entryExists && (aFlags & nsICacheStorage::OPEN_READONLY) &&
(aFlags & nsICacheStorage::OPEN_SECRETLY) &&
StaticPrefs::network_cache_bug1708673()) {
return NS_ERROR_CACHE_KEY_NOT_FOUND;
}
bool replace = aFlags & nsICacheStorage::OPEN_TRUNCATE;
if (entryExists && !replace) {
// Check whether we want to turn this entry into a memory-only one.
if (MOZ_UNLIKELY(!aWriteToDisk) && MOZ_LIKELY(entry->IsUsingDisk())) {
LOG((" entry is persistent but we want mem-only, replacing it"));
replace = true;
}
}
// If truncate is demanded, delete and doom the current entry
if (entryExists && replace) {
entries->Remove(entryKey);
LOG((" dooming entry %p for %s because of OPEN_TRUNCATE", entry.get(),
entryKey.get()));
// Deliberately called under the lock to prevent races between doom and open
// on the I/O thread. No need to remove from both the memory-only and
// all-entries tables; the new entry will overwrite the shadow entry in its
// ctor.
entry->DoomAlreadyRemoved();
entry = nullptr;
entryExists = false;
// Clearing 'replace' avoids deleting the force-valid timestamp again; we
// don't need the replace information after this point anyway.
replace = false;
}
// Ensure entry for the particular URL
if (!entryExists) {
// When replacing with a new entry, always remove the current force-valid
// timestamp; this is the only place where it is done.
if (replace) {
RemoveEntryForceValid(aContextKey, entryKey);
}
// Entry is not in the hashtable or has just been truncated...
entry = new CacheEntry(aContextKey, aURI, aIdExtension, aWriteToDisk,
aSkipSizeCheck, aPin);
entries->InsertOrUpdate(entryKey, RefPtr{entry});
LOG((" new entry %p for %s", entry.get(), entryKey.get()));
}
if (entry) {
// If this entry hasn't been referenced by any consumer for a long time,
// it gets its first 'handles count' reference again here.
handle = entry->NewHandle();
}
}
handle.forget(aResult);
return NS_OK;
}
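// An illustrative (hypothetical) call from the CacheStorage layer, showing
// how the flags above map to behavior; 'storage', 'spec' and 'handle' are
// assumptions, not code from this file:
//
//   RefPtr<CacheEntryHandle> handle;
//   nsresult rv = CacheStorageService::Self()->AddStorageEntry(
//       storage, spec, ""_ns, nsICacheStorage::OPEN_TRUNCATE,
//       getter_AddRefs(handle));
//
// OPEN_TRUNCATE dooms any existing entry and creates a fresh one, while
// OPEN_READONLY | OPEN_SECRETLY (with the network_cache_bug1708673 pref
// enabled) returns NS_ERROR_CACHE_KEY_NOT_FOUND when the entry is not
// already in the hashtable.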
nsresult CacheStorageService::CheckStorageEntry(CacheStorage const* aStorage,
const nsACString& aURI,
const nsACString& aIdExtension,
bool* aResult) {
nsresult rv;
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
if (!aStorage->WriteToDisk()) {
AppendMemoryStorageTag(contextKey);
}
LOG(("CacheStorageService::CheckStorageEntry [uri=%s, eid=%s, contextKey=%s]",
aURI.BeginReading(), aIdExtension.BeginReading(), contextKey.get()));
{
mozilla::MutexAutoLock lock(mLock);
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
nsAutoCString entryKey;
rv = CacheEntry::HashingKey(""_ns, aIdExtension, aURI, entryKey);
NS_ENSURE_SUCCESS(rv, rv);
CacheEntryTable* entries;
if ((*aResult = sGlobalEntryTables->Get(contextKey, &entries)) &&
entries->GetWeak(entryKey, aResult)) {
LOG((" found in hash tables"));
return NS_OK;
}
}
if (!aStorage->WriteToDisk()) {
// Memory entry, nothing more to do.
LOG((" not found in hash tables"));
return NS_OK;
}
// Disk entry, not found in the hashtable, check the index.
nsAutoCString fileKey;
rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, fileKey);
CacheIndex::EntryStatus status;
rv = CacheIndex::HasEntry(fileKey, &status);
if (NS_FAILED(rv) || status == CacheIndex::DO_NOT_KNOW) {
LOG((" index doesn't know, rv=0x%08" PRIx32, static_cast<uint32_t>(rv)));
return NS_ERROR_NOT_AVAILABLE;
}
*aResult = status == CacheIndex::EXISTS;
LOG((" %sfound in index", *aResult ? "" : "not "));
return NS_OK;
}
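// Reads selected attributes (alt-data presence, file size in kB) straight
// from the cache index without opening the entry; the lambda passed to
// CacheIndex::HasEntry copies them out of the matching index entry.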
nsresult CacheStorageService::GetCacheIndexEntryAttrs(
CacheStorage const* aStorage, const nsACString& aURI,
const nsACString& aIdExtension, bool* aHasAltData, uint32_t* aFileSizeKb) {
nsresult rv;
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
LOG(
("CacheStorageService::GetCacheIndexEntryAttrs [uri=%s, eid=%s, "
"contextKey=%s]",
aURI.BeginReading(), aIdExtension.BeginReading(), contextKey.get()));
nsAutoCString fileKey;
rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, fileKey);
if (NS_FAILED(rv)) {
return rv;
}
*aHasAltData = false;
*aFileSizeKb = 0;
auto closure = [&aHasAltData, &aFileSizeKb](const CacheIndexEntry* entry) {
*aHasAltData = entry->GetHasAltData();
*aFileSizeKb = entry->GetFileSize();
};
CacheIndex::EntryStatus status;
rv = CacheIndex::HasEntry(fileKey, &status, closure);
if (NS_FAILED(rv)) {
return rv;
}
if (status != CacheIndex::EXISTS) {
return NS_ERROR_CACHE_KEY_NOT_FOUND;
}
return NS_OK;
}
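// Helper for DoomStorageEntry below: bridges CacheFileIOManager's
// OnFileDoomed notification back to the nsICacheEntryDoomCallback, making
// sure the consumer callback is always invoked on the main thread.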
namespace {
class CacheEntryDoomByKeyCallback : public CacheFileIOListener,
public nsIRunnable {
public:
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSIRUNNABLE
explicit CacheEntryDoomByKeyCallback(nsICacheEntryDoomCallback* aCallback)
: mCallback(aCallback), mResult(NS_ERROR_NOT_INITIALIZED) {}
private:
virtual ~CacheEntryDoomByKeyCallback();
NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
return NS_OK;
}
NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
nsresult aResult) override {
return NS_OK;
}
NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
nsresult aResult) override {
return NS_OK;
}
NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override;
NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
return NS_OK;
}
NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
nsresult aResult) override {
return NS_OK;
}
nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
nsresult mResult;
};
CacheEntryDoomByKeyCallback::~CacheEntryDoomByKeyCallback() {
if (mCallback) {
ProxyReleaseMainThread("CacheEntryDoomByKeyCallback::mCallback", mCallback);
}
}
NS_IMETHODIMP CacheEntryDoomByKeyCallback::OnFileDoomed(
CacheFileHandle* aHandle, nsresult aResult) {
if (!mCallback) return NS_OK;
mResult = aResult;
if (NS_IsMainThread()) {
Run();
} else {
NS_DispatchToMainThread(this);
}
return NS_OK;
}
NS_IMETHODIMP CacheEntryDoomByKeyCallback::Run() {
mCallback->OnCacheEntryDoomed(mResult);
return NS_OK;
}
NS_IMPL_ISUPPORTS(CacheEntryDoomByKeyCallback, CacheFileIOListener,
nsIRunnable);
} // namespace
nsresult CacheStorageService::DoomStorageEntry(
CacheStorage const* aStorage, const nsACString& aURI,
const nsACString& aIdExtension, nsICacheEntryDoomCallback* aCallback) {
LOG(("CacheStorageService::DoomStorageEntry"));
NS_ENSURE_ARG(aStorage);
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
nsAutoCString entryKey;
nsresult rv = CacheEntry::HashingKey(""_ns, aIdExtension, aURI, entryKey);
NS_ENSURE_SUCCESS(rv, rv);
RefPtr<CacheEntry> entry;
{
mozilla::MutexAutoLock lock(mLock);
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
CacheEntryTable* entries;
if (sGlobalEntryTables->Get(contextKey, &entries)) {
if (entries->Get(entryKey, getter_AddRefs(entry))) {
if (aStorage->WriteToDisk() || !entry->IsUsingDisk()) {
// When evicting from disk storage, purge. When evicting from memory
// storage and the entry is memory-only, purge as well.
LOG(
(" purging entry %p for %s [storage use disk=%d, entry use "
"disk=%d]",
entry.get(), entryKey.get(), aStorage->WriteToDisk(),
entry->IsUsingDisk()));
entries->Remove(entryKey);
} else {
// Otherwise, leave it
LOG(
(" leaving entry %p for %s [storage use disk=%d, entry use "
"disk=%d]",
entry.get(), entryKey.get(), aStorage->WriteToDisk(),
entry->IsUsingDisk()));
entry = nullptr;
}
}
}
if (!entry) {
RemoveEntryForceValid(contextKey, entryKey);
}
}
if (entry) {
LOG((" dooming entry %p for %s", entry.get(), entryKey.get()));
return entry->AsyncDoom(aCallback);
}
LOG((" no entry loaded for %s", entryKey.get()));
if (aStorage->WriteToDisk()) {
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
rv = CacheEntry::HashingKey(contextKey, aIdExtension, aURI, entryKey);
NS_ENSURE_SUCCESS(rv, rv);
LOG((" dooming file only for %s", entryKey.get()));
RefPtr<CacheEntryDoomByKeyCallback> callback(
new CacheEntryDoomByKeyCallback(aCallback));
rv = CacheFileIOManager::DoomFileByKey(entryKey, callback);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
class Callback : public Runnable {
public:
explicit Callback(nsICacheEntryDoomCallback* aCallback)
: mozilla::Runnable("Callback"), mCallback(aCallback) {}
NS_IMETHOD Run() override {
mCallback->OnCacheEntryDoomed(NS_ERROR_NOT_AVAILABLE);
return NS_OK;
}
nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
};
if (aCallback) {
RefPtr<Runnable> callback = new Callback(aCallback);
return NS_DispatchToMainThread(callback);
}
return NS_OK;
}
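// Dooming a whole storage (context): the public overload derives the context
// key and takes the service lock, then the private overload below clears the
// matching entry tables (honoring pin status), drops matching force-valid
// timestamps and, for disk storage of a non-private context, also asks
// CacheFileIOManager to evict the files.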
nsresult CacheStorageService::DoomStorageEntries(
CacheStorage const* aStorage, nsICacheEntryDoomCallback* aCallback) {
LOG(("CacheStorageService::DoomStorageEntries"));
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
NS_ENSURE_ARG(aStorage);
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aStorage->LoadInfo(), contextKey);
mozilla::MutexAutoLock lock(mLock);
return DoomStorageEntries(contextKey, aStorage->LoadInfo(),
aStorage->WriteToDisk(), aStorage->Pinning(),
aCallback);
}
nsresult CacheStorageService::DoomStorageEntries(
const nsACString& aContextKey, nsILoadContextInfo* aContext,
bool aDiskStorage, bool aPinned, nsICacheEntryDoomCallback* aCallback) {
LOG(("CacheStorageService::DoomStorageEntries [context=%s]",
aContextKey.BeginReading()));
mLock.AssertCurrentThreadOwns();
NS_ENSURE_TRUE(!mShutdown, NS_ERROR_NOT_INITIALIZED);
nsAutoCString memoryStorageID(aContextKey);
AppendMemoryStorageTag(memoryStorageID);
if (aDiskStorage) {
LOG((" dooming disk+memory storage of %s", aContextKey.BeginReading()));
// Walk one by one and remove entries according to their pin status
CacheEntryTable *diskEntries, *memoryEntries;
if (sGlobalEntryTables->Get(aContextKey, &diskEntries)) {
sGlobalEntryTables->Get(memoryStorageID, &memoryEntries);
for (auto iter = diskEntries->Iter(); !iter.Done(); iter.Next()) {
auto entry = iter.Data();
if (entry->DeferOrBypassRemovalOnPinStatus(aPinned)) {
continue;
}
if (memoryEntries) {
RemoveExactEntry(memoryEntries, iter.Key(), entry, false);
}
iter.Remove();
}
}
if (aContext && !aContext->IsPrivate()) {
LOG((" dooming disk entries"));
CacheFileIOManager::EvictByContext(aContext, aPinned, u""_ns);
}
} else {
LOG((" dooming memory-only storage of %s", aContextKey.BeginReading()));
// Remove the memory entries table from the global tables.
// Since memory entries are also stored in the disk entries table,
// we need to remove them from the disk table one by one manually.
mozilla::UniquePtr<CacheEntryTable> memoryEntries;
sGlobalEntryTables->Remove(memoryStorageID, &memoryEntries);
CacheEntryTable* diskEntries;
if (memoryEntries && sGlobalEntryTables->Get(aContextKey, &diskEntries)) {
for (const auto& memoryEntry : *memoryEntries) {
const auto& entry = memoryEntry.GetData();
RemoveExactEntry(diskEntries, memoryEntry.GetKey(), entry, false);
}
}
}
{
mozilla::MutexAutoLock lock(mForcedValidEntriesLock);
if (aContext) {
for (auto iter = mForcedValidEntries.Iter(); !iter.Done(); iter.Next()) {
bool matches;
DebugOnly<nsresult> rv = CacheFileUtils::KeyMatchesLoadContextInfo(
iter.Key(), aContext, &matches);
MOZ_ASSERT(NS_SUCCEEDED(rv));
if (matches) {
iter.Remove();
}
}
} else {
mForcedValidEntries.Clear();
}
}
// An artificial callback. This is a candidate for removal, though. In the
// new cache any 'doom' or 'evict' function ensures that the entry or entries
// being doomed are no longer accessible after the function returns, so the
// callback probably carries no meaning. But for compatibility with the old
// cache that is still in the tree we keep the API similar, to keep tests as
// well as other consumers working for now.
class Callback : public Runnable {
public:
explicit Callback(nsICacheEntryDoomCallback* aCallback)
: mozilla::Runnable("Callback"), mCallback(aCallback) {}
NS_IMETHOD Run() override {
mCallback->OnCacheEntryDoomed(NS_OK);
return NS_OK;
}
nsCOMPtr<nsICacheEntryDoomCallback> mCallback;
};
if (aCallback) {
RefPtr<Runnable> callback = new Callback(aCallback);
return NS_DispatchToMainThread(callback);
}
return NS_OK;
}
nsresult CacheStorageService::WalkStorageEntries(
CacheStorage const* aStorage, bool aVisitEntries,
nsICacheStorageVisitor* aVisitor) {
LOG(("CacheStorageService::WalkStorageEntries [cb=%p, visitentries=%d]",
aVisitor, aVisitEntries));
NS_ENSURE_FALSE(mShutdown, NS_ERROR_NOT_INITIALIZED);
NS_ENSURE_ARG(aStorage);
if (aStorage->WriteToDisk()) {
RefPtr<WalkDiskCacheRunnable> event = new WalkDiskCacheRunnable(
aStorage->LoadInfo(), aVisitEntries, aVisitor);
return event->Walk();
}
RefPtr<WalkMemoryCacheRunnable> event = new WalkMemoryCacheRunnable(
aStorage->LoadInfo(), aVisitEntries, aVisitor);
return event->Walk();
}
void CacheStorageService::CacheFileDoomed(nsILoadContextInfo* aLoadContextInfo,
const nsACString& aIdExtension,
const nsACString& aURISpec) {
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aLoadContextInfo, contextKey);
nsAutoCString entryKey;
CacheEntry::HashingKey(""_ns, aIdExtension, aURISpec, entryKey);
mozilla::MutexAutoLock lock(mLock);
if (mShutdown) {
return;
}
CacheEntryTable* entries;
RefPtr<CacheEntry> entry;
if (sGlobalEntryTables->Get(contextKey, &entries) &&
entries->Get(entryKey, getter_AddRefs(entry))) {
if (entry->IsFileDoomed()) {
// We need to remove it under the lock to avoid a possible race leading
// to duplication of the entry for its key.
RemoveExactEntry(entries, entryKey, entry, false);
entry->DoomAlreadyRemoved();
}
// Entry found, but it's not the entry that has been found doomed
// by the lower eviction layer. Just leave everything unchanged.
return;
}
RemoveEntryForceValid(contextKey, entryKey);
}
bool CacheStorageService::GetCacheEntryInfo(
nsILoadContextInfo* aLoadContextInfo, const nsACString& aIdExtension,
const nsACString& aURISpec, EntryInfoCallback* aCallback) {
nsAutoCString contextKey;
CacheFileUtils::AppendKeyPrefix(aLoadContextInfo, contextKey);
nsAutoCString entryKey;
CacheEntry::HashingKey(""_ns, aIdExtension, aURISpec, entryKey);
RefPtr<CacheEntry> entry;
{
mozilla::MutexAutoLock lock(mLock);
if (mShutdown) {
return false;
}
CacheEntryTable* entries;
if (!sGlobalEntryTables->Get(contextKey, &entries)) {
return false;
}
if (!entries->Get(entryKey, getter_AddRefs(entry))) {
return false;
}
}
GetCacheEntryInfo(entry, aCallback);
return true;
}
// static
void CacheStorageService::GetCacheEntryInfo(CacheEntry* aEntry,
EntryInfoCallback* aCallback) {
nsCString const uriSpec = aEntry->GetURI();
nsCString const enhanceId = aEntry->GetEnhanceID();
nsAutoCString entryKey;
aEntry->HashingKeyWithStorage(entryKey);
nsCOMPtr<nsILoadContextInfo> info = CacheFileUtils::ParseKey(entryKey);
uint32_t dataSize;
if (NS_FAILED(aEntry->GetStorageDataSize(&dataSize))) {
dataSize = 0;
}
int64_t altDataSize;
if (NS_FAILED(aEntry->GetAltDataSize(&altDataSize))) {
altDataSize = 0;
}
uint32_t fetchCount;
if (NS_FAILED(aEntry->GetFetchCount(&fetchCount))) {
fetchCount = 0;
}
uint32_t lastModified;
if (NS_FAILED(aEntry->GetLastModified(&lastModified))) {
lastModified = 0;
}
uint32_t expirationTime;
if (NS_FAILED(aEntry->GetExpirationTime(&expirationTime))) {
expirationTime = 0;
}
aCallback->OnEntryInfo(uriSpec, enhanceId, dataSize, altDataSize, fetchCount,
lastModified, expirationTime, aEntry->IsPinned(),
info);
}
// static
uint32_t CacheStorageService::CacheQueueSize(bool highPriority) {
RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
// The thread will be null at shutdown.
if (!thread) {
return 0;
}
return thread->QueueSize(highPriority);
}
// Telemetry collection
namespace {
bool TelemetryEntryKey(CacheEntry const* entry, nsAutoCString& key) {
nsAutoCString entryKey;
nsresult rv = entry->HashingKey(entryKey);
if (NS_FAILED(rv)) return false;
if (entry->GetStorageID().IsEmpty()) {
// Hopefully this will be const-copied, which saves some memory
key = entryKey;
} else {
key.Assign(entry->GetStorageID());
key.Append(':');
key.Append(entryKey);
}
return true;
}
} // namespace
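// Drops purge timestamps older than 15 minutes; we only report a reload when
// the entry comes back within that window. To keep this cheap, the pruning
// itself runs at most once per minute.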
void CacheStorageService::TelemetryPrune(TimeStamp& now) {
static TimeDuration const oneMinute = TimeDuration::FromSeconds(60);
static TimeStamp dontPruneUntil = now + oneMinute;
if (now < dontPruneUntil) return;
static TimeDuration const fifteenMinutes = TimeDuration::FromSeconds(900);
for (auto iter = mPurgeTimeStamps.Iter(); !iter.Done(); iter.Next()) {
if (now - iter.Data() > fifteenMinutes) {
// We are not interested in the resurrection of entries more than 15 minutes
// after their removal. This is also the limit for the telemetry.
iter.Remove();
}
}
dontPruneUntil = now + oneMinute;
}
void CacheStorageService::TelemetryRecordEntryCreation(
CacheEntry const* entry) {
MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
nsAutoCString key;
if (!TelemetryEntryKey(entry, key)) return;
TimeStamp now = TimeStamp::NowLoRes();
TelemetryPrune(now);
// When an entry is created (registered, actually) we check whether there is
// a timestamp marking when this very same cache entry was removed
// (deregistered) because of over-memory-limit purging. If such a timestamp
// is found, we accumulate telemetry on how long the entry was away.
TimeStamp timeStamp;
if (!mPurgeTimeStamps.Get(key, &timeStamp)) return;
mPurgeTimeStamps.Remove(key);
glean::network::http_cache_entry_reload_time.AccumulateRawDuration(
TimeStamp::NowLoRes() - timeStamp);
}
void CacheStorageService::TelemetryRecordEntryRemoval(CacheEntry* entry) {
MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
// Doomed entries must not be considered, we are only interested in purged
// entries. Note that the mIsDoomed flag is always set before deregistration
// happens.
if (entry->IsDoomed()) return;
nsAutoCString key;
if (!TelemetryEntryKey(entry, key)) return;
// When an entry is removed (deregistered, actually) we put a timestamp for
// it into the hashtable, so that when the entry is created (registered)
// again we know how long it was away. We also accumulate the number of
// AsyncOpen calls on the entry; this tells us how efficiently the pool
// actually works.
TimeStamp now = TimeStamp::NowLoRes();
TelemetryPrune(now);
mPurgeTimeStamps.InsertOrUpdate(key, now);
glean::network::http_cache_entry_reuse_count.AccumulateSingleSample(
entry->UseCount());
glean::network::http_cache_entry_alive_time.AccumulateRawDuration(
TimeStamp::NowLoRes() - entry->LoadStart());
}
// nsIMemoryReporter
size_t CacheStorageService::SizeOfExcludingThis(
mozilla::MallocSizeOf mallocSizeOf) const {
CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
size_t n = 0;
// The elements are referenced by sGlobalEntryTables and are reported from
// there.
// Entries are reported manually in the CacheStorageService::CollectReports
// callback.
if (sGlobalEntryTables) {
n += sGlobalEntryTables->ShallowSizeOfIncludingThis(mallocSizeOf);
}
n += mPurgeTimeStamps.SizeOfExcludingThis(mallocSizeOf);
return n;
}
size_t CacheStorageService::SizeOfIncludingThis(
mozilla::MallocSizeOf mallocSizeOf) const {
return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
}
NS_IMETHODIMP
CacheStorageService::CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) {
MutexAutoLock lock(mLock);
MOZ_COLLECT_REPORT("explicit/network/cache2/io", KIND_HEAP, UNITS_BYTES,
CacheFileIOManager::SizeOfIncludingThis(MallocSizeOf),
"Memory used by the cache IO manager.");
MOZ_COLLECT_REPORT("explicit/network/cache2/index", KIND_HEAP, UNITS_BYTES,
CacheIndex::SizeOfIncludingThis(MallocSizeOf),
"Memory used by the cache index.");
// Report the service instance; this doesn't report entries, they are reported below.
MOZ_COLLECT_REPORT("explicit/network/cache2/service", KIND_HEAP, UNITS_BYTES,
SizeOfIncludingThis(MallocSizeOf),
"Memory used by the cache storage service.");
// Report all entries, each storage separately (by the context key)
//
// References are:
// sGlobalEntryTables to N CacheEntryTable
// CacheEntryTable to N CacheEntry
// CacheEntry to 1 CacheFile
// CacheFile to
// N CacheFileChunk (keeping the actual data)
// 1 CacheFileMetadata (keeping http headers etc.)
// 1 CacheFileOutputStream
// N CacheFileInputStream
if (sGlobalEntryTables) {
for (const auto& globalEntry : *sGlobalEntryTables) {
CacheStorageService::Self()->Lock().AssertCurrentThreadOwns();
CacheEntryTable* table = globalEntry.GetWeak();
size_t size = 0;
mozilla::MallocSizeOf mallocSizeOf = CacheStorageService::MallocSizeOf;
size += table->ShallowSizeOfIncludingThis(mallocSizeOf);
for (const auto& tableEntry : *table) {
size += tableEntry.GetKey().SizeOfExcludingThisIfUnshared(mallocSizeOf);
// Bypass memory-only entries; those will be reported when iterating the
// memory-only table. Memory-only entries are stored in both the ALL_ENTRIES
// and MEMORY_ONLY hashtables.
RefPtr<mozilla::net::CacheEntry> const& entry = tableEntry.GetData();
if (table->Type() == CacheEntryTable::MEMORY_ONLY ||
entry->IsUsingDisk()) {
size += entry->SizeOfIncludingThis(mallocSizeOf);
}
}
aHandleReport->Callback(
""_ns,
nsPrintfCString(
"explicit/network/cache2/%s-storage(%s)",
table->Type() == CacheEntryTable::MEMORY_ONLY ? "memory" : "disk",
aAnonymize ? "<anonymized>"
: globalEntry.GetKey().BeginReading()),
nsIMemoryReporter::KIND_HEAP, nsIMemoryReporter::UNITS_BYTES, size,
"Memory used by the cache storage."_ns, aData);
}
}
return NS_OK;
}
// nsICacheTesting
NS_IMETHODIMP
CacheStorageService::IOThreadSuspender::Run() {
MonitorAutoLock mon(mMon);
while (!mSignaled) {
mon.Wait();
}
return NS_OK;
}
void CacheStorageService::IOThreadSuspender::Notify() {
MonitorAutoLock mon(mMon);
mSignaled = true;
mon.Notify();
}
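// Test-only plumbing (nsICacheTesting): the suspender runnable is dispatched
// to the cache I/O thread at the requested level and blocks there until
// ResumeCacheIOThread() notifies it.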
NS_IMETHODIMP
CacheStorageService::SuspendCacheIOThread(uint32_t aLevel) {
RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
if (!thread) {
return NS_ERROR_NOT_AVAILABLE;
}
MOZ_ASSERT(!mActiveIOSuspender);
mActiveIOSuspender = new IOThreadSuspender();
return thread->Dispatch(mActiveIOSuspender, aLevel);
}
NS_IMETHODIMP
CacheStorageService::ResumeCacheIOThread() {
MOZ_ASSERT(mActiveIOSuspender);
RefPtr<IOThreadSuspender> suspender;
suspender.swap(mActiveIOSuspender);
suspender->Notify();
return NS_OK;
}
NS_IMETHODIMP
CacheStorageService::Flush(nsIObserver* aObserver) {
RefPtr<CacheIOThread> thread = CacheFileIOManager::IOThread();
if (!thread) {
return NS_ERROR_NOT_AVAILABLE;
}
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
if (!observerService) {
return NS_ERROR_NOT_AVAILABLE;
}
// Adding as a weak observer; the consumer is responsible for keeping the
// reference alive until notified.
observerService->AddObserver(aObserver, "cacheservice:purge-memory-pools",
false);
// This runnable will do the purging and, when done, notify the above
// observer. We dispatch it to the CLOSE level, so all data writes scheduled
// up to this time will be done before this purging happens.
RefPtr<CacheStorageService::PurgeFromMemoryRunnable> r =
new CacheStorageService::PurgeFromMemoryRunnable(this,
CacheEntry::PURGE_WHOLE);
return thread->Dispatch(r, CacheIOThread::WRITE);
}
} // namespace mozilla::net