in internal/vfs/zip/vfs.go [199:257]
func (zfs *zipVFS) findOrCreateArchive(key string) (*zipArchive, error) {
	// This must happen under the lock to ensure that concurrent
	// access does not remove the entry while it is in use.
	// The lock is also needed to work around a bug in the cache library:
	// https://github.com/patrickmn/go-cache/issues/48
zfs.cacheLock.Lock()
defer zfs.cacheLock.Unlock()
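
	// look up the existing entry together with its expiry time so the
	// archiveOpened case below can decide whether to refresh it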
archive, expiry, found := zfs.cache.GetWithExpiration(key)
if found {
status, zipErr := archive.(*zipArchive).openStatus()
switch status {
case archiveOpening:
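		// another goroutine is still opening this archive; reuse the entry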
metrics.ZipCacheRequests.WithLabelValues("archive", "hit-opening").Inc()
case archiveOpenError:
		// the archive is most likely corrupted; keep the entry for the
		// remainder of the cache expiry as a negative cache so we do
		// not retry opening it on every request
metrics.ZipCacheRequests.WithLabelValues("archive", "hit-open-error").Inc()
case archiveOpened:
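		// the entry is close to expiring: re-set it with the default
		// TTL so frequently used archives stay cached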
if time.Until(expiry) < zfs.cacheRefreshInterval {
zfs.cache.SetDefault(key, archive)
metrics.ZipCacheRequests.WithLabelValues("archive", "hit-refresh").Inc()
} else {
metrics.ZipCacheRequests.WithLabelValues("archive", "hit").Inc()
}
case archiveCorrupted:
		// the archive content has most likely changed;
		// invalidate the entry immediately and create a fresh one below
log.WithError(zipErr).WithFields(log.Fields{
"archive_key": key,
}).Error("archive corrupted")
metrics.ZipCacheRequests.WithLabelValues("archive", "corrupted").Inc()
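		// clear the reference so the block below replaces the cache entry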
archive = nil
}
}
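
	// not found, or the corrupted entry was invalidated above:
	// create a new archive and cache it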
if archive == nil {
archive = newArchive(zfs, zfs.openTimeout)
		// Delete the key first so that an expired item is properly
		// evicted; this works around a bug in the cache library:
		// https://github.com/patrickmn/go-cache/issues/48
zfs.cache.Delete(key)
		// Add fails only when the key is already present, which means
		// another goroutine cached an archive concurrently. Returning
		// errAlreadyCached lets the caller retry and pick up that entry.
if zfs.cache.Add(key, archive, zfs.cacheExpirationInterval) != nil {
metrics.ZipCacheRequests.WithLabelValues("archive", "already-cached").Inc()
return nil, errAlreadyCached
}
metrics.ZipCacheRequests.WithLabelValues("archive", "miss").Inc()
metrics.ZipCachedEntries.WithLabelValues("archive").Inc()
}
return archive.(*zipArchive), nil
}
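
// A minimal caller sketch, not part of the original file: it assumes the
// intended contract of errAlreadyCached is "lost the race, retry the
// lookup". The method name getArchive is hypothetical.
func (zfs *zipVFS) getArchive(key string) (*zipArchive, error) {
	for {
		archive, err := zfs.findOrCreateArchive(key)
		if err == errAlreadyCached {
			// another goroutine added this key first; retry so we
			// return the archive that goroutine cached
			continue
		}
		return archive, err
	}
}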