func (s *S3FileSource) GetBlob()

in lambda/internal/tarfile/src.go [257:306]


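// GetBlob returns a stream for the specified blob, together with its size.
// Config blobs are served from the cached config bytes; layer blobs are read
// from the tarball stored in S3 and decompressed before being returned.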
func (s *S3FileSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	if err := s.ensureCachedDataIsPresent(); err != nil {
		return nil, 0, err
	}

	if info.Digest == s.configDigest { // FIXME? Implement more general digest-algorithm matching instead of assuming sha256.
		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
	}

	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is the digest of the uncompressed tarball
		underlyingStream, err := s.s3fileReader.openTarComponent(li.path)
		if err != nil {
			return nil, 0, err
		}
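		// Make sure underlyingStream is closed on every early return below.
		// Ownership is handed off to newStream once it is constructed, at
		// which point this flag is cleared and the deferred Close is skipped.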
		closeUnderlyingStream := true
		defer func() {
			if closeUnderlyingStream {
				underlyingStream.Close()
			}
		}()

		// In order to handle the fact that digests != diffIDs (and thus that a
		// caller which is trying to verify the blob will run into problems),
		// we need to decompress blobs. This is a bit ugly, but it's a
		// consequence of making everything addressable by their DiffID rather
		// than by their digest...
		//
		// In particular, because the v2s2 manifest being generated uses
		// DiffIDs, any caller of GetBlob is going to be asking for the
		// DiffIDs of layers, not their _actual_ digests. The result is that
		// copy/... will be verifying a "digest" which is not the actual
		// layer's digest (but is instead the DiffID).

		uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
		if err != nil {
			return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
		}

		newStream := uncompressedReadCloser{
			Reader:             uncompressedStream,
			underlyingCloser:   underlyingStream.Close,
			uncompressedCloser: uncompressedStream.Close,
		}
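		// newStream now owns both closers, so disable the deferred cleanup.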
		closeUnderlyingStream = false

		return newStream, li.size, nil
	}

	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
}
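
For reference, uncompressedReadCloser is defined elsewhere in src.go and is not shown in this excerpt. A minimal sketch of such a wrapper, assuming it only needs to embed the decompressed reader and chain the two Close functions handed to it above (and that "io" is already imported, as the GetBlob signature implies), could look like this:

// uncompressedReadCloser exposes the decompressed layer data while making
// sure both the decompressor and the underlying tar component get closed.
type uncompressedReadCloser struct {
	io.Reader
	underlyingCloser   func() error
	uncompressedCloser func() error
}

// Close closes the decompressor first, then the underlying stream, and
// returns the first error encountered.
func (r uncompressedReadCloser) Close() error {
	var res error
	if err := r.uncompressedCloser(); err != nil {
		res = err
	}
	if err := r.underlyingCloser(); err != nil && res == nil {
		res = err
	}
	return res
}

Closing the decompressor before the underlying S3 stream mirrors the order in which the readers are layered, and returning the first error keeps the caller's error handling simple.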