func()

in component/block_cache/block_cache.go [1659:1747]


// getBlockIDList assembles the ordered list of block ids for the given handle
// so the file can be committed. Gaps between staged blocks are filled with a
// single shared zero block (staged at most once and referenced repeatedly).
// A non-last block that is smaller than the configured block size gets a
// freshly staged "semi zero" filler block appended after it; both ids are also
// recorded in the returned restage list so they can be merged after the flush.
// Returns (ordered block ids, ids needing restage/merge, error).
func (bc *BlockCache) getBlockIDList(handle *handlemap.Handle) ([]string, []string, error) {
	// Recover the per-handle block map and sort its offsets to get the
	// canonical block order.
	list, _ := handle.GetValue("blockList")
	listMap := list.(map[int64]*blockInfo)

	sortedOffsets := make([]int64, 0, len(listMap))
	for off := range listMap {
		sortedOffsets = append(sortedOffsets, off)
	}
	sort.Slice(sortedOffsets, func(a, b int) bool { return sortedOffsets[a] < sortedOffsets[b] })

	blockIDList := make([]string, 0)
	restageId := make([]string, 0)
	haveZeroBlock := false // shared zero block staged yet?
	zeroBlockID := ""
	next := int64(0) // next block index we expect to emit

	pos := 0
	for pos < len(sortedOffsets) {
		off := sortedOffsets[pos]

		if next != off {
			// Hole before this staged block: emit the shared zero block for
			// every missing index, staging it the first time it is needed.
			for next < off {
				if !haveZeroBlock {
					id, err := bc.stageZeroBlock(handle, 1)
					if err != nil {
						return nil, nil, err
					}
					haveZeroBlock = true
					zeroBlockID = id
				}

				blockIDList = append(blockIDList, zeroBlockID)
				listMap[next] = &blockInfo{
					id:        zeroBlockID,
					committed: false,
					size:      bc.blockPool.blockSize,
				}
				log.Debug("BlockCache::getBlockIDList : Adding zero block for %v=>%s, index %v", handle.ID, handle.Path, next)
				log.Debug("BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v :  %v, zero block size %v)", handle.ID, handle.Path, next, zeroBlockID, bc.blockPool.blockSize)
				next++
			}
			// Re-enter the loop with next == off so the staged block is handled.
			continue
		}

		info := listMap[off]
		if pos != len(sortedOffsets)-1 && info.size != bc.blockSize {
			// A non last block was staged earlier and it is not of the same size as block size
			// This happens when a block which is not full is staged and at that moment it was the last block
			// Now we have written data beyond that point and its no longer the last block
			// In such case we need to fill the gap with zero blocks
			// For simplicity we will fill the gap with a new block and later merge both these blocks in one block
			id := common.GetBlockID(common.BlockIDLength)
			fillerSize := (bc.blockSize - info.size)
			fillerOffset := uint64(off*int64(bc.blockSize)) + info.size

			log.Debug("BlockCache::getBlockIDList : Staging semi zero block for %v=>%v offset %v, size %v", handle.ID, handle.Path, fillerOffset, fillerSize)
			err := bc.NextComponent().StageData(internal.StageDataOptions{
				Name: handle.Path,
				Data: bc.blockPool.zeroBlock.data[:fillerSize],
				Id:   id,
			})

			if err != nil {
				log.Err("BlockCache::getBlockIDList : Failed to write semi zero block for %v=>%v [%s]", handle.ID, handle.Path, err.Error())
				return nil, nil, err
			}

			blockIDList = append(blockIDList, info.id)
			log.Debug("BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v :  %v, size %v)", handle.ID, handle.Path, off, info.id, info.size)

			// After the flush call we need to merge this particular block with the next block (semi zero block)
			restageId = append(restageId, info.id)

			// Add the semi zero block to the list
			blockIDList = append(blockIDList, id)
			log.Debug("BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v :  %v, size %v)", handle.ID, handle.Path, fillerOffset, id, fillerSize)
		} else {
			// Full (or genuinely last) block: use it as-is.
			blockIDList = append(blockIDList, info.id)
			log.Debug("BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v :  %v, size %v)", handle.ID, handle.Path, off, info.id, info.size)
		}

		next++
		pos++
	}

	return blockIDList, restageId, nil
}