func (bb *BlockBlob) Write()

in component/azstorage/block_blob.go [1376:1454]


func (bb *BlockBlob) Write(options internal.WriteFileOptions) error {
	name := options.Handle.Path
	offset := options.Offset
	defer log.TimeTrack(time.Now(), "BlockBlob::Write", options.Handle.Path)
	log.Trace("BlockBlob::Write : name %s offset %v", name, offset)
	// tracks the case where our offset is greater than the current file size (appending only - not modifying pre-existing data)
	var dataBuffer *[]byte
	// when the file offset mapping is cached we don't need to make a get block list call
	fileOffsets, err := bb.GetFileBlockOffsets(name)
	if err != nil {
		return err
	}
	length := int64(len(options.Data))
	data := options.Data
	// case 1: file consists of no blocks (small file)
	if fileOffsets.SmallFile() {
		// get all the existing data (a length of 0 fetches the entire blob)
		oldData, _ := bb.ReadBuffer(name, 0, 0)
		// update the data with the new data
		// if we're only overwriting existing data
		if int64(len(oldData)) >= offset+length {
			copy(oldData[offset:], data)
			dataBuffer = &oldData
			// else appending and/or overwriting
		} else {
			// if the file is not empty then we need to combine the data
			if len(oldData) > 0 {
				// new buffer sized to cover the old data up to the write offset plus the new data
				newDataBuffer := make([]byte, offset+length)
				// copy the old data into it
				// TODO: better way to do this?
				if offset != 0 {
					copy(newDataBuffer, oldData)
					oldData = nil
				}
				// overwrite with the new data we want to add
				copy(newDataBuffer[offset:], data)
				dataBuffer = &newDataBuffer
			} else {
				dataBuffer = &data
			}
		}
		// WriteFromBuffer should be able to handle the case where the block is now too big and gets split into multiple blocks
		err := bb.WriteFromBuffer(name, options.Metadata, *dataBuffer)
		if err != nil {
			log.Err("BlockBlob::Write : Failed to upload to blob %s ", name, err.Error())
			return err
		}
		// case 2: given offset is within the size of the blob - and the blob consists of multiple blocks
		// case 3: new blocks need to be added
	} else {
		index, oldDataSize, exceedsFileBlocks, appendOnly := fileOffsets.FindBlocksToModify(offset, length)
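		// index is the first block touched by the write, oldDataSize is the combined size of the
		// existing blocks being modified, exceedsFileBlocks means the write runs past the last
		// block (case 3), and appendOnly means no pre-existing block data is overwritten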
		// keeps track of how much new data will be appended to the end of the file (applicable only to case 3)
		newBufferSize := int64(0)
		// case 3: the write extends past the existing blocks, so create new ones first
		if exceedsFileBlocks {
			newBufferSize, err = bb.createNewBlocks(fileOffsets, offset, length)
			if err != nil {
				log.Err("BlockBlob::Write : Failed to create new blocks for file %s", name, err.Error())
				return err
			}
		}
		// buffer that holds the pre-existing data in the blocks we're interested in
		oldDataBuffer := make([]byte, oldDataSize+newBufferSize)
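		// the buffer spans from the start of the first impacted block through any newly appended blocks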
		if !appendOnly {
			// fetch the blocks that will be impacted by the new changes so we can overwrite them
			err = bb.ReadInBuffer(name, fileOffsets.BlockList[index].StartIndex, oldDataSize, oldDataBuffer, nil)
			if err != nil {
				log.Err("BlockBlob::Write : Failed to read data in buffer %s [%s]", name, err.Error())
			}
		}
		// offset of the write relative to the buffer holding the old data - this is where the new data starts
		blockOffset := offset - fileOffsets.BlockList[index].StartIndex
		copy(oldDataBuffer[blockOffset:], data)
		err := bb.stageAndCommitModifiedBlocks(name, oldDataBuffer, fileOffsets)
		return err
	}
	return nil
}
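
For context, a minimal caller sketch; it assumes the handlemap and internal packages from the same repository, and writeAt is a hypothetical helper, not part of the listing above:

// writeAt is a hypothetical wrapper showing how the options struct is populated;
// Handle supplies the blob path, Offset the write position, Data the payload
func writeAt(bb *BlockBlob, handle *handlemap.Handle, offset int64, payload []byte) error {
	return bb.Write(internal.WriteFileOptions{
		Handle: handle,
		Offset: offset,
		Data:   payload,
	})
}

Write itself decides between the small-file rewrite (case 1) and the block-level stage-and-commit path (cases 2 and 3), so the caller never has to deal with block lists directly.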