// Excerpt of tx.go, lines 462-574.
// tryCommitChanges runs the write-transaction commit protocol:
//
//  1. Block new read transactions (pending lock) while letting already
//     active readers run to completion.
//  2. Flush dirty pages, then serialize the WAL and allocator state and
//     commit them to the file (tryCommitChangesToFile).
//  3. Wait for all page writes and syncs to finish, making the commit
//     durable on disk.
//  4. Take the exclusive lock (waiting out remaining readers) and publish
//     the new in-memory state: WAL mapping, active meta page, and the
//     file size / mmap region.
//
// Once step 3 succeeds the transaction is complete at the file level;
// any later failure only concerns the in-memory mapping and is fatal
// for this process, not for the on-disk state.
func (tx *Tx) tryCommitChanges() reason {
	const op = "txfile/tx-commit"

	pending, exclusive := tx.file.locks.Pending(), tx.file.locks.Exclusive()

	// give concurrent read transactions a chance to complete, but don't allow
	// for new read transactions to start while executing the commit
	pending.Lock()
	defer pending.Unlock()

	// On function exit wait on writer to finish outstanding operations, in case
	// we have to return early on error. On success, this is basically a no-op.
	// txWriteComplete is set to true once the normal path has waited on the
	// writer itself, disarming this cleanup.
	txWriteComplete := false
	defer cleanup.IfNot(&txWriteComplete, func() {
		err := tx.writeSync.Wait()
		// if wait fails, enforce an fsync with error reset flag, so the
		// writer's error state does not leak into subsequent transactions.
		if err != nil {
			tx.file.writer.Sync(tx.writeSync, syncDataOnly|syncResetErr)
			tx.writeSync.Wait()
		}
	})

	// Flush pages.
	// NOTE(review): the concrete error from flushPages is discarded and
	// replaced by a generic report — confirm tx.err(op) records the
	// underlying cause, otherwise diagnostics are lost here.
	if err := tx.flushPages(op); err != nil {
		return tx.err(op).report("failed to flush dirty pages")
	}

	// 1. finish Tx state updates and free file pages used to hold meta pages
	csWAL, err := tx.commitPrepareWAL()
	if err != nil {
		return err
	}
	// Allocator preparation cannot fail; it only produces a commit state
	// to be applied to the file and (later) to the in-memory allocator.
	csAlloc := tx.commitPrepareAlloc()

	// 2. - 5. Commit changes to file
	metaID, err := tx.tryCommitChangesToFile(&csWAL, &csAlloc)
	if err != nil {
		return err
	}

	// 6. wait for all pages being written and synced,
	// before updating in memory state.
	// txWriteComplete is set regardless of the error, so the deferred
	// cleanup does not wait a second time on the same sync object.
	err = tx.writeSync.Wait()
	txWriteComplete = true
	if err != nil {
		return err
	}

	// At this point the transaction has been completed on file level.
	// Update internal structures as well, so future transactions
	// will use the new serialized transaction state.

	// We have only one active write transaction + freelist is not shared with read transactions
	// -> update freelist state before waiting for the exclusive lock to be available
	tx.file.allocator.Commit(&csAlloc)

	// Wait for all read transactions to finish before updating global references
	// to new contents.
	exclusive.Lock()
	defer exclusive.Unlock()

	// Update the WAL mapping.
	tx.file.wal.Commit(&csWAL)

	// Switch the files active meta page to meta page being written.
	tx.file.metaActive = metaID

	// Compare required file size with the real file size and the mmaped region.
	// If the expected file size of the last transaction is < the real file size,
	// we can truncate the file and update the mmaped region.
	// If the expected file size is > the mmaped region, we need to update the mmaped file region.
	// If we fail here, the file and internal state is already updated + valid.
	// But mmap failed on us -> fatal error
	// The end marker is the larger of the data and meta area end markers.
	endMarker := tx.file.allocator.data.endMarker
	if metaEnd := tx.file.allocator.meta.endMarker; metaEnd > endMarker {
		endMarker = metaEnd
	}

	// Compute maximum expected file size of current transaction
	// and update the memory mapping if required.
	expectedMMapSize := int64(uint(endMarker) * tx.file.allocator.pageSize)
	maxSize := int64(tx.file.allocator.maxSize)
	pageSize := tx.file.allocator.pageSize
	requiredFileSz, truncate := checkTruncate(&tx.alloc, tx.file.size, expectedMMapSize, maxSize, pageSize)
	if truncate {
		// File shrank below its on-disk size: truncate (also remaps).
		err = tx.file.truncate(requiredFileSz)
	} else if int(expectedMMapSize) > len(tx.file.mapped) {
		// File grew past the mapped region: extend the memory mapping.
		err = tx.file.mmapUpdate()
	} else {
		// No remap needed; only record the new size estimate, never
		// shrinking it below the current known file size.
		sz := expectedMMapSize
		if sz < tx.file.size {
			sz = tx.file.size
		}

		tx.file.sizeEstimate = sz
	}
	if err != nil {
		return err
	}

	// Debug-only statistics about allocator/WAL state after the commit.
	traceln("tx stats:")
	traceln("  available data pages:", tx.file.allocator.DataAllocator().Avail(nil))
	traceln("  available meta pages:", tx.file.allocator.meta.freelist.Avail())
	traceln("  total meta pages:", tx.file.allocator.metaTotal)
	traceln("  freelist pages:", len(tx.file.allocator.freelistPages))
	traceln("  wal mapping pages:", len(tx.file.wal.metaPages))
	traceln("  max pages:", tx.file.allocator.maxPages)
	traceln("  wal mapped pages:", len(tx.file.wal.mapping))

	return nil
}