in src/firestore/delete.ts [307:463]
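/**
 * Deletes all matching descendant documents by interleaving paged reads
 * with batched deletes: pages are fetched into a queue while delete
 * batches drain it in parallel. Transient errors are retried, and the
 * batch size is reduced when a delete transaction is too large. Resolves
 * once every document has been deleted, or rejects on the first
 * unrecoverable failure.
 */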
private recursiveBatchDelete(): Promise<void> {
// Documents fetched from the server that are waiting to be deleted.
let queue: Document[] = [];
let numDocsDeleted = 0;
// Number of delete batches currently in flight.
let numPendingDeletes = 0;
// False once the server returns an empty page of descendants.
let pagesRemaining = true;
// True while a page fetch is in flight, to prevent concurrent fetches.
let pageIncoming = false;
// Pagination cursor: the name of the last document fetched so far.
let lastDocName: string | undefined = undefined;
// Documents that have already been retried once, keyed by document name.
const retried: { [name: string]: boolean } = {};
// Unrecoverable errors; any entry here aborts the whole operation.
const failures: string[] = [];
// Count of consecutive page-fetch failures; three in a row is fatal.
let fetchFailures = 0;
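// One tick of the work loop, driven by the interval at the bottom of this
// method. Returns true when all work is finished (or a fatal failure has
// been recorded) and false when it should be invoked again.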
const queueLoop = (): boolean => {
// No documents left to delete
if (queue.length === 0 && numPendingDeletes === 0 && !pagesRemaining) {
return true;
}
// A failure that can't be retried has been recorded, so give up
if (failures.length > 0) {
logger.debug("Found " + failures.length + " failed operations, failing.");
return true;
}
// We have room in the queue for more documents and more exist on the server,
// so fetch more.
if (queue.length <= this.maxQueueSize && pagesRemaining && !pageIncoming) {
pageIncoming = true;
this.getDescendantBatch(this.allDescendants, this.readBatchSize, lastDocName)
.then((docs) => {
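// A successful fetch resets the consecutive-failure counter.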
fetchFailures = 0;
pageIncoming = false;
if (docs.length === 0) {
pagesRemaining = false;
return;
}
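// Append the page to the queue and advance the pagination cursor.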
queue = queue.concat(docs);
lastDocName = docs[docs.length - 1].name;
})
.catch((e: unknown) => {
logger.debug("Failed to fetch page after " + lastDocName, e);
pageIncoming = false;
fetchFailures++;
if (fetchFailures >= 3) {
failures.push("Failed to fetch documents to delete >= 3 times.");
}
});
}
// We want to see one batch succeed before we scale up, so this case
// limits parallelism to a single in-flight batch until the first success
if (numDocsDeleted === 0 && numPendingDeletes >= 1) {
return false;
}
// There are too many outstanding deletes already
if (numPendingDeletes > this.maxPendingDeletes) {
return false;
}
// There are no documents to delete right now
if (queue.length === 0) {
return false;
}
// At this point we want to delete another batch
const toDelete: Document[] = [];
const numToDelete = Math.min(this.deleteBatchSize, queue.length);
for (let i = 0; i < numToDelete; i++) {
const d = queue.shift();
if (d) {
toDelete.push(d);
}
}
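// Issue the delete without awaiting it; completion is tracked through
// numPendingDeletes so that multiple batches can be in flight at once.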
numPendingDeletes++;
firestore
.deleteDocuments(this.project, toDelete)
.then((numDeleted) => {
FirestoreDelete.progressBar.tick(numDeleted);
numDocsDeleted += numDeleted;
numPendingDeletes--;
})
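// Every delete error funnels through this catch; each branch either
// requeues the documents for retry or records an unrecoverable failure.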
.catch((e) => {
// If the transaction is too large, reduce the batch size
if (
e.status === 400 &&
e.message.includes("Transaction too big") &&
this.deleteBatchSize >= 2
) {
logger.debug("Transaction too big error deleting doc batch", e);
// Cut batch size way down. If one batch is over 10MB then we need to go much
// lower in order to keep the total I/O appropriately low.
//
// Note that we have multiple batches out at once so we need to account for multiple
// concurrent failures hitting this branch.
// Clamp to at least 1 so a small oversized batch can't drive the size to 0,
// which would leave the queue undrainable.
const newBatchSize = Math.max(1, Math.floor(toDelete.length / 10));
if (newBatchSize < this.deleteBatchSize) {
utils.logLabeledWarning(
"firestore",
`delete transaction too large, reducing batch size from ${this.deleteBatchSize} to ${newBatchSize}`
);
this.setDeleteBatchSize(newBatchSize);
}
// Retry this batch
queue.unshift(...toDelete);
} else if (e.status >= 500 && e.status < 600) {
// For server errors, retry if the document has not yet been retried.
logger.debug("Server error deleting doc batch", e);
// Retry each doc up to one time
toDelete.forEach((doc) => {
if (retried[doc.name]) {
const message = `Failed to delete doc ${doc.name} multiple times.`;
logger.debug(message);
failures.push(message);
} else {
retried[doc.name] = true;
queue.push(doc);
}
});
} else {
const docIds = toDelete.map((d) => d.name).join(", ");
const msg = `Fatal error deleting docs ${docIds}`;
logger.debug(msg, e);
failures.push(msg);
}
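// Whatever the outcome, this batch is no longer in flight.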
numPendingDeletes--;
});
return false;
};
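// Drive queueLoop on a zero-delay interval so page fetches and delete
// batches overlap. Once it reports completion, stop the timer and settle
// the promise based on whether any unrecoverable failures were recorded.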
return new Promise<void>((resolve, reject) => {
const intervalId = setInterval(() => {
if (queueLoop()) {
clearInterval(intervalId);
if (failures.length === 0) {
resolve();
} else {
const errorDescription = failures.join(", ");
reject(new FirebaseError(`Deletion failed. Errors: ${errorDescription}.`, { exit: 1 }));
}
}
}, 0);
});
}