in public/dexie.js [4556:4651]
delete: function () {
var _this = this;
var ctx = this._ctx,
range = ctx.range,
deletingHook = ctx.table.hook.deleting.fire,
hasDeleteHook = deletingHook !== nop;
if (
!hasDeleteHook &&
isPlainKeyRange(ctx) &&
((ctx.isPrimKey && !hangsOnDeleteLargeKeyRange) || !range)
) {
// May use IDBObjectStore.delete(IDBKeyRange) in this case (Issue #208)
// For Chromium, this is the most optimized path.
// For IE/Edge, it could hang the IndexedDB engine and make the operating system unstable
// (https://gist.github.com/dfahlander/5a39328f029de18222cf2125d56c38f7)
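// Example sketch (table name is hypothetical): db.logs.where(':id').below(cutoff).delete()
// can take this fast path (unless the browser is flagged with hangsOnDeleteLargeKeyRange),
// whereas a filter()/and() clause or a hook('deleting') subscriber forces the chunked
// fallback further below.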
return this._write(function (resolve, reject, idbstore) {
// Our API contract is to return a count of deleted items, so we have to count() before delete().
var onerror = eventRejectHandler(reject),
countReq = range ? idbstore.count(range) : idbstore.count();
countReq.onerror = onerror;
countReq.onsuccess = function () {
var count = countReq.result;
tryCatch(
function () {
var delReq = range
? idbstore.delete(range)
: idbstore.clear();
delReq.onerror = onerror;
delReq.onsuccess = function () {
return resolve(count);
};
},
function (err) {
return reject(err);
}
);
};
});
}
// Fallback version, used when the collection is not a vanilla IDBKeyRange on the primary key.
// Divide the work into chunks so we don't starve RAM.
// If a delete hook is present, we must collect not only keys but also their values, so the
// loop uses more memory and needs a smaller chunk size.
var CHUNKSIZE = hasDeleteHook ? 2000 : 10000;
return this._write(function (resolve, reject, idbstore, trans) {
var totalCount = 0;
// Clone the collection and set a limit of CHUNKSIZE on the cloned Collection instance.
var collection = _this
    .clone({
        keysOnly: !ctx.isMatch && !hasDeleteHook,
    }) // Load just keys (unless filter()/and() is used or a deleting hook is subscribed)
    .distinct() // If multiEntry is used, never visit the same key twice, or the resulting count would exceed the actual delete count
    .limit(CHUNKSIZE)
    .raw(); // Don't filter results through reading hooks (such as mapped classes)
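// keysOrTuples collects plain primary keys, or [primaryKey, value] tuples when a deleting hook is present.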
var keysOrTuples = [];
// We're going to process as many chunks as needed.
// Recurse via the nextChunk function:
var nextChunk = function () {
return collection
.each(
hasDeleteHook
? function (val, cursor) {
// Somebody subscribes to hook('deleting'). Collect all primary keys and their values,
// so that the hook can be called with its values in bulkDelete().
keysOrTuples.push([cursor.primaryKey, cursor.value]);
}
: function (val, cursor) {
// No one subscribes to hook('deleting'). Collect only primary keys:
keysOrTuples.push(cursor.primaryKey);
}
)
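// each() resolves once this chunk (at most CHUNKSIZE matches) has been scanned.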
.then(function () {
// Chromium deletes faster when keys are deleted in sort order.
if (hasDeleteHook) {
    // Tuples: sort by primary key, which is the first element.
    keysOrTuples.sort(function (a, b) {
        return ascending(a[0], b[0]);
    });
} else {
    keysOrTuples.sort(ascending);
}
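// Delete the chunk; bulkDelete() is passed the hook so it can fire it per record.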
return bulkDelete(
idbstore,
trans,
keysOrTuples,
hasDeleteHook,
deletingHook
);
})
.then(function () {
var count = keysOrTuples.length;
totalCount += count;
keysOrTuples = [];
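// A chunk shorter than CHUNKSIZE means the collection is drained; otherwise recurse.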
return count < CHUNKSIZE ? totalCount : nextChunk();
});
};
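// Resolve the outer promise with the recursion's final value: the total delete count.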
resolve(nextChunk());
});
},
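// Usage sketch (table and index names are hypothetical):
//   db.friends.where('age').above(65).delete().then(function (count) {
//     console.log('Deleted ' + count + ' records');
//   });
// Both paths above resolve with the number of deleted records.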