in Frameworks/CoreFoundation/Base.subproj/CFRuntime.c [1579:1892]
// Core release path for every CF object. Decrements the inline retain count
// packed into the object's _cfinfo bits and, when the count would drop from 1,
// runs the class's reclaim/finalize callbacks and frees the storage.
//
// The preprocessor branches select one of several equivalent implementations
// (Swift runtime, 64-bit non-Windows, 64-bit Windows, 32-bit non-Windows,
// 32-bit Windows), but all follow the same state machine:
//   inline count == 0  -> constant/immortal object, no-op
//   inline count  > 1  -> CAS-decrement and return
//   inline count == 1  -> finalize, then free — unless the finalizer
//                         "resurrected" the object by retaining it, in which
//                         case the release is retried from the top.
// The CAS loops make the decrement safe against concurrent retain/release;
// the resurrection paths are explicitly documented below as not fully
// thread-safe (see the inline comments kept from upstream).
static void _CFRelease(CFTypeRef CF_RELEASES_ARGUMENT cf) {
// WINOBJC: add in a check to see if the object c dealloc should be called if this object is really freed.
// This is needed for objects that derive from a bridged class that may have special dealloc logic that needs to run.
bool shouldCFDealloc = __CF_IsBridgedObject(cf);
#if DEPLOYMENT_RUNTIME_SWIFT
// We always call through to swift_release, since all CFTypeRefs are at least _NSCFType objects
extern void swift_release(void *);
swift_release((void *)cf);
#else
// Snapshot the 32-bit info word: the low byte holds the inline retain count,
// bits 8..17 hold the type ID, and the high bits hold flags (deallocated,
// custom ref counting, ...).
uint32_t cfinfo = *(uint32_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
if (cfinfo & RC_DEALLOCATED_BIT) return; // deallocated, or not a cf object
CFTypeID typeID = (cfinfo >> 8) & 0x03FF; // mask up to 0x0FFF
if (cfinfo & 0x800000) { // custom ref counting for object
// Classes that opted into _kCFRuntimeCustomRefCount manage their own count;
// the inline count must be pegged at its sentinel value (0xFF, and
// 0xFFFFFFFF in the 64-bit field) or the object is corrupt.
CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
uint32_t (*refcount)(intptr_t, CFTypeRef) = cfClass->refcount;
if (!refcount || !(cfClass->version & _kCFRuntimeCustomRefCount) || (((CFRuntimeBase *)cf)->_cfinfo[CF_RC_BITS] != 0xFF)) {
HALT; // bogus object
}
#if __LP64__
if (((CFRuntimeBase *)cf)->_rc != 0xFFFFFFFFU) {
HALT; // bogus object
}
#endif
// Delegate the decrement to the class's own refcount callback (-1 = release).
refcount(-1, cf);
return;
}
// Only pay for CFGetRetainCount when allocation-event logging is enabled.
CFIndex start_rc = __builtin_expect(__CFOASafe, 0) ? CFGetRetainCount(cf) : 0;
// Allocators are torn down via __CFAllocatorDeallocate rather than the normal
// finalize/deallocate path; see really_free below.
Boolean isAllocator = (__kCFAllocatorTypeID_CONST == typeID);
Boolean didAuto = false;
#if __LP64__
#if !DEPLOYMENT_TARGET_WINDOWS
// 64-bit non-Windows: CAS the whole 64-bit info word, using the
// RC_DEALLOCATING_BIT protocol so a finalizer can resurrect the object.
uint32_t lowBits;
uint64_t allBits;
again:;
do {
allBits = *(uint64_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
lowBits = RC_GET(allBits);
if (0 == lowBits) {
if (CF_IS_COLLECTABLE(cf)) auto_zone_release(objc_collectableZone(), (void*)cf);
return; // Constant CFTypeRef
}
if (1 == lowBits) {
// Last reference: give resourceful classes a chance to release external
// resources before finalization.
CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
if ((cfClass->version & _kCFRuntimeResourcefulObject) && cfClass->reclaim != NULL) {
cfClass->reclaim(cf);
}
if (!CF_IS_COLLECTABLE(cf)) {
// Publish "deallocating" before running the finalizer so concurrent
// observers can tell the object is on its way out.
uint64_t newAllBits = allBits | RC_DEALLOCATING_BIT;
if (!CAS64(allBits, newAllBits, (int64_t *)&((CFRuntimeBase *)cf)->_cfinfo)) {
goto again;
}
void (*func)(CFTypeRef) = __CFRuntimeClassTable[typeID]->finalize;
if (NULL != func) {
func(cf);
}
// Any further ref-count changes after this point are operating on a finalized object
allBits = *(uint64_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
lowBits = RC_GET(allBits);
if (isAllocator || (1 == lowBits)) {
do { // hammer until it takes; trying to retain the object on another thread at this point? too late!
allBits = *(uint64_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
} while (!CAS64(allBits, (allBits | RC_DEALLOCATED_BIT) - RC_INCREMENT, (int64_t *)&((CFRuntimeBase *)cf)->_cfinfo));
goto really_free;
}
// Finalizer retained the object (resurrection): undo the deallocating
// mark and retry the release from the top.
Boolean success = false;
do { // drop the deallocating bit; racey, but this resurrection stuff isn't thread-safe anyway
allBits = *(uint64_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
uint64_t newAllBits = allBits & ~RC_DEALLOCATING_BIT;
success = CAS64(allBits, newAllBits, (int64_t *)&((CFRuntimeBase *)cf)->_cfinfo);
} while (!success);
goto again; // still need to have the effect of a CFRelease
}
}
} while (!CAS64(allBits, allBits - RC_INCREMENT, (int64_t *)&((CFRuntimeBase *)cf)->_cfinfo));
if (lowBits == 1 && CF_IS_COLLECTABLE(cf)) {
// GC: release the collector's hold over the object, which will call the finalize function later on.
auto_zone_release(objc_collectableZone(), (void*)cf);
didAuto = true;
}
#else
// 64-bit Windows: the retain count lives in the separate 32-bit _rc field;
// no DEALLOCATING bit protocol here — finalize, then try to CAS 1 -> 0.
uint32_t lowBits;
do {
lowBits = ((CFRuntimeBase *)cf)->_rc;
if (0 == lowBits) {
if (CF_IS_COLLECTABLE(cf)) auto_zone_release(objc_collectableZone(), (void*)cf);
return; // Constant CFTypeRef
}
if (1 == lowBits) {
// CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
if ((cfClass->version & _kCFRuntimeResourcefulObject) && cfClass->reclaim != NULL) {
cfClass->reclaim(cf);
}
if (!CF_IS_COLLECTABLE(cf)) {
void (*func)(CFTypeRef) = __CFRuntimeClassTable[typeID]->finalize;
if (NULL != func) {
func(cf);
}
// If the CAS fails, the finalizer resurrected the object; fall through
// to the outer loop so the release still takes effect.
if (isAllocator || CAS32(1, 0, (int32_t *)&((CFRuntimeBase *)cf)->_rc)) {
goto really_free;
}
}
}
} while (!CAS32(lowBits, lowBits - 1, (int32_t *)&((CFRuntimeBase *)cf)->_rc));
if (lowBits == 1 && CF_IS_COLLECTABLE(cf)) {
// GC: release the collector's hold over the object, which will call the finalize function later on.
auto_zone_release(objc_collectableZone(), (void*)cf);
didAuto = true;
}
#endif
#else
#if !DEPLOYMENT_TARGET_WINDOWS
// 32-bit non-Windows: retain count is a bitfield inside the 32-bit info word.
// When the inline field saturates, overflow is kept in an external side table
// (see the (1 << 7) case below).
again:;
volatile uint32_t *infoLocation = (uint32_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
CFIndex rcLowBits = __CFBitfieldGetValue(cfinfo, RC_END, RC_START);
if (0 == rcLowBits) {
if (CF_IS_COLLECTABLE(cf)) auto_zone_release(objc_collectableZone(), (void*)cf);
return; // Constant CFTypeRef
}
bool success = 0;
Boolean whack = false; // set when we won the race to start deallocation
do {
cfinfo = *infoLocation;
rcLowBits = __CFBitfieldGetValue(cfinfo, RC_END, RC_START);
if (1 == rcLowBits) {
// we think cf should be deallocated
uint32_t prospectiveNewInfo = cfinfo | (RC_DEALLOCATING_BIT);
if (CF_IS_COLLECTABLE(cf)) {
prospectiveNewInfo -= (1 << RC_START);
}
success = CAS32(*(int32_t *)& cfinfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
if (success) whack = true;
} else {
// not yet junk
uint32_t prospectiveNewInfo = cfinfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation. This is why infoLocation is declared as a pointer to volatile memory.
if ((1 << 7) == rcLowBits) {
// Time to remove a bit from the external ref count
// Inline field hit its low watermark: refill it from the external
// side table under the table lock. NOTE(review): op code 500 appears
// to read the external count and 450 to decrement it — confirm
// against __CFDoExternRefOperation.
__CFLock(&__CFRuntimeExternRefCountTableLock);
CFIndex rcHighBitsCnt = __CFDoExternRefOperation(500, (id)cf);
if (1 == rcHighBitsCnt) {
__CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, (1 << 6) - 1);
} else {
__CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 6) | (1 << 7)) - 1);
}
success = CAS32(*(int32_t *)& cfinfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
if (success) {
__CFDoExternRefOperation(450, (id)cf);
}
__CFUnlock(&__CFRuntimeExternRefCountTableLock);
} else {
// Ordinary decrement of the inline bitfield.
prospectiveNewInfo -= (1 << RC_START);
success = CAS32(*(int32_t *)& cfinfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
}
}
} while (!success);
if (whack) {
// We marked the object RC_DEALLOCATING: run reclaim/finalize and free it,
// unless a finalizer resurrects it.
CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
if ((cfClass->version & _kCFRuntimeResourcefulObject) && cfClass->reclaim != NULL) {
cfClass->reclaim(cf);
}
if (CF_IS_COLLECTABLE(cf)) {
// GC: release the collector's hold over the object, which will call the finalize function later on.
auto_zone_release(objc_collectableZone(), (void*)cf);
didAuto = true;
} else {
if (isAllocator) {
goto really_free;
} else {
void (*func)(CFTypeRef) = __CFRuntimeClassTable[typeID]->finalize;
if (NULL != func) {
func(cf);
}
// Any further ref-count changes after this point are operating on a finalized object
rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
if (1 == rcLowBits) {
do { // hammer until it takes; trying to retain the object on another thread at this point? too late!
cfinfo = *infoLocation;
} while (!CAS32(cfinfo, cfinfo | RC_DEALLOCATED_BIT, (int32_t *)infoLocation));
goto really_free;
}
do { // drop the deallocating bit; racey, but this resurrection stuff isn't thread-safe anyway
cfinfo = *infoLocation;
uint32_t prospectiveNewInfo = (cfinfo & ~(RC_DEALLOCATING_BIT));
success = CAS32(*(int32_t *)& cfinfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
} while (!success);
goto again;
}
}
}
#else
// 32-bit Windows: same inline-bitfield + external side table scheme, but no
// DEALLOCATING bit — resurrection is detected by re-reading the count after
// finalization.
volatile uint32_t *infoLocation = (uint32_t *)&(((CFRuntimeBase *)cf)->_cfinfo);
CFIndex rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
if (0 == rcLowBits) {
if (CF_IS_COLLECTABLE(cf)) auto_zone_release(objc_collectableZone(), (void*)cf);
return; // Constant CFTypeRef
}
bool success = 0;
do {
uint32_t initialCheckInfo = *infoLocation;
rcLowBits = __CFBitfieldGetValue(initialCheckInfo, RC_END, RC_START);
if (1 == rcLowBits) {
// we think cf should be deallocated
// CANNOT WRITE ANY NEW VALUE INTO [CF_RC_BITS] UNTIL AFTER FINALIZATION
CFRuntimeClass *cfClass = __CFRuntimeClassTable[typeID];
if ((cfClass->version & _kCFRuntimeResourcefulObject) && cfClass->reclaim != NULL) {
cfClass->reclaim(cf);
}
if (CF_IS_COLLECTABLE(cf)) {
uint32_t prospectiveNewInfo = initialCheckInfo - (1 << RC_START);
success = CAS32(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
// GC: release the collector's hold over the object, which will call the finalize function later on.
if (success) {
auto_zone_release(objc_collectableZone(), (void*)cf);
didAuto = true;
}
} else {
if (isAllocator) {
goto really_free;
} else {
void (*func)(CFTypeRef) = __CFRuntimeClassTable[typeID]->finalize;
if (NULL != func) {
func(cf);
}
// We recheck rcLowBits to see if the object has been retained again during
// the finalization process. This allows for the finalizer to resurrect,
// but the main point is to allow finalizers to be able to manage the
// removal of objects from uniquing caches, which may race with other threads
// which are allocating (looking up and finding) objects from those caches,
// which (that thread) would be the thing doing the extra retain in that case.
rcLowBits = __CFBitfieldGetValue(*infoLocation, RC_END, RC_START);
success = (1 == rcLowBits);
if (success) {
goto really_free;
}
}
}
} else {
// not yet junk
uint32_t prospectiveNewInfo = initialCheckInfo; // don't want compiler to generate prospectiveNewInfo = *infoLocation. This is why infoLocation is declared as a pointer to volatile memory.
if ((1 << 7) == rcLowBits) {
// Time to remove a bit from the external ref count
// Inline field hit its low watermark: refill it from the external
// side table under the table lock (see the non-Windows variant above;
// op codes 500/450 — confirm semantics in __CFDoExternRefOperation).
__CFLock(&__CFRuntimeExternRefCountTableLock);
CFIndex rcHighBitsCnt = __CFDoExternRefOperation(500, (id)cf);
if (1 == rcHighBitsCnt) {
__CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, (1 << 6) - 1);
} else {
__CFBitfieldSetValue(prospectiveNewInfo, RC_END, RC_START, ((1 << 6) | (1 << 7)) - 1);
}
success = CAS32(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
if (success) {
__CFDoExternRefOperation(450, (id)cf);
}
__CFUnlock(&__CFRuntimeExternRefCountTableLock);
} else {
// Ordinary decrement of the inline bitfield.
prospectiveNewInfo -= (1 << RC_START);
success = CAS32(*(int32_t *)&initialCheckInfo, *(int32_t *)&prospectiveNewInfo, (int32_t *)infoLocation);
}
}
} while (!success);
#endif
#endif
// Object survived (count merely decremented): record the release event for
// the allocation tracker if enabled, then we're done.
if (!didAuto && __builtin_expect(__CFOASafe, 0)) {
__CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, start_rc - 1, NULL);
}
return;
// Terminal path: the object is dead (already finalized above); release any
// ObjC-side bookkeeping and return the memory to its allocator.
really_free:;
if (!didAuto && __builtin_expect(__CFOASafe, 0)) {
// do not use CFGetRetainCount() because cf has been freed if it was an allocator
__CFRecordAllocationEvent(__kCFReleaseEvent, (void *)cf, 0, 0, NULL);
}
// WINOBJC: if this cf object was a bridged class, it could have gotten weak refs in objective C land.
// this call will invalidate all of those now that the cf object is truly dead. Placing this call *above*
// the actual deallocate calls so that there is no chance that a weak ref could slip in and be used before
// the object is toast.
objc_delete_weak_refs((id)(cf));
if(shouldCFDealloc) {
// WINOBJC: run the bridged class's ObjC dealloc logic (see shouldCFDealloc above).
[(id)(cf) dealloc];
}
// cannot zombify allocators, which get deallocated by __CFAllocatorDeallocate (finalize)
if (isAllocator) {
__CFAllocatorDeallocate((void *)cf);
} else {
CFAllocatorRef allocator = kCFAllocatorSystemDefault;
Boolean usesSystemDefaultAllocator = true;
// Info bit 7 clear means the object was created with a non-default
// allocator; look it up so we free through the right one.
if (!__CFBitfieldGetValue(((const CFRuntimeBase *)cf)->_cfinfo[CF_INFO_BITS], 7, 7)) {
allocator = CFGetAllocator(cf);
usesSystemDefaultAllocator = _CFAllocatorIsSystemDefault(allocator);
}
// WinObjC: Resurrect zombie support; it looks like this was removed from the original codebase.
#if defined(DEBUG) || defined(ENABLE_ZOMBIES)
if (__CFZombieEnabled && __CFZombifyNSObjectHook) {
// Debug aid: turn the corpse into a zombie instead of freeing it, so
// use-after-free sends a loud message rather than corrupting memory.
__CFZombifyNSObjectHook((id)cf);
} else
#endif
{
// For non-default allocators the allocation starts sizeof(CFAllocatorRef)
// bytes before the object (presumably where the allocator ref is stashed
// — the pointer arithmetic here implies it); back up to the true base.
CFAllocatorDeallocate(allocator, (uint8_t *)cf - (usesSystemDefaultAllocator ? 0 : sizeof(CFAllocatorRef)));
}
if (kCFAllocatorSystemDefault != allocator) {
// Balance the retain taken when the object captured its custom allocator.
CFRelease(allocator);
}
}
#endif
}