iOS中malloc和calloc源码分析

2022-12-07  本文已影响0人  祀梦_

iOS中malloc和calloc源码分析

calloc

1. calloc

/*
 * Public calloc entry point: delegate to the shared zone-calloc
 * implementation, targeting the default zone with POSIX errno semantics.
 */
void *
calloc(size_t num_items, size_t size)
{
    void *result = _malloc_zone_calloc(default_zone, num_items, size, MZ_POSIX);
    return result;
}

2. _malloc_zone_calloc

/*
 * Shared calloc path for all zones: traces the request, optionally runs
 * the heap-consistency checker, dispatches to the zone's own calloc,
 * reports the allocation to the malloc logger, and sets errno (per the
 * zone options in mzo) when the allocation fails.
 */
MALLOC_NOINLINE
static void *
_malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size,
        malloc_zone_options_t mzo)
{
    MALLOC_TRACE(TRACE_calloc | DBG_FUNC_START, (uintptr_t)zone, num_items, size, 0);

    void *ptr;
    if (malloc_check_start) {
        // Heap checking was requested (MallocCheckHeapStart); validate now.
        internal_check();
    }
    // Dispatch to the zone's calloc; ptr receives the zeroed block (or NULL).
    ptr = zone->calloc(zone, num_items, size);

    if (os_unlikely(malloc_logger)) {
        // NOTE(review): num_items * size may wrap here; affects the logged
        // size only — the zone's calloc does its own overflow checking.
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone,
                (uintptr_t)(num_items * size), 0, (uintptr_t)ptr, 0);
    }

    MALLOC_TRACE(TRACE_calloc | DBG_FUNC_END, (uintptr_t)zone, num_items, size, (uintptr_t)ptr);
    if (os_unlikely(ptr == NULL)) {
        // POSIX requires errno = ENOMEM on allocation failure.
        malloc_set_errno_fast(mzo, ENOMEM);
    }
    return ptr;
}

3. default_zone_calloc

/*
 * calloc hook installed on the virtual default zone: resolve the real
 * runtime default zone and forward the request to it.
 */
static void *
default_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size)
{
    malloc_zone_t *real_zone = runtime_default_zone();
    return real_zone->calloc(real_zone, num_items, size);
}

4. nano_calloc

/*
 * Nano-zone calloc: compute num_items * size with overflow detection,
 * serve small requests (<= NANO_MAX_SIZE) from the nano allocator with
 * zeroing requested, and fall back to the helper zone when the request
 * is too large or the nano allocator cannot satisfy it.
 */
static void *
nano_calloc(nanozone_t *nanozone, size_t num_items, size_t size)
{
    size_t total_bytes;

    // Non-zero return means the multiplication overflowed.
    if (calloc_get_size(num_items, size, 0, &total_bytes)) {
        return NULL;
    }

    if (total_bytes <= NANO_MAX_SIZE) {
        void *block = _nano_malloc_check_clear(nanozone, total_bytes, 1);
        if (block != NULL) {
            return block;
        }
        // Nano allocator exhausted; fall through to the helper zone below.
    }

    malloc_zone_t *helper = (malloc_zone_t *)(nanozone->helper_zone);
    return helper->calloc(helper, 1, total_bytes);
}

5. _nano_malloc_check_clear

/*
 * Core nano-zone allocation path shared by nano_malloc and nano_calloc.
 * Rounds the request up to a slot size, pops a block from the per-magazine
 * LIFO free list (validating it when free-dequeue diligence is enabled),
 * or — if the free list is empty — carves a fresh block from the slot's
 * bump allocator. Optionally zeroes the block before returning it.
 *
 * Returns NULL when the nano allocator cannot satisfy the request (the
 * caller then falls back to the helper zone).
 */
static void *
_nano_malloc_check_clear(nanozone_t *nanozone, size_t size, boolean_t cleared_requested)
{
    MALLOC_TRACE(TRACE_nano_malloc, (uintptr_t)nanozone, size, cleared_requested, 0);

    void *ptr;
    size_t slot_key;
    // 1. Round the request up to the 16-byte quantum and get the slot index.
    size_t slot_bytes = segregated_size_to_fit(nanozone, size, &slot_key); // Note slot_key is set here
    mag_index_t mag_index = nano_mag_index(nanozone);

    nano_meta_admin_t pMeta = &(nanozone->meta_data[mag_index][slot_key]);

    // 2. Try to reuse a previously-freed block from this slot's LIFO free list.
    ptr = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next));
    if (ptr) {
        unsigned debug_flags = nanozone->debug_flags;
#if NANO_FREE_DEQUEUE_DILIGENCE
        size_t gotSize;
        nano_blk_addr_t p; // the compiler holds this in a register

        p.addr = (uint64_t)ptr; // Begin the dissection of ptr
        if (NANOZONE_SIGNATURE != p.fields.nano_signature) {
            malloc_zone_error(debug_flags, true,
                    "Invalid signature for pointer %p dequeued from free list\n",
                    ptr);
        }

        if (mag_index != p.fields.nano_mag_index) {
            malloc_zone_error(debug_flags, true,
                    "Mismatched magazine for pointer %p dequeued from free list\n",
                    ptr);
        }

        gotSize = _nano_vet_and_size_of_free(nanozone, ptr);
        if (0 == gotSize) {
            malloc_zone_error(debug_flags, true,
                    "Invalid pointer %p dequeued from free list\n", ptr);
        }
        if (gotSize != slot_bytes) {
            malloc_zone_error(debug_flags, true,
                    "Mismatched size for pointer %p dequeued from free list\n",
                    ptr);
        }

        if (!_nano_block_has_canary_value(nanozone, ptr)) {
            malloc_zone_error(debug_flags, true,
                    "Heap corruption detected, free list canary is damaged for %p\n"
                    "*** Incorrect guard value: %lu\n", ptr,
                    ((chained_block_t)ptr)->double_free_guard);
        }

#if defined(DEBUG)
        void *next = (void *)(((chained_block_t)ptr)->next);
        if (next) {
            p.addr = (uint64_t)next; // Begin the dissection of next
            if (NANOZONE_SIGNATURE != p.fields.nano_signature) {
                // Fixed: pass the pointer 'next' for the %p conversion, not
                // the string literal "next" (mismatched %p argument is UB).
                malloc_zone_error(debug_flags, true,
                        "Invalid next signature for pointer %p dequeued from free "
                        "list, next = %p\n", ptr, next);
            }

            if (mag_index != p.fields.nano_mag_index) {
                malloc_zone_error(debug_flags, true,
                        "Mismatched next magazine for pointer %p dequeued from "
                        "free list, next = %p\n", ptr, next);
            }

            gotSize = _nano_vet_and_size_of_free(nanozone, next);
            if (0 == gotSize) {
                malloc_zone_error(debug_flags, true,
                        "Invalid next for pointer %p dequeued from free list, "
                        "next = %p\n", ptr, next);
            }
            if (gotSize != slot_bytes) {
                malloc_zone_error(debug_flags, true,
                        "Mismatched next size for pointer %p dequeued from free "
                        "list, next = %p\n", ptr, next);
            }
        }
#endif /* DEBUG */
#endif /* NANO_FREE_DEQUEUE_DILIGENCE */

        ((chained_block_t)ptr)->double_free_guard = 0;
        ((chained_block_t)ptr)->next = NULL; // clear out next pointer to protect free list
    } else {
        // Free list was empty: carve the next block from the slot's arena.
        ptr = segregated_next_block(nanozone, pMeta, slot_bytes, mag_index);
    }

    // 3. Zero the block if the caller (the calloc path) asked for cleared memory.
    if (cleared_requested && ptr) {
        memset(ptr, 0, slot_bytes); // TODO: Needs a memory barrier after memset to ensure zeroes land first?
    }
    return ptr;
}

segregated_size_to_fit

/*
 * Rounds a request up to the nano quantum (16 bytes) and reports the
 * zero-based slot index for that size class through *pKey. Returns the
 * rounded byte size. A zero-byte request is treated as one quantum
 * (historical behavior).
 */
static MALLOC_INLINE size_t
segregated_size_to_fit(nanozone_t *nanozone, size_t size, size_t *pKey)
{
    if (size == 0) {
        size = NANO_REGIME_QUANTA_SIZE; // Historical behavior
    }
    // Number of 16-byte quanta needed, rounding up: (size + 16 - 1) >> 4.
    size_t quanta = (size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM;
    *pKey = quanta - 1; // slot keys are zero-based
    // Slot size in bytes: quanta << 4 (multiply by the power-of-two quantum).
    return quanta << SHIFT_NANO_QUANTUM;
}

OSAtomicDequeue或者segregated_next_block

/*
 * Bump-allocates the next slot_bytes-sized block for this slot's
 * metadata, growing the band (under the per-magazine resupply lock)
 * when the current region is used up. Returns NULL only once the slot
 * is permanently exhausted.
 */
static MALLOC_INLINE void *
segregated_next_block(nanozone_t *nanozone, nano_meta_admin_t pMeta, size_t slot_bytes, unsigned int mag_index)
{
    while (1) {
        // End address of the region currently usable by this slot, captured
        // before the atomic bump so racing growth can be detected below.
        uintptr_t theLimit = pMeta->slot_limit_addr; // Capture the slot limit that bounds slot_bump_addr right now
        uintptr_t b = OSAtomicAdd64Barrier(slot_bytes, (volatile int64_t *)&(pMeta->slot_bump_addr));
        // Subtract the amount just added to recover the base of our block.
        b -= slot_bytes;

        if (b < theLimit) {   // Did we stay within the bound of the present slot allocation?
            // Still inside the current region: b is our block's address.
            return (void *)b;
        } else {
            // The current region for this slot is used up.
            if (pMeta->slot_exhausted) {
                pMeta->slot_bump_addr = theLimit;
                return 0;                // We're toast
            } else {
                // One thread will grow the heap, others will see its been grown and retry allocation
                _malloc_lock_lock(&nanozone->band_resupply_lock[mag_index]);
                // Re-check under the lock: another thread may have exhausted
                // or grown the slot while we were waiting.
                if (pMeta->slot_exhausted) {
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    return 0; // Toast
                } else if (b < pMeta->slot_limit_addr) {
                    // Another thread already grew the band; retry the loop.
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    continue;
                } else if (segregated_band_grow(nanozone, pMeta, slot_bytes, mag_index)) {
                    // We grew a new band successfully; retry the loop.
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    continue;
                } else {
                    // Growth failed: mark the slot permanently exhausted.
                    pMeta->slot_exhausted = TRUE;
                    pMeta->slot_bump_addr = theLimit;
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    return 0;
                }
            }
        }
    }
}

memset(ptr, 0, slot_bytes)

malloc

1. malloc

/*
 * Public malloc entry point: delegate to the shared zone-malloc
 * implementation, targeting the default zone with POSIX errno semantics.
 */
void *
malloc(size_t size)
{
    void *result = _malloc_zone_malloc(default_zone, size, MZ_POSIX);
    return result;
}

2. _malloc_zone_malloc

/*
 * Shared malloc path for all zones: traces the request, optionally runs
 * the heap-consistency checker, rejects oversized requests, dispatches
 * to the zone's own malloc, reports the allocation to the malloc logger,
 * and sets errno (per the zone options in mzo) when allocation fails.
 */
MALLOC_NOINLINE
static void *
_malloc_zone_malloc(malloc_zone_t *zone, size_t size, malloc_zone_options_t mzo)
{
    MALLOC_TRACE(TRACE_malloc | DBG_FUNC_START, (uintptr_t)zone, size, 0, 0);

    void *ptr = NULL;

    if (malloc_check_start) {
        // Heap checking was requested (MallocCheckHeapStart); validate now.
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        // Request too large to ever satisfy; skip straight to the
        // failure path (ptr is still NULL, so errno gets set below).
        goto out;
    }

    ptr = zone->malloc(zone, size);     // if lite zone is passed in then we still call the lite methods

    if (os_unlikely(malloc_logger)) {
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    }

    MALLOC_TRACE(TRACE_malloc | DBG_FUNC_END, (uintptr_t)zone, size, (uintptr_t)ptr, 0);
out:
    if (os_unlikely(ptr == NULL)) {
        // POSIX requires errno = ENOMEM on allocation failure.
        malloc_set_errno_fast(mzo, ENOMEM);
    }
    return ptr;
}

3. default_zone_malloc

/*
 * malloc hook installed on the virtual default zone: resolve the real
 * runtime default zone and forward the request to it.
 */
static void *
default_zone_malloc(malloc_zone_t *zone, size_t size)
{
    malloc_zone_t *real_zone = runtime_default_zone();
    return real_zone->malloc(real_zone, size);
}

4. nano_malloc

/*
 * Nano-zone malloc: serve requests up to NANO_MAX_SIZE from the nano
 * allocator (without zeroing), falling back to the helper zone when the
 * request is too large or the nano allocator cannot satisfy it.
 */
static void *
nano_malloc(nanozone_t *nanozone, size_t size)
{
    if (size <= NANO_MAX_SIZE) {
        void *block = _nano_malloc_check_clear(nanozone, size, 0);
        if (block != NULL) {
            return block;
        }
        // Nano allocator exhausted; fall through to the helper zone below.
    }

    malloc_zone_t *helper = (malloc_zone_t *)(nanozone->helper_zone);
    return helper->malloc(helper, size);
}
// calloc中
void *p = _nano_malloc_check_clear(nanozone, total_bytes, 1);
// malloc中
void *p = _nano_malloc_check_clear(nanozone, size, 0);

static void *
_nano_malloc_check_clear(nanozone_t *nanozone, size_t size, boolean_t cleared_requested)
if (cleared_requested && ptr) {
    memset(ptr, 0, slot_bytes);
}

总结

  1. malloc和calloc其实底层都调用的同一套开辟内存方法
  2. 不同在于,calloc在开辟完内存后会将其初始化为0,而malloc不会进行初始化,内存中是原始的脏数据。如果需要使用malloc开辟的这块内存,还需要我们手动初始化
上一篇 下一篇

猜你喜欢

热点阅读