iOS底层系列08 -- malloc与calloc源码分析

2021-02-08  本文已影响0人  YanZi_33

malloc的主流程

#import <Foundation/Foundation.h>
#import <malloc/malloc.h>

int main(int argc, const char * argv[]) {
    @autoreleasepool {
        // Dynamically allocate 24 bytes. The nano allocator rounds requests
        // up to 16-byte quanta (see segregated_size_to_fit), so malloc_size
        // is expected to report 32 here, not 24.
        char *m;
        m = (char *)(malloc(24));
        NSLog(@"所占大小%lu",malloc_size(m));
        free(m); // fix: the original example leaked the allocation
    }
    return 0;
}
![malloc 主流程示意图](Snip20210208_110.png)
/*
 * Top-level malloc entry point: forwards the request to the default zone
 * and sets errno to ENOMEM when the allocation fails.
 */
void * malloc(size_t size)
{
    void *ptr = malloc_zone_malloc(default_zone, size);
    if (!ptr) {
        errno = ENOMEM;
    }
    return ptr;
}
/*
 * Allocate `size` bytes from the given zone.
 * Emits begin/end trace points, runs the optional periodic heap check,
 * rejects absurdly large requests, and reports to malloc_logger when
 * stack logging is enabled. Returns NULL on failure.
 */
void * malloc_zone_malloc(malloc_zone_t *zone, size_t size)
{
    MALLOC_TRACE(TRACE_malloc | DBG_FUNC_START, (uintptr_t)zone, size, 0, 0);

    void *ptr;
    // Periodic internal consistency check, driven by MallocCheckHeapStart/Each.
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    // Reject requests the allocator can never satisfy.
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }

    ptr = zone->malloc(zone, size);     // if lite zone is passed in then we still call the lite methods

    // malloc stack logging hook (e.g. for the Allocations instrument).
    if (malloc_logger) {
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    }

    MALLOC_TRACE(TRACE_malloc | DBG_FUNC_END, (uintptr_t)zone, size, (uintptr_t)ptr, 0);
    return ptr;
}
![default_zone 调用链示意图](Snip20210208_112.png)
/*
 * malloc entry of the virtual default zone: ignores the incoming zone
 * pointer and dispatches to the real runtime default zone.
 */
static void * default_zone_malloc(malloc_zone_t *zone, size_t size)
{
    malloc_zone_t *real_zone = runtime_default_zone();
    return real_zone->malloc(real_zone, size);
}
/*
 * Resolve the zone that actually serves default-zone requests:
 * the lite zone when malloc stack logging lite is active,
 * otherwise the lazily-initialized default zone (malloc_zones[0]).
 */
static inline malloc_zone_t *runtime_default_zone()
 {
    if (lite_zone) {
        return lite_zone;
    }
    return inline_malloc_default_zone();
}
/*
 * Return the real default zone, initializing the malloc subsystem first
 * if this is the first allocation in the process. After initialization
 * the default zone always sits at malloc_zones[0].
 */
static inline malloc_zone_t *inline_malloc_default_zone(void)
{
    _malloc_initialize_once();
    // malloc_report(ASL_LEVEL_INFO, "In inline_malloc_default_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);
    return malloc_zones[0];
}
/*
 * Run _malloc_initialize exactly once per process, guarded by
 * an os_once predicate (thread-safe one-time initialization).
 */
static inline void _malloc_initialize_once(void)
{
    os_once(&_malloc_initialize_pred, NULL, _malloc_initialize);
}
/*
 * One-time process-wide malloc setup: creates the default zone(s),
 * registers them, and ensures the default zone ends up at malloc_zones[0].
 * (Article excerpt — parts of the full function are elided below.)
 */
static void
_malloc_initialize(void *context __unused)
{
    MALLOC_LOCK();
    // NOTE(review): in this excerpt `n` is read below without being assigned;
    // the full libmalloc source derives it from the registered-zone index —
    // confirm against the upstream function before relying on this snippet.
    unsigned n;
    malloc_zone_t *zone = NULL;

#if CONFIG_NANOZONE
    nano_common_configure();
    // Create the scalable helper zone that backs requests nano cannot serve.
    malloc_zone_t *helper_zone = create_scalable_zone(0, malloc_debug_flags);
    // Create the nano zone (nanozone_t) layered on top of the helper zone.
    zone = nano_create_zone(helper_zone, malloc_debug_flags);
    
    if (zone) {
        malloc_zone_register_while_locked(zone);
        malloc_zone_register_while_locked(helper_zone);
        // The nano zone is named as the process default zone...
        malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
        // ...and the scalable zone is named as its helper.
        malloc_set_zone_name(helper_zone, MALLOC_HELPER_ZONE_STRING);
    } else {
        // Nano zone creation failed: the scalable zone becomes the default.
        zone = helper_zone;
        malloc_zone_register_while_locked(zone);
        malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
    }
#else
    // Nano disabled at build time: a single scalable zone is the default.
    zone = create_scalable_zone(0, malloc_debug_flags);
    malloc_zone_register_while_locked(zone);
    malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
#endif

    initial_default_zone = zone;

    // If the default zone was registered at index n != 0, swap it to the
    // front so malloc_zones[0] is always the default zone.
    if (n != 0) {
        unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
        malloc_zone_t *hold = malloc_zones[0];

        if (hold->zone_name && strcmp(hold->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) {
            malloc_set_zone_name(hold, NULL);
        }

        // The zone array is kept read-only; unprotect it just for the swap.
        mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
        malloc_zones[0] = malloc_zones[n];
        malloc_zones[n] = hold;
        mprotect(malloc_zones, protect_size, PROT_READ);
    }

    ......
    MALLOC_UNLOCK();
}

malloc分支流程 -- nanozone_t的创建

/*
 * The nano allocator's zone object. The leading malloc_zone_t makes a
 * nanozone_t usable anywhere a malloc_zone_t is expected; the padding
 * rounds the read-only header out to a full page (see the mprotect in
 * nano_create_zone).
 */
typedef struct nanozone_s {
    //first page will be given read-only protection
    malloc_zone_t       basic_zone;
    // Pad so basic_zone occupies exactly one max-size page.
    uint8_t         pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)];
    // Per-magazine, per-slot allocation bookkeeping.
    struct nano_meta_s      meta_data[NANO_MAG_SIZE][NANO_SLOT_SIZE];
    // One resupply lock per magazine.
    _malloc_lock_s          band_resupply_lock[NANO_MAG_SIZE];
    // Highest mapped band base address per magazine.
    uintptr_t           band_max_mapped_baseaddr[NANO_MAG_SIZE];
    size_t          core_mapped_size[NANO_MAG_SIZE];
    unsigned            debug_flags;
    // Entropy cookie used to obfuscate/validate free-list pointers.
    uintptr_t           cookie;
    // Fallback zone for requests nano cannot satisfy.
    malloc_zone_t       *helper_zone;
} nanozone_t;
/*
 * Create and initialize the nano zone on top of `helper_zone`.
 * Allocates page-aligned backing memory, fills in the basic_zone vtable
 * (honoring MALLOC_DO_SCRIBBLE debug variants), write-protects the header
 * page, initializes per-magazine slot queues and locks, and installs the
 * entropy cookie. Returns NULL (and disables nano) if the backing
 * allocation fails.
 */
malloc_zone_t *
nano_create_zone(malloc_zone_t *helper_zone, unsigned debug_flags)
{
    nanozone_t *nanozone;
    int i, j;

    /* get memory for the zone. */
    nanozone = nano_common_allocate_based_pages(NANOZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC, 0);
    if (!nanozone) {
        _malloc_engaged_nano = NANO_NONE;
        return NULL;
    }

    // Populate the basic_zone function table (the zone's "vtable").
    nanozone->basic_zone.version = 10;
    nanozone->basic_zone.size = (void *)nano_size;
    nanozone->basic_zone.malloc = (debug_flags & MALLOC_DO_SCRIBBLE) ? (void *)nano_malloc_scribble : (void *)nano_malloc;
    nanozone->basic_zone.calloc = (void *)nano_calloc;
    nanozone->basic_zone.valloc = (void *)nano_valloc;
    nanozone->basic_zone.free = (debug_flags & MALLOC_DO_SCRIBBLE) ? (void *)nano_free_scribble : (void *)nano_free;
    nanozone->basic_zone.realloc = (void *)nano_realloc;
    nanozone->basic_zone.destroy = (void *)nano_destroy;
    nanozone->basic_zone.batch_malloc = (void *)nano_batch_malloc;
    nanozone->basic_zone.batch_free = (void *)nano_batch_free;
    nanozone->basic_zone.introspect = (struct malloc_introspection_t *)&nano_introspect;
    nanozone->basic_zone.memalign = (void *)nano_memalign;
    nanozone->basic_zone.free_definite_size = (debug_flags & MALLOC_DO_SCRIBBLE) ? (void *)nano_free_definite_size_scribble
                                                                                          : (void *)nano_free_definite_size;

    nanozone->basic_zone.pressure_relief = (void *)nano_pressure_relief;
    nanozone->basic_zone.claimed_address = (void *)nano_claimed_address;

    nanozone->basic_zone.reserved1 = 0;
    nanozone->basic_zone.reserved2 = 0;

    // The first page (basic_zone) becomes read-only so the function table
    // cannot be tampered with after setup.
    mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ);
    if (debug_flags & MALLOC_ADD_GUARD_PAGES) {
        malloc_report(ASL_LEVEL_INFO, "nano zone does not support guard pages\n");
        debug_flags &= ~MALLOC_ADD_GUARD_PAGES;
    }

    /* set up the remainder of the nanozone structure */
    nanozone->debug_flags = debug_flags;

    if (phys_ncpus > sizeof(nanozone->core_mapped_size) /
            sizeof(nanozone->core_mapped_size[0])) {
        MALLOC_REPORT_FATAL_ERROR(phys_ncpus,
                "nanozone abandoned because NCPUS > max magazines.\n");
    }

    /* Initialize slot queue heads and resupply locks. */
    OSQueueHead q0 = OS_ATOMIC_QUEUE_INIT;
    for (i = 0; i < nano_common_max_magazines; ++i) {
        // Fix: original excerpt indexed with capital `I` (autocorrect artifact);
        // the loop index is `i`.
        _malloc_lock_init(&nanozone->band_resupply_lock[i]);

        for (j = 0; j < NANO_SLOT_SIZE; ++j) {
            nanozone->meta_data[i][j].slot_LIFO = q0;
        }
    }

    // Seed the security cookie from boot-time entropy; used to protect
    // the free-list pointers.
    nanozone->cookie = (uintptr_t)malloc_entropy[0] & 0x0000ffffffff0000ULL;
    nanozone->helper_zone = helper_zone;
    return (malloc_zone_t *)nanozone;
}
/*
 * Set (or clear, when name is NULL) a zone's display name. The name is
 * copied into memory allocated from the zone itself; the zone header is
 * temporarily made writable around the update and re-protected afterward.
 */
void
malloc_set_zone_name(malloc_zone_t *z, const char *name)
{
    mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE);

    // Release any previously-set name before installing the new one.
    if (z->zone_name != NULL) {
        free((char *)z->zone_name);
        z->zone_name = NULL;
    }

    if (name != NULL) {
        size_t buflen = strlen(name) + 1;
        char *copy = malloc_zone_malloc(z, buflen);
        if (copy != NULL) {
            strlcpy(copy, name, buflen);
            z->zone_name = (const char *)copy;
        } else {
            z->zone_name = NULL;
        }
    }

    mprotect(z, sizeof(malloc_zone_t), PROT_READ);
}
![nano_malloc 流程示意图](Snip20210208_113.png)
/*
 * Nano zone malloc entry. Requests up to NANO_MAX_SIZE are served by the
 * nano allocator; anything larger, or a nano failure, falls through to
 * the helper (scalable) zone.
 */
static void * nano_malloc(nanozone_t *nanozone, size_t size)
{
    if (size <= NANO_MAX_SIZE) {
        void *ptr = _nano_malloc_check_clear(nanozone, size, 0);
        if (ptr) {
            return ptr;
        }
        // Nano could not satisfy the request; fall through to helper zone.
    }

    malloc_zone_t *helper = (malloc_zone_t *)(nanozone->helper_zone);
    return helper->malloc(helper, size);
}
/*
 * Core nano allocation path. Rounds the request up to a slot size, then
 * tries to pop a previously-freed block from the per-magazine slot
 * free list; on a miss, carves a fresh block from the magazine's band.
 * Zeroes the block when the caller requested cleared memory (calloc).
 */
static void * _nano_malloc_check_clear(nanozone_t *nanozone, size_t size, boolean_t cleared_requested)
{
    MALLOC_TRACE(TRACE_nano_malloc, (uintptr_t)nanozone, size, cleared_requested, 0);

    size_t slot_key;
    size_t slot_bytes = segregated_size_to_fit(nanozone, size, &slot_key); // Note slot_key is set here
    mag_index_t mag_index = nano_mag_index(nanozone);

    // Bookkeeping for this CPU's magazine and this size class.
    nano_meta_admin_t pMeta = &(nanozone->meta_data[mag_index][slot_key]);

    // Fast path: reuse a block from the slot's lock-free LIFO free list.
    void *ptr = OSAtomicDequeue(&(pMeta->slot_LIFO), offsetof(struct chained_block_s, next));
    if (!ptr) {
        // Slow path: take the next unused block from the band.
        ptr = segregated_next_block(nanozone, pMeta, slot_bytes, mag_index);
    }

    if (ptr && cleared_requested) {
        memset(ptr, 0, slot_bytes);
    }
    return ptr;
}
/*
 * Map a request size to its nano size class.
 * Rounds `size` up to a multiple of the 16-byte quantum
 * (NANO_REGIME_QUANTA_SIZE) and writes the zero-based slot index to
 * *pKey. Returns the rounded-up byte size. A request of 0 is treated
 * as one quantum.
 */
static MALLOC_INLINE size_t
segregated_size_to_fit(nanozone_t *nanozone, size_t size, size_t *pKey)
{
    if (0 == size) {
        size = NANO_REGIME_QUANTA_SIZE; // minimum allocation is one 16-byte quantum
    }
    size_t quanta = (size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM; // round up
    *pKey = quanta - 1;                     // slot index for this size class
    return quanta << SHIFT_NANO_QUANTUM;    // rounded-up byte size
}

总结:

上一篇 下一篇

猜你喜欢

热点阅读