cache_t


objc_class

struct objc_class : objc_object {
  // Class ISA;
  Class superclass;
  cache_t cache;
  class_data_bits_t bits;
  class_rw_t *data() const {
    return bits.data();
  }
};
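Since isa (inherited from objc_object) and superclass each take one pointer, cache_t starts 16 bytes into the class object on a 64-bit platform. A minimal sketch of reading that address through the public runtime API (my own illustration, not something objc4 itself does; on macOS it should build as plain C++ with clang++ demo.cpp -lobjc):

#include <objc/runtime.h>
#include <cstdint>
#include <cstdio>

int main() {
    Class cls = objc_getClass("NSObject");
    // isa (8 bytes) + superclass (8 bytes) => cache_t begins at offset 0x10
    uintptr_t cacheAddr = (uintptr_t)cls + 0x10;
    printf("cache_t of NSObject starts at %#lx\n", (unsigned long)cacheAddr);
    return 0;
}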

cache_t

The contents of cache_t differ across architectures:

struct cache_t {
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    explicit_atomic<struct bucket_t *> _buckets;
    explicit_atomic<mask_t> _mask;
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;
    
    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;
    
    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;
    
    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;
    
    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;
    
    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _maskAndBuckets stores the mask shift in the low 4 bits, and
    // the buckets pointer in the remainder of the value. The mask
    // shift is the value where (0xffff >> shift) produces the correct
    // mask. This is equal to 16 - log2(cache_size).
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
#else
#error Unknown cache mask storage type.
#endif
    
#if __LP64__
    uint16_t _flags;
#endif
    uint16_t _occupied;

public:
    static bucket_t *emptyBuckets();
    
    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    unsigned capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();
};

The important members in this code are _buckets, _mask, _occupied and _flags.
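As the comments in the struct indicate, on the CACHE_MASK_STORAGE_HIGH_16 layout the mask occupies the top 16 bits of _maskAndBuckets and the buckets pointer the low bits. A small decoding sketch using the constants above (my own illustration with a made-up address, not the runtime's accessors):

#include <cstdint>
#include <cstdio>

int main() {
    // Constants copied from the struct above.
    constexpr uintptr_t maskShift    = 48;
    constexpr uintptr_t maskZeroBits = 4;
    constexpr uintptr_t bucketsMask  = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // A made-up packed word: mask 7 in the top 16 bits, a fake buckets address below.
    uintptr_t fakeBuckets    = 0x101234560;
    uintptr_t maskAndBuckets = ((uintptr_t)7 << maskShift) | fakeBuckets;

    uintptr_t buckets = maskAndBuckets & bucketsMask;  // low 44 bits -> buckets pointer
    uintptr_t mask    = maskAndBuckets >> maskShift;   // top 16 bits  -> mask

    printf("buckets %#lx, mask %lu\n", (unsigned long)buckets, (unsigned long)mask);
    return 0;
}

Insertion into this cache is handled by cache_t::insert():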

void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    ASSERT(sel != 0 && cls->isInitialized());

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = occupied() + 1;
    unsigned oldCapacity = capacity(), capacity = oldCapacity;
    if (slowpath(isConstantEmptyCache())) {
        // Cache is read-only. Replace it.
        if (!capacity) capacity = INIT_CACHE_SIZE;
        reallocate(oldCapacity, capacity, /* freeOld */false);
    }
    else if (fastpath(newOccupied + CACHE_END_MARKER <= capacity / 4 * 3)) { // occupancy (plus the end-marker bucket, where used) still fits within 3/4 of capacity
        // Cache is less than 3/4 full. Use it as-is.
    }
    else {
        capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;  // double the capacity (starting from 4)
        if (capacity > MAX_CACHE_SIZE) {
            capacity = MAX_CACHE_SIZE;
        }
        reallocate(oldCapacity, capacity, true);  // reallocation done; the old buckets are freed
    }

    bucket_t *b = buckets();
    mask_t m = capacity - 1;
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    do {
        if (fastpath(b[i].sel() == 0)) {
            incrementOccupied();
            b[i].set<Atomic, Encoded>(sel, imp, cls);
            return;
        }
        if (b[i].sel() == sel) {
            // The entry was added to the cache by some other thread
            // before we grabbed the cacheUpdateLock.
            return;
        }
    } while (fastpath((i = cache_next(i, m)) != begin));

    cache_t::bad_cache(receiver, (SEL)sel, cls);
}
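insert() relies on two small helpers that are not shown above, cache_hash and cache_next. In objc4 they look roughly like the following sketch (the probe direction and the CACHE_END_MARKER value differ per architecture, so treat this as an approximation rather than the exact source):

#include <objc/objc.h>   // SEL
#include <cstdint>

typedef uint32_t mask_t;  // the runtime's 64-bit definition

// Hash: mask the selector's pointer value down to an index in the bucket array.
static inline mask_t cache_hash(SEL sel, mask_t mask)
{
    return (mask_t)(uintptr_t)sel & mask;
}

// Probe: step to the next slot, wrapping around inside the array.
#if __arm64__
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask;       // arm64 scans backwards from the hash index
}
#else
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask;         // other architectures scan forwards
}
#endif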

The insert() implementation above was located by searching for cache_t's incrementOccupied() method. It is not hard to follow: when a method is called, the runtime caches it so that subsequent calls to the same method can be dispatched quickly, which improves efficiency. Inside insert(), occupied() is called, i.e. the getter for _occupied, to obtain the number of slots currently in use; capacity() checks mask(): if it is non-zero the capacity is mask() + 1, otherwise it is 0. The process is as follows:

unsigned cache_t::capacity()
{
    return mask() ? mask()+1 : 0; 
}

mask_t cache_t::mask() 
{
    return _mask.load(memory_order::memory_order_relaxed);
}

_mask here is an explicit_atomic<mask_t>, a thin wrapper around std::atomic<T>:

template <typename T>
struct explicit_atomic : public std::atomic<T> {
    explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}
    operator T() const = delete;
    
    T load(std::memory_order order) const noexcept {
        return std::atomic<T>::load(order);
    }
    void store(T desired, std::memory_order order) noexcept {
        std::atomic<T>::store(desired, order);
    }
    
    // Convert a normal pointer to an atomic pointer. This is a
    // somewhat dodgy thing to do, but if the atomic type is lock
    // free and the same size as the non-atomic type, we know the
    // representations are the same, and the compiler generates good
    // code.
    static explicit_atomic<T> *from_pointer(T *ptr) {
        static_assert(sizeof(explicit_atomic<T> *) == sizeof(T *),
                      "Size of atomic must match size of original");
        explicit_atomic<T> *atomic = (explicit_atomic<T> *)ptr;
        ASSERT(atomic->is_lock_free());
        return atomic;
    }
};

Under the hood this calls the C++ load method, which returns the current value of the atomic variable. The details are standard C++ (std::atomic) rather than anything runtime-specific and are worth reading up on separately; memory_order is the enum of memory-ordering options that load and store accept.
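A minimal standalone C++ example of what load and store with memory_order_relaxed do (my own illustration, not runtime code): relaxed operations guarantee that the single word is read or written atomically, but impose no ordering on surrounding memory operations.

#include <atomic>
#include <cstdint>
#include <cassert>

int main() {
    std::atomic<uint32_t> mask{0};                      // plays the role of _mask
    mask.store(3, std::memory_order_relaxed);           // like setBucketsAndMask()
    uint32_t m = mask.load(std::memory_order_relaxed);  // like cache_t::mask()
    assert(m == 3);
    return 0;
}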

The cache is then checked for emptiness. If it is the constant empty cache, capacity is initialized to INIT_CACHE_SIZE (INIT_CACHE_SIZE_LOG2 = 2, INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2), i.e. 4) and the buckets are reallocated. As long as the number of cached entries stays within 3/4 of the allocated capacity, no expansion happens; once it would exceed 3/4, the cache is expanded to twice its current size.
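To put numbers on that rule, here is a small simulation of the growth logic from insert() (my own sketch, mirroring the constants referenced above; it assumes CACHE_END_MARKER is 1 as on x86_64, and like the real reallocate() it drops the old entries when growing):

#include <cstdio>

// Constants mirroring the ones referenced above.
constexpr unsigned INIT_CACHE_SIZE  = 1 << 2;   // 4
constexpr unsigned MAX_CACHE_SIZE   = 1 << 16;
constexpr unsigned CACHE_END_MARKER = 1;        // 0 on arm64

int main() {
    unsigned capacity = 0, occupied = 0;
    for (int call = 1; call <= 8; ++call) {      // simulate caching 8 new selectors
        unsigned newOccupied = occupied + 1;
        if (capacity == 0) {
            capacity = INIT_CACHE_SIZE;                  // first insert: 4 slots
        } else if (newOccupied + CACHE_END_MARKER > capacity / 4 * 3) {
            capacity *= 2;                               // grow 2x ...
            if (capacity > MAX_CACHE_SIZE) capacity = MAX_CACHE_SIZE;
            newOccupied = 1;                             // ... and the old entries are discarded
        }
        occupied = newOccupied;
        printf("insert #%d -> occupied %u / capacity %u\n", call, occupied, capacity);
    }
    return 0;
}

With a fresh 4-slot cache and an end marker, growth kicks in on the third insert (3 + 1 > 3) and again on the eighth (6 + 1 > 6).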

Flow

In short: insert() reads the current occupancy, allocates a 4-slot cache on first use or doubles the capacity once more than 3/4 of it would be used, hashes the selector into an index, probes for a free slot, and writes the (sel, imp, cls) entry there.
