MySQL InnoDB: Number of Pages Flushed Under Normal Dirty-Page Flushing

2020-10-05  多血

Related parameters

    n_pages = (PCT_IO(pct_total) + avg_page_rate + pages_for_lsn) / 3;
    /* #define PCT_IO(p) ((ulong) (srv_io_capacity * ((double) (p) / 100.0)))
       n_pages is the target number of pages to flush */
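
A quick illustration with assumed values (not from the source): innodb_io_capacity = 200, pct_total = 90, avg_page_rate = 250 and pages_for_lsn = 800:

    /* Assumed values, for illustration only; ulint/ulong are InnoDB's
       unsigned integer typedefs. */
    ulint pct_io  = (ulong)(200 * (90 / 100.0));   /* PCT_IO(90) = 180 */
    ulint n_pages = (pct_io + 250 + 800) / 3;      /* = 410 pages      */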

Four parameters — innodb_max_dirty_pages_pct, innodb_max_dirty_pages_pct_lwm, innodb_adaptive_flushing and innodb_adaptive_flushing_lwm — determine pct_total, which is the larger of pct_for_dirty and pct_for_lsn:

    pct_for_dirty = af_get_pct_for_dirty();
    pct_for_lsn = af_get_pct_for_lsn(age);
    pct_total = ut_max(pct_for_dirty, pct_for_lsn);

af_get_pct_for_dirty() computes a percentage from the number of dirty pages. When innodb_max_dirty_pages_pct_lwm is 0, pct_for_dirty is set to 100 once the dirty-page ratio reaches srv_max_buf_pool_modified_pct (innodb_max_dirty_pages_pct). When innodb_max_dirty_pages_pct_lwm is non-zero and the dirty-page ratio reaches srv_max_dirty_pages_pct_lwm, pct_for_dirty is (dirty_pct * 100) / (srv_max_buf_pool_modified_pct + 1).

/*********************************************************************//**
Calculates if flushing is required based on number of dirty pages in
the buffer pool.
@return percent of io_capacity to flush to manage dirty page ratio */
static
ulint
af_get_pct_for_dirty()
/*==================*/
{
    double  dirty_pct = buf_get_modified_ratio_pct();
    if (dirty_pct == 0.0) {
        /* No pages modified */
        return(0);
    }
    if (srv_max_dirty_pages_pct_lwm == 0) {
        /* The user has not set the option to preflush dirty
        pages as we approach the high water mark. */
        if (dirty_pct >= srv_max_buf_pool_modified_pct) {
            /* We have crossed the high water mark of dirty
            pages In this case we start flushing at 100% of
            innodb_io_capacity. */
            return(100);
        }
    } else if (dirty_pct >= srv_max_dirty_pages_pct_lwm) {
        /* We should start flushing pages gradually. */
        return(static_cast<ulint>((dirty_pct * 100)
               / (srv_max_buf_pool_modified_pct + 1)));
    }

    return(0);
}
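
A worked example of the gradual branch, with assumed settings: innodb_max_dirty_pages_pct = 75, innodb_max_dirty_pages_pct_lwm = 10, and a dirty-page ratio of 30%:

    /* Assumed: srv_max_buf_pool_modified_pct = 75 and dirty_pct = 30.0,
       which is above the 10% low-water mark. */
    ulint pct_for_dirty = (ulint)((30.0 * 100) / (75 + 1));   /* = 39 */

That is, flushing proceeds at 39% of innodb_io_capacity, ramping up toward 100 as dirty_pct approaches the high-water mark.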

af_get_pct_for_lsn() computes a percentage from the age of the redo LSN. If age >= max_async_age (log_get_max_modified_age_async(), roughly 7/8 of log_get_capacity()), or adaptive flushing is enabled and age >= srv_adaptive_flushing_lwm percent of log_get_capacity(), it returns ((srv_max_io_capacity / srv_io_capacity) * (lsn_age_factor * sqrt(lsn_age_factor))) / 7.5, where lsn_age_factor = (age * 100) / max_async_age; otherwise it returns 0.

/*********************************************************************//**
Calculates if flushing is required based on redo generation rate.
@return percent of io_capacity to flush to manage redo space */
static
ulint
af_get_pct_for_lsn(
/*===============*/
    lsn_t   age)    /*!< in: current age of LSN. */
{
    lsn_t   max_async_age;
    lsn_t   lsn_age_factor;
    lsn_t   af_lwm = (srv_adaptive_flushing_lwm
              * log_get_capacity()) / 100;
    if (age < af_lwm) {
        /* No adaptive flushing. */
        return(0);
    }
    max_async_age = log_get_max_modified_age_async();
    if (age < max_async_age && !srv_adaptive_flushing) {
        /* We have still not reached the max_async point and
        the user has disabled adaptive flushing. */
        return(0);
    }
    /* If we are here then we know that either:
    1) User has enabled adaptive flushing
    2) User may have disabled adaptive flushing but we have reached
    max_async_age. */
    lsn_age_factor = (age * 100) / max_async_age;
    return(static_cast<ulint>(
        ((srv_max_io_capacity / srv_io_capacity)
        * (lsn_age_factor * sqrt((double)lsn_age_factor)))
        / 7.5));
}
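
Note that the returned percentage is not capped at 100. A worked example with assumed values: srv_max_io_capacity = 2000, srv_io_capacity = 200, and age equal to max_async_age (so lsn_age_factor = 100):

    /* Assumed values, for illustration only. sqrt(100) = 10. */
    ulint  ratio = 2000 / 200;                      /* = 10                */
    double boost = 100 * 10.0;                      /* factor * sqrt(factor)
                                                       = 100 * 10 = 1000   */
    ulint  pct   = (ulint)((ratio * boost) / 7.5);  /* = 1333              */

So pct_for_lsn can far exceed 100, and PCT_IO(pct_total) can exceed innodb_io_capacity; the final n_pages is only capped at srv_max_io_capacity later in page_cleaner_flush_pages_recommendation.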

The following code maintains avg_page_rate and lsn_avg_rate; lsn_avg_rate in turn drives the pages_for_lsn estimate below.

    sum_pages += last_pages_in; // last_pages_in: the number of pages flushed by the last flush_list flushing
    ib_time_monotonic_t curr_time    = ut_time_monotonic();
    uint64_t            time_elapsed = curr_time - prev_time;
    const ulong             avg_loop     = srv_flushing_avg_loops;
    /* We update our variables every srv_flushing_avg_loops
    iterations to smooth out transition in workload. */
    if (++n_iterations >= avg_loop
        || time_elapsed >= (uint64_t)avg_loop) {
        if (time_elapsed < 1) {
            time_elapsed = 1;
        }
        avg_page_rate = static_cast<ulint>(
            ((static_cast<double>(sum_pages)
              / time_elapsed)
             + avg_page_rate) / 2);
        /* How much LSN we have generated since last call. */
        lsn_rate = static_cast<lsn_t>(
            static_cast<double>(cur_lsn - prev_lsn)
            / time_elapsed);
        lsn_avg_rate = (lsn_avg_rate + lsn_rate) / 2; 
        prev_lsn = cur_lsn;
        prev_time = curr_time;
        n_iterations = 0;
        sum_pages = 0;
    }
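
Both averages are half-weight moving averages: each update averages the freshly measured rate with the previous average, smoothing out spikes in the workload. A sketch with assumed numbers:

    /* Assumed: previous avg_page_rate = 400 pages/s; the last window
       flushed sum_pages = 6000 pages over time_elapsed = 10 seconds. */
    ulint new_rate = (ulint)(((6000.0 / 10) + 400) / 2);   /* = 500 pages/s */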

pages_for_lsn is the total number of flush-list pages, summed over all buffer pool instances, whose oldest_modification is below target_lsn. target_lsn is first computed from lsn_avg_rate, and then each buffer pool's flush list is scanned to count the pages older than target_lsn.

    oldest_lsn = buf_pool_get_oldest_modification();
    lsn_t   target_lsn = oldest_lsn
                 + lsn_avg_rate * buf_flush_lsn_scan_factor;
    for (ulint i = 0; i < srv_buf_pool_instances; i++) {
        buf_pool_t* buf_pool = buf_pool_from_array(i);
        ulint       pages_for_lsn = 0;
        buf_flush_list_mutex_enter(buf_pool);
        for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool->flush_list);
             b != NULL;
             b = UT_LIST_GET_PREV(list, b)) {
            if (b->oldest_modification > target_lsn) {
                break;
            }
            ++pages_for_lsn;
        }
        buf_flush_list_mutex_exit(buf_pool);
        sum_pages_for_lsn += pages_for_lsn;
        mutex_enter(&page_cleaner->mutex);
        page_cleaner->slots[i].n_pages_requested
            = pages_for_lsn / buf_flush_lsn_scan_factor + 1;
        mutex_exit(&page_cleaner->mutex);
    }
    sum_pages_for_lsn /= buf_flush_lsn_scan_factor;
    if(sum_pages_for_lsn < 1) {
        sum_pages_for_lsn = 1;
    }
    /* Cap the maximum IO capacity that we are going to use by
    max_io_capacity. Limit the value to avoid too quick increase */
    ulint   pages_for_lsn =
        std::min<ulint>(sum_pages_for_lsn, srv_max_io_capacity * 2);
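
The division by buf_flush_lsn_scan_factor (a constant, 3 in the 5.7 source) converts the three-window count back into roughly one second's worth of flushing work. A sketch with assumed numbers:

    /* Assumed: lsn_avg_rate corresponds to 1 MB of redo per second, so
       the scan window covers oldest_lsn .. oldest_lsn + 3 MB, and
       12000 flush-list pages fall inside it. */
    ulint sum_pages_for_lsn = 12000 / 3;   /* = 4000 pages per ~1 s of redo */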

After the total n_pages has been computed, it still has to be apportioned to each buffer pool instance, taking into account the free space left in the redo log and how dirty pages are distributed across instances.

    for (ulint i = 0; i < srv_buf_pool_instances; i++) {
        /* if REDO has enough of free space,
        don't care about age distribution of pages */
        page_cleaner->slots[i].n_pages_requested = pct_for_lsn > 30 ?
            page_cleaner->slots[i].n_pages_requested
            * n_pages / sum_pages_for_lsn + 1
            : n_pages / srv_buf_pool_instances;
    }
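
In other words, when redo pressure is low (pct_for_lsn <= 30) n_pages is split evenly across the buffer pool instances; under higher redo pressure each instance receives a share proportional to its own pages_for_lsn estimate. For example (numbers assumed):

    /* Assumed: n_pages = 900, srv_buf_pool_instances = 3.
       pct_for_lsn <= 30: each slot requests 900 / 3 = 300 pages.
       pct_for_lsn >  30: a slot whose estimate makes up half of
       sum_pages_for_lsn requests roughly 900 / 2 + 1 = 451 pages. */
    ulint even_share = 900 / 3;   /* = 300 pages per instance */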

Full source of page_cleaner_flush_pages_recommendation:

/*********************************************************************//**
This function is called approximately once every second by the
page_cleaner thread. Based on various factors it decides if there is a
need to do flushing.
@return number of pages recommended to be flushed
@param lsn_limit    pointer to return LSN up to which flushing must happen
@param last_pages_in    the number of pages flushed by the last flush_list
            flushing. */
static
ulint
page_cleaner_flush_pages_recommendation(
/*====================================*/
    lsn_t*  lsn_limit,
    ulint   last_pages_in)
{
    static  lsn_t       prev_lsn = 0;
    static  ulint       sum_pages = 0;
    static  ulint       avg_page_rate = 0;
    static  ulint       n_iterations = 0;
    static  ib_time_monotonic_t     prev_time;
    lsn_t           oldest_lsn;
    lsn_t           cur_lsn;
    lsn_t           age;
    lsn_t           lsn_rate;
    ulint           n_pages = 0;
    ulint           pct_for_dirty = 0;
    ulint           pct_for_lsn = 0;
    ulint           pct_total = 0;

    cur_lsn = log_get_lsn();

    if (prev_lsn == 0) {
        /* First time around. */
        prev_lsn = cur_lsn;
        prev_time = ut_time_monotonic();
        return(0);
    }

    if (prev_lsn == cur_lsn) {
        return(0);
    }

    sum_pages += last_pages_in;

    ib_time_monotonic_t curr_time    = ut_time_monotonic();
    uint64_t            time_elapsed = curr_time - prev_time;
    const ulong             avg_loop     = srv_flushing_avg_loops;

    /* We update our variables every srv_flushing_avg_loops
    iterations to smooth out transition in workload. */
    if (++n_iterations >= avg_loop
        || time_elapsed >= (uint64_t)avg_loop) {

        if (time_elapsed < 1) {
            time_elapsed = 1;
        }

        avg_page_rate = static_cast<ulint>(
            ((static_cast<double>(sum_pages)
              / time_elapsed)
             + avg_page_rate) / 2);

        /* How much LSN we have generated since last call. */
        lsn_rate = static_cast<lsn_t>(
            static_cast<double>(cur_lsn - prev_lsn)
            / time_elapsed);

        lsn_avg_rate = (lsn_avg_rate + lsn_rate) / 2; // averaged with the previous value to smooth the curve


        /* aggregate stats of all slots */
        mutex_enter(&page_cleaner->mutex);

        uint64_t  flush_tm = page_cleaner->flush_time;
        ulint   flush_pass = page_cleaner->flush_pass;

        page_cleaner->flush_time = 0;
        page_cleaner->flush_pass = 0;

        uint64_t lru_tm = 0;
        uint64_t list_tm = 0;
        ulint   lru_pass = 0;
        ulint   list_pass = 0;

        for (ulint i = 0; i < page_cleaner->n_slots; i++) {
            page_cleaner_slot_t*    slot;

            slot = &page_cleaner->slots[i];

            lru_tm    += slot->flush_lru_time;
            lru_pass  += slot->flush_lru_pass;
            list_tm   += slot->flush_list_time;
            list_pass += slot->flush_list_pass;

            slot->flush_lru_time  = 0;
            slot->flush_lru_pass  = 0;
            slot->flush_list_time = 0;
            slot->flush_list_pass = 0;
        }

        mutex_exit(&page_cleaner->mutex);

        /* minimum values are 1, to avoid dividing by zero. */
        if (lru_tm < 1) {
            lru_tm = 1;
        }
        if (list_tm < 1) {
            list_tm = 1;
        }
        if (flush_tm < 1) {
            flush_tm = 1;
        }

        if (lru_pass < 1) {
            lru_pass = 1;
        }
        if (list_pass < 1) {
            list_pass = 1;
        }
        if (flush_pass < 1) {
            flush_pass = 1;
        }

        MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_TIME_SLOT,
                list_tm / list_pass);
        MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_TIME_SLOT,
                lru_tm  / lru_pass);

        MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_TIME_THREAD,
                list_tm / (srv_n_page_cleaners * flush_pass));
        MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_TIME_THREAD,
                lru_tm / (srv_n_page_cleaners * flush_pass));
        MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_TIME_EST,
                flush_tm * list_tm / flush_pass
                / (list_tm + lru_tm));
        MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_TIME_EST,
                flush_tm * lru_tm / flush_pass
                / (list_tm + lru_tm));
        MONITOR_SET(MONITOR_FLUSH_AVG_TIME, flush_tm / flush_pass);

        MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_PASS,
                list_pass / page_cleaner->n_slots);
        MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_PASS,
                lru_pass / page_cleaner->n_slots);
        MONITOR_SET(MONITOR_FLUSH_AVG_PASS, flush_pass);

        prev_lsn = cur_lsn;
        prev_time = curr_time;

        n_iterations = 0;

        sum_pages = 0;
    }

    oldest_lsn = buf_pool_get_oldest_modification();

    ut_ad(oldest_lsn <= log_get_lsn());

    age = cur_lsn > oldest_lsn ? cur_lsn - oldest_lsn : 0;

    pct_for_dirty = af_get_pct_for_dirty();
    pct_for_lsn = af_get_pct_for_lsn(age);

    pct_total = ut_max(pct_for_dirty, pct_for_lsn);

    /* Estimate pages to be flushed for the lsn progress */
    ulint   sum_pages_for_lsn = 0;
    lsn_t   target_lsn = oldest_lsn
                 + lsn_avg_rate * buf_flush_lsn_scan_factor;

    for (ulint i = 0; i < srv_buf_pool_instances; i++) {
        buf_pool_t* buf_pool = buf_pool_from_array(i);
        ulint       pages_for_lsn = 0;

        buf_flush_list_mutex_enter(buf_pool);
        for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool->flush_list);
             b != NULL;
             b = UT_LIST_GET_PREV(list, b)) {
            if (b->oldest_modification > target_lsn) {
                break;
            }
            ++pages_for_lsn;
        }
        buf_flush_list_mutex_exit(buf_pool);

        sum_pages_for_lsn += pages_for_lsn;

        mutex_enter(&page_cleaner->mutex);
        ut_ad(page_cleaner->slots[i].state
              == PAGE_CLEANER_STATE_NONE);
        page_cleaner->slots[i].n_pages_requested
            = pages_for_lsn / buf_flush_lsn_scan_factor + 1;
        mutex_exit(&page_cleaner->mutex);
    }

    sum_pages_for_lsn /= buf_flush_lsn_scan_factor;
    if(sum_pages_for_lsn < 1) {
        sum_pages_for_lsn = 1;
    }

    /* Cap the maximum IO capacity that we are going to use by
    max_io_capacity. Limit the value to avoid too quick increase */
    ulint   pages_for_lsn =
        std::min<ulint>(sum_pages_for_lsn, srv_max_io_capacity * 2);

    n_pages = (PCT_IO(pct_total) + avg_page_rate + pages_for_lsn) / 3;

    if (n_pages > srv_max_io_capacity) {
        n_pages = srv_max_io_capacity;
    }

    /* Normalize request for each instance */
    mutex_enter(&page_cleaner->mutex);
    ut_ad(page_cleaner->n_slots_requested == 0);
    ut_ad(page_cleaner->n_slots_flushing == 0);
    ut_ad(page_cleaner->n_slots_finished == 0);

    for (ulint i = 0; i < srv_buf_pool_instances; i++) {
        /* if REDO has enough of free space,
        don't care about age distribution of pages */
        page_cleaner->slots[i].n_pages_requested = pct_for_lsn > 30 ?
            page_cleaner->slots[i].n_pages_requested
            * n_pages / sum_pages_for_lsn + 1
            : n_pages / srv_buf_pool_instances;
    }
    mutex_exit(&page_cleaner->mutex);

    MONITOR_SET(MONITOR_FLUSH_N_TO_FLUSH_REQUESTED, n_pages);

    MONITOR_SET(MONITOR_FLUSH_N_TO_FLUSH_BY_AGE, sum_pages_for_lsn);

    MONITOR_SET(MONITOR_FLUSH_AVG_PAGE_RATE, avg_page_rate);
    MONITOR_SET(MONITOR_FLUSH_LSN_AVG_RATE, lsn_avg_rate);
    MONITOR_SET(MONITOR_FLUSH_PCT_FOR_DIRTY, pct_for_dirty);
    MONITOR_SET(MONITOR_FLUSH_PCT_FOR_LSN, pct_for_lsn);

    *lsn_limit = LSN_MAX;

    return(n_pages);
}

