
AV1 Code Study 1 --- encoder_encode()

2020-06-18  青吟乐

1. Function overview

encoder_encode() mostly does preparatory work for encoding, such as validating the input image for compatibility and setting up the av1_lookahead buffers, and then calls av1_get_compressed_data() to perform the actual encoding and obtain the compressed bitstream.
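
For context, encoder_encode() is not called directly by applications: it sits behind the public aom_codec_encode() API of the AV1 encoder. Below is a minimal calling sketch; read_yuv_frame() and the output FILE are placeholders used only for illustration, and error handling is omitted.

#include <stdio.h>
#include <aom/aom_encoder.h>
#include <aom/aomcx.h>

// Hypothetical helper that fills `raw` with the next input frame; returns 0 at EOF.
extern int read_yuv_frame(aom_image_t *raw);

static void encode_sketch(unsigned int width, unsigned int height, FILE *out) {
  aom_codec_ctx_t codec;
  aom_codec_enc_cfg_t cfg;
  aom_image_t raw;

  aom_img_alloc(&raw, AOM_IMG_FMT_I420, width, height, 1);
  aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg, 0);  // usage 0 = good quality
  cfg.g_w = width;
  cfg.g_h = height;
  aom_codec_enc_init(&codec, aom_codec_av1_cx(), &cfg, 0);

  aom_codec_pts_t pts = 0;
  while (read_yuv_frame(&raw)) {
    // This call ends up in encoder_encode() inside av1/av1_cx_iface.c.
    aom_codec_encode(&codec, &raw, pts++, /*duration=*/1, /*flags=*/0);
    aom_codec_iter_t iter = NULL;
    const aom_codec_cx_pkt_t *pkt;
    while ((pkt = aom_codec_get_cx_data(&codec, &iter)) != NULL) {
      if (pkt->kind == AOM_CODEC_CX_FRAME_PKT)
        fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, out);
    }
  }
  // Passing img == NULL flushes the encoder (repeat until no packets remain).
  aom_codec_encode(&codec, NULL, pts, 1, 0);
  aom_codec_destroy(&codec);
  aom_img_free(&raw);
}

Everything discussed in this post happens inside that aom_codec_encode() call.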

2. Code walkthrough

The comments in the code below give a rough idea of what each part is doing. I am still new to video coding, so corrections are very welcome; the next post will go into more detail.

static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
                                      const aom_image_t *img,
                                      aom_codec_pts_t pts,
                                      unsigned long duration,
                                      aom_enc_frame_flags_t enc_flags) {
  const size_t kMinCompressedSize = 8192;
  volatile aom_codec_err_t res = AOM_CODEC_OK;  // records whether an error occurred during encoding
  AV1_COMP *const cpi = ctx->cpi;
  const aom_rational64_t *const timestamp_ratio = &ctx->timestamp_ratio;
  volatile aom_codec_pts_t ptsvol = pts;

  if (cpi == NULL) return AOM_CODEC_INVALID_PARAM;
  // Validate the input image and make sure the buffer that will hold the
  // compressed data (ctx->cx_data) is large enough before encoding starts.
  if (img != NULL) {
    res = validate_img(ctx, img);
    // TODO(jzern) the checks related to cpi's validity should be treated as a
    // failure condition, encoder setup is done fully in init() currently.
    if (res == AOM_CODEC_OK) {
      // ALIGN_POWER_OF_TWO(v, 5) rounds v up to the next multiple of 2^5 = 32
      size_t data_sz = ALIGN_POWER_OF_TWO(ctx->cfg.g_w, 5) *
                       ALIGN_POWER_OF_TWO(ctx->cfg.g_h, 5) * get_image_bps(img);
      if (data_sz < kMinCompressedSize) data_sz = kMinCompressedSize;
      if (ctx->cx_data == NULL || ctx->cx_data_sz < data_sz) {
        ctx->cx_data_sz = data_sz;
        free(ctx->cx_data);
        ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz);
        if (ctx->cx_data == NULL) {
          return AOM_CODEC_MEM_ERROR;
        }
      }
    }
  }
  // Encoder configuration: force a supported usage mode (GOOD) if needed
  if (ctx->oxcf.mode != GOOD && ctx->oxcf.mode != REALTIME) {
    ctx->oxcf.mode = GOOD;
    av1_change_config(ctx->cpi, &ctx->oxcf);
  }
  // PTS offset: remember the first timestamp and make later ones relative to it
  if (!ctx->pts_offset_initialized) {
    ctx->pts_offset = ptsvol;
    ctx->pts_offset_initialized = 1;
  }
  ptsvol -= ctx->pts_offset;

  aom_codec_pkt_list_init(&ctx->pkt_list);

  volatile aom_enc_frame_flags_t flags = enc_flags;

  // The jmp_buf is valid only for the duration of the function that calls
  // setjmp(). Therefore, this function must reset the 'setjmp' field to 0
  // before it returns.
  if (setjmp(cpi->common.error.jmp)) {
    cpi->common.error.setjmp = 0;
    res = update_error_state(ctx, &cpi->common.error);
    aom_clear_system_state();
    return res;
  }
  cpi->common.error.setjmp = 1;

  // Note(yunqing): While applying encoding flags, always start from enabling
  // all, and then modifying according to the flags. Previous frame's flags are
  // overwritten.
  av1_apply_encoding_flags(cpi, flags);  // apply the per-frame encoding flags

  // Handle fixed keyframe intervals
  if (ctx->cfg.kf_mode == AOM_KF_AUTO &&
      ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
    if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) {
      flags |= AOM_EFLAG_FORCE_KF;
      ctx->fixed_kf_cntr = 1;
    }
  }

  if (res == AOM_CODEC_OK) {
    int64_t dst_time_stamp = timebase_units_to_ticks(timestamp_ratio, ptsvol);
    int64_t dst_end_time_stamp =
        timebase_units_to_ticks(timestamp_ratio, ptsvol + duration);

    // Set up internal flags
    if (ctx->base.init_flags & AOM_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1;

    if (img != NULL) {
      YV12_BUFFER_CONFIG sd;
      res = image2yuvconfig(img, &sd);  // wrap the raw image buffers in the YV12_BUFFER_CONFIG sd

      // Store the original flags in to the frame buffer. Will extract the
      // key frame flag when we actually encode this frame.
      if (av1_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
                                dst_time_stamp, dst_end_time_stamp)) {
        /* av1_receive_raw_frame() sets up the frame buffers. For example,
         * alloc_raw_frame_buffers() initializes the buffers and variables of
         * the lookahead struct (one buffer is allocated per depth; the
         * maximum depth defaults to 20) and reallocates the alt-ref buffer.
         * init_ref_frame_bufs() initializes cm->ref_frame_map and the hash
         * table mappings. alloc_util_frame_buffers() initializes the source,
         * last_source and other buffers that are refreshed every frame, and
         * av1_lookahead_push() queues the raw frame.
         */
        // Initialization failed: record the error state.
        res = update_error_state(ctx, &cpi->common.error);
      }
      ctx->next_frame_flags = 0;
    }

    unsigned char *cx_data = ctx->cx_data;
    size_t cx_data_sz = ctx->cx_data_sz;

    assert(!(cx_data == NULL && cx_data_sz != 0));

    /* Data from previously encoded invisible frames that is still pending */
    if (ctx->pending_cx_data) {
      memmove(cx_data, ctx->pending_cx_data, ctx->pending_cx_data_sz);
      ctx->pending_cx_data = cx_data;
      cx_data += ctx->pending_cx_data_sz;
      cx_data_sz -= ctx->pending_cx_data_sz;

      /* TODO: this is a minimal check, the underlying codec doesn't respect
       * the buffer size anyway.
       */
      if (cx_data_sz < ctx->cx_data_sz / 2) {
        aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR,
                           "Compressed data buffer too small");
      }
    }

    size_t frame_size = 0;
    unsigned int lib_flags = 0;
    int is_frame_visible = 0;
    int index_size = 0;
    int has_fwd_keyframe = 0;
    // invisible frames get packed with the next visible frame
    // av1_get_compressed_data() returning -1 means no frame was encoded; this
    // call is also the entry point into the next layer of the encoder.
    while (cx_data_sz - index_size >= ctx->cx_data_sz / 2 &&
           !is_frame_visible &&
           -1 != av1_get_compressed_data(cpi, &lib_flags, &frame_size, cx_data,
                                         &dst_time_stamp, &dst_end_time_stamp,
                                         !img, timestamp_ratio)) {
      cpi->seq_params_locked = 1;
      if (frame_size) {
        if (ctx->pending_cx_data == 0) ctx->pending_cx_data = cx_data;

        const int write_temporal_delimiter =
            !cpi->common.spatial_layer_id && !ctx->pending_frame_count;

        if (write_temporal_delimiter) {
          uint32_t obu_header_size = 1;
          const uint32_t obu_payload_size = 0;
          const size_t length_field_size =
              aom_uleb_size_in_bytes(obu_payload_size);

          if (ctx->pending_cx_data) {
            const size_t move_offset = length_field_size + 1;
            memmove(ctx->pending_cx_data + move_offset, ctx->pending_cx_data,
                    frame_size);
          }
          const uint32_t obu_header_offset = 0;
          obu_header_size = av1_write_obu_header(
              cpi, OBU_TEMPORAL_DELIMITER, 0,
              (uint8_t *)(ctx->pending_cx_data + obu_header_offset));

          // OBUs are preceded/succeeded by an unsigned leb128 coded integer.
          if (av1_write_uleb_obu_size(obu_header_size, obu_payload_size,
                                      ctx->pending_cx_data) != AOM_CODEC_OK) {
            aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR, NULL);
          }

          frame_size += obu_header_size + obu_payload_size + length_field_size;
        }

        if (ctx->oxcf.save_as_annexb) {
          size_t curr_frame_size = frame_size;
          if (av1_convert_sect5obus_to_annexb(cx_data, &curr_frame_size) !=
              AOM_CODEC_OK) {
            aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR, NULL);
          }
          frame_size = curr_frame_size;

          // B_PRIME (add frame size)
          const size_t length_field_size = aom_uleb_size_in_bytes(frame_size);
          if (ctx->pending_cx_data) {
            const size_t move_offset = length_field_size;
            memmove(cx_data + move_offset, cx_data, frame_size);
          }
          if (av1_write_uleb_obu_size(0, (uint32_t)frame_size, cx_data) !=
              AOM_CODEC_OK) {
            aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR, NULL);
          }
          frame_size += length_field_size;
        }

        ctx->pending_frame_sizes[ctx->pending_frame_count++] = frame_size;
        ctx->pending_cx_data_sz += frame_size;

        cx_data += frame_size;
        cx_data_sz -= frame_size;

        index_size = MAG_SIZE * (ctx->pending_frame_count - 1) + 2;

        is_frame_visible = cpi->common.show_frame;

        has_fwd_keyframe |= (!is_frame_visible &&
                             cpi->common.current_frame.frame_type == KEY_FRAME);
      }
    }
    if (is_frame_visible) {
      // Add the frame packet to the list of returned packets.
      aom_codec_cx_pkt_t pkt;

      if (ctx->oxcf.save_as_annexb) {
        //  B_PRIME (add TU size)
        size_t tu_size = ctx->pending_cx_data_sz;
        const size_t length_field_size = aom_uleb_size_in_bytes(tu_size);
        if (ctx->pending_cx_data) {
          const size_t move_offset = length_field_size;
          memmove(ctx->pending_cx_data + move_offset, ctx->pending_cx_data,
                  tu_size);
        }
        if (av1_write_uleb_obu_size(0, (uint32_t)tu_size,
                                    ctx->pending_cx_data) != AOM_CODEC_OK) {
          aom_internal_error(&cpi->common.error, AOM_CODEC_ERROR, NULL);
        }
        ctx->pending_cx_data_sz += length_field_size;
      }

      pkt.kind = AOM_CODEC_CX_FRAME_PKT;

      pkt.data.frame.buf = ctx->pending_cx_data;
      pkt.data.frame.sz = ctx->pending_cx_data_sz;
      pkt.data.frame.partition_id = -1;
      pkt.data.frame.vis_frame_size = frame_size;

      pkt.data.frame.pts =
          ticks_to_timebase_units(timestamp_ratio, dst_time_stamp) +
          ctx->pts_offset;
      pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags);
      if (has_fwd_keyframe) {
        // If one of the invisible frames in the packet is a keyframe, set
        // the delayed random access point flag.
        pkt.data.frame.flags |= AOM_FRAME_IS_DELAYED_RANDOM_ACCESS_POINT;
      }
      pkt.data.frame.duration = (uint32_t)ticks_to_timebase_units(
          timestamp_ratio, dst_end_time_stamp - dst_time_stamp);

      aom_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);

      ctx->pending_cx_data = NULL;
      ctx->pending_cx_data_sz = 0;
      ctx->pending_frame_count = 0;
    }
  }

  cpi->common.error.setjmp = 0;
  return res;
}
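
A quick note on the data_sz calculation near the top of the function: ALIGN_POWER_OF_TWO(v, 5) rounds v up to the next multiple of 2^5 = 32, so the compressed buffer is sized from the padded frame dimensions multiplied by get_image_bps(img), with kMinCompressedSize as a floor. A small stand-alone sketch of an equivalent alignment macro (illustrative, not copied from libaom):

#include <stdio.h>

// Illustrative macro with the same effect as ALIGN_POWER_OF_TWO(value, n):
// round value up to the nearest multiple of 2^n.
#define ALIGN_UP_POW2(value, n) (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

int main(void) {
  // With n = 5 the unit is 32, so a 1280x720 frame is padded to 1280x736.
  printf("%d x %d\n", ALIGN_UP_POW2(1280, 5), ALIGN_UP_POW2(720, 5));
  return 0;
}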
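The lookahead mentioned in the av1_receive_raw_frame() comment is essentially a FIFO of raw frames that lets the encoder look several frames ahead (for example, for alt-ref decisions) before committing to encode. A conceptual sketch of such a queue is below; the struct and function names are illustrative only and do not match libaom's av1_lookahead implementation:

#include <stddef.h>

// Conceptual lookahead FIFO; names and layout are illustrative, not libaom's.
#define SKETCH_MAX_DEPTH 20  // libaom's default maximum lookahead depth

typedef struct {
  void *bufs[SKETCH_MAX_DEPTH];  // one slot per depth
  int read_idx, write_idx, count, depth;
} lookahead_fifo;

static void lookahead_init(lookahead_fifo *q, int depth) {
  q->read_idx = q->write_idx = q->count = 0;
  q->depth = depth <= SKETCH_MAX_DEPTH ? depth : SKETCH_MAX_DEPTH;
}

// Push one raw frame; fails when the queue is full and must be drained first.
static int lookahead_push(lookahead_fifo *q, void *frame) {
  if (q->count == q->depth) return -1;
  q->bufs[q->write_idx] = frame;
  q->write_idx = (q->write_idx + 1) % q->depth;
  ++q->count;
  return 0;
}

// Pop the oldest frame, or return NULL when nothing is buffered yet.
static void *lookahead_pop(lookahead_fifo *q) {
  if (q->count == 0) return NULL;
  void *frame = q->bufs[q->read_idx];
  q->read_idx = (q->read_idx + 1) % q->depth;
  --q->count;
  return frame;
}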
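Finally, the size fields written with av1_write_uleb_obu_size() use unsigned LEB128 coding: every output byte carries 7 value bits plus a continuation bit in the MSB. A self-contained sketch of the encoding (same idea as aom_uleb_encode(), but not libaom's code):

#include <stddef.h>
#include <stdint.h>

// Unsigned LEB128 encoding sketch: 7 value bits per byte, MSB set while more
// bytes follow. This illustrates the format of the OBU size fields above.
static size_t uleb128_encode(uint64_t value, uint8_t *out) {
  size_t n = 0;
  do {
    uint8_t byte = (uint8_t)(value & 0x7f);  // low 7 bits
    value >>= 7;
    if (value) byte |= 0x80;                 // continuation bit
    out[n++] = byte;
  } while (value);
  return n;  // matches what aom_uleb_size_in_bytes() predicts for this value
}

A payload size of 0 therefore encodes as the single byte 0x00, which is why the temporal delimiter above reserves length_field_size = 1 byte in addition to its one-byte OBU header.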