Source-code analysis of the frame data flow in the mm-camera layer
/**
 * Thanks to @yanbixing123's blog post:
 * http://blog.csdn.net/yanbixing123/article/details/52294305
 */
For an analysis of the upper layers, see http://www.jianshu.com/p/ecb1be82e6a8
This article is based on Qualcomm's open-source code under HAL3 in Android 7.1.1.
- mm_camera layer: analyzing the mm_camera layer mainly involves the following files
- mm_camera_interface.c — the interface exposed to the HAL layer
- mm_camera.c — the central hub
- mm_camera_channel.c — the channel of the mm layer
- mm_camera_stream.c — the stream of the mm layer
- mm_camera_thread.c — the key part of task scheduling
Once we reach the mm layer, everything is written in C, so there are no classes. These files contain plain functions that can all be called directly, but several structs are used to maintain the relationships between the entities they represent.
The mm_camera struct
mm_camera_struct: you can see that it references the channel struct.
The mm_channel struct
mm_channel_struct: it is easy to see that the channel references mm_camera in turn; they reference each other.
The mm_stream struct
mm_stream_struct: by calling the functions in the corresponding file and passing them the corresponding struct, an organization similar to object orientation is achieved. The structs listed above are not complete; only the members I found useful are shown. For the exact meaning of each member, read the source comments at the following path:
source/hardware/qcom/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera.h
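To make these mutual references concrete, here is a heavily simplified, illustrative sketch of the three structs. The field names (my_hdl, fd, ch_obj, cam_obj, streams, ch) follow the real definitions, but everything else — locks, vtables, buffer bookkeeping and the actual constant values — is omitted or assumed.
#include <stdint.h>

/* illustrative placeholders; the real values live in the mm-camera headers */
#define MM_CAMERA_CHANNEL_MAX    8
#define MAX_STREAM_NUM_IN_BUNDLE 8

typedef struct mm_stream     mm_stream_t;
typedef struct mm_channel    mm_channel_t;
typedef struct mm_camera_obj mm_camera_obj_t;

struct mm_stream {
    uint32_t      my_hdl;   /* stream handle */
    int32_t       fd;       /* /dev/videoX fd used for QBUF/DQBUF */
    mm_channel_t *ch_obj;   /* back-reference to the owning channel */
};

struct mm_channel {
    uint32_t         my_hdl;                            /* channel handle */
    mm_camera_obj_t *cam_obj;                           /* back-reference to the camera */
    mm_stream_t      streams[MAX_STREAM_NUM_IN_BUNDLE]; /* streams owned by this channel */
};

struct mm_camera_obj {
    uint32_t     my_hdl;                    /* camera handle */
    mm_channel_t ch[MM_CAMERA_CHANNEL_MAX]; /* channels owned by this camera */
};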
mm_camera_interface.c
Here the mm_camera_ops_t struct is exposed to the HAL layer; it mainly registers the mapping between function pointers and functions, as shown below:
static mm_camera_ops_t mm_camera_ops = {
.query_capability = mm_camera_intf_query_capability,
.register_event_notify = mm_camera_intf_register_event_notify,
.close_camera = mm_camera_intf_close,
.set_parms = mm_camera_intf_set_parms,
.get_parms = mm_camera_intf_get_parms,
.do_auto_focus = mm_camera_intf_do_auto_focus,
.cancel_auto_focus = mm_camera_intf_cancel_auto_focus,
.prepare_snapshot = mm_camera_intf_prepare_snapshot,
.start_zsl_snapshot = mm_camera_intf_start_zsl_snapshot,
.stop_zsl_snapshot = mm_camera_intf_stop_zsl_snapshot,
.map_buf = mm_camera_intf_map_buf,
.map_bufs = mm_camera_intf_map_bufs,
.unmap_buf = mm_camera_intf_unmap_buf,
.add_channel = mm_camera_intf_add_channel,
.delete_channel = mm_camera_intf_del_channel,
.get_bundle_info = mm_camera_intf_get_bundle_info,
.add_stream = mm_camera_intf_add_stream,
.link_stream = mm_camera_intf_link_stream,
.delete_stream = mm_camera_intf_del_stream,
.config_stream = mm_camera_intf_config_stream,
.qbuf = mm_camera_intf_qbuf,
.cancel_buffer = mm_camera_intf_cancel_buf,
.get_queued_buf_count = mm_camera_intf_get_queued_buf_count,
.map_stream_buf = mm_camera_intf_map_stream_buf,
.map_stream_bufs = mm_camera_intf_map_stream_bufs,
.unmap_stream_buf = mm_camera_intf_unmap_stream_buf,
.set_stream_parms = mm_camera_intf_set_stream_parms,
.get_stream_parms = mm_camera_intf_get_stream_parms,
.start_channel = mm_camera_intf_start_channel,
.stop_channel = mm_camera_intf_stop_channel,
.request_super_buf = mm_camera_intf_request_super_buf,
.cancel_super_buf_request = mm_camera_intf_cancel_super_buf_request,
.flush_super_buf_queue = mm_camera_intf_flush_super_buf_queue,
.configure_notify_mode = mm_camera_intf_configure_notify_mode,
.process_advanced_capture = mm_camera_intf_process_advanced_capture,
.get_session_id = mm_camera_intf_get_session_id,
.sync_related_sensors = mm_camera_intf_sync_related_sensors,
.flush = mm_camera_intf_flush,
.register_stream_buf_cb = mm_camera_intf_register_stream_buf_cb
};
The left-hand side is what is exposed to the HAL layer, and the right-hand side is the function defined in this file. Each call must pass in the camera handle camera_handle (an integer generated when the camera is opened); mm_camera_util_get_camera_by_handler uses it to look up the corresponding mm_camera struct (a phone can have several camera devices), and every interface method then calls the corresponding method in mm_camera.
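As a side note, the handle is just an integer that encodes the object's index inside its parent array, which is what makes the *_get_*_by_handler lookups cheap. A minimal sketch of that idiom, assuming a counter-plus-index packing; the actual encoding used by mm_camera_util_generate_handler and mm_camera_util_get_index_by_handler in mm_camera_interface.c may differ in detail:
#include <stdint.h>

static uint16_t g_handler_counter = 0;   /* assumed global sequence number */

/* pack an ever-increasing counter with the object's index in its parent array */
static uint32_t generate_handler_sketch(uint8_t index)
{
    return ((uint32_t)(++g_handler_counter) << 8) | index;
}

/* recover the array index so the object can be looked up directly */
static uint8_t get_index_by_handler_sketch(uint32_t handler)
{
    return (uint8_t)(handler & 0x000000ff);
}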
camera_open source:
int32_t camera_open(uint8_t camera_idx, mm_camera_vtbl_t **camera_vtbl)
{
int32_t rc = 0;
mm_camera_obj_t *cam_obj = NULL;
......
/* initialize camera obj */
//initialize the struct that represents this mm_camera
memset(cam_obj, 0, sizeof(mm_camera_obj_t));
cam_obj->ctrl_fd = -1;
cam_obj->ds_fd = -1;
cam_obj->ref_count++;
cam_obj->my_hdl = mm_camera_util_generate_handler(camera_idx);
cam_obj->vtbl.camera_handle = cam_obj->my_hdl; /* set handler */
cam_obj->vtbl.ops = &mm_camera_ops;
pthread_mutex_init(&cam_obj->cam_lock, NULL);
/* unlock global interface lock, if not, in dual camera use case,
* current open will block operation of another opened camera obj*/
pthread_mutex_lock(&cam_obj->cam_lock);
pthread_mutex_unlock(&g_intf_lock);
rc = mm_camera_open(cam_obj);
......
}
mm_camera.c
This file mainly creates and manages channels, configures and manages streams, and defines some event callbacks for handling events; mm_camera communicates with mm_channel by calling mm_channel's state-machine function.
When the camera is opened, the two threads defined in mm_camera_thread.c are launched: cmd_thread and poll_thread. poll_thread is a crucial thread — frame data is eventually retrieved through it as well — but the instance launched here is not used to process data; it handles events returned by the kernel, e.g. the camera dying.
int32_t mm_camera_open(mm_camera_obj_t *my_obj)
{
......
mm_camera_cmd_thread_launch(&my_obj->evt_thread,
mm_camera_dispatch_app_event,
(void *)my_obj);
/* launch event poll thread
* we will add evt fd into event poll thread upon user first register for evt */
LOGD("Launch evt Poll Thread in Cam Open");
snprintf(my_obj->evt_poll_thread.threadName, THREAD_NAME_SIZE, "CAM_evntPoll");
mm_camera_poll_thread_launch(&my_obj->evt_poll_thread,
MM_CAMERA_POLL_TYPE_EVT);
mm_camera_evt_sub(my_obj, TRUE);
......
}
Kernel events reach the mm_camera layer through the poll_thread's callback mm_camera_event_notify; mm_camera then sends the corresponding command to the cmd_thread, whose callback mm_camera_dispatch_app_event runs, and finally the camEvtHandle method of QCamera3HardwareInterface in the HAL layer is called back.
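A condensed sketch of that event path, assuming the usual Linux/V4L2 headers and the mm_camera internal types; the real mm_camera_event_notify in mm_camera.c does more bookkeeping, but the shape is: dequeue the V4L2 event, wrap it into a command node, and wake the evt cmd_thread, whose callback is mm_camera_dispatch_app_event.
/* simplified sketch, not the literal source */
static void mm_camera_event_notify_sketch(void *user_data)
{
    mm_camera_obj_t *my_obj = (mm_camera_obj_t *)user_data;
    struct v4l2_event ev;

    memset(&ev, 0, sizeof(ev));
    /* the evt poll thread saw POLLPRI on ctrl_fd, so dequeue the kernel event */
    if (ioctl(my_obj->ctrl_fd, VIDIOC_DQEVENT, &ev) < 0) {
        return;
    }

    /* hand it over to the evt cmd_thread; its callback is
     * mm_camera_dispatch_app_event, which finally reaches
     * QCamera3HardwareInterface::camEvtHandle in the HAL */
    mm_camera_cmdcb_t *node = malloc(sizeof(mm_camera_cmdcb_t));
    if (node != NULL) {
        memset(node, 0, sizeof(mm_camera_cmdcb_t));
        node->cmd_type = MM_CAMERA_CMD_TYPE_EVT_CB;
        /* ...translate the v4l2 event payload into the node here... */
        cam_queue_enq(&(my_obj->evt_thread.cmd_queue), node);
        cam_sem_post(&(my_obj->evt_thread.cmd_sem));
    }
}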
mm_camera_channel
A channel is mainly used to manage streams: one channel can own multiple streams and operate on them. The first channel is added to mm_camera through add_channel in QCamera3HardwareInterface::initialize:
int QCamera3HardwareInterface::initialize(
const struct camera3_callback_ops *callback_ops)
{
ATRACE_CALL();
int rc;
LOGI("E :mCameraId = %d mState = %d", mCameraId, mState);
pthread_mutex_lock(&mMutex);
// Validate current state
switch (mState) {
case OPENED:
/* valid state */
break;
default:
LOGE("Invalid state %d", mState);
rc = -ENODEV;
goto err1;
}
rc = initParameters();
if (rc < 0) {
LOGE("initParamters failed %d", rc);
goto err1;
}
mCallbackOps = callback_ops;
//the first channel
mChannelHandle = mCameraHandle->ops->add_channel(
mCameraHandle->camera_handle, NULL, NULL, this);
if (mChannelHandle == 0) {
LOGE("add_channel failed");
rc = -ENOMEM;
pthread_mutex_unlock(&mMutex);
return rc;
}
pthread_mutex_unlock(&mMutex);
mCameraInitialized = true;
mState = INITIALIZED;
LOGI("X");
return 0;
err1:
pthread_mutex_unlock(&mMutex);
return rc;
}
add_channel ultimately ends up in mm_camera's mm_camera_add_channel.
uint32_t mm_camera_add_channel(mm_camera_obj_t *my_obj,
mm_camera_channel_attr_t *attr,
mm_camera_buf_notify_t channel_cb,
void *userdata)
{
mm_channel_t *ch_obj = NULL;
uint8_t ch_idx = 0;
uint32_t ch_hdl = 0;
for(ch_idx = 0; ch_idx < MM_CAMERA_CHANNEL_MAX; ch_idx++) {
if (MM_CHANNEL_STATE_NOTUSED == my_obj->ch[ch_idx].state) {
ch_obj = &my_obj->ch[ch_idx];
break;
}
}
if (NULL != ch_obj) {
/* initialize channel obj */
memset(ch_obj, 0, sizeof(mm_channel_t));
ch_hdl = mm_camera_util_generate_handler(ch_idx);
ch_obj->my_hdl = ch_hdl;
ch_obj->state = MM_CHANNEL_STATE_STOPPED;
ch_obj->cam_obj = my_obj;
pthread_mutex_init(&ch_obj->ch_lock, NULL);
ch_obj->sessionid = my_obj->sessionid;
mm_channel_init(ch_obj, attr, channel_cb, userdata);
}
pthread_mutex_unlock(&my_obj->cam_lock);
return ch_hdl;
}
You can see that it calls mm_channel_init.
int32_t mm_channel_init(mm_channel_t *my_obj,
mm_camera_channel_attr_t *attr,
mm_camera_buf_notify_t channel_cb,
void *userdata)
{
int32_t rc = 0;
my_obj->bundle.super_buf_notify_cb = channel_cb;
my_obj->bundle.user_data = userdata;
if (NULL != attr) {
my_obj->bundle.superbuf_queue.attr = *attr;
}
LOGD("Launch data poll thread in channel open");
snprintf(my_obj->poll_thread[0].threadName, THREAD_NAME_SIZE, "CAM_dataPoll");
mm_camera_poll_thread_launch(&my_obj->poll_thread[0],
MM_CAMERA_POLL_TYPE_DATA);
//another poll_thread is launched here
/* change state to stopped state */
my_obj->state = MM_CHANNEL_STATE_STOPPED;
return rc;
}
Note that the poll_thread here has type TYPE_DATA — this is exactly the key thread through which we obtain data from the camera device; we will analyze the details later. Also, apart from QCamera3ReprocessChannel, which also calls add_channel in its initialize, no other HAL-layer channel calls add_channel again. In other words, the many channels of the upper layer are backed by just one channel at the bottom, which manages all the streams.
Internally, mm_channel schedules itself through a state machine:
int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
mm_channel_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = -1;
LOGD("E state = %d", my_obj->state);
//first check the current state, then call the corresponding state-transition function
switch (my_obj->state) {
case MM_CHANNEL_STATE_NOTUSED:
rc = mm_channel_fsm_fn_notused(my_obj, evt, in_val, out_val);
break;
case MM_CHANNEL_STATE_STOPPED:
rc = mm_channel_fsm_fn_stopped(my_obj, evt, in_val, out_val);
break;
case MM_CHANNEL_STATE_ACTIVE:
rc = mm_channel_fsm_fn_active(my_obj, evt, in_val, out_val);
break;
case MM_CHANNEL_STATE_PAUSED:
rc = mm_channel_fsm_fn_paused(my_obj, evt, in_val, out_val);
break;
default:
LOGD("Not a valid state (%d)", my_obj->state);
break;
}
/* unlock ch_lock */
pthread_mutex_unlock(&my_obj->ch_lock);
LOGD("X rc = %d", rc);
return rc;
}
This is the dispatch function: it first checks its current state and then calls the corresponding state-transition function. Take the add_stream method as an example. In the HAL layer, when a channel is initialized, QCamera3Channel's addStream method is called; it news a QCamera3Stream and calls its init:
int32_t QCamera3Channel::addStream(cam_stream_type_t streamType,
cam_format_t streamFormat,
cam_dimension_t streamDim,
cam_rotation_t streamRotation,
uint8_t minStreamBufNum,
cam_feature_mask_t postprocessMask,
cam_is_type_t isType,
uint32_t batchSize)
{
int32_t rc = NO_ERROR;
if (m_numStreams >= 1) {
LOGE("Only one stream per channel supported in v3 Hal");
return BAD_VALUE;
}
if (m_numStreams >= MAX_STREAM_NUM_IN_BUNDLE) {
LOGE("stream number (%d) exceeds max limit (%d)",
m_numStreams, MAX_STREAM_NUM_IN_BUNDLE);
return BAD_VALUE;
}
QCamera3Stream *pStream = new QCamera3Stream(m_camHandle,
m_handle,
m_camOps,
&mPaddingInfo,
this);
if (pStream == NULL) {
LOGE("No mem for Stream");
return NO_MEMORY;
}
LOGD("batch size is %d", batchSize);
rc = pStream->init(streamType, streamFormat, streamDim, streamRotation,
NULL, minStreamBufNum, postprocessMask, isType, batchSize,
streamCbRoutine, this);
if (rc == 0) {
mStreams[m_numStreams] = pStream;
m_numStreams++;
} else {
delete pStream;
}
return rc;
}
You can see that mm_camera_interface's add_stream and config_stream are called here:
int32_t QCamera3Stream::init(cam_stream_type_t streamType,
cam_format_t streamFormat,
cam_dimension_t streamDim,
cam_rotation_t streamRotation,
cam_stream_reproc_config_t* reprocess_config,
uint8_t minNumBuffers,
cam_feature_mask_t postprocess_mask,
cam_is_type_t is_type,
uint32_t batchSize,
hal3_stream_cb_routine stream_cb,
void *userdata)
{
int32_t rc = OK;
ssize_t bufSize = BAD_INDEX;
mm_camera_stream_config_t stream_config;
LOGD("batch size is %d", batchSize);
mHandle = mCamOps->add_stream(mCamHandle, mChannelHandle);
if (!mHandle) {
LOGE("add_stream failed");
rc = UNKNOWN_ERROR;
goto done;
}
// allocate and map stream info memory
mStreamInfoBuf = new QCamera3HeapMemory(1);
if (mStreamInfoBuf == NULL) {
LOGE("no memory for stream info buf obj");
rc = -ENOMEM;
goto err1;
}
rc = mStreamInfoBuf->allocate(sizeof(cam_stream_info_t));
if (rc < 0) {
LOGE("no memory for stream info");
rc = -ENOMEM;
goto err2;
}
//configure the stream info
mStreamInfo =
reinterpret_cast<cam_stream_info_t *>(mStreamInfoBuf->getPtr(0));
memset(mStreamInfo, 0, sizeof(cam_stream_info_t));
mStreamInfo->stream_type = streamType;
mStreamInfo->fmt = streamFormat;
mStreamInfo->dim = streamDim;
mStreamInfo->num_bufs = minNumBuffers;
mStreamInfo->pp_config.feature_mask = postprocess_mask;
mStreamInfo->is_type = is_type;
mStreamInfo->pp_config.rotation = streamRotation;
LOGD("stream_type is %d, feature_mask is %Ld",
mStreamInfo->stream_type, mStreamInfo->pp_config.feature_mask);
bufSize = mStreamInfoBuf->getSize(0);
if (BAD_INDEX != bufSize) {
//map the stream info buffer
rc = mCamOps->map_stream_buf(mCamHandle,
mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO,
0, -1, mStreamInfoBuf->getFd(0), (size_t)bufSize,
mStreamInfoBuf->getPtr(0));
if (rc < 0) {
LOGE("Failed to map stream info buffer");
goto err3;
}
} else {
LOGE("Failed to retrieve buffer size (bad index)");
goto err3;
}
mNumBufs = minNumBuffers;
if (reprocess_config != NULL) {
mStreamInfo->reprocess_config = *reprocess_config;
mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
//mStreamInfo->num_of_burst = reprocess_config->offline.num_of_bufs;
mStreamInfo->num_of_burst = 1;
} else if (batchSize) {
if (batchSize > MAX_BATCH_SIZE) {
LOGE("batchSize:%d is very large", batchSize);
rc = BAD_VALUE;
goto err4;
}
else {
mNumBatchBufs = MAX_INFLIGHT_HFR_REQUESTS / batchSize;
mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BATCH;
mStreamInfo->user_buf_info.frame_buf_cnt = batchSize;
mStreamInfo->user_buf_info.size =
(uint32_t)(sizeof(msm_camera_user_buf_cont_t));
mStreamInfo->num_bufs = mNumBatchBufs;
//Frame interval is irrelavent since time stamp calculation is not
//required from the mCamOps
mStreamInfo->user_buf_info.frameInterval = 0;
LOGD("batch size is %d", batchSize);
}
} else {
mStreamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
}
// Configure the stream
stream_config.stream_info = mStreamInfo;
stream_config.mem_vtbl = mMemVtbl;
stream_config.padding_info = mPaddingInfo;
stream_config.userdata = this;
stream_config.stream_cb = dataNotifyCB;//key: this becomes the low-level buf_cb
stream_config.stream_cb_sync = NULL;
//this will call config_stream
rc = mCamOps->config_stream(mCamHandle,
mChannelHandle, mHandle, &stream_config);
if (rc < 0) {
LOGE("Failed to config stream, rc = %d", rc);
goto err4;
}
mDataCB = stream_cb;//save the callback passed in by the channel, to be invoked later
mUserData = userdata;//reference to the channel
mBatchSize = batchSize;
return 0;
err4:
mCamOps->unmap_stream_buf(mCamHandle,
mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, 0, -1);
err3:
mStreamInfoBuf->deallocate();
err2:
delete mStreamInfoBuf;
mStreamInfoBuf = NULL;
mStreamInfo = NULL;
err1:
mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
mHandle = 0;
mNumBufs = 0;
done:
return rc;
}
Start with add_stream. From the mm_camera_ops mapping table above, we know this calls mm_camera_intf_add_stream:
static uint32_t mm_camera_intf_add_stream(uint32_t camera_handle,
uint32_t ch_id)
{
uint32_t stream_id = 0;
mm_camera_obj_t * my_obj = NULL;
LOGD("E handle = %d ch_id = %d",
camera_handle, ch_id);
pthread_mutex_lock(&g_intf_lock);
//get the struct representing the current mm_camera via its handle
my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
if(my_obj) {
pthread_mutex_lock(&my_obj->cam_lock);
pthread_mutex_unlock(&g_intf_lock);
stream_id = mm_camera_add_stream(my_obj, ch_id);//call mm_camera's mm_camera_add_stream
} else {
pthread_mutex_unlock(&g_intf_lock);
}
LOGD("X stream_id = %d", stream_id);
return stream_id;
}
Continuing into mm_camera:
uint32_t mm_camera_add_stream(mm_camera_obj_t *my_obj,
uint32_t ch_id)
{
uint32_t s_hdl = 0;
mm_channel_t * ch_obj =
mm_camera_util_get_channel_by_handler(my_obj, ch_id);//get a struct representing the channel via the channel id
if (NULL != ch_obj) {
pthread_mutex_lock(&ch_obj->ch_lock);
pthread_mutex_unlock(&my_obj->cam_lock);
//call the channel's state-machine function
mm_channel_fsm_fn(ch_obj,
MM_CHANNEL_EVT_ADD_STREAM,
NULL,
(void *)&s_hdl);
} else {
pthread_mutex_unlock(&my_obj->cam_lock);
}
return s_hdl;
}
You can see that the channel's state-machine function is called with the event set to ADD_STREAM:
int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
mm_channel_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = -1;
LOGD("E state = %d", my_obj->state);
switch (my_obj->state) {
case MM_CHANNEL_STATE_NOTUSED://this means the channel is in an unusable state, not that it has never been used
rc = mm_channel_fsm_fn_notused(my_obj, evt, in_val, out_val);
break;
case MM_CHANNEL_STATE_STOPPED:
rc = mm_channel_fsm_fn_stopped(my_obj, evt, in_val, out_val);
break;
case MM_CHANNEL_STATE_ACTIVE:
rc = mm_channel_fsm_fn_active(my_obj, evt, in_val, out_val);
break;
case MM_CHANNEL_STATE_PAUSED:
rc = mm_channel_fsm_fn_paused(my_obj, evt, in_val, out_val);
break;
default:
LOGD("Not a valid state (%d)", my_obj->state);
break;
}
/* unlock ch_lock */
pthread_mutex_unlock(&my_obj->ch_lock);
LOGD("X rc = %d", rc);
return rc;
}
Since the channel has never been started, it is currently in the STOPPED state, so the second state-transition function is called:
int32_t mm_channel_fsm_fn_stopped(mm_channel_t *my_obj,
mm_channel_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = 0;
LOGD("E evt = %d", evt);
switch (evt) {
case MM_CHANNEL_EVT_ADD_STREAM: //ADD_STREAM takes this branch
{
uint32_t s_hdl = 0;
s_hdl = mm_channel_add_stream(my_obj);
*((uint32_t*)out_val) = s_hdl;
rc = 0;
}
break;
......
default:
LOGW("invalid state (%d) for evt (%d)",
my_obj->state, evt);
break;
}
LOGD("E rc = %d", rc);
return rc;
}
So the channel calls its own add_stream. This branch does not change the channel's state, although some of the other branches do change the current state once they finish.
uint32_t mm_channel_add_stream(mm_channel_t *my_obj)
{
int32_t rc = 0;
uint8_t idx = 0;
uint32_t s_hdl = 0;
mm_stream_t *stream_obj = NULL;
LOGD("E");
/* check available stream */
for (idx = 0; idx < MAX_STREAM_NUM_IN_BUNDLE; idx++) {
if (MM_STREAM_STATE_NOTUSED == my_obj->streams[idx].state) {
stream_obj = &my_obj->streams[idx];
break;
}
}
if (NULL == stream_obj) {
LOGE("streams reach max, no more stream allowed to add");
return s_hdl;
}
/* initialize stream object */
memset(stream_obj, 0, sizeof(mm_stream_t));
stream_obj->fd = -1;
stream_obj->my_hdl = mm_camera_util_generate_handler(idx);
stream_obj->ch_obj = my_obj;
pthread_mutex_init(&stream_obj->buf_lock, NULL);
pthread_mutex_init(&stream_obj->cb_lock, NULL);
pthread_mutex_init(&stream_obj->cmd_lock, NULL);
pthread_cond_init(&stream_obj->buf_cond, NULL);
memset(stream_obj->buf_status, 0,
sizeof(stream_obj->buf_status));
stream_obj->state = MM_STREAM_STATE_INITED;
/* acquire stream */
rc = mm_stream_fsm_fn(stream_obj, MM_STREAM_EVT_ACQUIRE, NULL, NULL);
if (0 == rc) {
s_hdl = stream_obj->my_hdl;
} else {
/* error during acquire, de-init */
pthread_cond_destroy(&stream_obj->buf_cond);
pthread_mutex_destroy(&stream_obj->buf_lock);
pthread_mutex_destroy(&stream_obj->cb_lock);
pthread_mutex_destroy(&stream_obj->cmd_lock);
memset(stream_obj, 0, sizeof(mm_stream_t));
}
LOGD("stream handle = %d", s_hdl);
return s_hdl;
}
So the stream struct is set up and the stream's state-machine function is called, with the stream's current state set to MM_STREAM_STATE_INITED and the event sent being EVT_ACQUIRE.
mm_camera_stream
int32_t mm_stream_fsm_fn(mm_stream_t *my_obj,
mm_stream_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = -1;
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
switch (my_obj->state) {
case MM_STREAM_STATE_NOTUSED:
LOGD("Not handling evt in unused state");
break;
case MM_STREAM_STATE_INITED://so this is the state-transition function that gets called
rc = mm_stream_fsm_inited(my_obj, evt, in_val, out_val);
break;
case MM_STREAM_STATE_ACQUIRED:
rc = mm_stream_fsm_acquired(my_obj, evt, in_val, out_val);
break;
case MM_STREAM_STATE_CFG:
rc = mm_stream_fsm_cfg(my_obj, evt, in_val, out_val);
break;
case MM_STREAM_STATE_BUFFED:
rc = mm_stream_fsm_buffed(my_obj, evt, in_val, out_val);
break;
case MM_STREAM_STATE_REG:
rc = mm_stream_fsm_reg(my_obj, evt, in_val, out_val);
break;
case MM_STREAM_STATE_ACTIVE:
rc = mm_stream_fsm_active(my_obj, evt, in_val, out_val);
break;
default:
LOGD("Not a valid state (%d)", my_obj->state);
break;
}
LOGD("X rc =%d",rc);
return rc;
}
Continuing with the source:
int32_t mm_stream_fsm_inited(mm_stream_t *my_obj,
mm_stream_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = 0;
char dev_name[MM_CAMERA_DEV_NAME_LEN];
const char *dev_name_value = NULL;
if (NULL == my_obj) {
LOGE("NULL camera object\n");
return -1;
}
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
switch(evt) {
case MM_STREAM_EVT_ACQUIRE:
if ((NULL == my_obj->ch_obj) || (NULL == my_obj->ch_obj->cam_obj)) {
LOGE("NULL channel or camera obj\n");
rc = -1;
break;
}
dev_name_value = mm_camera_util_get_dev_name(my_obj->ch_obj->cam_obj->my_hdl);
if (NULL == dev_name_value) {
LOGE("NULL device name\n");
rc = -1;
break;
}
snprintf(dev_name, sizeof(dev_name), "/dev/%s",
dev_name_value);
my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK);
if (my_obj->fd < 0) {
LOGE("open dev returned %d\n", my_obj->fd);
rc = -1;
break;
}
LOGD("open dev fd = %d\n", my_obj->fd);
rc = mm_stream_set_ext_mode(my_obj);
if (0 == rc) {
my_obj->state = MM_STREAM_STATE_ACQUIRED;
} else {
/* failed setting ext_mode
* close fd */
close(my_obj->fd);
my_obj->fd = -1;
break;
}
break;
default:
LOGE("invalid state (%d) for evt (%d), in(%p), out(%p)",
my_obj->state, evt, in_val, out_val);
break;
}
return rc;
}
The operation here opens a device node, stores the fd in the struct, and then changes the current state; at this point the add_stream flow is essentially done. There is still the config_stream operation, which we look at next.
If you are unsure what an fd is, see http://blog.csdn.net/dlutbrucezhang/article/details/9319577
The call path is practically identical to add_stream: mm_camera again calls the channel's state-machine function, the current state is still STOPPED and the event passed is EVT_CONFIG_STREAM, which leads to mm_channel_config_stream inside the channel; that in turn calls the stream's state-machine function with the current state STATE_ACQUIRED and the event EVT_SET_FMT, roughly as sketched below.
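A rough sketch of that intermediate step, reduced to its essence and assuming the channel-side lookup helper mm_channel_util_get_stream_by_handler (the real mm_channel_config_stream in mm_camera_channel.c adds locking and error handling): look up the stream by handle and forward the config through the stream state machine.
/* simplified sketch of mm_channel_config_stream */
int32_t mm_channel_config_stream_sketch(mm_channel_t *my_obj,
                                        uint32_t stream_id,
                                        mm_camera_stream_config_t *config)
{
    int32_t rc = -1;
    /* find the mm_stream_t that add_stream created earlier */
    mm_stream_t *stream_obj =
        mm_channel_util_get_stream_by_handler(my_obj, stream_id);
    if (NULL != stream_obj) {
        /* the stream is currently ACQUIRED, so this lands in
         * mm_stream_fsm_acquired -> MM_STREAM_EVT_SET_FMT */
        rc = mm_stream_fsm_fn(stream_obj, MM_STREAM_EVT_SET_FMT,
                              (void *)config, NULL);
    }
    return rc;
}
With the stream in the ACQUIRED state, the SET_FMT event therefore lands in mm_stream_fsm_acquired: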
int32_t mm_stream_fsm_acquired(mm_stream_t *my_obj,
mm_stream_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = 0;
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
switch(evt) {
case MM_STREAM_EVT_SET_FMT://this branch is taken
{
mm_camera_stream_config_t *config =
(mm_camera_stream_config_t *)in_val;
rc = mm_stream_config(my_obj, config);
/* change state to configed */
my_obj->state = MM_STREAM_STATE_CFG;
break;
}
case MM_STREAM_EVT_RELEASE:
rc = mm_stream_release(my_obj);
/* change state to not used */
my_obj->state = MM_STREAM_STATE_NOTUSED;
break;
case MM_STREAM_EVT_SET_PARM:
{
mm_evt_paylod_set_get_stream_parms_t *payload =
(mm_evt_paylod_set_get_stream_parms_t *)in_val;
rc = mm_stream_set_parm(my_obj, payload->parms);
}
break;
case MM_STREAM_EVT_GET_PARM:
{
mm_evt_paylod_set_get_stream_parms_t *payload =
(mm_evt_paylod_set_get_stream_parms_t *)in_val;
rc = mm_stream_get_parm(my_obj, payload->parms);
}
break;
default:
LOGE("invalid state (%d) for evt (%d), in(%p), out(%p)",
my_obj->state, evt, in_val, out_val);
}
LOGD("X rc = %d", rc);
return rc;
}
You can see that mm_stream_config is called and the current state then becomes STATE_CFG:
int32_t mm_stream_config(mm_stream_t *my_obj,
mm_camera_stream_config_t *config)
{
int32_t rc = 0;
int32_t cb_index = 0;
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
my_obj->stream_info = config->stream_info;
my_obj->buf_num = (uint8_t) config->stream_info->num_bufs;
my_obj->mem_vtbl = config->mem_vtbl;
my_obj->padding_info = config->padding_info;
if (config->stream_cb_sync != NULL) {
/* SYNC callback is always placed at index 0*/
my_obj->buf_cb[cb_index].cb = config->stream_cb_sync;//this is NULL here
my_obj->buf_cb[cb_index].user_data = config->userdata;
my_obj->buf_cb[cb_index].cb_count = -1; /* infinite by default */
my_obj->buf_cb[cb_index].cb_type = MM_CAMERA_STREAM_CB_TYPE_SYNC;
cb_index++;
}
my_obj->buf_cb[cb_index].cb = config->stream_cb;//this was assigned in QCamera3Stream::init (dataNotifyCB)
my_obj->buf_cb[cb_index].user_data = config->userdata;
my_obj->buf_cb[cb_index].cb_count = -1; /* infinite by default */
my_obj->buf_cb[cb_index].cb_type = MM_CAMERA_STREAM_CB_TYPE_ASYNC;
rc = mm_stream_sync_info(my_obj);
if (rc == 0) {
rc = mm_stream_set_fmt(my_obj);
if (rc < 0) {
LOGE("mm_stream_set_fmt failed %d",
rc);
}
}
my_obj->map_ops.map_ops = mm_stream_map_buf_ops;
my_obj->map_ops.bundled_map_ops = mm_stream_bundled_map_buf_ops;
my_obj->map_ops.unmap_ops = mm_stream_unmap_buf_ops;
my_obj->map_ops.userdata = my_obj;
if(my_obj->mem_vtbl.set_config_ops != NULL) {
my_obj->mem_vtbl.set_config_ops(&my_obj->map_ops,
my_obj->mem_vtbl.user_data);
}
return rc;
}
Here the upper layer's callback is attached to the buffer callbacks; later, when the stream fetches buffers from the driver via ioctl, the data is passed back up through these callbacks.
mm_camera_thread
Next, let's look at the two threads directly: a cmd_thread and a poll_thread. With code like this,
mm_camera_cmd_thread_launch(&my_obj->evt_thread,
mm_camera_dispatch_app_event,
(void *)my_obj);
we launch the cmd_thread:
int32_t mm_camera_cmd_thread_launch(mm_camera_cmd_thread_t * cmd_thread,
mm_camera_cmd_cb_t cb,
void* user_data)
{
int32_t rc = 0;
cam_sem_init(&cmd_thread->cmd_sem, 0);
cam_sem_init(&cmd_thread->sync_sem, 0);
cam_queue_init(&cmd_thread->cmd_queue);
cmd_thread->cb = cb;
cmd_thread->user_data = user_data;
cmd_thread->is_active = TRUE;
/* launch the thread */
pthread_create(&cmd_thread->cmd_pid,
NULL,
mm_camera_cmd_thread,
(void *)cmd_thread);
return rc;
}
The first parameter is the struct describing the thread, and the second is the callback stored into that struct; a thread is then created whose body is mm_camera_cmd_thread.
static void *mm_camera_cmd_thread(void *data)
{
int running = 1;
int ret;
mm_camera_cmd_thread_t *cmd_thread =
(mm_camera_cmd_thread_t *)data;
mm_camera_cmdcb_t* node = NULL;
mm_camera_cmd_thread_name(cmd_thread->threadName);
do {
do {
ret = cam_sem_wait(&cmd_thread->cmd_sem);//block the thread waiting on the semaphore
if (ret != 0 && errno != EINVAL) {
LOGE("cam_sem_wait error (%s)",
strerror(errno));
return NULL;
}
} while (ret != 0);
/* we got notified about new cmd avail in cmd queue */
node = (mm_camera_cmdcb_t*)cam_queue_deq(&cmd_thread->cmd_queue);//dequeue a command from the command queue
while (node != NULL) {
switch (node->cmd_type) {
case MM_CAMERA_CMD_TYPE_EVT_CB:
case MM_CAMERA_CMD_TYPE_DATA_CB:
case MM_CAMERA_CMD_TYPE_REQ_DATA_CB:
case MM_CAMERA_CMD_TYPE_SUPER_BUF_DATA_CB:
case MM_CAMERA_CMD_TYPE_CONFIG_NOTIFY:
case MM_CAMERA_CMD_TYPE_START_ZSL:
case MM_CAMERA_CMD_TYPE_STOP_ZSL:
case MM_CAMERA_CMD_TYPE_GENERAL:
case MM_CAMERA_CMD_TYPE_FLUSH_QUEUE:
if (NULL != cmd_thread->cb) {
cmd_thread->cb(node, cmd_thread->user_data);//invoke the callback stored in the thread struct
}
break;
case MM_CAMERA_CMD_TYPE_EXIT:
default:
running = 0;
break;
}
free(node);
node = (mm_camera_cmdcb_t*)cam_queue_deq(&cmd_thread->cmd_queue);
} /* (node != NULL) */
} while (running);
return NULL;
}
Overall it is quite simple: every command is handled by directly invoking the callback that was passed in, so whenever you see the cmd_thread in the source it is easy to follow. Next comes the poll_thread, launched with the call below.
mm_camera_poll_thread_launch(&my_obj->poll_thread[0],
MM_CAMERA_POLL_TYPE_DATA);
Looking at the source:
int32_t mm_camera_poll_thread_launch(mm_camera_poll_thread_t * poll_cb,
mm_camera_poll_thread_type_t poll_type)
{
int32_t rc = 0;
size_t i = 0, cnt = 0;
poll_cb->poll_type = poll_type;
//Initialize poll_fds
cnt = sizeof(poll_cb->poll_fds) / sizeof(poll_cb->poll_fds[0]);
for (i = 0; i < cnt; i++) {
poll_cb->poll_fds[i].fd = -1;
}
//Initialize poll_entries
cnt = sizeof(poll_cb->poll_entries) / sizeof(poll_cb->poll_entries[0]);
for (i = 0; i < cnt; i++) {
poll_cb->poll_entries[i].fd = -1;
}
//Initialize pipe fds
poll_cb->pfds[0] = -1;
poll_cb->pfds[1] = -1;
rc = pipe(poll_cb->pfds);//create the wake-up pipe from the two fds
if(rc < 0) {
LOGE("pipe open rc=%d\n", rc);
return -1;
}
poll_cb->timeoutms = -1; /* Infinite seconds */
LOGD("poll_type = %d, read fd = %d, write fd = %d timeout = %d",
poll_cb->poll_type,
poll_cb->pfds[0], poll_cb->pfds[1],poll_cb->timeoutms);
pthread_mutex_init(&poll_cb->mutex, NULL);
pthread_cond_init(&poll_cb->cond_v, NULL);
/* launch the thread */
pthread_mutex_lock(&poll_cb->mutex);
poll_cb->status = 0;
pthread_create(&poll_cb->pid, NULL, mm_camera_poll_thread, (void *)poll_cb);
if(!poll_cb->status) {
pthread_cond_wait(&poll_cb->cond_v, &poll_cb->mutex);
}
pthread_mutex_unlock(&poll_cb->mutex);
LOGD("End");
return rc;
}
The first parameter is again the struct describing the thread, and the second is the type of work it handles; a pipe is then created and a thread is launched.
static void *mm_camera_poll_thread(void *data)
{
mm_camera_poll_thread_t *poll_cb = (mm_camera_poll_thread_t *)data;
mm_camera_cmd_thread_name(poll_cb->threadName);
/* add pipe read fd into poll first */
poll_cb->poll_fds[poll_cb->num_fds++].fd = poll_cb->pfds[0];
mm_camera_poll_sig_done(poll_cb);
mm_camera_poll_set_state(poll_cb, MM_CAMERA_POLL_TASK_STATE_POLL);
return mm_camera_poll_fn(poll_cb);
}
It finally runs this function:
static void *mm_camera_poll_fn(mm_camera_poll_thread_t *poll_cb)
{
int rc = 0, i;
if (NULL == poll_cb) {
LOGE("poll_cb is NULL!\n");
return NULL;
}
LOGD("poll type = %d, num_fd = %d poll_cb = %p\n",
poll_cb->poll_type, poll_cb->num_fds,poll_cb);
//the main loop
do {
for(i = 0; i < poll_cb->num_fds; i++) {
poll_cb->poll_fds[i].events = POLLIN|POLLRDNORM|POLLPRI;
}
//wait for activity on the registered fds (the pipe plus the stream/event fds)
rc = poll(poll_cb->poll_fds, poll_cb->num_fds, poll_cb->timeoutms);
if(rc > 0) {
if ((poll_cb->poll_fds[0].revents & POLLIN) &&
(poll_cb->poll_fds[0].revents & POLLRDNORM)) {
/* if we have data on pipe, we only process pipe in this iteration */
LOGD("cmd received on pipe\n");
mm_camera_poll_proc_pipe(poll_cb);
} else {
//no command on the pipe, so check the event/stream fds
for(i=1; i<poll_cb->num_fds; i++) {
/* Checking for ctrl events */
if ((poll_cb->poll_type == MM_CAMERA_POLL_TYPE_EVT) &&
(poll_cb->poll_fds[i].revents & POLLPRI)) {
LOGD("mm_camera_event_notify\n");
if (NULL != poll_cb->poll_entries[i-1].notify_cb) {
poll_cb->poll_entries[i-1].notify_cb(poll_cb->poll_entries[i-1].user_data);
}
}
if ((MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type) &&
(poll_cb->poll_fds[i].revents & POLLIN) &&
(poll_cb->poll_fds[i].revents & POLLRDNORM)) {
LOGD("mm_stream_data_notify\n");
if (NULL != poll_cb->poll_entries[i-1].notify_cb) {
//invoke notify_cb to tell the stream that a buffer has been filled
poll_cb->poll_entries[i-1].notify_cb(poll_cb->poll_entries[i-1].user_data);
}
}
}
}
} else {
/* in error case sleep 10 us and then continue. hard coded here */
usleep(10);
continue;
}
} while ((poll_cb != NULL) && (poll_cb->state == MM_CAMERA_POLL_TASK_STATE_POLL));
return NULL;
}
When there is data to read on the pipe, it is processed by the following function.
static void mm_camera_poll_proc_pipe(mm_camera_poll_thread_t *poll_cb)
{
ssize_t read_len;
int i;
mm_camera_sig_evt_t cmd_evt;
//read cmd_evt from the pipe (it is written when mm_camera_poll_thread_add_poll_fd is called)
read_len = read(poll_cb->pfds[0], &cmd_evt, sizeof(cmd_evt));
LOGD("read_fd = %d, read_len = %d, expect_len = %d cmd = %d",
poll_cb->pfds[0], (int)read_len, (int)sizeof(cmd_evt), cmd_evt.cmd);
switch (cmd_evt.cmd) {
case MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED:
case MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC:
/* we always have index 0 for pipe read */
poll_cb->num_fds = 0;
poll_cb->poll_fds[poll_cb->num_fds].fd = poll_cb->pfds[0];
poll_cb->poll_fds[poll_cb->num_fds].events = POLLIN|POLLRDNORM|POLLPRI;
poll_cb->num_fds++;
if (MM_CAMERA_POLL_TYPE_EVT == poll_cb->poll_type &&
poll_cb->num_fds < MAX_STREAM_NUM_IN_BUNDLE) {
if (poll_cb->poll_entries[0].fd >= 0) {
/* fd is valid, we update poll_fds */
poll_cb->poll_fds[poll_cb->num_fds].fd = poll_cb->poll_entries[0].fd;
poll_cb->poll_fds[poll_cb->num_fds].events = POLLIN|POLLRDNORM|POLLPRI;
poll_cb->num_fds++;
}
} else if (MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type &&
poll_cb->num_fds <= MAX_STREAM_NUM_IN_BUNDLE) {
for(i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
if(poll_cb->poll_entries[i].fd >= 0) {
/* fd is valid, we update poll_fds to this fd */
poll_cb->poll_fds[poll_cb->num_fds].fd = poll_cb->poll_entries[i].fd;//assign the fd that came from the stream
poll_cb->poll_fds[poll_cb->num_fds].events = POLLIN|POLLRDNORM|POLLPRI;
poll_cb->num_fds++;
} else {
/* fd is invalid, we set the entry to -1 to prevent polling.
* According to spec, polling will not poll on entry with fd=-1.
* If this is not the case, we need to skip these invalid fds
* when updating this array.
* We still keep fd=-1 in this array because this makes easier to
* map cb associated with this fd once incoming data avail by directly
* using the index-1(0 is reserved for pipe read, so need to reduce index by 1) */
poll_cb->poll_fds[poll_cb->num_fds].fd = -1;
poll_cb->poll_fds[poll_cb->num_fds].events = 0;
poll_cb->num_fds++;
}
}
}
if (cmd_evt.cmd != MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC)
mm_camera_poll_sig_done(poll_cb);
break;
case MM_CAMERA_PIPE_CMD_COMMIT:
mm_camera_poll_sig_done(poll_cb);
break;
case MM_CAMERA_PIPE_CMD_EXIT:
default:
mm_camera_poll_set_state(poll_cb, MM_CAMERA_POLL_TASK_STATE_STOPPED);
mm_camera_poll_sig_done(poll_cb);
break;
}
}
So where does notify_cb come from? Up to this point it has actually been NULL. The poll_thread provides the mm_camera_poll_thread_add_poll_fd function to register a notify_cb into the struct.
int32_t mm_camera_poll_thread_add_poll_fd(mm_camera_poll_thread_t * poll_cb,
uint32_t handler,
int32_t fd,
mm_camera_poll_notify_t notify_cb,
void* userdata,
mm_camera_call_type_t call_type)
{
int32_t rc = -1;
uint8_t idx = 0;
if (MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type) {
/* get stream idx from handler if CH type */
idx = mm_camera_util_get_index_by_handler(handler);
} else {
/* for EVT type, only idx=0 is valid */
idx = 0;
}
if (MAX_STREAM_NUM_IN_BUNDLE > idx) {
poll_cb->poll_entries[idx].fd = fd;//passed in by the stream
poll_cb->poll_entries[idx].handler = handler;
poll_cb->poll_entries[idx].notify_cb = notify_cb;
poll_cb->poll_entries[idx].user_data = userdata;
/* send poll entries updated signal to poll thread */
if (call_type == mm_camera_sync_call ) {
rc = mm_camera_poll_sig(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED);//this function writes a command into the pipe
} else {
rc = mm_camera_poll_sig_async(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC );
}
} else {
LOGE("invalid handler %d (%d)", handler, idx);
}
return rc;
}
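The mm_camera_poll_sig mentioned in the comment above is what actually wakes the poll loop: it writes a small command struct into the write end of the pipe and then waits until the poll thread signals that it has re-read the entries. A reduced sketch of that mechanism, assuming the mm_camera thread types (the real function in mm_camera_thread.c adds error handling, and the async variant skips the wait):
/* simplified sketch of mm_camera_poll_sig */
static int32_t mm_camera_poll_sig_sketch(mm_camera_poll_thread_t *poll_cb,
                                         uint32_t cmd)
{
    mm_camera_sig_evt_t cmd_evt;
    memset(&cmd_evt, 0, sizeof(cmd_evt));
    cmd_evt.cmd = cmd;

    pthread_mutex_lock(&poll_cb->mutex);
    poll_cb->status = 0;

    /* wake up poll(): pfds[0] becomes readable inside mm_camera_poll_fn,
     * which then runs mm_camera_poll_proc_pipe to pick up the new fds */
    if (write(poll_cb->pfds[1], &cmd_evt, sizeof(cmd_evt)) < 1) {
        pthread_mutex_unlock(&poll_cb->mutex);
        return -1;
    }

    /* wait for mm_camera_poll_sig_done() from the poll thread */
    if (!poll_cb->status) {
        pthread_cond_wait(&poll_cb->cond_v, &poll_cb->mutex);
    }
    pthread_mutex_unlock(&poll_cb->mutex);
    return 0;
}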
The mm_camera_poll_thread_add_poll_fd function above is called from mm_stream_qbuf.
For background on V4L2, see http://blog.csdn.net/yanbixing123/article/details/52294305
int32_t mm_stream_qbuf(mm_stream_t *my_obj, mm_camera_buf_def_t *buf)
{
int32_t rc = 0;
uint32_t length = 0;
struct v4l2_buffer buffer;
struct v4l2_plane planes[VIDEO_MAX_PLANES];
LOGD("E, my_handle = 0x%x, fd = %d, state = %d, stream type = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state,
my_obj->stream_info->stream_type);
if (buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
LOGD("USERPTR num_buf = %d, idx = %d",
buf->user_buf.bufs_used, buf->buf_idx);
memset(&planes, 0, sizeof(planes));
planes[0].length = my_obj->stream_info->user_buf_info.size;
planes[0].m.userptr = buf->fd;
length = 1;
} else {
memcpy(planes, buf->planes_buf.planes, sizeof(planes));
length = buf->planes_buf.num_planes;
}
memset(&buffer, 0, sizeof(buffer));
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buffer.memory = V4L2_MEMORY_USERPTR;
buffer.index = (__u32)buf->buf_idx;
buffer.m.planes = &planes[0];
buffer.length = (__u32)length;
if ( NULL != my_obj->mem_vtbl.invalidate_buf ) {
rc = my_obj->mem_vtbl.invalidate_buf(buffer.index,
my_obj->mem_vtbl.user_data);
if ( 0 > rc ) {
LOGE("Cache invalidate failed on buffer index: %d",
buffer.index);
return rc;
}
} else {
LOGE("Cache invalidate op not added");
}
pthread_mutex_lock(&my_obj->buf_lock);
my_obj->queued_buffer_count++;
if (1 == my_obj->queued_buffer_count) {
/* Add fd to data poll thread */
LOGH("Starting poll on stream %p type: %d",
my_obj,my_obj->stream_info->stream_type);
//here it is called, passing mm_stream_data_notify as the callback
rc = mm_camera_poll_thread_add_poll_fd(&my_obj->ch_obj->poll_thread[0],
my_obj->my_hdl, my_obj->fd, mm_stream_data_notify, (void*)my_obj,
mm_camera_async_call);
if (0 > rc) {
LOGE("Add poll on stream %p type: %d fd error (rc=%d)",
my_obj, my_obj->stream_info->stream_type, rc);
} else {
LOGH("Started poll on stream %p type: %d",
my_obj, my_obj->stream_info->stream_type);
}
}
pthread_mutex_unlock(&my_obj->buf_lock);
rc = ioctl(my_obj->fd, VIDIOC_QBUF, &buffer);
pthread_mutex_lock(&my_obj->buf_lock);
if (0 > rc) {
LOGE("VIDIOC_QBUF ioctl call failed on stream type %d (rc=%d): %s",
my_obj->stream_info->stream_type, rc, strerror(errno));
my_obj->queued_buffer_count--;
if (0 == my_obj->queued_buffer_count) {
/* Remove fd from data poll in case of failing
* first buffer queuing attempt */
LOGH("Stoping poll on stream %p type: %d",
my_obj, my_obj->stream_info->stream_type);
mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0],
my_obj->my_hdl, mm_camera_async_call);
LOGH("Stopped poll on stream %p type: %d",
my_obj, my_obj->stream_info->stream_type);
}
} else {
LOGH("VIDIOC_QBUF buf_index %d, frame_idx %d stream type %d, rc %d,"
" queued: %d, buf_type = %d",
buffer.index, buf->frame_idx, my_obj->stream_info->stream_type, rc,
my_obj->queued_buffer_count, buf->buf_type);
}
pthread_mutex_unlock(&my_obj->buf_lock);
return rc;
}
This function queues a buffer back to the kernel. Four functions call it:
- mm_stream_reg_buf
- mm_stream_write_user_buf
- mm_stream_read_user_buf
- mm_stream_buf_done
mm_stream_reg_buf in turn is called when the channel starts. Once the poll_thread has a callback registered, it invokes mm_stream_data_notify to return the data:
static void mm_stream_data_notify(void* user_data)
{
mm_stream_t *my_obj = (mm_stream_t*)user_data;
int32_t i, rc;
uint8_t has_cb = 0, length = 0;
mm_camera_buf_info_t buf_info;
......
rc = mm_stream_read_msm_frame(my_obj, &buf_info,
(uint8_t)length);
if (rc != 0) {
return;
}
uint32_t idx = buf_info.buf->buf_idx;
pthread_mutex_lock(&my_obj->cb_lock);
for (i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
if(NULL != my_obj->buf_cb[i].cb) {
//this evaluates to false here
if (my_obj->buf_cb[i].cb_type == MM_CAMERA_STREAM_CB_TYPE_SYNC) {
/*For every SYNC callback, send data*/
mm_stream_dispatch_sync_data(my_obj,
&my_obj->buf_cb[i], &buf_info);
} else {
/* for every ASYNC CB, need ref count */
has_cb = 1;
}
}
}
pthread_mutex_unlock(&my_obj->cb_lock);
pthread_mutex_lock(&my_obj->buf_lock);
/* update buffer location */
my_obj->buf_status[idx].in_kernel = 0;
/* update buf ref count */
if (my_obj->is_bundled) {
/* need to add into super buf since bundled, add ref count */
my_obj->buf_status[idx].buf_refcnt++;
}
my_obj->buf_status[idx].buf_refcnt =
(uint8_t)(my_obj->buf_status[idx].buf_refcnt + has_cb);
pthread_mutex_unlock(&my_obj->buf_lock);
mm_stream_handle_rcvd_buf(my_obj, &buf_info, has_cb);
}
First look at mm_stream_read_msm_frame:
int32_t mm_stream_read_msm_frame(mm_stream_t * my_obj,
mm_camera_buf_info_t* buf_info,
uint8_t num_planes)
{
int32_t rc = 0;
struct v4l2_buffer vb;
struct v4l2_plane planes[VIDEO_MAX_PLANES];
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
memset(&vb, 0, sizeof(vb));
vb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vb.memory = V4L2_MEMORY_USERPTR;
vb.m.planes = &planes[0];
vb.length = num_planes;
rc = ioctl(my_obj->fd, VIDIOC_DQBUF, &vb);//dequeue a filled buffer from the kernel via V4L2
if (0 > rc) {
LOGE("VIDIOC_DQBUF ioctl call failed on stream type %d (rc=%d): %s",
my_obj->stream_info->stream_type, rc, strerror(errno));
} else {
pthread_mutex_lock(&my_obj->buf_lock);
my_obj->queued_buffer_count--;
if (0 == my_obj->queued_buffer_count) {
LOGH("Stoping poll on stream %p type: %d",
my_obj, my_obj->stream_info->stream_type);
//if no buffers remain queued, remove the fd from the poll thread (unregister the callback)
mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0],
my_obj->my_hdl, mm_camera_async_call);
LOGH("Stopped poll on stream %p type: %d",
my_obj, my_obj->stream_info->stream_type);
}
pthread_mutex_unlock(&my_obj->buf_lock);
uint32_t idx = vb.index;
buf_info->buf = &my_obj->buf[idx];//the driver has already filled this buffer with data
buf_info->frame_idx = vb.sequence;
buf_info->stream_id = my_obj->my_hdl;
buf_info->buf->stream_id = my_obj->my_hdl;
buf_info->buf->buf_idx = idx;
buf_info->buf->frame_idx = vb.sequence;
buf_info->buf->ts.tv_sec = vb.timestamp.tv_sec;
buf_info->buf->ts.tv_nsec = vb.timestamp.tv_usec * 1000;
buf_info->buf->flags = vb.flags;
LOGH("VIDIOC_DQBUF buf_index %d, frame_idx %d, stream type %d, rc %d,"
"queued: %d, buf_type = %d flags = %d",
vb.index, buf_info->buf->frame_idx,
my_obj->stream_info->stream_type, rc,
my_obj->queued_buffer_count, buf_info->buf->buf_type,
buf_info->buf->flags);
buf_info->buf->is_uv_subsampled =
(vb.reserved == V4L2_PIX_FMT_NV14 || vb.reserved == V4L2_PIX_FMT_NV41);
if(buf_info->buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
mm_stream_read_user_buf(my_obj, buf_info);
}
if ( NULL != my_obj->mem_vtbl.clean_invalidate_buf ) {
rc = my_obj->mem_vtbl.clean_invalidate_buf(idx,
my_obj->mem_vtbl.user_data);
if (0 > rc) {
LOGE("Clean invalidate cache failed on buffer index: %d",
idx);
}
} else {
LOGE("Clean invalidate cache op not supported");
}
}
LOGD("X rc = %d",rc);
return rc;
}
At this point the buffer has been filled with data. Next:
void mm_stream_handle_rcvd_buf(mm_stream_t *my_obj,
mm_camera_buf_info_t *buf_info,
uint8_t has_cb)
{
int32_t rc = 0;
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
/* enqueue to super buf thread */
if (my_obj->is_bundled) {
rc = mm_stream_notify_channel(my_obj->ch_obj, buf_info);//notify the channel
if (rc < 0) {
LOGE("Unable to notify channel");
}
}
pthread_mutex_lock(&my_obj->buf_lock);
if(my_obj->is_linked) {
/* need to add into super buf for linking, add ref count */
my_obj->buf_status[buf_info->buf->buf_idx].buf_refcnt++;
rc = mm_stream_notify_channel(my_obj->linked_obj, buf_info);
if (rc < 0) {
LOGE("Unable to notify channel");
}
}
pthread_mutex_unlock(&my_obj->buf_lock);
pthread_mutex_lock(&my_obj->cmd_lock);
if(has_cb && my_obj->cmd_thread.is_active) {
mm_camera_cmdcb_t* node = NULL;
/* send cam_sem_post to wake up cmd thread to dispatch dataCB */
node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
if (NULL != node) {
memset(node, 0, sizeof(mm_camera_cmdcb_t));
node->cmd_type = MM_CAMERA_CMD_TYPE_DATA_CB;
node->u.buf = *buf_info;
/* enqueue to cmd thread */
cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
/* wake up cmd thread */
cam_sem_post(&(my_obj->cmd_thread.cmd_sem));//post the message to the stream's own cmd_thread, so the callback will be invoked
} else {
LOGE("No memory for mm_camera_node_t");
}
}
pthread_mutex_unlock(&my_obj->cmd_lock);
}
The following callback is then invoked:
static void mm_stream_dispatch_app_data(mm_camera_cmdcb_t *cmd_cb,
void* user_data)
{
int i;
mm_stream_t * my_obj = (mm_stream_t *)user_data;
mm_camera_buf_info_t* buf_info = NULL;
mm_camera_super_buf_t super_buf;
if (NULL == my_obj) {
return;
}
LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
my_obj->my_hdl, my_obj->fd, my_obj->state);
if (MM_CAMERA_CMD_TYPE_DATA_CB != cmd_cb->cmd_type) {
LOGE("Wrong cmd_type (%d) for dataCB",
cmd_cb->cmd_type);
return;
}
buf_info = &cmd_cb->u.buf;
memset(&super_buf, 0, sizeof(mm_camera_super_buf_t));
super_buf.num_bufs = 1;
super_buf.bufs[0] = buf_info->buf;
super_buf.camera_handle = my_obj->ch_obj->cam_obj->my_hdl;
super_buf.ch_id = my_obj->ch_obj->my_hdl;
pthread_mutex_lock(&my_obj->cb_lock);
for(i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
if(NULL != my_obj->buf_cb[i].cb
&& (my_obj->buf_cb[i].cb_type !=
MM_CAMERA_STREAM_CB_TYPE_SYNC)) {
if (my_obj->buf_cb[i].cb_count != 0) {
/* if <0, means infinite CB
* if >0, means CB for certain times
* both case we need to call CB */
/* increase buf ref cnt */
pthread_mutex_lock(&my_obj->buf_lock);
my_obj->buf_status[buf_info->buf->buf_idx].buf_refcnt++;
pthread_mutex_unlock(&my_obj->buf_lock);
/* callback */
//here the callback bound earlier during config_stream is invoked
my_obj->buf_cb[i].cb(&super_buf,
my_obj->buf_cb[i].user_data);
}
/* if >0, reduce count by 1 every time we called CB until reaches 0
* when count reach 0, reset the buf_cb to have no CB */
if (my_obj->buf_cb[i].cb_count > 0) {
my_obj->buf_cb[i].cb_count--;
if (0 == my_obj->buf_cb[i].cb_count) {
my_obj->buf_cb[i].cb = NULL;
my_obj->buf_cb[i].user_data = NULL;
}
}
}
}
pthread_mutex_unlock(&my_obj->cb_lock);
/* do buf_done since we increased refcnt by one when has_cb */
mm_stream_buf_done(my_obj, buf_info->buf);
}
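The mm_stream_buf_done at the end closes the loop: it drops the reference that was taken for the callback and, once nobody holds the buffer any more, queues it back to the kernel through the mm_stream_qbuf we saw earlier. A reduced sketch of what it does, assuming the mm_camera stream types (the real function in mm_camera_stream.c carries more error handling):
/* simplified sketch of mm_stream_buf_done */
int32_t mm_stream_buf_done_sketch(mm_stream_t *my_obj,
                                  mm_camera_buf_def_t *frame)
{
    int32_t rc = 0;
    uint32_t idx = frame->buf_idx;

    pthread_mutex_lock(&my_obj->buf_lock);
    if (0 == my_obj->buf_status[idx].buf_refcnt) {
        /* double free: the buffer is not owned by anyone */
        rc = -1;
    } else {
        my_obj->buf_status[idx].buf_refcnt--;
        if (0 == my_obj->buf_status[idx].buf_refcnt) {
            /* last reference gone: give the buffer back to the kernel,
             * which also re-adds the fd to the data poll thread */
            rc = mm_stream_qbuf(my_obj, frame);
            if (0 == rc) {
                my_obj->buf_status[idx].in_kernel = 1;
            }
        }
    }
    pthread_mutex_unlock(&my_obj->buf_lock);
    return rc;
}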
After that, the data goes through the PostProcessor and becomes a frame; in QCamera3HWI the upper-layer callback mCallbackOps->process_capture_result is invoked to return the data to Camera3Device.
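For completeness, handing a finished frame back to the framework through the camera3 callback looks roughly like the fragment below. This is only an illustration of the camera3_capture_result contract; frame_number and out_buf are placeholders, and the real QCamera3HWI code assembles them from the matching pending request.
/* illustrative fragment only: how a buffer-only result reaches Camera3Device
 * via the camera3_callback_ops saved in initialize() (mCallbackOps) */
camera3_capture_result_t result;
camera3_stream_buffer_t  out_buf;   /* filled from the stream buffer just returned */

memset(&result, 0, sizeof(result));
memset(&out_buf, 0, sizeof(out_buf));
/* ...fill out_buf.stream / out_buf.buffer / out_buf.status / out_buf.release_fence... */

result.frame_number       = frame_number; /* the request this frame answers */
result.result             = NULL;         /* metadata is sent in its own result with partial_result >= 1 */
result.num_output_buffers = 1;
result.output_buffers     = &out_buf;
result.input_buffer       = NULL;
result.partial_result     = 0;            /* 0 because this result carries no metadata */

mCallbackOps->process_capture_result(mCallbackOps, &result);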
PS: Class of 2018 graduate, based in Hangzhou, working on Android. If you are interested, feel free to get in touch: a799138951@gmail.com.