Optimizing Android video thumbnail extraction (grabbing per-frame data from a video)
2019-01-22
deep_sadness
Source code: MediaMetadataRetriever Wrapper
Speed comparison
In the demo GIF (speed.gif), the left side uses approach 1 and the right side uses approach 2.
The speed gain is dramatic.
- With the output Bitmap downscaled 2x:
Using MediaMetadataRetriever, extraction is a steady ~300ms per frame.
Using MediaCodec + ImageReader, the first frame takes about 200ms; each subsequent frame takes about 50ms.
Note: if you are not downscaling the output, MediaMetadataRetriever is still the better choice. With this library, call metadataRetriever.forceFallBack(true); to force that path (see the sketch below).
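A minimal usage sketch of the wrapper. Only forceFallBack(true) is confirmed by the post; the class name and the rest of the API are assumptions, presumed to mirror the framework's MediaMetadataRetriever:

// Hypothetical usage of the wrapper library (names assumed).
MediaMetadataRetrieverWrapper retriever = new MediaMetadataRetrieverWrapper(); // assumed class name
retriever.setDataSource(fileName); // assumed to mirror the framework API
retriever.forceFallBack(true); // full-size output: fall back to the framework retriever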
Approaches
1. Using MediaMetadataRetriever
The code is straightforward: it is just a loop.
MediaMetadataRetriever metadataRetriever = new MediaMetadataRetriever();
metadataRetriever.setDataSource(fileName);
String duration = metadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION);
Log.d(TAG, "duration = " + duration);
int durationMs = Integer.parseInt(duration);
// grab one frame per second
for (int i = 0; i < durationMs; i += 1000) {
    long start = System.nanoTime();
    Log.d(TAG, "getFrameAtTime time = " + i);
    // getFrameAtTime expects microseconds, so convert ms to us (long avoids int overflow)
    Bitmap frameAtIndex = metadataRetriever.getFrameAtTime(i * 1000L);
    Bitmap frame = Bitmap.createScaledBitmap(frameAtIndex, frameAtIndex.getWidth() / 8, frameAtIndex.getHeight() / 8, false);
    frameAtIndex.recycle();
    long end = System.nanoTime();
    long cost = end - start;
    Log.d(TAG, "cost time in millis = " + (cost * 1f / 1000000));
    if (callBack != null) {
        callBack.onComplete(frame);
    }
}
metadataRetriever.release();
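The BitmapCallBack used throughout is not shown in the post; presumably it is a simple interface along these lines (an assumption):

// Assumed callback interface: delivers each extracted thumbnail as soon as it is ready.
public interface BitmapCallBack {
    void onComplete(Bitmap bitmap);
}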
2. Using MediaCodec and ImageReader
The idea: use MediaExtractor to feed compressed samples into MediaCodec, and let the decoder render its output onto the ImageReader's Surface, which then hands each frame back as an Image.
MediaExtractor extractor = null;
MediaCodec codec = null;
try {
extractor = new MediaExtractor();
extractor.setDataSource(fileName);
int trackCount = extractor.getTrackCount();
MediaFormat videoFormat = null;
for (int i = 0; i < trackCount; i++) {
MediaFormat trackFormat = extractor.getTrackFormat(i);
if (trackFormat.getString(MediaFormat.KEY_MIME).contains("video")) {
videoFormat = trackFormat;
extractor.selectTrack(i);
break;
}
}
if (videoFormat == null) {
Log.d(TAG, "Can not get video format");
return;
}
int imageFormat = ImageFormat.YUV_420_888;
int colorFormat = COLOR_FormatYUV420Flexible;
videoFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, colorFormat);
videoFormat.setInteger(MediaFormat.KEY_WIDTH, videoFormat.getInteger(MediaFormat.KEY_WIDTH) / 4);
videoFormat.setInteger(MediaFormat.KEY_HEIGHT, videoFormat.getInteger(MediaFormat.KEY_HEIGHT) / 4);
long duration = videoFormat.getLong(MediaFormat.KEY_DURATION);
codec = MediaCodec.createDecoderByType(videoFormat.getString(MediaFormat.KEY_MIME));
ImageReader imageReader = ImageReader
.newInstance(
videoFormat.getInteger(MediaFormat.KEY_WIDTH),
videoFormat.getInteger(MediaFormat.KEY_HEIGHT),
imageFormat,
3);
final ImageReaderHandlerThread imageReaderHandlerThread = new ImageReaderHandlerThread();
imageReader.setOnImageAvailableListener(new MyOnImageAvailableListener(callBack), imageReaderHandlerThread.getHandler());
codec.configure(videoFormat, imageReader.getSurface(), null, 0);
codec.start();
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
long timeOut = 5 * 1000; // dequeue timeout, in microseconds (5ms)
boolean inputDone = false;
boolean outputDone = false;
ByteBuffer[] inputBuffers = null;
if (android.os.Build.VERSION.SDK_INT < android.os.Build.VERSION_CODES.LOLLIPOP) {
inputBuffers = codec.getInputBuffers();
}
// start the decode loop
int count = 1;
while (!outputDone) {
if (requestStop) {
return;
}
if (!inputDone) {
//feed data
int inputBufferIndex = codec.dequeueInputBuffer(timeOut);
if (inputBufferIndex >= 0) {
ByteBuffer inputBuffer;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
inputBuffer = codec.getInputBuffer(inputBufferIndex);
} else {
inputBuffer = inputBuffers[inputBufferIndex];
}
int sampleData = extractor.readSampleData(inputBuffer, 0);
if (sampleData > 0) {
long sampleTime = extractor.getSampleTime();
codec.queueInputBuffer(inputBufferIndex, 0, sampleData, sampleTime, 0);
// advance: step to the next sample, or seek ahead by the requested interval
if (interval == 0) {
extractor.advance();
} else {
extractor.seekTo(count * interval * 1000, MediaExtractor.SEEK_TO_PREVIOUS_SYNC);
count++;
// extractor.advance();
}
} else {
// a negative size means the stream is exhausted; queue an EOS buffer
codec.queueInputBuffer(inputBufferIndex, 0, 0, 0L, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
inputDone = true;
Log.d(TAG, "end of stream");
}
}
}
if (!outputDone) {
//get data
int status = codec.dequeueOutputBuffer(bufferInfo, timeOut);
if (status == MediaCodec.INFO_TRY_AGAIN_LATER) {
// no output available yet; try again
} else if (status == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// output format changed; nothing to do when rendering to a Surface
} else if (status == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
// likewise, nothing to do here
} else {
// a real output buffer arrived; check the decoder state
if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
Log.d(TAG, "output EOS");
outputDone = true;
}
boolean doRender = (bufferInfo.size != 0);
long presentationTimeUs = bufferInfo.presentationTimeUs;
if (lastPresentationTimeUs == 0) {
lastPresentationTimeUs = presentationTimeUs;
} else {
long diff = presentationTimeUs - lastPresentationTimeUs;
if (interval != 0) {
if (diff < interval * 1000) {
doRender = false;
} else {
lastPresentationTimeUs = presentationTimeUs;
}
Log.d(TAG, "diff time in ms =" + diff / 1000);
}
}
// we have frame data; since it is delivered straight to the Surface, nothing else is needed here
Log.d(TAG, "surface decoder given buffer " + status +
" (size=" + bufferInfo.size + ")" + ", doRender = " + doRender + ", presentationTimeUs=" + presentationTimeUs);
// releasing with render=true hands the frame to the Surface
codec.releaseOutputBuffer(status, doRender);
}
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
if (codec != null) {
codec.stop();
codec.release();
}
if (extractor != null) {
extractor.release();
}
}
}
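The ImageReaderHandlerThread referenced above is not included in the post. A minimal sketch, assuming it is simply a HandlerThread that exposes a Handler bound to its own Looper (the class layout is an assumption):

// Assumed implementation: a background thread whose Handler receives the
// onImageAvailable callbacks, keeping YUV-to-Bitmap conversion off the decode loop.
private static class ImageReaderHandlerThread extends HandlerThread {
    private final Handler handler;

    ImageReaderHandlerThread() {
        super("ImageReaderHandlerThread");
        start(); // the thread must be running before a Handler can bind to its Looper
        handler = new Handler(getLooper());
    }

    Handler getHandler() {
        return handler;
    }
}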
Converting the YUV data with libyuv
private static class MyOnImageAvailableListener implements ImageReader.OnImageAvailableListener {
private final BitmapCallBack callBack;
private MyOnImageAvailableListener(BitmapCallBack callBack) {
this.callBack = callBack;
}
@Override
public void onImageAvailable(ImageReader reader) {
Log.i(TAG, "in OnImageAvailable");
Image img = null;
try {
img = reader.acquireLatestImage();
if (img != null) {
// the Image here holds YUV data, which must be converted to a Bitmap
Image.Plane[] planes = img.getPlanes();
if (planes[0].getBuffer() == null) {
return;
}
// Bitmap bitmap = getBitmap(img);
Bitmap bitmap = getBitmapScale(img, 8);
// Bitmap bitmap = getBitmapFromNv21(img);
if (callBack != null && bitmap != null) {
Log.d(TAG, "onComplete bitmap ");
callBack.onComplete(bitmap);
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
if (img != null) {
img.close();
}
}
}
@NonNull
private Bitmap getBitmapScale(Image img, int scale) {
int width = img.getWidth() / scale;
int height = img.getHeight() / scale;
final byte[] bytesImage = getDataFromYUV420Scale(img, scale);
// width and height are swapped because the native code rotates the frame 90 degrees
Bitmap bitmap = Bitmap.createBitmap(height, width, Bitmap.Config.ARGB_8888);
bitmap.copyPixelsFromBuffer(ByteBuffer.wrap(bytesImage));
return bitmap;
}
private byte[] getDataFromYUV420Scale(Image image, int scale) {
int width = image.getWidth();
int height = image.getHeight();
// Read image data
Image.Plane[] planes = image.getPlanes();
byte[] argb = new byte[width / scale * height / scale * 4];
// Note: for a Java byte[] laid out in RGBA order, libyuv calls that layout ABGR;
// libyuv names channel orders in the reverse direction of Bitmap's RGBA.
// That is why the native code calls libyuv::I420ToABGR to fill this RGBA buffer.
YuvUtils.yuvI420ToABGRWithScale(
argb,
planes[0].getBuffer(), planes[0].getRowStride(),
planes[1].getBuffer(), planes[1].getRowStride(),
planes[2].getBuffer(), planes[2].getRowStride(),
width, height,
scale
);
return argb;
}
}
libyuv
extern "C"
JNIEXPORT void JNICALL
Java_com_example_yuv_YuvUtils_yuvI420ToABGRWithScale(JNIEnv *env, jclass type, jbyteArray argb_,
jobject y_buffer, jint y_rowStride,
jobject u_buffer, jint u_rowStride,
jobject v_buffer, jint v_rowStride,
jint width, jint height,
jint scale) {
jbyte *argb = env->GetByteArrayElements(argb_, NULL);
uint8_t *srcYPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(y_buffer));
uint8_t *srcUPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(u_buffer));
uint8_t *srcVPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(v_buffer));
int scaleW = width / scale;
int scaleH = height / scale;
int scaleSize = scaleW * scaleH;
// intermediate buffer holding the downscaled I420 image (Y plane + U/4 + V/4)
jbyte *temp_y_scale = new jbyte[scaleSize * 3 / 2];
jbyte *temp_u_scale = temp_y_scale + scaleSize;
jbyte *temp_v_scale = temp_y_scale + scaleSize + scaleSize / 4;
// step 1: downscale all three I420 planes
libyuv::I420Scale(
srcYPtr, y_rowStride,
srcUPtr, u_rowStride,
srcVPtr, v_rowStride,
width, height,
(uint8_t *) temp_y_scale, scaleW,
(uint8_t *) temp_u_scale, scaleW >> 1,
(uint8_t *) temp_v_scale, scaleW >> 1,
scaleW, scaleH,
libyuv::kFilterNone
);
width = scaleW;
height = scaleH;
// intermediate buffer holding the rotated I420 image
jbyte *temp_y = new jbyte[width * height * 3 / 2];
jbyte *temp_u = temp_y + width * height;
jbyte *temp_v = temp_y + width * height + width * height / 4;
// step 2: rotate the scaled image 90 degrees
libyuv::I420Rotate(
(uint8_t *) temp_y_scale, scaleW,
(uint8_t *) temp_u_scale, scaleW >> 1,
(uint8_t *) temp_v_scale, scaleW >> 1,
// destination planes; the stride is the rotated row length
(uint8_t *) temp_y, height,
(uint8_t *) temp_u, height >> 1,
(uint8_t *) temp_v, height >> 1,
width, height,
libyuv::kRotate90
);
// step 3: convert the rotated I420 image to RGBA bytes (libyuv calls this layout ABGR)
libyuv::I420ToABGR(
(uint8_t *) temp_y, height,
(uint8_t *) temp_u, height >> 1,
(uint8_t *) temp_v, height >> 1,
(uint8_t *) argb, height * 4,
height, width
);
// free the intermediate buffers before returning
delete[] temp_y_scale;
delete[] temp_y;
env->ReleaseByteArrayElements(argb_, argb, 0);
}
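For completeness, the Java-side binding that this JNI function implies. The package and method name follow from the JNI symbol Java_com_example_yuv_YuvUtils_yuvI420ToABGRWithScale, but the .so library name is an assumption:

// Assumed Java declaration matching the JNI symbol above.
package com.example.yuv;

import java.nio.ByteBuffer;

public class YuvUtils {
    static {
        System.loadLibrary("yuv-utils"); // library name assumed
    }

    // jclass in the JNI signature means this is a static method
    public static native void yuvI420ToABGRWithScale(
            byte[] argb,
            ByteBuffer yBuffer, int yRowStride,
            ByteBuffer uBuffer, int uRowStride,
            ByteBuffer vBuffer, int vRowStride,
            int width, int height,
            int scale);
}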
Follow-up
Decoding the file with MediaCodec and sending the output to an ImageReader is how we obtain the screenshots.
The MediaMetadataRetriever approach offers no way to configure the size of the output image; when all we need are small preview thumbnails, doing the downscaling ourselves during decoding yields a large speedup.
Shortcomings
How the stock MediaMetadataRetriever works internally still needs to be investigated.