YUV颜色编码解析
YUV
YUV是一种颜色空间,基于YUV的颜色编码是流媒体的常用编码方式。Y表示亮度(Luma),U、V表示色度(Chroma),这种表达方式起初是为了彩色电视与黑白电视之间的信号兼容。对于图像每一点,Y确定其亮度,UV确定其色彩。
Y'CbCr也称为YUV,是YUV的压缩版本,不同之处在于Y'CbCr用于数字图像领域,YUV用于模拟信号领域,MPEG、DVD、摄像机中常说的YUV其实是Y'CbCr,二者转换为RGBA的转换矩阵是不同的。Y'为亮度,Cb、Cr分量代表当前颜色对蓝色和红色的偏移程度。
注意4:2:0并不是只抽样第一行的色度,是第一行和第二行轮番抽样的:4:2:0 --> 4:0:2 --> 4:2:0 ……
可以看到,不管是哪种抽样方式,亮度都是全抽样的,不同之处在于U、V分量的抽样率。可以看到常用的4:2:0的U、V都是半抽样,所以抽样后的数据量是RGB24一半。(RGB24相当于全抽样)
YUV存储方式
YUV存储方式主要分为两种:Packed(打包)和 Planar(平面)。
Packed方式类似RGB的存储方式,以像素为单位将各分量连续交错存储。
Planar方式将YUV分量分别存储到矩阵,每一个分量矩阵称为一个平面。
YUV420即以平面方式存储,色度抽样为4:2:0的色彩编码格式。其中YUV420P为三平面存储,YUV420SP为两平面存储。
常用的I420(YUV420P),NV12(YUV420SP),YV12(YUV420P),NV21(YUV420SP)等都是属于YUV420,NV12是一种两平面存储方式,Y为一个平面,交错的UV为另一个平面。
由此,I420就是存储方式为Planar,抽样方式为4:2:0,数据组成为YYYYYYYYUUVV的一种色彩编码格式。
除此之外,NV12的数据组成:YYYYYYYYUVUV 。YV12的数据组成:YYYYYYYYVVUU。NV21的数据组成:YYYYYYYYVUVU。
通常,用来远程传输的是I420数据,而本地摄像头采集的是NV12数据。(iOS)
摄像头采集得到的数据是NV12
YUV与RGB之间的转换
在渲染时,不管是OpenGL还是iOS,都不支持直接渲染YUV数据,底层都是转为RGB。
//RGB --> YUV
Y = 0.299 R + 0.587 G + 0.114 B
U = - 0.1687 R - 0.3313 G + 0.5 B + 128
V = 0.5 R - 0.4187 G - 0.0813 B + 128
//YUV --> RGB
//由于U、V可能出现负数,但存储时为了方便用一个字节(0-255)表示,读取时要减去128还原原值。
R = Y + 1.402 (Cr-128)
G = Y - 0.34414 (Cb-128) - 0.71414 (Cr-128)
B = Y + 1.772 (Cb-128)
YUV数据渲染
以NV12为例:
// Clamp an int to the 0..255 byte range. The YUV->RGB math can produce
// values outside [0,255]; the original cast to unsigned char wrapped
// (e.g. 433 -> 177) instead of saturating, producing color artifacts.
static unsigned char xyzClampToByte(int v)
{
    if (v < 0) return 0;
    if (v > 255) return 255;
    return (unsigned char)v;
}

/// Converts an NV12 frame (Y plane followed by interleaved UV plane) to a
/// bottom-up, tightly packed BGR24 bitmap.
///
/// rgbout     - output buffer, must hold DataWidth*DataHeight*3 bytes
/// pdata      - NV12 input, DataWidth*DataHeight*3/2 bytes
/// DataWidth  - frame width in pixels (assumed even)
/// DataHeight - frame height in pixels (assumed even)
void convertNv12ToRgb(unsigned char *rgbout, unsigned char *pdata, int DataWidth, int DataHeight)
{
    unsigned long idx = 0;
    const unsigned char *ybase = pdata;                          // Y plane
    const unsigned char *ubase = pdata + DataWidth * DataHeight; // interleaved UV plane; V follows U
    for (int j = 0; j < DataHeight; j++) {
        // Write rows in reverse so the BGR output is stored bottom-up,
        // matching bitmap conventions.
        idx = (unsigned long)(DataHeight - j - 1) * DataWidth * 3;
        for (int i = 0; i < DataWidth; i++) {
            int y = ybase[i + j * DataWidth];               // one Y per pixel
            int u = ubase[j / 2 * DataWidth + (i / 2) * 2]; // one UV pair per 2x2 pixel block
            int v = ubase[j / 2 * DataWidth + (i / 2) * 2 + 1]; // V is stored right after U
            // YUV -> RGB with the same coefficients as the original,
            // now saturated to the byte range instead of wrapping.
            unsigned char b = xyzClampToByte((int)(y + 1.779 * (u - 128)));
            unsigned char g = xyzClampToByte((int)(y - 0.7169 * (v - 128) - 0.3455 * (u - 128)));
            unsigned char r = xyzClampToByte((int)(y + 1.4075 * (v - 128)));
            rgbout[idx++] = b;
            rgbout[idx++] = g;
            rgbout[idx++] = r;
        }
    }
}
有时不同的YUV格式需要互相转换
/// Converts an NV12 buffer (Y plane + interleaved UVUV plane) to I420
/// (Y plane + U plane + V plane).
///
/// Ownership: on success the input buffer is freed (original contract —
/// callers rely on this) and a newly malloc'd buffer of
/// dataWidth*dataHeight*3/2 bytes is returned; the caller must free() it.
/// On allocation failure the input buffer is left untouched and NULL is
/// returned (the original dereferenced the NULL result and crashed).
unsigned char* convertNV12ToI420(unsigned char *data , int dataWidth, int dataHeight){
    size_t lumaSize = (size_t)dataWidth * (size_t)dataHeight;
    size_t chromaSize = lumaSize / 4; /* 4:2:0 -> each chroma plane is 1/4 of luma */
    unsigned char *out = (unsigned char*)malloc(lumaSize + 2 * chromaSize);
    if (out == NULL) {
        return NULL; /* leave the caller's buffer intact on failure */
    }
    /* Y plane is identical in both layouts. */
    memcpy(out, data, lumaSize);
    const unsigned char *uvSrc = data + lumaSize;
    unsigned char *uDst = out + lumaSize;
    unsigned char *vDst = uDst + chromaSize;
    /* Split the interleaved UVUV... pairs into separate U and V planes. */
    for (size_t i = 0; i < chromaSize; i++) {
        uDst[i] = uvSrc[2 * i];
        vDst[i] = uvSrc[2 * i + 1];
    }
    free(data); /* input is consumed, as in the original */
    return out;
}
或者需要旋转获得的数据
/// Rotates an NV12 frame 90 degrees clockwise.
///
/// dst must hold srcWidth*srcHeight*3/2 bytes; the rotated frame is
/// srcHeight pixels wide by srcWidth pixels tall. src and dst must not
/// overlap. Dimensions are assumed even.
void rotate90NV12(unsigned char *dst, const unsigned char *src, int srcWidth, int srcHeight)
{
    const int lumaCount = srcWidth * srcHeight;
    const int dstWidth = srcHeight; /* width of the rotated frame */
    const unsigned char *yIn = src;
    const unsigned char *uvIn = src + lumaCount;

    /* Luma: source pixel (x, y) lands at row x, column (srcHeight-1-y). */
    for (int y = 0; y < srcHeight; ++y) {
        const int dstCol = srcHeight - 1 - y;
        for (int x = 0; x < srcWidth; ++x) {
            dst[x * dstWidth + dstCol] = yIn[y * srcWidth + x];
        }
    }

    /* Chroma: interleaved UV pairs move as units, with the same 90-degree
     * mapping applied on the half-resolution chroma grid. */
    unsigned char *uvOut = dst + lumaCount;
    for (int y = 0; y < srcHeight / 2; ++y) {
        const int dstCol = (srcHeight / 2 - 1 - y) * 2;
        for (int x = 0; x < srcWidth / 2; ++x) {
            const unsigned char *pair = uvIn + y * srcWidth + x * 2;
            uvOut[x * dstWidth + dstCol] = pair[0];
            uvOut[x * dstWidth + dstCol + 1] = pair[1];
        }
    }
}
/// Rotates a YUV420sp (NV12) frame 270 degrees clockwise (= 90 CCW).
///
/// dst must hold srcWidth*srcHeight*3/2 bytes; the rotated frame is
/// srcHeight pixels wide by srcWidth pixels tall. src and dst must not
/// overlap. Dimensions are assumed even.
///
/// NOTE(review): the original carried nWidth/nHeight "cache" variables that
/// were always 0, so its guard `if (srcWidth != nWidth || ...)` was always
/// true — dead leftover from a static-variable version; removed here.
void rotate270YUV420sp(unsigned char *dst, const unsigned char *src, int srcWidth, int srcHeight)
{
    const int wh = srcWidth * srcHeight;
    const int uvHeight = srcHeight >> 1; /* chroma rows = height / 2 */

    int k = 0;
    /* Luma: output row i is source column (srcWidth-1-i) read top to bottom. */
    for (int i = 0; i < srcWidth; i++) {
        int nPos = srcWidth - 1;
        for (int j = 0; j < srcHeight; j++) {
            dst[k++] = src[nPos - i];
            nPos += srcWidth;
        }
    }
    /* Chroma: same column-to-row mapping, moving interleaved UV pairs as
     * units (step 2 walks the plane a pair at a time). */
    for (int i = 0; i < srcWidth; i += 2) {
        int nPos = wh + srcWidth - 1;
        for (int j = 0; j < uvHeight; j++) {
            dst[k] = src[nPos - i - 1];  /* U of the pair */
            dst[k + 1] = src[nPos - i];  /* V of the pair */
            k += 2;
            nPos += srcWidth;
        }
    }
}
在iOS中,可以使用core graphics将RGB数据画成UIImage。
// Release callback for CGDataProviderCreateWithData: frees the malloc'd
// RGBA buffer once Core Graphics is done with it. The original passed NULL
// here and never freed the buffer, leaking width*height*4 bytes per call.
static void XYZReleaseMallocedBuffer(void *info, const void *data, size_t size)
{
    free((void *)data);
}

/// Converts a tightly packed 3-bytes-per-pixel buffer (width*height*3
/// bytes) into a UIImage by expanding it to RGBA32 with opaque alpha and
/// redrawing it through a bitmap context.
/// NOTE(review): despite the selector name, the input is 3-byte RGB/BGR,
/// not RGBA8 — the channel order is passed through untouched.
/// Returns nil on any allocation or Core Graphics failure.
- (UIImage *) convertBitmapRGBA8ToUIImage:(unsigned char *) buffer
withWidth:(int) width
withHeight:(int) height {
    // Expand the 3-byte pixels to RGBA32 with a fully opaque alpha channel.
    size_t bufferLength = (size_t)width * height * 4;
    unsigned char *rgba = (unsigned char *)malloc(bufferLength);
    if (rgba == NULL) {
        NSLog(@"Error: Memory not allocated for RGBA buffer");
        return nil;
    }
    for (int i = 0; i < width * height; ++i) {
        rgba[4 * i]     = buffer[3 * i];
        rgba[4 * i + 1] = buffer[3 * i + 1];
        rgba[4 * i + 2] = buffer[3 * i + 2];
        rgba[4 * i + 3] = 255;
    }
    // The provider takes ownership of `rgba`; the callback frees it when
    // the provider is released.
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, rgba, bufferLength, XYZReleaseMallocedBuffer);
    if (provider == NULL) {
        free(rgba);
        return nil;
    }
    size_t bitsPerComponent = 8;
    size_t bitsPerPixel = 32;
    size_t bytesPerRow = 4 * width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    if (colorSpaceRef == NULL) {
        NSLog(@"Error allocating color space");
        CGDataProviderRelease(provider);
        return nil;
    }
    // Alpha is constant 255, so premultiplied-last is equivalent to
    // straight RGBA for this data.
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast;
    CGImageRef iref = CGImageCreate(width,
                                    height,
                                    bitsPerComponent,
                                    bitsPerPixel,
                                    bytesPerRow,
                                    colorSpaceRef,
                                    bitmapInfo,
                                    provider, // data provider
                                    NULL,     // decode
                                    YES,      // should interpolate
                                    kCGRenderingIntentDefault);
    // Redraw through a bitmap context so the final image owns its pixels.
    uint32_t *pixels = (uint32_t *)malloc(bufferLength);
    if (pixels == NULL) {
        NSLog(@"Error: Memory not allocated for bitmap");
        CGDataProviderRelease(provider);
        CGColorSpaceRelease(colorSpaceRef);
        CGImageRelease(iref);
        return nil;
    }
    UIImage *image = nil;
    CGContextRef context = CGBitmapContextCreate(pixels,
                                                 width,
                                                 height,
                                                 bitsPerComponent,
                                                 bytesPerRow,
                                                 colorSpaceRef,
                                                 bitmapInfo);
    if (context == NULL) {
        NSLog(@"Error context not created");
        // Fall through: `pixels` is freed exactly once below. The original
        // freed it here AND again in the trailing cleanup — a double free.
    } else {
        CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, width, height), iref);
        CGImageRef imageRef = CGBitmapContextCreateImage(context);
        // The original assigned `image` with mainScreen scale, then
        // immediately overwrote it inside a respondsToSelector check —
        // imageWithCGImage:scale:orientation: is always available, so
        // call it once.
        image = [UIImage imageWithCGImage:imageRef
                                    scale:1.0
                              orientation:UIImageOrientationUp];
        CGImageRelease(imageRef);
        CGContextRelease(context);
    }
    CGColorSpaceRelease(colorSpaceRef);
    CGImageRelease(iref);
    CGDataProviderRelease(provider);
    free(pixels);
    return image;
}