
Image Processing in Objective-C

2017-02-03  朱思明 (Zhu Siming)

Background

1. In real-world development, as a project matures, the demands placed on its images keep growing, and there are more and more external constraints to consider.

2. We are no longer satisfied with simply dropping whatever image the designer hands us into a view; we also have to think about bundle size and how well an image adapts to different devices.

3. This article shares a few image-processing techniques that address these needs.

I. Capturing Screenshots

1. Taking a snapshot of a view

- (UIImage *)getScreenRecordingImageWithView:(UIView *)view
{
    // 1. Open an image context the same size as the view, at screen scale
    UIGraphicsBeginImageContextWithOptions(view.bounds.size, YES, [UIScreen mainScreen].scale);
    // Alternative: [view.layer renderInContext:UIGraphicsGetCurrentContext()];
    [view drawViewHierarchyInRect:view.bounds afterScreenUpdates:YES];
    // 2. Read the rendered image back and close the context
    UIImage *screenshotImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return screenshotImage;
}
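
A minimal usage sketch (the cardView property is hypothetical): capture a subview and display the snapshot in an image view.

- (void)showSnapshotPreview {
    // Hypothetical cardView property; any on-screen subview can be captured this way.
    UIImage *snapshot = [self getScreenRecordingImageWithView:self.cardView];
    UIImageView *preview = [[UIImageView alloc] initWithImage:snapshot];
    [self.view addSubview:preview];
}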

II. View Color Effects

1. Adding a gradient to a view

- (void)setGradientLayerWithView:(UIView *)view {
    // Create the CAGradientLayer
    CAGradientLayer *gradientLayer = [CAGradientLayer layer];
    // Size the layer to the view
    gradientLayer.frame = view.bounds;
    // Gradient colors; each CGColor must be bridged into the NSArray
    gradientLayer.colors = @[(__bridge id)[UIColor redColor].CGColor,
                             (__bridge id)[UIColor yellowColor].CGColor,
                             (__bridge id)[UIColor redColor].CGColor];
    // Stops for the three colors, each in the range 0.0 to 1.0
    gradientLayer.locations = @[@(0.0f), @(0.2f), @(1.0f)];
    // Gradient direction: (0,0) is the top-left corner, (1,1) the bottom-right
    gradientLayer.startPoint = CGPointMake(0, 0);
    gradientLayer.endPoint = CGPointMake(0, 1);
    // Add the gradient layer to the view
    [view.layer addSublayer:gradientLayer];
}
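
A minimal usage sketch, assuming a bannerView property and a gradientAdded flag (both hypothetical). The gradient layer's frame is captured when the method runs, so apply it only after the view has its final size:

- (void)viewDidLayoutSubviews {
    [super viewDidLayoutSubviews];
    // Hypothetical bannerView property and gradientAdded flag; add the gradient once,
    // after Auto Layout has settled the view's bounds.
    if (!self.gradientAdded) {
        [self setGradientLayerWithView:self.bannerView];
        self.gradientAdded = YES;
    }
}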

2. Fading a view out at its edges

// To make interactions feel smoother, we often want a view to fade out near its edges.
// For example, in many music players the lyrics are clipped abruptly as they scroll into or
// out of the screen; letting the text fade at the boundary looks much better.
// Implementation idea:
// 1. Use a UIView mask: where the mask view is opaque, the masked view shows through; where the
//    mask is transparent, the masked view is hidden; where the mask is partially transparent,
//    the masked view is shown at the mask's alpha.
// 2. Use a gradient view as that mask.
- (void)setBoundaryFadeWithView:(UIView *)view {
    // 1. Build a mask view with a transparent -> black -> black -> transparent gradient
    UIView *maskView = [[UIView alloc] initWithFrame:view.bounds];
    // Create the CAGradientLayer
    CAGradientLayer *gradientLayer = [CAGradientLayer layer];
    // Size the layer to the view
    gradientLayer.frame = view.bounds;
    // Gradient colors; each CGColor must be bridged into the NSArray
    gradientLayer.colors = @[(__bridge id)[UIColor colorWithWhite:0 alpha:0].CGColor,
                             (__bridge id)[UIColor blackColor].CGColor,
                             (__bridge id)[UIColor blackColor].CGColor,
                             (__bridge id)[UIColor colorWithWhite:0 alpha:0].CGColor];
    // Stops for the four colors, each in the range 0.0 to 1.0
    gradientLayer.locations = @[@(0.0f), @(0.1f), @(0.9f), @(1.0f)];
    // Gradient direction: (0,0) is the top-left corner, (1,1) the bottom-right
    gradientLayer.startPoint = CGPointMake(0, 0);
    gradientLayer.endPoint = CGPointMake(0, 1);
    // Attach the gradient layer to the mask view
    [maskView.layer addSublayer:gradientLayer];

    // 2. Use maskView as the view's mask (iOS 8+)
    view.maskView = maskView;
}
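
A minimal usage sketch for the lyrics example above (the lyricsContainerView property is hypothetical). Mask the non-scrolling container that wraps the lyrics scroll view, so the fade stays pinned to the edges while the content scrolls underneath:

// Hypothetical lyricsContainerView wrapping the lyrics table/scroll view.
// Masking the scroll view itself would make the mask scroll along with the content.
[self setBoundaryFadeWithView:self.lyricsContainerView];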

3. Rendering a PNG icon in any color

- (void)setPNGColorView:(UIView *)view pngImage:(UIImage *)pngImage color:(UIColor *)color {
    // 1. Fill the view with the target color
    view.backgroundColor = color;
    // 2. Use the PNG as a mask: only its opaque pixels let the color show through
    UIImageView *maskView = [[UIImageView alloc] initWithFrame:view.bounds];
    maskView.image = pngImage;
    view.maskView = maskView;
}
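
A minimal usage sketch (the iconView property and the "heart" asset are hypothetical): one template PNG can be reused in any color, because only its opaque pixels show through in the color you pass in.

// Hypothetical iconView (a plain UIView sized like the icon) and "heart" image asset.
[self setPNGColorView:self.iconView
             pngImage:[UIImage imageNamed:@"heart"]
                color:[UIColor orangeColor]];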

III. Drawing a Cut-Out (Hollow) Image

// Build a dimmed overlay image with a transparent hole punched in it, e.g. the backdrop of a
// feature-guide overlay where a button or image underneath must remain visible.
- (UIImage *)drawHollowPicture
{
    // 01 Create a screen-sized image context
    CGFloat width = [UIScreen mainScreen].bounds.size.width;
    CGFloat height = [UIScreen mainScreen].bounds.size.height;
    CGRect frame = CGRectMake(0, 0, width, height);
    UIGraphicsBeginImageContextWithOptions(frame.size, NO, [UIScreen mainScreen].nativeScale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // 02 Fill the canvas with a translucent black background
    UIColor *bgColor = [UIColor colorWithRed:0 green:0 blue:0 alpha:0.7];
    CGContextSetFillColorWithColor(context, bgColor.CGColor);
    CGContextAddRect(context, frame);
    CGContextDrawPath(context, kCGPathFill);
    // 03 Punch out the hole
    // kCGBlendModeClear erases whatever has already been drawn in the filled region
    CGContextSetBlendMode(context, kCGBlendModeClear);
    // A 60x60 ellipse, i.e. a circle
    CGContextAddEllipseInRect(context, CGRectMake(10, 10, 60, 60));
    CGContextSetFillColorWithColor(context, [UIColor clearColor].CGColor);
    CGContextDrawPath(context, kCGPathFill);
    // 04 Read the result back out of the context
    UIImage *theImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return theImage;
}
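
A minimal usage sketch: drop the generated image into a full-screen image view to build the guide overlay; only the punched-out circle reveals the interface underneath.

UIImageView *guideOverlay = [[UIImageView alloc] initWithFrame:[UIScreen mainScreen].bounds];
guideOverlay.image = [self drawHollowPicture];
guideOverlay.userInteractionEnabled = YES; // block touches while the overlay is shown
[self.view addSubview:guideOverlay];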

IV. Pixel-Level Image Processing

// 1. Grayscale conversion (and single-color replacement)

- (UIImage *)getNewImageWithOldImage:(UIImage *)image {
    // 1. Byte offsets of each channel within an RGBA pixel
    const int RED = 0;
    const int GREEN = 1;
    const int BLUE = 2;
    // 2. Pixel dimensions of the image
    CGRect imageRect = CGRectMake(0, 0, image.size.width * image.scale, image.size.height * image.scale);
    int width = imageRect.size.width;
    int height = imageRect.size.height;
    // 3. Allocate a buffer large enough to hold every pixel
    uint32_t *pixels = (uint32_t *)malloc(width * height * sizeof(uint32_t));
    // Zero the buffer
    memset(pixels, 0, width * height * sizeof(uint32_t));
    // 4. RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // 5. Create a bitmap context with RGBA pixels, backed by the pixels buffer
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, kCGImageAlphaPremultipliedLast);
    // 6. Draw the image into the context, which fills in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [image CGImage]);
    // 7. Walk every pixel and process it
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            // 01 Pointer to this pixel's RGBA bytes
            uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];
            // 02 Grayscale: weighted luminance of the three channels.
            //    Uncomment the three assignments to produce a black-and-white image
            //    (doing so means the color match below will never fire, so enable one or the other).
            uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];
            // rgbaPixel[RED] = gray;
            // rgbaPixel[GREEN] = gray;
            // rgbaPixel[BLUE] = gray;
            // 03 Single-color replacement: swap one specific color for pure red
            if (rgbaPixel[RED] == 239 && rgbaPixel[GREEN] == 60 && rgbaPixel[BLUE] == 153) {
                rgbaPixel[RED] = 255;
                rgbaPixel[GREEN] = 0;
                rgbaPixel[BLUE] = 0;
            }
        }
    }
    // 8. Create a CGImage from the context and wrap it in a UIImage
    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    UIImage *resultImage = [UIImage imageWithCGImage:imageRef scale:image.scale orientation:UIImageOrientationUp];
    // 9. Release the context, color space, pixel buffer, and CGImage
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    CGImageRelease(imageRef);
    return resultImage;
}
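
A minimal usage sketch (the photoView property is hypothetical): run the currently displayed image through the filter and show the processed copy.

// Hypothetical photoView; the method returns a new UIImage and leaves the original untouched.
self.photoView.image = [self getNewImageWithOldImage:self.photoView.image];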

// 2. Brightness and contrast adjustment

/*
 contrast:   -1 ... 1
 brightness: -1 ... 1
 Luminance formula: Gray = 0.3*R + 0.59*G + 0.11*B
 */
// Forward declarations for the helper functions defined below
static float contrastFactor(float value);
static float clamp(float value);
- (UIImage*)getNewImageWithOldImage:(UIImage*)image
                           contrast:(CGFloat)contrast
                         brightness:(CGFloat)brightness {
    // 1. Byte offsets of each channel within an RGBA pixel
    const int RED = 0;
    const int GREEN = 1;
    const int BLUE = 2;
    const int ALPHA = 3;
    // 2. Pixel dimensions of the image
    CGRect imageRect = CGRectMake(0, 0, image.size.width * image.scale, image.size.height * image.scale);
    int width = imageRect.size.width;
    int height = imageRect.size.height;
    // 3. Allocate a buffer large enough to hold every pixel
    uint32_t *pixels = (uint32_t *)malloc(width * height * sizeof(uint32_t));
    // Zero the buffer
    memset(pixels, 0, width * height * sizeof(uint32_t));
    // 4. RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // 5. Create a bitmap context with RGBA pixels, backed by the pixels buffer
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, kCGImageAlphaPremultipliedLast);
    // 6. Draw the image into the context, which fills in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [image CGImage]);

    // 7. Apply the brightness and contrast adjustments
    // Only touch the pixels when at least one of the parameters actually changed
    if (contrast != 0 && brightness == 0) {
        // 01 Contrast only
        float ta = 0, tr = 0, tg = 0, tb = 0;
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                // Pointer to this pixel's RGBA bytes (alpha is read but left unchanged)
                uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];
                ta = rgbaPixel[ALPHA];
                tr = rgbaPixel[RED];
                tg = rgbaPixel[GREEN];
                tb = rgbaPixel[BLUE];

                // Scale each channel away from the midpoint (127.5) by the contrast gain
                tr = (tr - 127.5) * contrastFactor(contrast) + 127.5;
                tg = (tg - 127.5) * contrastFactor(contrast) + 127.5;
                tb = (tb - 127.5) * contrastFactor(contrast) + 127.5;

                rgbaPixel[RED] = clamp(tr);
                rgbaPixel[GREEN] = clamp(tg);
                rgbaPixel[BLUE] = clamp(tb);
            }
        }
    } else if (contrast == 0 && brightness != 0) {
        // 02 Brightness only
        float ta = 0, tr = 0, tg = 0, tb = 0;
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                // Pointer to this pixel's RGBA bytes (alpha is read but left unchanged)
                uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];
                ta = rgbaPixel[ALPHA];
                tr = rgbaPixel[RED];
                tg = rgbaPixel[GREEN];
                tb = rgbaPixel[BLUE];

                // Shift every channel by up to 255 in either direction
                tr = tr + 255 * brightness;
                tg = tg + 255 * brightness;
                tb = tb + 255 * brightness;

                rgbaPixel[RED] = clamp(tr);
                rgbaPixel[GREEN] = clamp(tg);
                rgbaPixel[BLUE] = clamp(tb);
            }
        }
    } else if (contrast != 0 && brightness != 0) {
        // 03 Both contrast and brightness
        float ta = 0, tr = 0, tg = 0, tb = 0;
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                // Pointer to this pixel's RGBA bytes (alpha is read but left unchanged)
                uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];
                ta = rgbaPixel[ALPHA];
                tr = rgbaPixel[RED];
                tg = rgbaPixel[GREEN];
                tb = rgbaPixel[BLUE];

                // Combine both adjustments in a single pass
                tr = (tr - 127.5 * (1 - brightness)) * contrastFactor(contrast) + 127.5 * (1 + brightness);
                tg = (tg - 127.5 * (1 - brightness)) * contrastFactor(contrast) + 127.5 * (1 + brightness);
                tb = (tb - 127.5 * (1 - brightness)) * contrastFactor(contrast) + 127.5 * (1 + brightness);
                rgbaPixel[RED] = clamp(tr);
                rgbaPixel[GREEN] = clamp(tg);
                rgbaPixel[BLUE] = clamp(tb);
            }
        }
    }
    
    
    // 8. Create a CGImage from the context and wrap it in a UIImage
    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    UIImage *resultImage = [UIImage imageWithCGImage:imageRef scale:image.scale orientation:UIImageOrientationUp];
    // 9. Release the context, color space, pixel buffer, and CGImage
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    CGImageRelease(imageRef);
    return resultImage;
}

// Contrast gain: maps contrast in [-1, 1] to a multiplier via tan().
// contrast = 0 gives tan(45°) = 1 (no change); positive values increase contrast,
// negative values flatten it.
static float contrastFactor(float value) {
    return tan((45 + 44 * value) / 180 * M_PI);
}

// Clamp a channel value to the displayable 0 to 255 range.
static float clamp(float value) {
    return value > 255 ? 255 : (value < 0 ? 0 : value);
}
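
A minimal usage sketch (hypothetical photoView property): a mild contrast boost combined with slight brightening; both parameters are expected in the -1 ... 1 range.

// Hypothetical photoView; 0.2 contrast and 0.1 brightness are gentle adjustments.
self.photoView.image = [self getNewImageWithOldImage:self.photoView.image
                                            contrast:0.2
                                          brightness:0.1];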

V. Composing Multiple Images into a Video File

1. Writing a sequence of image frames into a video file

- (void)getVideoWithImages:(NSArray *)images
{
    // NOTE: kMoviePath, inRect, -isRecording, -getScreenRecordingImage and
    // -pixelBufferFromCGImage:size: are assumed to be defined elsewhere in this class.
    // As written, the loop pulls frames from the screen-recording helper rather than
    // from the images array passed in.
    // 1. Create the asset writer that produces the output file
    NSError *error = nil;
    AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:kMoviePath] fileType:AVFileTypeMPEG4 error:&error];
    NSParameterAssert(videoWriter);
    // 2. Configure the video input (codec and frame size)
    NSDictionary *videoSettings = @{AVVideoCodecKey: AVVideoCodecH264,
                                    AVVideoWidthKey: [NSNumber numberWithInt:inRect.size.width],
                                    AVVideoHeightKey: [NSNumber numberWithInt:inRect.size.height]};
    AVAssetWriterInput *writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:videoSettings];
    AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput sourcePixelBufferAttributes:nil];
    NSParameterAssert(writerInput);
    NSParameterAssert([videoWriter canAddInput:writerInput]);
    [videoWriter addInput:writerInput];
    // 3. Start the writing session
    [videoWriter startWriting];
    [videoWriter startSessionAtSourceTime:kCMTimeZero];
    // 4. Feed frames to the writer as it signals readiness for more data
    __block int frame = 0;
    [writerInput requestMediaDataWhenReadyOnQueue:dispatch_queue_create("mediaInputQueue", NULL) usingBlock:^{
        @autoreleasepool {
            while ([writerInput isReadyForMoreMediaData]) {
                @autoreleasepool {
                    if ([self isRecording] == NO) {
                        // No more frames: close the input and finish the file
                        [writerInput markAsFinished];
                        [videoWriter finishWritingWithCompletionHandler:^{
                            if (videoWriter.status == AVAssetWriterStatusCompleted) {
                                NSLog(@"Video written successfully");
                            } else {
                                NSLog(@"Video writing failed: %@", videoWriter.error);
                            }
                        }];
                        break;
                    }
                    // Grab the next frame and convert it into a pixel buffer
                    CGImageRef imageRef = [self getScreenRecordingImage];
                    CVPixelBufferRef buffer = (CVPixelBufferRef)[self pixelBufferFromCGImage:imageRef size:inRect.size];
                    if (buffer) {
                        // Append synchronously on the writer's queue, at 20 frames per second
                        if (![adaptor appendPixelBuffer:buffer withPresentationTime:CMTimeMake(frame, 20)]) {
                            NSLog(@"Failed to append frame %d", frame);
                        }
                        CFRelease(buffer);
                    }
                    frame++;
                }
            }
        }
    }];
}
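
The loop above relies on a pixelBufferFromCGImage:size: helper that the article does not show. Below is a minimal sketch of the common way such a helper is written (an assumption, not necessarily the author's implementation): it allocates a CVPixelBuffer and draws the CGImage into a bitmap context backed by the buffer's memory.

// A sketch of the missing helper, under the assumptions stated above.
- (CVPixelBufferRef)pixelBufferFromCGImage:(CGImageRef)image size:(CGSize)size {
    NSDictionary *options = @{(id)kCVPixelBufferCGImageCompatibilityKey: @YES,
                              (id)kCVPixelBufferCGBitmapContextCompatibilityKey: @YES};
    CVPixelBufferRef pixelBuffer = NULL;
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, (size_t)size.width, (size_t)size.height,
                                          kCVPixelFormatType_32ARGB,
                                          (__bridge CFDictionaryRef)options, &pixelBuffer);
    if (status != kCVReturnSuccess || pixelBuffer == NULL) {
        return NULL;
    }
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    void *baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // ARGB bitmap context that writes directly into the pixel buffer's memory
    CGContextRef context = CGBitmapContextCreate(baseAddress, size.width, size.height, 8,
                                                 CVPixelBufferGetBytesPerRow(pixelBuffer),
                                                 colorSpace, kCGImageAlphaNoneSkipFirst);
    CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    // The caller owns the returned buffer and must CFRelease() it (the loop above does).
    return pixelBuffer;
}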

More resources:

Github:https://github.com/zhusiming/
GitBook:https://zhusiming.gitbooks.io/smbook/
