Apply Black and White Filter to UIImage
Judging by the ciimage tag, perhaps the OP was thinking (correctly) that Core Image would provide a quick and easy way to do this?
Here's that, first in Objective-C:
- (UIImage *)grayscaleImage:(UIImage *)image {
    CIImage *ciImage = [[CIImage alloc] initWithImage:image];
    CIImage *grayscale = [ciImage imageByApplyingFilter:@"CIColorControls"
                                    withInputParameters:@{kCIInputSaturationKey : @0.0}];
    return [UIImage imageWithCIImage:grayscale];
}
and Swift:
func grayscaleImage(image: UIImage) -> UIImage? {
    // CIImage(image:) is failable, so handle the optional instead of force-unwrapping
    guard let ciImage = CIImage(image: image) else { return nil }
    let grayscale = ciImage.applyingFilter("CIColorControls",
                                           parameters: [kCIInputSaturationKey: 0.0])
    return UIImage(ciImage: grayscale)
}
CIColorControls is just one of several built-in Core Image filters that can convert an image to grayscale. CIPhotoEffectMono, CIPhotoEffectNoir, and CIPhotoEffectTonal are different tone-mapping presets (each takes no parameters), and you can do your own tone mapping with filters like CIColorMap.
Unlike alternatives that involve creating and drawing into one's own CGBitmapContext, these preserve the size/scale and alpha of the original image without extra work.
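For illustration, a minimal sketch of the same pattern with one of the parameterless presets mentioned above (the function name noirImage is just for this example):

import CoreImage
import UIKit

/// Illustrative helper: applies the parameterless CIPhotoEffectNoir preset.
func noirImage(_ image: UIImage) -> UIImage? {
    guard let ciImage = CIImage(image: image) else { return nil }
    let filtered = ciImage.applyingFilter("CIPhotoEffectNoir", parameters: [:])
    return UIImage(ciImage: filtered)
}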
Objective-C
- (UIImage *)convertImageToGrayScale:(UIImage *)image {
    // Create image rectangle with current image width/height
    CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);

    // Grayscale color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();

    // Create bitmap context with current image size and grayscale colorspace
    CGContextRef context = CGBitmapContextCreate(nil, image.size.width, image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);

    // Draw image into current context, with specified rectangle
    // using previously defined context (with grayscale colorspace)
    CGContextDrawImage(context, imageRect, [image CGImage]);

    // Create bitmap image from pixel data in current context
    CGImageRef imageRef = CGBitmapContextCreateImage(context);

    // Create a new UIImage object
    UIImage *newImage = [UIImage imageWithCGImage:imageRef];

    // Release colorspace, context and image reference
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    CFRelease(imageRef);

    // Return the new grayscale image
    return newImage;
}
Swift
func convertToGrayScale(image: UIImage) -> UIImage {
    // Create image rectangle with current image width/height
    let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)

    // Grayscale color space
    let colorSpace = CGColorSpaceCreateDeviceGray()
    let width = image.size.width
    let height = image.size.height

    // Create bitmap context with current image size and grayscale colorspace
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue)
    let context = CGContext(data: nil, width: Int(width), height: Int(height), bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)

    // Draw image into current context, with specified rectangle
    // using previously defined context (with grayscale colorspace)
    context?.draw(image.cgImage!, in: imageRect)

    // Create bitmap image from pixel data in current context
    let imageRef = context!.makeImage()

    // Create a new UIImage object
    let newImage = UIImage(cgImage: imageRef!)
    return newImage
}
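A quick usage sketch; the asset name "photo" and the imageView property are placeholders, not part of the answer:

// Hypothetical usage: the asset name and imageView are assumed to exist elsewhere.
if let original = UIImage(named: "photo") {
    imageView.image = convertToGrayScale(image: original)
}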
The version from @rickster looks good and handles the alpha channel. However, a UIImageView whose contentMode is not .AspectFit or .AspectFill can't display the resulting image, so the UIImage has to be created from a CGImage. This version, implemented as a Swift UIImage extension, keeps the current scale and offers some optional input parameters:
import CoreImage
extension UIImage
{
    /// Applies grayscale with CIColorControls by setting saturation to 0.0.
    /// - Parameter brightness: Default is 0.0.
    /// - Parameter contrast: Default is 1.0.
    /// - Returns: The grayscale image of self if available.
    func grayscaleImage(brightness: Double = 0.0, contrast: Double = 1.0) -> UIImage?
    {
        guard let ciImage = CIImage(image: self, options: nil) else { return nil }

        let paramsColor: [String: Any] = [kCIInputBrightnessKey: brightness,
                                          kCIInputContrastKey: contrast,
                                          kCIInputSaturationKey: 0.0]
        let grayscale = ciImage.applyingFilter("CIColorControls", parameters: paramsColor)

        // Render through a CGImage so UIImageView can display it with any content mode
        guard let processedCGImage = CIContext().createCGImage(grayscale, from: grayscale.extent) else { return nil }
        return UIImage(cgImage: processedCGImage, scale: self.scale, orientation: self.imageOrientation)
    }
}
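For example (the asset name "photo" and imageView are placeholders for this sketch):

// Hypothetical usage of the extension above.
if let original = UIImage(named: "photo"),
   let gray = original.grayscaleImage(contrast: 1.1) {
    imageView.image = gray   // imageView is assumed to exist
}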
The longer but faster way is a modified version of @ChrisStillwell's answer, implemented as a UIImage extension in Swift that takes the alpha channel and the current scale into account:
extension UIImage
{
    /// Creates a grayscale image that keeps the alpha channel. About 5 times faster than grayscaleImage().
    /// - Returns: The grayscale image of self if available.
    func convertToGrayScale() -> UIImage?
    {
        guard let cgImage = self.cgImage else { return nil }

        // Create image rectangle with current image width/height * scale
        let pixelSize = CGSize(width: self.size.width * self.scale, height: self.size.height * self.scale)
        let imageRect = CGRect(origin: .zero, size: pixelSize)

        // Grayscale color space
        let colorSpace = CGColorSpaceCreateDeviceGray()

        // Create bitmap context with current pixel size and grayscale colorspace
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue)
        guard let context = CGContext(data: nil,
                                      width: Int(pixelSize.width), height: Int(pixelSize.height),
                                      bitsPerComponent: 8, bytesPerRow: 0,
                                      space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
        else { return nil }

        // Draw image into the grayscale context
        context.draw(cgImage, in: imageRect)

        // Create bitmap image from pixel data in the grayscale context
        guard let imageRef = context.makeImage() else { return nil }

        // Draw the image again into an alpha-only context to capture its alpha channel.
        // The Swift initializer needs a non-nil color space; device gray with .alphaOnly
        // is a supported bitmap combination.
        let bitmapInfoAlphaOnly = CGBitmapInfo(rawValue: CGImageAlphaInfo.alphaOnly.rawValue)
        guard let contextAlpha = CGContext(data: nil,
                                           width: Int(pixelSize.width), height: Int(pixelSize.height),
                                           bitsPerComponent: 8, bytesPerRow: 0,
                                           space: colorSpace, bitmapInfo: bitmapInfoAlphaOnly.rawValue)
        else { return nil }

        contextAlpha.draw(cgImage, in: imageRect)
        guard let mask = contextAlpha.makeImage() else { return nil }

        // Combine the grayscale image with the alpha mask, keeping scale and orientation
        guard let newCGImage = imageRef.masking(mask) else { return nil }
        return UIImage(cgImage: newCGImage, scale: self.scale, orientation: self.imageOrientation)
    }
}
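To sanity-check the speed claim on your own images, a rough timing sketch (not a rigorous benchmark; image is an assumed UIImage, and you should measure a release build over multiple runs):

let start1 = CFAbsoluteTimeGetCurrent()
_ = image.grayscaleImage()
print("Core Image path: \(CFAbsoluteTimeGetCurrent() - start1) s")

let start2 = CFAbsoluteTimeGetCurrent()
_ = image.convertToGrayScale()
print("Core Graphics path: \(CFAbsoluteTimeGetCurrent() - start2) s")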
While PiratM's solution works, you lose the alpha channel. To preserve the alpha channel you need a few extra steps:
+ (UIImage *)convertImageToGrayScale:(UIImage *)image {
    // Create image rectangle with current image width/height
    CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);

    // Grayscale color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();

    // Create bitmap context with current image size and grayscale colorspace
    CGContextRef context = CGBitmapContextCreate(nil, image.size.width, image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);

    // Draw image into current context, with specified rectangle
    // using previously defined context (with grayscale colorspace)
    CGContextDrawImage(context, imageRect, [image CGImage]);

    // Create bitmap image from pixel data in current context
    CGImageRef imageRef = CGBitmapContextCreateImage(context);

    // Release colorspace and the grayscale context
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);

    // Draw the image again into an alpha-only context to capture its alpha channel
    context = CGBitmapContextCreate(nil, image.size.width, image.size.height, 8, 0, nil, kCGImageAlphaOnly);
    CGContextDrawImage(context, imageRect, [image CGImage]);
    CGImageRef mask = CGBitmapContextCreateImage(context);
    CGContextRelease(context);

    // Combine the grayscale image with the alpha mask
    CGImageRef maskedImageRef = CGImageCreateWithMask(imageRef, mask);
    UIImage *newImage = [UIImage imageWithCGImage:maskedImageRef];

    // Release the remaining Core Graphics objects
    CGImageRelease(imageRef);
    CGImageRelease(mask);
    CGImageRelease(maskedImageRef);

    // Return the new grayscale image
    return newImage;
}