//
//  UIImage+maskutils.m
//  testimages
//

#import "UIImage+maskutils.h"
#import <QuartzCore/QuartzCore.h>

@implementation UIImage (maskutils)

// in response to this stack overflow question:  http://stackoverflow.com/questions/14622202/can-cgcontextcliptomask-mask-all-non-transparent-pixels-with-alpha-1/
// I started with code for converting to grayscale here:  http://stackoverflow.com/questions/1298867/convert-image-to-grayscale
// and hacked it to create a stencil
//
// All pixels become full black so alpha controls mask properties.
// then if inCollapseAlpha is YES, then any non-zero alpha becomes 1.0 alpha and any 0 alpha stays zero alpha
// if inCollapseAlpha is NO then alpha values are untouched (so get alpha gradient like in source image).

- (UIImage *)createStencilWithCollapseAlpha: (BOOL) inCollapseAlpha
{
    // Create image rectangle with current image width/height.
    // UIImage.size is nominally in points, so we multiply by self.scale to get
    // pixel dimensions for the backing bitmap.
    // NOTE(review): the original author observed this not always matching
    // expectations — verify against retina test images that no detail is lost.
    CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale );
    
    int width = imageRect.size.width;
    int height = imageRect.size.height;
    
    // Degenerate image — nothing to stencil.
    if (width <= 0 || height <= 0)
        return nil;
    
    // calloc zero-fills the buffer, so any area the image does not cover
    // stays fully transparent (replaces the malloc + memset pair).
    uint32_t *pixels = (uint32_t *) calloc((size_t)width * height, sizeof(uint32_t));
    if (pixels == NULL)
        return nil;
    
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    
    // Create a context with RGBA pixels, 8 bits per channel.
    // With kCGBitmapByteOrder32Little + AlphaPremultipliedLast, the bytes of
    // each 32-bit pixel are laid out in memory as A, B, G, R — so byte index 0
    // is the alpha channel.
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
    if (context == NULL)
    {
        // Unsupported parameter combination — clean up and bail rather than
        // crash in CGContextDrawImage below.
        CGColorSpaceRelease(colorSpace);
        free(pixels);
        return nil;
    }
    
    // paint the bitmap to our context which will fill in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
    
    for ( int y = 0; y < height; y++ )
    {
        for ( int x = 0; x < width; x++ )
        {
            uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];
            
            if ( inCollapseAlpha )
            {
                // collapse: any non-zero alpha becomes fully opaque;
                // zero alpha stays zero
                if ( rgbaPixel[0] != 0 )
                    rgbaPixel[0] = 255;
            }
            // force the color channels to black so only alpha controls
            // the mask (byte order is A,B,G,R — see context creation above)
            rgbaPixel[1] = 0;       // B
            rgbaPixel[2] = 0;       // G
            rgbaPixel[3] = 0;       // R
        }
    }
    
    // create a new CGImageRef from our context with the modified pixels
    CGImageRef image = CGBitmapContextCreateImage(context);
    
    // we're done with the context, color space, and pixels
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    
    // carry the scale and orientation across so retina images come out right
    UIImage * result = [UIImage imageWithCGImage: image scale: self.scale orientation: self.imageOrientation];
    CGImageRelease( image );
    
    return result;
}

// Clips the receiver against the alpha silhouette of clipImage.
//
// clipFrame positions and sizes the region of the receiver to keep (the output
// image has clipFrame.size); boundSize is the full logical size of the
// receiver's drawing area. Returns the masked image, rendered at screen scale.
- (UIImage *)getImageWithClipImage:(UIImage *)clipImage andClipFrame:(CGRect)clipFrame andBoundSize:(CGSize)boundSize {
    UIGraphicsBeginImageContextWithOptions(clipFrame.size, NO, 0.0f);
    CGContextRef context = UIGraphicsGetCurrentContext();

    // Black-on-transparent stencil of the clip image; every pixel with any
    // alpha at all becomes fully opaque in the mask.
    UIImage *maskImage = [clipImage createStencilWithCollapseAlpha:YES];
    if (maskImage) {
        CGRect maskRect = CGRectMake(0, 0, clipFrame.size.width, clipFrame.size.height);
        CGContextClipToMask(context, maskRect, [maskImage CGImage]);
    }

    // Draw the receiver via a throwaway view hierarchy and renderInContext:,
    // offset by clipFrame.origin so the desired region lands under the mask.
    CGRect imageFrame = CGRectMake(-clipFrame.origin.x, -clipFrame.origin.y, boundSize.width, boundSize.height);
    UIImageView *imageView = [[UIImageView alloc] initWithFrame:imageFrame];
    imageView.image = self;

    UIView *container = [[UIView alloc] initWithFrame:CGRectMake(0, 0, boundSize.width, boundSize.height)];
    [container addSubview:imageView];
    [container.layer renderInContext:context];

    UIImage *clippedImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return clippedImage;
}

// Returns a silhouette of the receiver flood-filled with the given color:
// the receiver's collapsed-alpha stencil clips a solid color fill of the
// same size. Rendered at screen scale.
- (UIImage *)getClipImageWithColor:(UIColor *)color {
    // Stencil of the receiver: all black, alpha collapsed to 0 or 255.
    UIImage *stencil = [self createStencilWithCollapseAlpha:YES];
    CGSize stencilSize = self.size;

    UIGraphicsBeginImageContextWithOptions(self.size, NO, 0.0f);
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSaveGState(context);

    // Flip the UIKit (top-left origin) context into Core Graphics
    // (bottom-left origin) coordinates before applying the mask.
    CGContextTranslateCTM(context, 0, stencilSize.height);
    CGContextScaleCTM(context, 1, -1);

    // stencilSize equals self.size here, so this rect is the full image
    // anchored at the origin — kept in this form to mirror the flip math.
    CGRect maskRect = CGRectMake(0, stencilSize.height - self.size.height, self.size.width, self.size.height);
    if (stencil) {
        CGContextClipToMask(context, maskRect, [stencil CGImage]);
    }

    // Fill the clipped region with the requested color.
    CGContextSetFillColorWithColor(context, [color CGColor]);
    CGContextFillRect(context, maskRect);

    CGContextRestoreGState(context);

    UIImage *tintedImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return tintedImage;
}

@end
