/*
 Copyright (c) 2012 Paul Houghton and Futurice on behalf of the Tantalum Project.
 All rights reserved.

 Tantalum software shall be used to make the world a better place for everyone.

 This software is licensed for use under the Apache 2 open source software license,
 http://www.apache.org/licenses/LICENSE-2.0.html

 You are kindly requested to return your improvements to this library to the
 open source community at http://projects.developer.nokia.com/Tantalum

 The above copyright and license notice shall be included in all copies
 or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
package org.tantalum.util;

import javax.microedition.lcdui.Graphics;
import javax.microedition.lcdui.Image;

/**
 * Only one image processing routine will be active at a time. This is enforced
 * by internal synchronization, so image processing is thread safe with minimal
 * peak memory usage if you call these routines from multiple threads.
 * 
 * @author phou
 */
public final class ImageUtils {
	public static final int BASIC_ONE_POINT_PICK = 0;
	public static final int ONE_POINT_PICK = 1;
	public static final int FIVE_POINT_BLEND = 2;
	public static final int WEIGHTED_AVERAGE_OPAQUE = 3;
	public static final int WEIGHTED_AVERAGE_TRANSLUCENT = 4;
	public static final int MAX_SCALING_ALGORITHM = 4;

	private static final int M1 = 0x7F7F7F7F;
	private static final int M2 = 0x3F3F3F3F;
	private static final int M3 = 0x1F1F1F1F;
	private static final int FP_SHIFT = 12;
	private static final int FP_MASK = 0x00000FFF;
	private static final int ALPHA = 0xFF000000;
	private static final int RED = 0x00FF0000;
	private static final int GREEN = 0x0000FF00;
	private static final int BLUE = 0x000000FF;

	/**
	 * Convenience method for scaling an image. All pixel buffer management is
	 * handled for you, at the cost of higher peak memory usage and no buffer
	 * re-use, which makes the garbage collector run more often. If you scale
	 * images in a tight loop, call the int[] buffer based scaling methods
	 * directly and re-use your own buffers for better performance.
	 * 
	 * @param sourceImage
	 *            - the image to scale
	 * @param maxW
	 *            - maximum bounding width of the scaled image
	 * @param maxH
	 *            - maximum bounding height of the scaled image
	 * @param scalingAlgorithm
	 *            - one of the scaling algorithm constants
	 *            [0-MAX_SCALING_ALGORITHM]
	 * @return the scaled image, or the source image itself when it already
	 *         matches the requested size exactly
	 */
	public static Image scaleImage(final Image sourceImage, final int maxW,
			final int maxH, final int scalingAlgorithm) {
		final int srcW = sourceImage.getWidth();
		final int srcH = sourceImage.getHeight();

		if (srcW == maxW && srcH == maxH) {
			return sourceImage;
		}

		final int[] inputImageARGB = new int[srcW * srcH];
		sourceImage.getRGB(inputImageARGB, 0, srcW, 0, 0, srcW, srcH);

		// Re-use the input buffer as the output buffer when the result can
		// not be larger than the source; otherwise allocate a second buffer
		final int[] outputImageARGB;
		if (srcW >= maxW && srcW * srcH >= maxW * maxH) {
			outputImageARGB = inputImageARGB;
		} else {
			outputImageARGB = new int[maxW * maxH];
		}

		return scaleImage(inputImageARGB, outputImageARGB, srcW, srcH, maxW,
				maxH, true, scalingAlgorithm);
	}

	/**
	 * Return an image which is smaller than the original.
	 * 
	 * The destination size can be defined exactly, or to fit within a bounding
	 * box with aspect ratio preserved.
	 * 
	 * To ensure your application does not use too much memory to scale the
	 * image at the same time other parts of the program have a peak memory
	 * usage, use the following calling pattern, modify as appropriate for your
	 * needs.
	 * 
	 * synchronized (Worker.LARGE_MEMORY_MUTEX) {
	 * 
	 * int[] data = new int[w * h];
	 * 
	 * image.getRGB(data, 0, w, 0, 0, w, h);
	 * 
	 * image = null;
	 * 
	 * image = ImageUtils.downscaleImage(data, w, h, maxW, maxH, true, false,
	 * false);
	 * 
	 * data = null;
	 * 
	 * }
	 * 
	 * @param inputImageARGB
	 *            - ARGB data for the original image, may be modified in place
	 *            by some scaling algorithms
	 * @param outputImageARGB
	 *            - ARGB data buffer for the scaled image, can be the same as
	 *            inputImageARGB if downscaling (faster)
	 * @param srcW
	 *            - Source data row width
	 * @param srcH
	 *            - Source data column height
	 * @param maxW
	 *            - maximum bounding width of scaled image
	 * @param maxH
	 *            - maximum bounding height of scaled image
	 * @param preserveAspectRatio
	 *            - set true except for special effects
	 * @param scalingAlgorithm
	 *            - one of the scaling algorithm constants
	 *            [0-MAX_SCALING_ALGORITHM]
	 * @return the scaled image
	 * @throws IllegalArgumentException
	 *             if scalingAlgorithm is out of range
	 */
	public static Image scaleImage(final int[] inputImageARGB,
			final int[] outputImageARGB, int srcW, int srcH, int maxW,
			int maxH, final boolean preserveAspectRatio,
			final int scalingAlgorithm) {
		if (scalingAlgorithm < 0 || scalingAlgorithm > MAX_SCALING_ALGORITHM) {
			throw new IllegalArgumentException("Unsupported scaling algorithm "
					+ scalingAlgorithm + ", should be [0-"
					+ MAX_SCALING_ALGORITHM + "]");
		}

		final float byWidth = maxW / (float) srcW;
		final float byHeight = maxH / (float) srcH;
		boolean widthIsMaxed = false;

		if (preserveAspectRatio) {
			// Shrink both dimensions by the tighter of the two constraints
			if (byWidth <= byHeight) {
				maxW = (int) (srcW * byWidth);
				maxH = (int) (srcH * byWidth);
			} else {
				maxW = (int) (srcW * byHeight);
				maxH = (int) (srcH * byHeight);
			}
		}
		if (maxW >= srcW) {
			maxW = srcW;
			widthIsMaxed = true;
		}
		// Only the opaque weighted average drops the alpha channel
		final boolean processAlpha = scalingAlgorithm != ImageUtils.WEIGHTED_AVERAGE_OPAQUE;
		if (maxH >= srcH) {
			if (widthIsMaxed) {
				// No resize needed
				maxH = srcH;
				return Image.createRGBImage(inputImageARGB, maxW, maxH,
						processAlpha);
			}
			maxH = srcH;
		}
		int[] scaledBuffer = outputImageARGB;
		switch (scalingAlgorithm) {
		case ONE_POINT_PICK:
			// Cheap in-place 2x reductions first to limit aliasing
			while (srcW >> 1 >= maxW && srcH >> 1 >= maxH) {
				ImageUtils.half(inputImageARGB, srcW, srcH >>= 1);
				srcW >>= 1;
			}
			// fall through - finish with a basic point pick
		case BASIC_ONE_POINT_PICK:
			if (srcW == maxW && srcH == maxH) {
				scaledBuffer = inputImageARGB;
			} else {
				ImageUtils.onePointPickDownscale(inputImageARGB, srcW, srcH,
						maxW, maxH);
				// onePointPickDownscale() scales in place, so the result is
				// in the input buffer, not necessarily in outputImageARGB
				scaledBuffer = inputImageARGB;
			}
			break;
		case FIVE_POINT_BLEND:
			// Cheap in-place 2x reductions first to limit aliasing
			while (srcW >> 1 >= maxW && srcH >> 1 >= maxH) {
				ImageUtils.half(inputImageARGB, srcW, srcH >>= 1);
				srcW >>= 1;
			}
			if (srcW == maxW && srcH == maxH) {
				scaledBuffer = inputImageARGB;
			} else {
				ImageUtils.fivePointSampleDownscale(inputImageARGB,
						outputImageARGB, srcW, srcH, maxW, maxH);
			}
			break;
		case WEIGHTED_AVERAGE_TRANSLUCENT:
			ImageUtils.pureDownscale(inputImageARGB, outputImageARGB, srcW,
					srcH, maxW, maxH, preserveAspectRatio);
			break;
		case WEIGHTED_AVERAGE_OPAQUE:
			ImageUtils.pureOpaqueDownscale(inputImageARGB, outputImageARGB,
					srcW, srcH, maxW, maxH, preserveAspectRatio);
			break;
		}

		return Image.createRGBImage(scaledBuffer, maxW, maxH, processAlpha);
	}

	/**
	 * Draw a scaled version of the image at a center anchor point.
	 * 
	 * The input is repeatedly halved in place while that still leaves it
	 * larger than the bounding box, then five point blend sampled down to the
	 * final size before drawing.
	 * 
	 * @param g
	 *            - graphics context to draw to
	 * @param x
	 *            - horizontal center of the drawn image
	 * @param y
	 *            - vertical center of the drawn image
	 * @param inputImageARGB
	 *            - source pixels, modified in place by the halving passes
	 * @param outputImageARGB
	 *            - working buffer for the scaled pixels
	 * @param srcW
	 *            - source image width
	 * @param srcH
	 *            - source image height
	 * @param maxW
	 *            - maximum bounding width
	 * @param maxH
	 *            - maximum bounding height
	 * @param processAlpha
	 *            - true to respect translucency while drawing
	 */
	public static void drawFlipshade(final Graphics g, final int x,
			final int y, final int[] inputImageARGB,
			final int[] outputImageARGB, int srcW, int srcH, int maxW,
			int maxH, final boolean processAlpha) {
		// Cheap in-place 2x reductions first to limit the blend work
		while (srcW >> 1 >= maxW && srcH >> 1 >= maxH) {
			ImageUtils.half(inputImageARGB, srcW, srcH >>= 1);
			srcW >>= 1;
		}
		// Never upscale - clamp the target to the (possibly halved) source
		if (maxW > srcW) {
			maxW = srcW;
		}
		if (maxH > srcH) {
			maxH = srcH;
		}

		final int[] pixels;
		if (srcW == maxW && srcH == maxH) {
			// Already at the target size, draw the input directly
			pixels = inputImageARGB;
		} else {
			ImageUtils.fivePointSampleDownscale(inputImageARGB,
					outputImageARGB, srcW, srcH, maxW, maxH);
			pixels = outputImageARGB;
		}
		g.drawRGB(pixels, 0, maxW, x - (maxW >> 1), y - (maxH >> 1), maxW,
				maxH, processAlpha);
	}

	/**
     * Halve the width and height of an ARGB image, in place.
     *
     * Each 2x2 block of source pixels is averaged into one output pixel with
     * 6 bits of accuracy per channel (each sample is pre-shifted >>> 2 and
     * masked with M2 so the four additions cannot carry between channels).
     * The result occupies the first (srcW / 2) * h entries of imageARGB.
     *
     * @param imageARGB - source pixels, overwritten in place with the result
     * @param srcW - source image row width
     * @param h - height of the RESULT image (i.e. the already-halved height)
     */
    private static void half(final int[] imageARGB, final int srcW, final int h) {
    	final int w = srcW >> 1;
        int z = 0; // next write position; always trails the read position

        for (int y = 0; y < h; y++) {
            // Top-left corner of the current 2x2 source block
            int sourceImagePixelIndex = (y << 1) * srcW;
            for (int x = 0; x < w; x++) {
                // Top-left, then top-right of the 2x2 block
                int e = (imageARGB[sourceImagePixelIndex++] >>> 2) & M2;
                e += (imageARGB[sourceImagePixelIndex--] >>> 2) & M2;
                sourceImagePixelIndex += srcW;
                // Bottom-left, then bottom-right of the 2x2 block
                e += (imageARGB[sourceImagePixelIndex++] >>> 2) & M2;
                imageARGB[z++] = e + ((imageARGB[sourceImagePixelIndex++] >>> 2) & M2);
                // Step back up one row; net advance is 2 columns per pixel
                sourceImagePixelIndex -= srcW;
            }
        }
    }
	/**
	 * Special thanks to Dr Teemu Korhonen for the original, very fast Matlab
	 * algorithm and tests. A weighted "X" is slid across the source image to
	 * generate destination pixels.
	 * 
	 * Each output pixel blends five input pixels: the center point with
	 * weight 1/2 (>>> 1, masked with M1) and the four diagonal neighbours
	 * with weight 1/8 each (>>> 3, masked with M3). The masks clear the bits
	 * shifted in per byte so the additions cannot carry between channels.
	 * The sample grid is inset by one pixel on every edge (the w + 2 / h + 2
	 * divisors and the 1 + srcW row offset) so the "X" never reads outside
	 * the image.
	 * 
	 * @param inputImageARGB - source ARGB pixels
	 * @param outputImageARGB - destination buffer; the result fills the
	 *            first w * h entries
	 * @param srcW - source image width
	 * @param srcH - source image height
	 * @param w - destination image width
	 * @param h - destination image height
	 */
	private static void fivePointSampleDownscale(final int[] inputImageARGB,
			final int[] outputImageARGB, final int srcW, final int srcH,
			final int w, final int h) {
		// Fixed point step sizes between sample centers
		final int dxFP = toFixedPoint(srcW / (float) (w + 2));
		final int dyFP = toFixedPoint(srcH / (float) (h + 2));
		int z = 0;

		for (int y = 0; y < h; y++) {
			final int rowstart = 1 + srcW + (srcW * fixedPointToInt(y * dyFP));
			for (int x = 0; x < w; x++) {
				int i = rowstart + fixedPointToInt(x * dxFP);
				// Center pixel, weight 1/2
				int e = inputImageARGB[i--] >>> 1 & M1;
				i -= srcW;
				// Upper-left then upper-right neighbour, weight 1/8 each
				e += (inputImageARGB[i++] >>> 3 & M3);
				e += (inputImageARGB[++i] >>> 3 & M3);
				i += srcW << 1;
				// Lower-right then lower-left neighbour, weight 1/8 each
				e += inputImageARGB[i--] >>> 3 & M3;
				outputImageARGB[z++] = e + (inputImageARGB[--i] >>> 3 & M3);
			}
		}
	}

	/**
	 * A single point is selected from the source image to generate each
	 * destination pixel (nearest neighbour pick, no blending).
	 * 
	 * The result is written in place into the first w * h entries of the
	 * input array; for a downscale the read position stays ahead of the
	 * write position so no source pixel is clobbered before it is read.
	 * The sample grid is inset by one row and one column (the 1 + srcW
	 * offset), matching the grid used by fivePointSampleDownscale().
	 * 
	 * @param in - source ARGB pixels, overwritten in place with the result
	 * @param srcW - source image width
	 * @param srcH - source image height
	 * @param w - destination image width
	 * @param h - destination image height
	 */
	private static void onePointPickDownscale(final int[] in, final int srcW,
			final int srcH, final int w, final int h) {
		// Fixed point horizontal and vertical step sizes
		final int dxFP = toFixedPoint(srcW / (float) w);
		final int dyFP = toFixedPoint(srcH / (float) h);
		int z = 0; // next write position in the output region

		for (int y = 0; y < h; y++) {
			final int rowstart = 1 + srcW + (srcW * fixedPointToInt(y * dyFP));
			for (int x = 0; x < w; x++) {
				in[z++] = in[rowstart + fixedPointToInt(x * dxFP)];
			}
		}
	}

	/**
	 * Convert an integer to the fixed point representation used internally,
	 * with FP_SHIFT bits of fraction.
	 * 
	 * @param i - plain integer value
	 * @return the same value in fixed point form
	 */
	private static int toFixedPoint(final int i) {
		return i * (1 << FP_SHIFT);
	}

	/**
	 * Convert a float to the fixed point representation used internally,
	 * with FP_SHIFT bits of fraction.
	 * 
	 * @param f - floating point value
	 * @return the nearest-toward-zero value in fixed point form
	 */
	private static int toFixedPoint(final float f) {
		final float scaled = f * (1 << FP_SHIFT);

		return (int) scaled;
	}

	/**
	 * Truncate a fixed point value back to a plain integer, discarding the
	 * FP_SHIFT fraction bits.
	 * 
	 * @param i - fixed point value
	 * @return the integer part of the value
	 */
	private static int fixedPointToInt(final int i) {
		final int wholePart = i >>> FP_SHIFT;

		return wholePart;
	}

	/**
	 * Convert a fixed point value to a float, preserving the FP_SHIFT
	 * fraction bits.
	 * 
	 * @param i - fixed point value
	 * @return the value as a float
	 */
	private static float fixedPointToFloat(final int i) {
		final int wholePart = i >>> FP_SHIFT;
		final float fractionPart = (i & FP_MASK) / (float) (1 << FP_SHIFT);

		return wholePart + fractionPart;
	}

	/**
	 * Additive blending shrinkImage, 8 bit accuracy. For speed, integers are
	 * used with fixed point accuracy instead of floats.
	 * 
	 * Gets a source image along with new size for it and resizes it to fit
	 * within max dimensions. The horizontal pass resamples each row in place
	 * in inputImageARGB; the vertical pass then writes the final pixels into
	 * outputImageARGB.
	 * 
	 * @param inputImageARGB
	 *            - ARGB data for the original image; modified in place by the
	 *            horizontal resampling pass
	 * @param outputImageARGB
	 *            - ARGB image buffer, can be the same as inputImageARGB and
	 *            runs faster that way
	 * @param srcW
	 *            - source image width
	 * @param srcH
	 *            - source image height
	 * @param w
	 *            - final image width
	 * @param h
	 *            - final image height
	 * @param preserveAspectRatio
	 *            - set true when w and h were derived with the same scale
	 *            factor, so the horizontal division lookup table can be
	 *            re-used for the vertical pass
	 */
	private static void pureDownscale(final int[] inputImageARGB,
			final int[] outputImageARGB, final int srcW, final int srcH,
			final int w, final int h, final boolean preserveAspectRatio) {
		// Expected number of source pixels blended per destination pixel
		final int predictedCount = 1 + (srcW / w);
		final int[] lut = new int[predictedCount << 8];

		// Init division lookup table
		for (int i = 0; i < lut.length; i++) {
			lut[i] = i / predictedCount;
		}
		{
			// precalculate src/dest ratios
			final int ratioW = (srcW << FP_SHIFT) / w;

			// horizontal resampling (srcY = destY)
			for (int destY = 0; destY < srcH; ++destY) {
				final int srcRowStartIndex = destY * srcW;
				final int destRowStartIndex = destY * w;

				for (int destX = 0; destX < w; ++destX) {
					int srcX = (destX * ratioW) >> FP_SHIFT; // calculate
																// beginning of
																// sample
					final int initialSrcX = srcX;
					final int srcX2 = ((destX + 1) * ratioW) >> FP_SHIFT; // calculate
																			// end
																			// of
																			// sample
					int a = 0;
					int r = 0;
					int g = 0;
					int b = 0;

					// now loop from srcX to srcX2 and add up the values for
					// each channel
					do {
						final int argb = inputImageARGB[srcX + srcRowStartIndex];
						a += (argb & ALPHA) >>> 24;
						r += argb & RED;
						g += argb & GREEN;
						b += argb & BLUE;
						++srcX; // move on to the next pixel
					} while (srcX <= srcX2
							&& srcX + srcRowStartIndex < inputImageARGB.length);

					// average out the channel values
					// recreate color from the averaged channels and place it
					// into the destination buffer
					r >>>= 16;
					g >>>= 8;
					final int count = srcX - initialSrcX;
					if (count == predictedCount) {
						// Common case: use the division lookup table
						inputImageARGB[destX + destRowStartIndex] = (lut[a] << 24)
								| (lut[r] << 16) | (lut[g] << 8) | lut[b];
					} else {
						a /= count;
						r /= count;
						g /= count;
						b /= count;
						inputImageARGB[destX + destRowStartIndex] = ((a << 24)
								| (r << 16) | (g << 8) | b);
					}
				}
			}
		}

		// precalculate src/dest ratios
		final int predictedCount2;
		final int[] lut2;
		if (preserveAspectRatio) {
			// Same scale factor both ways - re-use the horizontal table
			predictedCount2 = predictedCount;
			lut2 = lut;
		} else {
			predictedCount2 = 1 + (srcH / h);
			lut2 = new int[predictedCount2 << 8];

			// Init division lookup table
			for (int i = 0; i < lut2.length; i++) {
				lut2[i] = i / predictedCount2;
			}
		}
		// vertical resampling (srcX = destX)
		final int ratioH = (srcH << FP_SHIFT) / h;
		for (int destX = 0; destX < w; ++destX) {
			for (int destY = 0; destY < h; ++destY) {
				int srcY = (destY * ratioH) >> FP_SHIFT; // calculate beginning
															// of sample
				final int initialSrcY = srcY;
				final int srcY2 = ((destY + 1) * ratioH) >> FP_SHIFT; // calculate
																		// end
																		// of
																		// sample
				int a = 0;
				int r = 0;
				int g = 0;
				int b = 0;

				// now loop from srcY to srcY2 and add up the values for each
				// channel
				do {
					final int argb = inputImageARGB[destX + srcY * w];
					a += (argb & ALPHA) >>> 24;
					r += argb & RED;
					g += argb & GREEN;
					b += argb & BLUE;
					++srcY; // move on to the next pixel
				} while (srcY <= srcY2
						&& destX + srcY * w < inputImageARGB.length);

				// average out the channel values
				r >>>= 16;
				g >>>= 8;
				final int count = srcY - initialSrcY;
				if (count == predictedCount2) {
					outputImageARGB[destX + destY * w] = (lut2[a] << 24)
							| (lut2[r] << 16) | (lut2[g] << 8) | lut2[b];
				} else {
					a /= count;
					r /= count;
					g /= count;
					b /= count;
					outputImageARGB[destX + destY * w] = (a << 24) | (r << 16)
							| (g << 8) | b;
				}
			}
		}
	}

	/**
	 * Additive blending shrinkImage, 8 bit accuracy. Slightly faster than
	 * pureDownscale() because the alpha channel is not calculated.
	 * 
	 * The horizontal pass resamples each row in place in inputImageRGB; the
	 * vertical pass then writes the final pixels into outputImageRGB.
	 * 
	 * @param inputImageRGB
	 *            - Opaque RGB image; modified in place by the horizontal
	 *            resampling pass
	 * @param outputImageRGB
	 *            - Opaque RGB output image buffer, can be the same as
	 *            inputImageRGB and runs faster that way
	 * @param srcW
	 *            - source image width
	 * @param srcH
	 *            - source image height
	 * @param w
	 *            - final image width
	 * @param h
	 *            - final image height
	 * @param preserveAspectRatio
	 *            - set true when w and h were derived with the same scale
	 *            factor, so the horizontal division lookup table can be
	 *            re-used for the vertical pass
	 */
	private static void pureOpaqueDownscale(final int[] inputImageRGB,
			final int[] outputImageRGB, final int srcW, final int srcH,
			final int w, final int h, final boolean preserveAspectRatio) {
		// Expected number of source pixels blended per destination pixel
		final int predictedCount = 1 + (srcW / w);
		final int[] lut = new int[predictedCount << 8];

		// Init division lookup table
		for (int i = 0; i < lut.length; i++) {
			lut[i] = i / predictedCount;
		}
		{
			// precalculate src/dest ratios
			final int ratioW = (srcW << FP_SHIFT) / w;

			// horizontal resampling (srcY = destY)
			for (int destY = 0; destY < srcH; ++destY) {
				final int srcRowStartIndex = destY * srcW;
				final int destRowStartIndex = destY * w;

				for (int destX = 0; destX < w; ++destX) {
					// calculate beginning and end of the sample run
					int srcX = (destX * ratioW) >> FP_SHIFT;
					final int initialSrcX = srcX;
					final int srcX2 = ((destX + 1) * ratioW) >> FP_SHIFT;
					int r = 0;
					int g = 0;
					int b = 0;

					// now loop from srcX to srcX2 and add up the values for
					// each channel
					do {
						final int rgb = inputImageRGB[srcRowStartIndex + srcX];
						r += rgb & RED;
						g += rgb & GREEN;
						b += rgb & BLUE;
						++srcX; // move on to the next pixel
					} while (srcX <= srcX2
							&& srcRowStartIndex + srcX < inputImageRGB.length);

					// average out the channel values
					// recreate color from the averaged channels and place it
					// into the destination buffer
					r >>>= 16;
					g >>>= 8;
					final int count = srcX - initialSrcX;
					if (count == predictedCount) {
						// Common case: use the division lookup table
						inputImageRGB[destX + destRowStartIndex] = (lut[r] << 16)
								| (lut[g] << 8) | lut[b];
					} else {
						r /= count;
						g /= count;
						b /= count;
						inputImageRGB[destX + destRowStartIndex] = (r << 16)
								| (g << 8) | b;
					}
				}
			}
		}

		// precalculate src/dest ratios
		final int predictedCount2;
		final int[] lut2;
		if (preserveAspectRatio) {
			// Same scale factor both ways - re-use the horizontal table
			predictedCount2 = predictedCount;
			lut2 = lut;
		} else {
			predictedCount2 = 1 + (srcH / h);
			lut2 = new int[predictedCount2 << 8];

			// Init division lookup table
			for (int i = 0; i < lut2.length; i++) {
				lut2[i] = i / predictedCount2;
			}
		}
		// vertical resampling (srcX = destX)
		final int ratioH = (srcH << FP_SHIFT) / h;
		for (int destX = 0; destX < w; ++destX) {
			for (int destY = 0; destY < h; ++destY) {
				// calculate beginning and end of the sample run
				int srcY = (destY * ratioH) >> FP_SHIFT;
				final int initialSrcY = srcY;
				final int srcY2 = ((destY + 1) * ratioH) >> FP_SHIFT;
				int r = 0;
				int g = 0;
				int b = 0;

				// now loop from srcY to srcY2 and add up the values for each
				// channel. The row offset srcY * w must be recomputed every
				// iteration (mirroring pureDownscale()) - caching it before
				// the loop would sample the same source row repeatedly
				do {
					final int rgb = inputImageRGB[destX + srcY * w];
					r += rgb & RED;
					g += rgb & GREEN;
					b += rgb & BLUE;
					++srcY; // move on to the next pixel
				} while (srcY <= srcY2
						&& destX + srcY * w < inputImageRGB.length);

				// average out the channel values
				r >>>= 16;
				g >>>= 8;
				final int count = srcY - initialSrcY;
				if (count == predictedCount2) {
					outputImageRGB[destX + destY * w] = (lut2[r] << 16)
							| (lut2[g] << 8) | lut2[b];
				} else {
					r /= count;
					g /= count;
					b /= count;
					outputImageRGB[destX + destY * w] = (r << 16) | (g << 8) | b;
				}
			}
		}
	}
}
