package
{
	import com.quasimondo.geom.ColorMatrix;
	
	import flash.display.BitmapData;
	import flash.display.Graphics;
	import flash.display.Sprite;
	import flash.events.Event;
	import flash.events.EventDispatcher;
	import flash.events.IEventDispatcher;
	import flash.geom.Matrix;
	import flash.geom.Point;
	import flash.geom.Rectangle;
	import flash.net.URLLoader;
	import flash.net.URLLoaderDataFormat;
	import flash.net.URLRequest;
	import flash.utils.ByteArray;
	
	import model.vo.PhotoVO;
	
	import nochump.util.zip.ZipEntry;
	import nochump.util.zip.ZipFile;
	
	import org.osflash.signals.Signal;
	
	import ru.inspirit.image.CannyEdgeDetector;
	import ru.inspirit.image.feature.HaarCascadesDetector;
	
	import spark.primitives.Graphic;
	
	
	/**
	 * @author Ben.Garraud
	 */
	/**
	 * Singleton Haar-cascade face detector.
	 *
	 * Loads four cascade XML definitions from a single zip archive, then
	 * detects faces (and optionally eyes/mouth) on PhotoVO bitmaps via
	 * the in-spirit HaarCascadesDetector, using a Canny edge map to prune
	 * the search. Dispatches `ready` once all cascades are initialised.
	 *
	 * NOTE(review): class name keeps the original "FaceDectector" spelling
	 * because external callers reference it; do not rename without a sweep.
	 */
	public class FaceDectector extends EventDispatcher
	{
		// Cascade definition files expected inside the zip archive below.
		public const XML_FACE_URL:String = 'haarcascade_frontalface_default.xml';
		public const XML_MOUTH_URL:String = 'haarcascade_mcs_mouth.xml';
		public const XML_L_EYE_URL:String = 'haarcascade_mcs_lefteye.xml';
		public const XML_R_EYE_URL:String = 'haarcascade_mcs_righteye.xml';
		
		public const ZIP_XML_URL:String = 'cascades.zip'; // zip file including all XML files mentioned above
		
		// One detector per cascade; all share `detectionMap` as their input image.
		private var detectorFace:HaarCascadesDetector;
		private var detectorMouth:HaarCascadesDetector;
		private var detectorLEye:HaarCascadesDetector;
		private var detectorREye:HaarCascadesDetector;
		
		private var canny:CannyEdgeDetector; // ru.inspirit.image.CannyEdgeDetector
		
		// Downscaled grayscale working bitmap the detectors scan.
		public var detectionMap:BitmapData;
		// Edge map fed to the face detector to skip low-detail regions.
		private var edgesMap:BitmapData;
		private var drawMatrix:Matrix;
		public static const scaleFactor:Number = 3;
		private var w:int = 550;
		private var h:int = 550;
		public static const picScale:Number = 2;
		
		private var cm:ColorMatrix = new ColorMatrix(); // com.quasimondo.geom.ColorMatrix
		
		// Haar detection tuning parameters (see HaarCascadesDetector.detect).
		private var baseScale:Number = 1.5;
		private var scaleIncrement:Number = 1.25;
		private var stepIncrement:Number = 0.05;
		private var edgeDensity:Number = 0.09;
		
		// Queue of PhotoVOs to process via reset()/hasNext()/next().
		private var processList:Array = [];
		private var index:int = 0;
		
		/** Dispatched once all cascade detectors are initialised and detection may begin. */
		public var ready:Signal = new Signal();
		
		private static const _instance:FaceDectector = new FaceDectector();
		public static function get instance():FaceDectector { return _instance; }
		
		
		/**
		 * Do not construct directly; use FaceDectector.instance.
		 * The guard passes during static initialisation of `_instance`
		 * (it is still null at that point) and throws for any later `new`.
		 */
		public function FaceDectector()
		{
			super();
			if(_instance) throw new Error("FaceDectector is singleton, use .instance instead.");
		}
		
		
		/**
		 * Initialises the working bitmaps and starts loading the cascade
		 * zip archive. `ready` is dispatched when loading completes.
		 */
		public function load():void
		{
			init();
			var myLoader:URLLoader = new URLLoader();
			myLoader.dataFormat = URLLoaderDataFormat.BINARY;
			myLoader.addEventListener(Event.COMPLETE, onUnZipComplete);
			myLoader.load(new URLRequest(ZIP_XML_URL)); // load zip archive file
		}
		
		
		/**
		 * Allocates the (downscaled) detection and edge bitmaps.
		 * Both are locked permanently: they are never attached to the
		 * display list, so change notifications are pure overhead.
		 */
		public function init():void
		{
			drawMatrix = new Matrix( 1 / picScale, 0, 0, 1 / picScale );
			
			detectionMap = new BitmapData( w / scaleFactor, h / scaleFactor, false, 0 );
			edgesMap = detectionMap.clone();
			detectionMap.lock();
			edgesMap.lock();
		}
		
		/** Appends a photo to the processing queue (consumed via next()). */
		public function add(photo:PhotoVO):void
		{
			processList.push(photo);
		}
		
		/**
		 * Runs face detection on the photo's bitmap and annotates the VO in
		 * place: sets `hasFace` and stores the merged face rectangles
		 * (mapped back to source-image coordinates) in `faces`.
		 *
		 * @param photoVO value object whose `photo.bitmapData` is scanned;
		 *                `x`/`y` are treated as a percentage tag position used
		 *                to centre the tagged region in the detection map.
		 * @return the same (mutated) photoVO, for chaining.
		 *
		 * NOTE(review): the offset math below centres the tag point within the
		 * detection map — derived from the original code, not independently
		 * verified; confirm against the detection-map dimensions if rects
		 * appear displaced.
		 */
		public function detect(photoVO:PhotoVO):PhotoVO
		{
			var source:BitmapData = photoVO.photo.bitmapData;
			
			// need to add tag rect as matrix shrink search area.
			drawMatrix = new Matrix( 1 / picScale, 0, 0, 1 / picScale );
			// center image within detection map
			var tagPos:Point = new Point((photoVO.width/picScale)/ 100 * photoVO.x, (photoVO.height/picScale) / 100 * photoVO.y);
			
			var offset:Point = new Point(w/picScale / (2*picScale) - tagPos.x,h/picScale / (2*picScale) - tagPos.y);
			
			drawMatrix.translate(offset.x,offset.y);
			
			detectionMap.fillRect(detectionMap.rect,0);
			detectionMap.draw(source, drawMatrix, null, "normal", null, true);
			
			// Grayscale conversion: Haar features operate on luminance.
			cm.reset();
			cm.desaturate();
			cm.applyFilter(detectionMap);
			
			canny.detectEdges(edgesMap);
			
			// first detect all available faces
			var faceRects:Vector.<Rectangle> = detectorFace.detect(null, baseScale, scaleIncrement, stepIncrement, edgesMap);
			
			if(faceRects.length > 1)
			{
				faceRects = detectorFace.merge(faceRects);
			}
			
			// FIX: original tested `length > 1`, so a single detected face
			// was reported as no face at all.
			photoVO.hasFace = faceRects.length > 0;
			
			// Map rects from detection-map space back to source-image space:
			// undo the centring offset, then undo the 1/picScale draw scale.
			var len:int = faceRects.length;
			for (var i:int = 0; i < len; i++)
			{
				var rect:Rectangle = faceRects[i];
				rect.x -= offset.x;
				rect.y -= offset.y;
				
				rect.x *= picScale;
				rect.y *= picScale;
				rect.width *= picScale;
				rect.height *= picScale;
			}
			
			photoVO.faces = faceRects;
			
			// Eye/mouth refinement is currently disabled; re-enable by
			// iterating faceRects through detectEyesAndMouth().
			/*for(var j:int = 0; j < faceRects.length; ++j)
			{
				detectEyesAndMouth(faceRects[j]);
			}*/
			
			return photoVO;
		}
		
		
		/** Rewinds the processing queue cursor to the first photo. */
		public function reset():void
		{
			index = 0;
		}
		
		/**
		 * @return true while unprocessed photos remain in the queue.
		 * Kept under its original (misspelled) name for backward
		 * compatibility; prefer hasNext().
		 */
		public function hasNaxt():Boolean
		{
			return index < processList.length;
		}
		
		/** Correctly-spelled alias for hasNaxt(). */
		public function hasNext():Boolean
		{
			return hasNaxt();
		}
		
		/**
		 * Detects faces on the next queued photo and advances the cursor.
		 * FIX: the original never incremented `index`, so the
		 * hasNaxt()/next() loop reprocessed the first photo forever.
		 */
		public function next():PhotoVO
		{
			return detect(PhotoVO(processList[index++]));
		}
		
		
		/**
		 * Refines a detected face rectangle by searching predicted sub-regions
		 * for the left eye, right eye and mouth. Results are currently only
		 * merged, not stored or drawn (drawing code is commented out).
		 *
		 * @param r face rectangle in detection-map coordinates.
		 */
		private function detectEyesAndMouth(r:Rectangle):void
		{
			var found:Vector.<Rectangle>;
			
			var eyes_r:Rectangle = r.clone();
			var mouth_r:Rectangle = eyes_r.clone();
			
			// LEFT EYE
			// try to predict eye rectangle area
			eyes_r.height *= 0.375;
			eyes_r.width *= 0.5;
			eyes_r.y += eyes_r.height * 0.5;
			
			found = detectorLEye.detect(eyes_r, 1, 1.1, 0.05, null, detectorFace);
			
			if(found.length > 1) found = detectorLEye.merge(found);
			//drawCircles(found, scaleFactor, false);
			
			// RIGHT EYE
			eyes_r.x += eyes_r.width;
			
			found = detectorREye.detect(eyes_r, 1, 1.2, 0.05, null, detectorFace);
			
			// FIX: original merged right-eye results with detectorLEye
			// (copy-paste error).
			if(found.length > 1) found = detectorREye.merge(found);
			//drawCircles(found, scaleFactor, false);
			
			// MOUTH
			// try to predict mouth rectangle area
			mouth_r.y = mouth_r.bottom - mouth_r.height * 0.4;
			mouth_r.x += mouth_r.width * 0.125;
			mouth_r.width *= 0.75;
			mouth_r.height *= 0.375;
			
			found = detectorMouth.detect(mouth_r, 1, 1.1, 0.05, null, detectorFace);
			
			if(found.length > 1) found = detectorMouth.merge(found);
			//drawRects(found, scaleFactor, false);
		}
		
		/*private function drawCircles(faceRects:Vector.<Rectangle>, scale:Number = 1, clear:Boolean = true):void
		{
			var g:Graphics = faceRectContainer.graphics;
			if(clear) g.clear();
			g.lineStyle(2, 0x00ff00);
			for(var i:int = 0; i < faceRects.length; ++i)
			{
				var size:int = (faceRects[i].width * scale);
				g.drawCircle(faceRects[i].x * scale + (size>>1), faceRects[i].y * scale + (size>>1), size>>1);
			}
		}
		private function drawRects(faceRects:Vector.<Rectangle>, scale:Number = 1, clear:Boolean = true):void
		{
			var g:Graphics = faceRectContainer.graphics;
			if(clear) g.clear();
			g.lineStyle(2, 0x00ff00);
			for(var i:int = 0; i < faceRects.length; ++i)
			{
				g.drawRect(faceRects[i].x * scale, faceRects[i].y * scale, faceRects[i].width * scale, faceRects[i].height * scale);
			}
		}*/
		
		/**
		 * Extracts one cascade XML from the zip and builds a detector
		 * bound to the shared detection bitmap.
		 */
		private function createDetector(zipFile:ZipFile, entryName:String):HaarCascadesDetector
		{
			var entry:ZipEntry = zipFile.getEntry(entryName);
			var data:ByteArray = zipFile.getInput(entry);
			var detector:HaarCascadesDetector = new HaarCascadesDetector(XML(data.toString()), false);
			detector.image = detectionMap;
			return detector;
		}
		
		/**
		 * Zip archive loaded: build all four cascade detectors, the Canny
		 * edge detector, and announce readiness via `ready`.
		 */
		private function onUnZipComplete(e:Event):void
		{
			// init ZipFile
			var zipFile:ZipFile = new ZipFile(URLLoader(e.currentTarget).data as ByteArray); // nochump.util.zip.ZipFile
			
			// extract cascade XMLs and init each detector instance
			detectorFace = createDetector(zipFile, XML_FACE_URL);
			detectorMouth = createDetector(zipFile, XML_MOUTH_URL);
			detectorLEye = createDetector(zipFile, XML_L_EYE_URL);
			detectorREye = createDetector(zipFile, XML_R_EYE_URL);
			
			canny = new CannyEdgeDetector(detectionMap);
			
			// Scale fractional density into 0-255 pixel-value space.
			detectorFace.edgesDensity = 255 * edgeDensity;
			
			ready.dispatch();
		}
		
	}
}

