// OCRScannerNeuralNet.java
// Copyright (c) 2003-2010 Ronald B. Cemer
// Modified by William Whitney
// All rights reserved.
// This software is released under the BSD license.
// Please see the accompanying LICENSE.txt for details.
package net.sourceforge.javaocr.ocrPlugins.neuralNetOCR;

import java.awt.Image;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.swing.DefaultListModel;

import net.sourceforge.javaocr.scanner.DocumentScanner;
import net.sourceforge.javaocr.scanner.DocumentScannerListenerAdaptor;
import net.sourceforge.javaocr.scanner.PixelImage;
import net.sourceforge.javaocr.scanner.accuracy.AccuracyListener;
import net.sourceforge.javaocr.scanner.accuracy.AccuracyProvider;
import net.sourceforge.javaocr.scanner.accuracy.OCRComp;
import net.sourceforge.javaocr.scanner.accuracy.OCRIdentification;

import org.encog.ml.data.MLData;
import org.encog.ml.data.basic.BasicMLData;
import org.encog.neural.som.SOM;

/**
 * OCR document scanner.
 * @author Ronald B. Cemer
 */
/**
 * OCR document scanner that recognizes characters by down-sampling each
 * segmented glyph and classifying it with a trained SOM (self-organizing map)
 * neural network (Encog).
 *
 * <p>Usage: set the trained network via {@link #setNet} and the training
 * samples via {@link #setLetterListModel}, then call {@link #scan}. Not
 * thread-safe: a scan mutates shared per-scan state ({@code decodeBuffer},
 * {@code entry}, row flags).</p>
 *
 * @author Ronald B. Cemer
 */
public class OCRScannerNeuralNet extends DocumentScannerListenerAdaptor implements AccuracyProvider
{

    private static final Logger LOG = Logger.getLogger(OCRScannerNeuralNet.class.getName());

    // NOTE(review): unused in this class; retained for source compatibility.
    private static final int BEST_MATCH_STORE_COUNT = 8;

    // StringBuilder instead of StringBuffer: this class is single-threaded per
    // scan, so StringBuffer's synchronization was pure overhead.
    private final StringBuilder decodeBuffer = new StringBuilder();
    // Optional whitelist of decodable characters; stored by scan() but not
    // currently consulted by the neural classification path.
    private CharacterRange[] acceptableChars;
    private boolean beginningOfRow = false; // set in beginRow(); never read here
    private boolean firstRow = false;
    private final String newline = System.lineSeparator();
    private final HashMap<Character, ArrayList<TrainingImageNeural>> trainingImages =
            new HashMap<Character, ArrayList<TrainingImageNeural>>();
    private final DocumentScanner documentScanner = new DocumentScanner();
    private AccuracyListener accListener;

    private SOM net;                          // trained classifier; must be set before scan()
    private DefaultListModel letterListModel; // training samples used to label neurons
    private Entry entry;                      // per-scan down-sampler for the source image
    // Cached neuron->letter mapping. The original code rebuilt this map for
    // EVERY glyph, re-classifying the whole training set per character; it is
    // now computed lazily and invalidated whenever its inputs change.
    private char[] neuronMap;

    /**
     * Registers the listener that receives per-character accuracy reports.
     *
     * @param listener the accuracy listener, or {@code null} to disable reporting
     */
    public void acceptAccuracyListener(AccuracyListener listener)
    {
        accListener = listener;
    }

    /**
     * @return The <code>DocumentScanner</code> instance that is used to scan the document(s).
     * This is useful if the caller wants to adjust some of the scanner's parameters.
     */
    public DocumentScanner getDocumentScanner()
    {
        return documentScanner;
    }

    /**
     * Remove all training images from the training set.
     */
    public void clearTrainingImages()
    {
        trainingImages.clear();
    }

    /**
     * Add training images to the training set.
     * @param images A <code>HashMap</code> using <code>Character</code>s for
     * the keys.  Each value is an <code>ArrayList</code> of
     * <code>TrainingImages</code> for the specified character.  The training
     * images are added to any that may already have been loaded.
     */
    public void addTrainingImages(HashMap<Character, ArrayList<TrainingImageNeural>> images)
    {
        for (java.util.Map.Entry<Character, ArrayList<TrainingImageNeural>> e : images.entrySet())
        {
            ArrayList<TrainingImageNeural> existing = trainingImages.get(e.getKey());
            if (existing == null)
            {
                existing = new ArrayList<TrainingImageNeural>();
                trainingImages.put(e.getKey(), existing);
            }
            existing.addAll(e.getValue());
        }
    }

    /**
     * Scan an image and return the decoded text.
     * @param image The <code>Image</code> to be scanned.
     * @param x1 The leftmost pixel position of the area to be scanned, or
     * <code>0</code> to start scanning at the left boundary of the image.
     * @param y1 The topmost pixel position of the area to be scanned, or
     * <code>0</code> to start scanning at the top boundary of the image.
     * @param x2 The rightmost pixel position of the area to be scanned, or
     * <code>0</code> to stop scanning at the right boundary of the image.
     * @param y2 The bottommost pixel position of the area to be scanned, or
     * <code>0</code> to stop scanning at the bottom boundary of the image.
     * @param acceptableChars An array of <code>CharacterRange</code> objects
     * representing the ranges of characters which are allowed to be decoded,
     * or <code>null</code> to not limit which characters can be decoded.
     * @return The decoded text.
     */
    public String scan(
            Image image,
            int x1,
            int y1,
            int x2,
            int y2,
            CharacterRange[] acceptableChars)
    {
        // Pipeline: (1) the scanner locates each glyph cell, (2) processChar
        // down-samples its pixels, (3) the sample is fed to the SOM.
        this.acceptableChars = acceptableChars;
        PixelImage pixelImage = new PixelImage(image);
        pixelImage.toGrayScale(true);
        pixelImage.filter();
        decodeBuffer.setLength(0);
        firstRow = true;
        neuronMap = null; // force a fresh neuron->letter map for this scan
        this.entry = new Entry(image);
        documentScanner.scan(pixelImage, this, x1, y1, x2, y2);
        String result = decodeBuffer.toString();
        decodeBuffer.setLength(0);
        return result;
    }

    /**
     * Called by the scanner at the end of each text row; reports a newline
     * (with zero accuracy) to the accuracy listener, if one is registered.
     */
    @Override
    public void endRow(PixelImage pixelImage, int y1, int y2)
    {
        if (accListener != null)
        {
            OCRIdentification identAccuracy = new OCRIdentification(OCRComp.NEURAL);
            identAccuracy.addChar('\n', 0.0);
            accListener.processCharOrSpace(identAccuracy);
        }
    }

    /**
     * Called by the scanner at the start of each text row; appends a line
     * separator to the output for every row after the first.
     */
    @Override
    public void beginRow(PixelImage pixelImage, int y1, int y2)
    {
        beginningOfRow = true;
        if (firstRow)
        {
            firstRow = false;
        }
        else
        {
            decodeBuffer.append(newline);
        }
    }

    /**
     * Called by the scanner for each segmented glyph: down-samples the glyph's
     * bounding box, classifies it with the SOM, and appends the mapped letter
     * to the decode buffer.
     */
    @Override
    public void processChar(PixelImage pixelImage, int x1, int y1, int x2, int y2, int rowY1, int rowY2)
    {
        int areaW = x2 - x1;
        int areaH = y2 - y1;

        // Down-sample the glyph's bounding box onto the fixed SOM input grid.
        entry.downSample(x1, y1, areaW, areaH);
        SampleData data = entry.getData();

        // Encode each down-sampled pixel as +0.5 (set) / -0.5 (clear).
        final MLData input = new BasicMLData(OCR.DOWNSAMPLE_WIDTH * OCR.DOWNSAMPLE_HEIGHT);
        int idx = 0;
        for (int y = 0; y < OCR.DOWNSAMPLE_HEIGHT; y++)
        {
            for (int x = 0; x < OCR.DOWNSAMPLE_WIDTH; x++)
            {
                input.setData(idx++, data.getData(x, y) ? .5 : -.5);
            }
        }

        // Debug dump of the down-sampled glyph; routed through the class
        // logger (originally System.out.println) so normal runs stay quiet.
        if (LOG.isLoggable(Level.FINE))
        {
            StringBuilder dump = new StringBuilder("Down-sampled glyph:").append(newline);
            for (int y = 0; y < data.getHeight(); y++)
            {
                for (int x = 0; x < data.getWidth(); x++)
                {
                    dump.append(data.getData(x, y) ? '*' : ' ');
                }
                dump.append(newline);
            }
            LOG.fine(dump.toString());
        }

        if (neuronMap == null)
        {
            // Build the mapping once per scan instead of once per glyph.
            neuronMap = mapNeurons();
        }
        final int best = this.net.classify(input);
        LOG.fine("Recognized letter: " + neuronMap[best]);
        decodeBuffer.append(neuronMap[best]);
    }

    /**
     * Used to map neurons to actual letters: classifies every training sample
     * and records which letter wins each output neuron.
     *
     * <p>Neurons that never win a sample map to {@code '?'}. If two samples
     * classify to the same neuron, the later sample overwrites the earlier
     * mapping.</p>
     *
     * @return The current mapping between neurons and letters as an array
     *         indexed by neuron number.
     */
    char[] mapNeurons()
    {
        final char[] map = new char[this.letterListModel.size()];
        Arrays.fill(map, '?');

        for (int i = 0; i < this.letterListModel.size(); i++)
        {
            final SampleData ds = (SampleData) this.letterListModel.getElementAt(i);

            // Encode the sample exactly as processChar encodes a glyph.
            final MLData input = new BasicMLData(ds.getHeight() * ds.getWidth());
            int idx = 0;
            for (int y = 0; y < ds.getHeight(); y++)
            {
                for (int x = 0; x < ds.getWidth(); x++)
                {
                    input.setData(idx++, ds.getData(x, y) ? .5 : -.5);
                }
            }

            map[this.net.classify(input)] = ds.getLetter();
        }
        return map;
    }

    /**
     * Called by the scanner for each inter-character gap wide enough to be a
     * word break; appends a single space to the output.
     */
    @Override
    public void processSpace(PixelImage pixelImage, int x1, int y1, int x2, int y2)
    {
        decodeBuffer.append(' ');
    }

    /** Sets the trained SOM used for classification. */
    public void setNet(SOM net)
    {
        this.net = net;
        neuronMap = null; // mapping depends on the network
    }

    /** Sets the training samples used to label the network's neurons. */
    public void setLetterListModel(DefaultListModel letterListModel)
    {
        this.letterListModel = letterListModel;
        neuronMap = null; // mapping depends on the training samples
    }

    /** @return the training-sample list model, or {@code null} if unset. */
    public DefaultListModel getLetterListModel()
    {
        return letterListModel;
    }

}
