package etxt2db.corpus;
import com.aliasi.chunk.CharLmHmmChunker;
import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.ChunkFactory;
import com.aliasi.chunk.Chunking;
import com.aliasi.chunk.ChunkingImpl;

import com.aliasi.corpus.ChunkHandler;
import com.aliasi.hmm.HmmCharLmEstimator;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.TokenizerFactory;
import com.aliasi.util.Files;
import com.aliasi.util.Streams;

import edu.cmu.minorthird.util.ProgressCounter;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.xml.sax.InputSource;


/**
 * Trains a LingPipe chunker from text files containing inline SGML-style tags
 * (e.g. {@code <nn>word</nn>}). Tags are stripped from the text, converted to
 * {@link Chunk}s over the clean character offsets, and handed to a
 * {@link ChunkHandler} (typically a trainable chunker such as
 * {@code CharLmHmmChunker}).
 *
 * <p>Embedded (nested) tags are not supported. If an attribute list is given,
 * only tags on that list produce chunks; all tags are still stripped from the
 * text either way.
 */
public class LingpipeTextTrainer {

    // style/location for IDs, groupID, Category of doc
    // Kept to support the old TextBaseLoader api
    public static final int NONE = 0; // could be given as a param at some point

    public static final int DIRECTORY_NAME = 1;

    public static final int FILE_NAME = 2;

    public static final int IN_FILE = 3;

    // document style
    public static final int DOC_PER_LINE = 0;

    public static final int DOC_PER_FILE = 1;

    // recursion -- if loading from a directory should subdirectories be loaded
    // too?
    private boolean recurseDirectories = false;

    /** Sink that receives each parsed {@link Chunking}. */
    private ChunkHandler mHandler;

    /** Tag names to keep as chunks; {@code null} means every tag is kept. */
    List<String> attributes;

    /**
     * Matches an opening or closing SGML-style tag such as {@code <nn>} or
     * {@code </nn x=1>}; group(1) is the tag name. Compiled once rather than
     * on every parseString call.
     */
    private static final Pattern MARKUP_PATTERN =
            Pattern.compile("</?([^ ><]+)( [^<>]+)?>");

    /**
     * Creates a trainer with no handler set yet; call
     * {@link #setHandler(ChunkHandler)} before parsing.
     *
     * @param attributes tag names to keep as chunks, or {@code null} for all.
     */
    public LingpipeTextTrainer(List<String> attributes) throws IOException {
        this.attributes = attributes;
    }

    /**
     * Creates a trainer with the given handler.
     *
     * @param handler    receiver for each parsed chunking.
     * @param attributes tag names to keep as chunks, or {@code null} for all.
     */
    public LingpipeTextTrainer(ChunkHandler handler, List<String> attributes)
        throws IOException {
        mHandler = handler;
        this.attributes = attributes;
    }

    /**
     * Returns the chunk handler for this parser.
     *
     * @return The chunk handler.
     */
    public ChunkHandler getChunkHandler() {
        return getHandler();
    }

    /**
     * Parses a character range containing inline-tagged text: strips the tags,
     * records a {@link Chunk} (with offsets into the tag-free text) for every
     * matched open/close pair whose name is accepted by {@link #attributes},
     * and passes the resulting {@link Chunking} to the handler.
     *
     * @param cs         source characters.
     * @param start      first index to read (inclusive).
     * @param end        last index to read (exclusive).
     * @param documentId identifier used in error messages.
     * @throws ParseException on a nested tag, a close with no open, or a
     *                        close that does not match the open tag.
     */
    public void parseString(char[] cs, int start, int end, String documentId) throws ParseException {
        String s = new String(cs, start, end - start);
        // StringBuilder instead of repeated String concatenation: the original
        // was accidentally O(n^2) in the document length.
        StringBuilder cleanText = new StringBuilder();

        Matcher matcher = MARKUP_PATTERN.matcher(s);

        int currentChar = 0;        // position in s already copied to cleanText
        int lastOpenTag = 0;        // clean-text offset where the open tag's content starts
        String currentTag = null;   // currently open tag, or null
        int currentOffset = 0;      // total length of markup stripped so far
        List<Chunk> listChunks = new ArrayList<Chunk>();
        while (matcher.find()) {
            String tag = matcher.group(1);
            boolean isOpenTag = !matcher.group().startsWith("</");
            // copy stuff up to tag into buffer
            cleanText.append(s, currentChar, matcher.start());
            if (attributes == null || attributes.contains(tag)) {
                if (isOpenTag) {
                    if (currentTag != null) {
                        throw new ParseException("This loader does not support embedded tags: " +
                                "opening tag '" + tag + "' tag with tag '" + currentTag +
                                "' opened in document " + documentId, 0);
                    }
                    currentTag = tag;
                    // equals matcher.start()-currentOffset: where the content
                    // begins once all preceding markup is removed
                    lastOpenTag = matcher.end() - currentOffset - matcher.group(0).length();
                } else {
                    if (currentTag == null)
                        throw new ParseException(
                                "close '" + tag + "' tag with no open in document " + documentId, 0);
                    if (!tag.equals(currentTag))
                        // Fixed message: the original spliced documentId into
                        // the middle of the quoted open-tag name.
                        throw new ParseException("close '" + tag + "' tag paired with open '" +
                                currentTag + "' in document " + documentId, 0);
                    currentTag = null;
                    Chunk chunk = ChunkFactory.createChunk(lastOpenTag,
                            matcher.start() - currentOffset, tag);
                    listChunks.add(chunk);
                }
            }
            currentChar = matcher.end();
            currentOffset += matcher.group(0).length();
        }
        cleanText.append(s.substring(currentChar));
        ChunkingImpl chunking = new ChunkingImpl(cleanText.toString());
        for (Chunk chunk : listChunks) {
            chunking.add(chunk);
        }
        getChunkHandler().handle(chunking);
    }

    /**
     * Reads the whole input source into memory and parses it as one document.
     */
    public void parse(InputSource in, String documentId) throws IOException, ParseException {
        char[] cs = Streams.toCharArray(in);
        parseString(cs, 0, cs.length, documentId);
    }

    /**
     * Parses a file, or — if it is a directory — every file beneath it.
     */
    public void parse(File file) throws IOException, ParseException {
        if (file.isDirectory())
            parseDirectory(file);
        else
            parse(Files.fileToURLName(file));
    }

    /**
     * Parses every regular file in a directory (sorted by name), recursing
     * into subdirectories when {@link #recurseDirectories} is set. CVS
     * bookkeeping directories are skipped.
     *
     * @throws IllegalArgumentException if the directory cannot be listed.
     */
    private void parseDirectory(File directory) throws IOException, ParseException {
        File[] files = directory.listFiles();
        // Null check must precede the sort: listFiles() returns null for an
        // unreadable/non-directory path, and the original sorted first (NPE).
        if (files == null)
            throw new IllegalArgumentException("can't list directory " +
                    directory.getName());
        Arrays.sort(files);

        ProgressCounter pc =
                new ProgressCounter("loading directory " + directory.getName(), "file",
                        files.length);
        for (int i = 0; i < files.length; i++) {
            // skip CVS directories
            if ("CVS".equals(files[i].getName()))
                continue;

            if (files[i].isDirectory() && this.recurseDirectories)
                parseDirectory(files[i]);

            if (files[i].isFile())
                parse(Files.fileToURLName(files[i]));
            pc.progress();
        }
        pc.finished();
    }

    /**
     * Parses the document identified by a system id (URL or file URL).
     */
    public void parse(String sysId) throws IOException, ParseException {
        InputSource in = new InputSource(sysId);
        parse(in, sysId);
    }

    /** Sets the chunk handler that receives parsed chunkings. */
    public void setHandler(ChunkHandler handler) {
        mHandler = handler;
    }

    /** Returns the current chunk handler (may be {@code null}). */
    public ChunkHandler getHandler() {
        return mHandler;
    }

    static final int MAX_N_GRAM = 8;
    static final int NUM_CHARS = 256;
    static final double LM_INTERPOLATION = MAX_N_GRAM; // default behavior

    /**
     * Demo entry point: trains an HMM chunker on the converted Brown corpus
     * and prints the chunks found in one untagged test document.
     */
    public static void main(String[] args) throws IOException, ParseException {
        File corpusFile = new File("./resources/brownConverted");
        File testingFile = new File("./resources/brownUntagged/", "ca11");
        List<String> attributes = new ArrayList<String>();
        attributes.add("nn");

        System.out.println("Setting up Chunker Estimator");
        TokenizerFactory factory = IndoEuropeanTokenizerFactory.FACTORY;
        HmmCharLmEstimator hmmEstimator
          = new HmmCharLmEstimator(MAX_N_GRAM, NUM_CHARS, LM_INTERPOLATION);
        CharLmHmmChunker chunkerEstimator
          = new CharLmHmmChunker(factory, hmmEstimator);

        // Pass the attribute list we just built: the original passed null,
        // which accepted every tag and silently ignored 'attributes'.
        LingpipeTextTrainer parser = new LingpipeTextTrainer(attributes);
        parser.setHandler(chunkerEstimator);

        parser.parse(corpusFile);

        // Read the whole test file. The original loop OVERWROTE the string on
        // each 1024-byte read (keeping only the final block) and never closed
        // the streams; accumulate instead and use try-with-resources.
        // NOTE(review): platform default charset kept from the original —
        // confirm whether the corpus files are UTF-8.
        StringBuilder fileContents = new StringBuilder();
        try (BufferedInputStream bin =
                new BufferedInputStream(new FileInputStream(testingFile))) {
            byte[] contents = new byte[1024];
            int bytesRead;
            while ((bytesRead = bin.read(contents)) != -1) {
                fileContents.append(new String(contents, 0, bytesRead));
            }
        }
        String strFileContents = fileContents.toString();

        Chunking chunking = chunkerEstimator.chunk(strFileContents);
        for (Chunk chunk : chunking.chunkSet()) {
            System.out.println(strFileContents.substring(chunk.start(), chunk.end())
                    + " is a " + chunk.type());
        }
    }
}

