package it.polimi.dei.SeCo.LingPipe;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunker;
import com.aliasi.chunk.Chunking;
import com.aliasi.dict.ExactDictionaryChunker;
import com.aliasi.dict.MapDictionary;
import com.aliasi.hmm.HiddenMarkovModel;
import com.aliasi.hmm.HmmDecoder;
import com.aliasi.tag.Tagging;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.RegExTokenizerFactory;
import com.aliasi.tokenizer.TokenChunker;
import com.aliasi.tokenizer.TokenizerFactory;
import com.aliasi.util.AbstractExternalizable;
import com.aliasi.sentences.SentenceChunker;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.sentences.MedlineSentenceModel;

/**
 * @author Simone Benefico
 */
/**
 * Static NLP helpers built on LingPipe: sentence detection, statistical and
 * dictionary-based named-entity recognition (NER), phrase chunking,
 * tokenization and part-of-speech tagging.
 *
 * @author Simone Benefico
 */
public class LingPipeTool {
	
	/** Tokenizer shared by every chunker created in this class. */
	static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
	/** Sentence-boundary model (MEDLINE heuristics shipped with LingPipe). */
	static final SentenceModel SENTENCE_MODEL = new MedlineSentenceModel();
	/** Chunker used by {@link #detectSentences(String)}. */
	static final SentenceChunker SENTENCE_CHUNKER = new SentenceChunker(TOKENIZER_FACTORY, SENTENCE_MODEL);
	
	/**
	 * Splits a text in sentences.
	 * @param text text to split
	 * @return list of sentence Chunks (possibly empty)
	 */
	public static List<Chunk> detectSentences(String text) {
		Chunking chunking = SENTENCE_CHUNKER.chunk(text.toCharArray(), 0, text.length());
		return new ArrayList<Chunk>(chunking.chunkSet());
	}
	
	/**
	 * Finds all the NEs in a sentence using a statistical NER.
	 * <p>NOTE(review): the serialized chunker is re-read from disk on every
	 * call; callers tagging many sentences with the same model should cache it.
	 * @param sentence sentence to analyze
	 * @param model path of the serialized model used by the statistical NER
	 * @return list of Chunks corresponding to NEs, or {@code null} when none
	 *         are found or the model cannot be loaded
	 */
	public static List<Chunk> findNames(String sentence, String model) {
		List<Chunk> chunks = null;
		try {
			Chunker chunker = (Chunker) AbstractExternalizable.readObject(new File(model));
			Set<Chunk> chunkSet = chunker.chunk(sentence).chunkSet();
			if (chunkSet != null && !chunkSet.isEmpty())
				chunks = new ArrayList<Chunk>(chunkSet);
		} catch (IOException e) {
			// Best-effort: an unreadable model yields a null result, matching
			// the "nothing found" convention used by the rest of this class.
			e.printStackTrace();
		} catch (ClassNotFoundException e) {
			e.printStackTrace();
		}
		return chunks;
	}
	
	/**
	 * Selects the Chunks of a given NE type from a list (case-insensitive).
	 * Shared implementation of {@link #filterLocations(List)} and
	 * {@link #filterOrganizations(List)}.
	 * @param names list of NEs to filter
	 * @param type NE type to keep (e.g. "LOCATION")
	 * @return matching Chunks, or {@code null} when the input is null/empty
	 */
	private static List<Chunk> selectByType(List<Chunk> names, String type) {
		if (names == null || names.isEmpty())
			return null;
		List<Chunk> matches = new ArrayList<Chunk>();
		for (Chunk chunk : names)
			if (chunk.type().equalsIgnoreCase(type))
				matches.add(chunk);
		return matches;
	}
	
	/**
	 * Selects the locations from a list of NEs.
	 * @param names list of NEs to filter
	 * @return list of locations, or {@code null} when the input is null/empty
	 */
	public static List<Chunk> filterLocations(List<Chunk> names) {
		return selectByType(names, "LOCATION");
	}
	
	/**
	 * Selects the organizations from a list of NEs.
	 * @param names list of NEs to filter
	 * @return list of organizations, or {@code null} when the input is null/empty
	 */
	public static List<Chunk> filterOrganizations(List<Chunk> names) {
		return selectByType(names, "ORGANIZATION");
	}
	
	/**
	 * Creates a list of Strings starting from the corresponding list of Chunks.
	 * @param sentence sentence which the list of Chunks was extracted from
	 * @param chunks list of Chunks to resolve against the sentence
	 * @return list of substrings corresponding to the given Chunks, or
	 *         {@code null} when the input list is null/empty
	 */
	public static List<String> chunkListStrings(String sentence, List<Chunk> chunks) {
		if (chunks == null || chunks.isEmpty())
			return null;
		List<String> strings = new ArrayList<String>();
		for (Chunk chunk : chunks)
			strings.add(sentence.substring(chunk.start(), chunk.end()));
		return strings;
	}
	
	/**
	 * Finds all the NEs in a sentence using a dictionary-based NER.
	 * @param sentence sentence to analyze
	 * @param dictionary dictionary with the phrases to find in it
	 * @return list of Chunks corresponding to NEs, or {@code null} when none
	 *         are found
	 */
	public static List<Chunk> find(String sentence, MapDictionary<String> dictionary) {
		// returnAllMatches=true, caseSensitive=false (same flags as original code)
		ExactDictionaryChunker dictionaryChunker = new ExactDictionaryChunker(dictionary, TOKENIZER_FACTORY, true, false);
		Set<Chunk> chunkSet = dictionaryChunker.chunk(sentence).chunkSet();
		List<Chunk> chunks = null;
		if (chunkSet != null && !chunkSet.isEmpty())
			chunks = new ArrayList<Chunk>(chunkSet);
		return chunks;
	}
	
	/**
	 * Deserializes a {@link HiddenMarkovModel} from a file, always closing the
	 * underlying stream (the original code leaked it if readObject threw).
	 * @param model path of the serialized HMM
	 * @return the deserialized model
	 * @throws IOException if the file cannot be read
	 * @throws ClassNotFoundException if the serialized class is unavailable
	 */
	private static HiddenMarkovModel readHmm(String model) throws IOException, ClassNotFoundException {
		ObjectInputStream objIn = new ObjectInputStream(new FileInputStream(model));
		try {
			return (HiddenMarkovModel) objIn.readObject();
		} finally {
			objIn.close();
		}
	}
	
	/**
	 * Splits a sentence in phrase chunks.
	 * @param sentence sentence to split
	 * @param model path of the serialized HMM used by the Chunker
	 * @return list of phrase chunks, or {@code null} when the model cannot be
	 *         loaded
	 */
	public static List<Chunk> phraseChunk(String sentence, String model) {
		List<Chunk> chunks = null;
		try {
			HmmDecoder decoder = new HmmDecoder(readHmm(model));
			PhraseChunker phraseChunker = new PhraseChunker(decoder, TOKENIZER_FACTORY);
			chunks = new ArrayList<Chunk>(phraseChunker.chunk(sentence).chunkSet());
		} catch (ClassNotFoundException e) {
			e.printStackTrace();
		} catch (IOException e) {
			// Also covers FileNotFoundException; behavior (stack trace + null
			// result) is unchanged from the original separate catch blocks.
			e.printStackTrace();
		}
		return chunks;
	}
	
	/**
	 * Splits a sentence in tokens.
	 * @param sentence sentence to split
	 * @return list of token Chunks (possibly empty)
	 */
	public static List<Chunk> tokenChunk(String sentence) {
		TokenChunker tokenChunker = new TokenChunker(TOKENIZER_FACTORY);
		return new ArrayList<Chunk>(tokenChunker.chunk(sentence).chunkSet());
	}
	
	/**
	 * Tags the tokens of a sentence using a Part-Of-Speech tagger.
	 * @param sentence sentence to split and tag
	 * @param model path of the serialized HMM used by the POS tagger
	 * @return map of token Chunks to their POS tags; empty when the model
	 *         cannot be loaded
	 */
	public static Map<Chunk, String> tagPOS(String sentence, String model) {
		// Dedicated tokenizer: runs of letters/digits/hyphens/apostrophes, or
		// any single non-space character (so punctuation becomes its own token).
		TokenizerFactory tokenizerFactory = new RegExTokenizerFactory("(-|'|\\d|\\p{L})+|\\S");
		TokenChunker chunker = new TokenChunker(tokenizerFactory);
		Map<Chunk, String> tagsMap = new HashMap<Chunk, String>();
		try {
			HmmDecoder decoder = new HmmDecoder(readHmm(model));
			List<Chunk> chunks = new ArrayList<Chunk>(chunker.chunk(sentence).chunkSet());
			// NOTE(review): an all-whitespace sentence makes chunkListStrings
			// return null and decoder.tag(null) fail — confirm with callers.
			List<String> tokenList = chunkListStrings(sentence, chunks);
			Tagging<String> tagging = decoder.tag(tokenList);
			// Tags come back positionally; pair them with the chunks in order.
			int i = 0;
			for (Chunk chunk : chunks)
				tagsMap.put(chunk, tagging.tag(i++));
		} catch (IOException e) {
			e.printStackTrace();
		} catch (ClassNotFoundException e) {
			e.printStackTrace();
		}
		return tagsMap;
	}
	
	/**
	 * Filters a list of Chunks to remove the ones that overlap a chunk in
	 * another list.
	 * <p>NOTE(review): a candidate whose span strictly contains a picked chunk
	 * is NOT considered overlapping by this test (neither endpoint falls inside
	 * the picked span) — preserved from the original logic, confirm intended.
	 * @param newOnes list to filter
	 * @param alreadyPicked list with the Chunks to remove overlaps of
	 * @return filtered list without duplicates; {@code null} when newOnes is
	 *         null/empty, newOnes itself when alreadyPicked is null/empty
	 */
	public static List<Chunk> removeAlreadyPicked (List<Chunk> newOnes, List<Chunk> alreadyPicked) {
		if (newOnes == null || newOnes.isEmpty())
			return null;
		if (alreadyPicked == null || alreadyPicked.isEmpty())
			return newOnes;
		List<Chunk> chunks = new ArrayList<Chunk>();
		for (Chunk candidate : newOnes) {
			boolean overlaps = false;
			for (Chunk picked : alreadyPicked) {
				// Overlap: either endpoint of the candidate lies inside
				// [picked.start(), picked.end()].
				if ((picked.start() <= candidate.start() && candidate.start() <= picked.end())
						|| (picked.start() <= candidate.end() && candidate.end() <= picked.end())) {
					overlaps = true;
					break;
				}
			}
			if (!overlaps && !chunks.contains(candidate))
				chunks.add(candidate);
		}
		return chunks;
	}
	
	/**
	 * Selects all the adverbs from a map of POS-tagged Chunks.
	 * @param tagsMap map to filter (Chunk -> POS tag, see {@link #tagPOS})
	 * @return list of Chunks tagged "rb" (adverb in the Brown tag set), or
	 *         {@code null} when the map is null/empty
	 */
	public static List<Chunk> getAdverbs(Map<Chunk, String> tagsMap) {
		if (tagsMap == null || tagsMap.isEmpty())
			return null;
		List<Chunk> adverbs = new ArrayList<Chunk>();
		for (Entry<Chunk, String> entry : tagsMap.entrySet())
			if (entry.getValue().equals("rb"))
				adverbs.add(entry.getKey());
		return adverbs;
	}
	
	/**
	 * Selects the phrase chunks that contain the given Chunks.
	 * @param chunks list of Chunks to find in the phrase chunks
	 * @param phraseChunks list of phrase chunks that should contain the Chunks
	 * @return list of phrase chunks with the Chunks in them (no duplicates), or
	 *         {@code null} when either input is null/empty
	 */
	public static List<Chunk> getPhraseChunks(List<Chunk> chunks, List<Chunk> phraseChunks) {
		if (chunks == null || chunks.isEmpty() || phraseChunks == null || phraseChunks.isEmpty())
			return null;
		List<Chunk> selected = new ArrayList<Chunk>();
		for (Chunk chunk : chunks) {
			boolean found = false;
			Chunk container = null;
			Iterator<Chunk> itrPhraseChunks = phraseChunks.iterator();
			while (itrPhraseChunks.hasNext() && !found) {
				container = itrPhraseChunks.next();
				// The end()+1 tolerance accepts a chunk ending one position past
				// the phrase end — presumably for trailing punctuation; confirm.
				if (container.start() <= chunk.start() && container.end() + 1 >= chunk.end())
					found = true;
			}
			if (found && !selected.contains(container))
				selected.add(container);
		}
		return selected;
	}
	
	/**
	 * Selects the phrase chunks with the given Chunks in them.<br>
	 * It is used to find and categorize the ways to refer to geographical
	 * entities: when a containing phrase starts with "not" (or, for bare
	 * tokens, the preceding token ends with "not"), the type is prefixed with
	 * "[NOT] ".
	 * <p>NOTE(review): {@code tokenChunks} is assumed non-empty and
	 * {@code adverbs} non-null; a null/empty value makes this method throw —
	 * confirm with callers.
	 * @param sentence sentence analyzed
	 * @param chunks list of Chunks to find
	 * @param phraseChunks list of phrase chunks that should have some Chunks in them
	 * @param tokenChunks list of tokens that compose the given sentence
	 * @param adverbs adverbs from the given sentence
	 * @return map of phrase chunks (or bare Chunks) to the category of the way
	 *         to refer to geographical entities, or {@code null} when chunks or
	 *         phraseChunks is null/empty
	 */
	public static Map<Chunk, String> getPhraseChunks(String sentence, List<Chunk> chunks, List<Chunk> phraseChunks, List<Chunk> tokenChunks, List<Chunk> adverbs) {
		if (chunks == null || chunks.isEmpty() || phraseChunks == null || phraseChunks.isEmpty())
			return null;
		Map<Chunk, String> selected = new HashMap<Chunk, String>();
		List<String> phrases = LingPipeTool.chunkListStrings(sentence, phraseChunks);
		Object[] strings = LingPipeTool.chunkListStrings(sentence, tokenChunks).toArray();
		Iterator<Chunk> itrChunks = chunks.iterator();
		Iterator<Chunk> itrPhraseChunks = null;
		Iterator<Chunk> itrAdverbs = null;
		Iterator<String> itrPhrases = null;
		Iterator<Chunk> itrTokens = null;
		Chunk chunkChunks = null;
		Chunk chunkPhraseChunks = null;
		Chunk adverb = null;
		String phrase = null;
		String string = null;
		String type = null;
		int i;
		while (itrChunks.hasNext()) {
			i = 0;
			boolean add = false;
			boolean equal = false;
			type = null;
			chunkChunks = itrChunks.next();
			itrPhraseChunks = phraseChunks.iterator();
			itrTokens = tokenChunks.iterator();
			itrPhrases = phrases.iterator();
			// Count tokens up to (and including) the one whose span matches the
			// current chunk exactly.
			while (itrTokens.hasNext() && !equal) {
				Chunk token = itrTokens.next();
				if (token.start() == chunkChunks.start() && token.end() == chunkChunks.end())
					equal = true;
				i++;
			}
			// Pick the token preceding the matched one (used below to look for
			// a trailing "not"); fall back to the first token near the start.
			if (i >= 2)
				string = (String) strings[i - 2];
			else if (i == 1)
				string = (String) strings[i - 1];
			else if (i == 0)
				string = (String) strings[i];
			// Look for a phrase chunk whose span fully contains the chunk.
			while (itrPhraseChunks.hasNext() && !add) {
				chunkPhraseChunks = itrPhraseChunks.next();
				phrase = itrPhrases.next();
				if (chunkPhraseChunks.start() <= chunkChunks.start() && chunkPhraseChunks.end() >= chunkChunks.end()) {
					add = true;
					type = null;
					// Only chunks that are also adverbs receive a type here.
					itrAdverbs = adverbs.iterator();
					while (itrAdverbs.hasNext() && type == null) {
						adverb = itrAdverbs.next();
						if (adverb.start() == chunkChunks.start() && adverb.end() == chunkChunks.end()) {
							if (phrase.startsWith("not"))
								type = "[NOT] " + chunkChunks.type();
							else
								type = chunkChunks.type();
						}
					}
				}
			}
			if (add && !selected.values().contains(chunkPhraseChunks))
				selected.put(chunkPhraseChunks, type);
			else if (!add && !selected.values().contains(chunkPhraseChunks)) {
				// No containing phrase: record the bare chunk, negated when the
				// preceding token ends with "not".
				if (string.endsWith("not"))
					type = "[NOT] " + chunkChunks.type();
				else
					type = chunkChunks.type();
				selected.put(chunkChunks, type);
			}
		}
		return selected;
	}
	
	/**
	 * Removes all the Chunks of a given type from a given list.
	 * @param chunks list of Chunks to filter
	 * @param type type of Chunks to remove (case-insensitive)
	 * @return filtered list; {@code null} when chunks is null/empty, chunks
	 *         itself when type is null/blank
	 */
	public static List<Chunk> removeType(List<Chunk> chunks, String type) {
		if (chunks == null || chunks.isEmpty())
			return null;
		if (type == null || type.equals(""))
			return chunks;
		List<Chunk> selected = new ArrayList<Chunk>();
		for (Chunk chunk : chunks)
			if (!chunk.type().equalsIgnoreCase(type))
				selected.add(chunk);
		return selected;
	}
}
