package util.preprocessing;

import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;

import util.io.FileInput;
import util.ir.Indexer;
import util.nlp.Dictionary;
import util.parser.DBPedia;
import util.string.StringAnalysis;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;

import com.aliasi.dict.ApproxDictionaryChunker;
import com.aliasi.dict.DictionaryEntry;
import com.aliasi.dict.ExactDictionaryChunker;
import com.aliasi.dict.TrieDictionary;

import com.aliasi.spell.FixedWeightEditDistance;
import com.aliasi.spell.WeightedEditDistance;

import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

public class EntityParser {

	/**
	 * @param args
	 * @return
	 */
	/*public static void test(){
		
		
		TrieDictionary<String> dict = new TrieDictionary<String>();
		
		
		DictionaryEntry<String> entry1 = new DictionaryEntry<String>("http://www.google.com","buenas");
		DictionaryEntry<String> entry2 = new DictionaryEntry<String>("http://www.google.com/email/","buenas");
		DictionaryEntry<String> entry3 = new DictionaryEntry<String>("http://www.google.com/email/ser","buenas");
		
		
		dict.addEntry(entry1);
		dict.addEntry(entry2);
		dict.addEntry(entry3);
		TokenizerFactory tokenizerFactory = IndoEuropeanTokenizerFactory.INSTANCE;
		ExactDictionaryChunker chunker = new ExactDictionaryChunker(dict,
				tokenizerFactory);
		
		Chunking chunking = chunker.chunk("http://www.google.com/email/main.jsp");
		CharSequence cs = chunking.charSequence();
		Set<Chunk> chunkSet = chunking.chunkSet();

		LinkedList<Token> tokens = new LinkedList<Token>();

		String current = "";

		Hashtable<Integer, Integer> hash = new Hashtable<Integer, Integer>();

		for (Chunk chunk : chunkSet) {
			int start = chunk.start();
			int end = chunk.end();
			CharSequence str = cs.subSequence(start, end);

			current = str.toString();
				System.out.println(current);

			// double distance = chunk.score();
			// String match = chunk.type();
			// System.out.printf("%15s  %15s   %8.1f\n", str, match, distance);

		}
		
	}
	
	
	
	public static ApproxDictionaryChunker buildApproximateChunkerDictionary(
			Hashtable<String, LinkedList<String>> hash_entities,
			int max_distance_allowed) {

		TrieDictionary<String> dict = new TrieDictionary<String>();

		Enumeration<String> iter = hash_entities.keys();

		while (iter != null && iter.hasMoreElements()) {

			String entity = iter.nextElement();
			DictionaryEntry<String> entry1 = new DictionaryEntry<String>(entity
					.toLowerCase(), StringAnalysis
					.LinkedListToString(hash_entities.get(entity)));

			dict.addEntry(entry1);

		}

		TokenizerFactory tokenizerFactory = IndoEuropeanTokenizerFactory.INSTANCE;

		WeightedEditDistance editDistance = new FixedWeightEditDistance(0, -1,
				-1, -1, Double.NaN);

		ApproxDictionaryChunker chunker = new ApproxDictionaryChunker(dict,
				tokenizerFactory, editDistance, max_distance_allowed);

		
		
		return chunker;

	}
	
	
	*//**
	 * 
	 * 
	 * 
	 * @param path
	 *//*
	public static void annotateYagoTypes(String path, String yagoPath) {
		
		
		Hashtable<String, LinkedList<String>> hash = Indexer.loadTypesYago(yagoPath);
		ApproxDictionaryChunker chunker = buildApproximateChunkerDictionary(
				hash, 0);
		
		
		
		FileInput in = new FileInput(path);
		
		String line = in.readString();
		
		
		while(line!=null){
			
			
		line = line.trim();
		
		
		LinkedList<Token> list = getQueryEntities(line,chunker);
		
		
		for(int i =0 ; i < list.size(); i++){
			
			Token t = list.get(i);
			System.out.println(t.token + "\t" + t.type);
			
		}		
		line= in.readString();
			
			
		}
		
		
		
	}
	
	*//**
	 * Math lines from file in format count TAB entry
	 * 
	 * with the titles given in DBPedia wiki-title format
	 * 
	 * 
	 *//*
	
	
	
	public static Hashtable<String,String> fillHashWithWikiTitles(String dbpedia){
		
		Hashtable<String,String> titles= new Hashtable<String,String>();
		
		
		FileInput in = new FileInput(dbpedia);
		
		String line = in.readString();
		
		while(line!=null){
			
			
			String t[]= DBPedia.parseWikiTitleEntry(line);
			
			titles.put(t[0],t[1] );
			
			System.out.println(line);
			line =in.readString();
			
		}
		
		
		return titles;
		
		
		
	}
	
	public static void matchWikiTitles(String file, String dbpedia, int freq) {

		Hashtable<String, String> titles = fillHashWithWikiTitles(dbpedia);

		
		
		
		
		System.out.println("Dictionary complete");
		Dictionary dict = new Dictionary(titles,0);
		
		FileInput in = new FileInput(dbpedia);

		String line = in.readString();
		
		
		
		

		int hits=0;
		int total =0;
		while (line != null) {

			String t[] = line.split("\t");

			if (t.length > 1) {
				
				
				if(Integer.valueOf(t[0])>=freq){
					
					
					if(t[1].startsWith("\"") && t[1].endsWith("\""))
						t[1] = t[1].replaceAll("\"", "");
					
					
					LinkedList<String> list = dict.lookFor(t[1]);
					
					
					if(list.size()>0)
					System.out.print(t[1]);
					
					for(int i =0 ; i < list.size(); i++){
					System.out.print("\t"+ list.get(i));
					
					}
					

					if(list.size()>0){
						hits++;
					System.out.println("");	
					}
					
					
					total++;
				}
				
				
			}

			line = in.readString();

		}
		
		
		
		
		
		System.out.println("Coverage: " + (double)hits/(double)total);

	}
	
	*//**
	 * 
	 * 
	 * @param path
	 * @param yagoPath
	 *//*
	
	public static void annotateYagoTypesCVS(String path, String yagoPath,
			int freq) {

		Hashtable<String, LinkedList<String>> hash = Indexer
				.loadTypesYago(yagoPath);
		ApproxDictionaryChunker chunker = buildApproximateChunkerDictionary(
				hash, 0);

		FileInput in = new FileInput(path);

		String line = in.readString();

		while (line != null) {

			line = line.trim();

			String t[] = line.split("\t");

			if (Integer.valueOf(t[0]) > freq && t.length > 1) {
				LinkedList<Token> list = getQueryEntities(t[1], chunker);

				for (int i = 0; i < list.size(); i++) {

					Token tt = list.get(i);
					System.out.println(tt.token + "\t" + tt.type);

				}
				line = in.readString();

			}

		}

	}
	
	
	
	public static void annotateDBPediaTypes(String path, String dbpath) {
		
		
		Hashtable<String, LinkedList<String>> hash = Indexer.loadTypes(dbpath);
		ApproxDictionaryChunker chunker = buildApproximateChunkerDictionary(
				hash, 0);
		
		
		
		FileInput in = new FileInput(path);
		
		String line = in.readString();
		
		
		while(line!=null){
			
			
		line = line.trim();
		
		
		LinkedList<Token> list = getQueryEntities(line,chunker);
		
		
		for(int i =0 ; i < list.size(); i++){
			
			Token t = list.get(i);
			System.out.println(t.token + "\t" + t.type);
			
		}		
		line= in.readString();
			
			
		}
		
	}

	public static LinkedList<String> getEntityContextInQuery2(String query,
			LinkedList<Token> entities) {

		TokenizerFactory tokenizerFactory = IndoEuropeanTokenizerFactory.INSTANCE;

		LinkedList<String> context = new LinkedList<String>();

		HashSet<String> entityNames = new HashSet<String>();

		for (int i = 0; i < entities.size(); i++) {
			
			

			query = query.replaceAll(entities.get(i).token.trim(), "");

		}

		String t[] = query.split("\\s+");

		for (int i = 0; i < t.length; i++) {

			context.add(t[i]);

		}

		return context;
	}

	public static LinkedList<String> getEntityContextInQuery(String query,
			LinkedList<Token> entities) {

		TokenizerFactory tokenizerFactory = IndoEuropeanTokenizerFactory.INSTANCE;

		Tokenizer tokenizer = tokenizerFactory.tokenizer(query.toCharArray(),
				0, query.length());

		Iterator<String> iter = tokenizer.iterator();
		LinkedList<String> context = new LinkedList<String>();

		HashSet<String> entityNames = new HashSet<String>();

		for (int i = 0; i < entities.size(); i++) {

			entityNames.add(entities.get(i).token);

		}

		while (iter.hasNext()) {

			String word = iter.next();
			if (entityNames.contains(word)) {

			} else {

				context.add(word);
			}

		}

		return context;

	}

	public static LinkedList<Token> getQueryEntities(String query,
			ApproxDictionaryChunker chunker) {

		Chunking chunking = chunker.chunk(query);
		CharSequence cs = chunking.charSequence();
		Set<Chunk> chunkSet = chunking.chunkSet();

		LinkedList<Token> tokens = new LinkedList<Token>();

		String current = "";

		Hashtable<Integer, Integer> hash = new Hashtable<Integer, Integer>();

		for (Chunk chunk : chunkSet) {
			int start = chunk.start();
			int end = chunk.end();
			CharSequence str = cs.subSequence(start, end);

			current = str.toString();

			if (hash.containsKey(chunk.start())) {

				if (hash.get(chunk.start()) < chunk.end()) {

					hash.put(chunk.start(), chunk.end());

					updateTokenList(tokens, chunk, current);
				}

			} else {
				hash.put(chunk.start(), chunk.end());

				updateTokenList(tokens, chunk, current);
			}

			// double distance = chunk.score();
			// String match = chunk.type();
			// System.out.printf("%15s  %15s   %8.1f\n", str, match, distance);

		}

		return tokens;

	}

	private static void updateTokenList(LinkedList<Token> tokens, Chunk chunk,
			String current) {
		// TODO Auto-generated method stub

		if (tokens.size() == 0) {

			tokens.add(createToken(chunk, current));

		}

		for (int i = 0; i < tokens.size(); i++) {

			if (tokens.get(i).start == chunk.start()) {

				tokens.remove(i);
				tokens.add(i, createToken(chunk, current));
				return;

			} else if (tokens.get(i).end < chunk.start()) {
				{

					tokens.add(i, createToken(chunk, current));
					return;
				}

			}

		}

		tokens.addLast(createToken(chunk, current));

	}

	private static Token createToken(Chunk chunk, String current) {

		Token t = new Token();
		t.end = chunk.end();
		t.start = chunk.start();
		t.type = chunk.type();
		t.token = current;
		return t;
	}

	public static void listEntitiesAndContext(String queryLogPath,
			String dbpediaPath, int level, int min_distance) {

		FileInput in = new FileInput(queryLogPath);

		String line = in.readString();

		Hashtable<String, LinkedList<String>> hash = Indexer
				.loadTypes(dbpediaPath);

		ApproxDictionaryChunker chunker = buildApproximateChunkerDictionary(
				hash, min_distance);

		while (line != null ) {

			LinkedList<Token> entities = getQueryEntities(line, chunker);

			LinkedList<String> context = getEntityContextInQuery2(line,
					entities);

			for (int i = 0; i < entities.size(); i++) {
				String ent = entities.get(i).type;
				String enti[] = ent.split("\t");

				String entity = "";
				if (enti.length >= level) {

					entity = enti[level - 1];
				} else {
					entity = enti[enti.length - 1];
				}

				
				if(!line.trim().equals("-"))
				System.out.print(entity + "\t");

			}

			
			if(!line.trim().equals("-"))
			System.out.print("\t---\t");

			for (int i = 0; i < context.size(); i++) {
			
				if(!line.trim().equals("-"))
				System.out.print(context.get(i) + "\t");

			}
			if(!line.trim().equals("-"))
			System.out.println("");
			line = in.readString();

		}

	}

	public static void main(String[] args) {
		// TODO Auto-generated method stub
		String dbpedia_types = "/home/sergio/projects/data/dbpedia/instancetype_en.csv";

		dbpedia_types = "/home/sergio/projects/data/dbpedia/yagoclasses_links.nt";

		
		String queryLogPath = "/home/sergio/projects/CODFC/queryAnalysis/all_queries.txt";

		//listEntitiesAndContext(queryLogPath, dbpedia_types,2,1);
		
		String delicious_tags= "/home/sergio/projects/data/delicious/experiments/tags_kids.txt";
		
		//annotateYagoTypes(delicious_tags,dbpedia_types);
		
		
		String titles= "/home/sergio/projects/data/wikipedia_titles/labels_en.nq";
		String tags= "/home/sergio/Documents_delicious_stats/projects/data/delicious/experiments/tags_normalized_temporal_sorted2.txt";
		
		
		matchWikiTitles(tags,titles,2);
		
		//test();

	}*/

}

class Token {

	String token = "";

	int start = 0;
	int end = 0;

	String type = "";

	HashSet<String> set = new HashSet<String>();

}
