package util.ir;

import java.io.File;
import java.io.UnsupportedEncodingException;

import java.io.IOException;
import java.net.URLDecoder;

import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;

import util.io.FileInput;

import org.apache.lucene.store.LockObtainFailedException;

public class Indexer {

	/** Indexer for Wiki Pages: builds Lucene indexes over DBpedia dump files. */
	// Default index directory; NOTE(review): unused by the methods below,
	// which each hard-code their own absolute index path.
	static final File INDEX_DIR = new File("index");
	// NOTE(review): 'deleting' and 'reader' are never referenced in this file;
	// they look like leftovers from the Lucene demo indexer — confirm and remove.
	private static boolean deleting = false; // true during deletion pass
	private static IndexReader reader; // existing index
	private static IndexWriter writer; // new index being built (shared by all index* methods)

	/**
	 * Entry point. Currently only exercises {@link #decodeString(String)};
	 * earlier indexing runs are preserved as commented-out calls below —
	 * uncomment the one you need.
	 *
	 * @param args unused
	 * @throws CorruptIndexException     if an index segment is corrupt
	 * @throws LockObtainFailedException if the index write lock is already held
	 * @throws IOException               on any other index/file I/O error
	 */
	public static void main(String[] args) throws CorruptIndexException,
			LockObtainFailedException, IOException {

		// Alternate runs (kept for reference):
		// indexDocs("/project/data/AOL_queryLOG/queries_kids.txt");
		// indexHashtableList2();
		// Hashtable<String, HashSet<String>> hash =
		//         loadTypes("/media/sata/data/dbpedia/instance_types_en.nt");
		// indexHashtable(hash);
		// Hashtable<String, LinkedList<String>> hash_redirects =
		//         loadListHash("/media/sata/data/dbpedia/redirects_en.nt", 2, 0);

		System.out.println(decodeString("Ham_%28disambiguation%29"));
	}

public  static String decodeString(String item) {
		// TODO Auto-generated method stub

		String t = null;
		try {
			t = URLDecoder.decode(item, "utf8");
		} catch (UnsupportedEncodingException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}

		// t = t.replaceAll("\\(|\\)", "");
		// t = t.replaceAll("_", " ");

		return t;

	}

	/**
	 * Extracts the local name from a DBpedia URI: everything after the final
	 * '/'. A string without any '/' is returned unchanged.
	 */
	private static String getDBpediaType(String line) {
		int lastSlash = line.lastIndexOf('/');
		return line.substring(lastSlash + 1);
	}

	/**
	 * Loads a DBpedia N-Triples file into a key -> value table. Each line is
	 * split on whitespace; token 0 is the key and token 2 the value. DBpedia
	 * resource/ontology URI prefixes and angle brackets are stripped from
	 * both. Only the FIRST value seen for a key is kept; duplicates are
	 * ignored. Lines with fewer than three tokens are skipped.
	 *
	 * @param path path to the .nt file (read via {@link FileInput})
	 * @return table of cleaned key -> first cleaned value
	 */
	public static Hashtable<String, String> loadSimpleHash(String path) {

		Hashtable<String, String> hash = new Hashtable<String, String>();

		FileInput in = new FileInput(path);

		final int key_pos = 0;
		final int value_pos = 2;

		for (String line = in.readString(); line != null; line = in.readString()) {

			String[] t = line.split("\\s+");

			// BUG FIX: guard BEFORE indexing. The original dereferenced
			// t[key_pos]/t[value_pos] first and only then checked t.length,
			// so any short line threw ArrayIndexOutOfBoundsException.
			if (t.length <= value_pos) {
				continue;
			}

			String key = t[key_pos]
					.replace("<http://dbpedia.org/resource/", "")
					.replace(">", "").trim();
			String value = t[value_pos]
					.replace("<http://dbpedia.org/resource/", "")
					.replace("<http://www.w3.org/2002/07/owl#Thing", "")
					.replace("<http://dbpedia.org/ontology/", "")
					.replace(">", "").trim();

			// Keep first-wins semantics; check the trimmed key so the
			// containsKey test matches the key actually stored.
			if (!hash.containsKey(key)) {
				hash.put(key, value);
			}
		}

		return hash;
	}

	/**
	 * Loads a DBpedia N-Triples file into a key -> list-of-values table with
	 * configurable key/value token positions (lines split on whitespace).
	 * DBpedia URI prefixes and angle brackets are stripped from both tokens;
	 * every value seen for a key is appended to that key's list. Lines too
	 * short to contain both positions are skipped.
	 *
	 * @param path      path to the .nt file (read via {@link FileInput})
	 * @param key_pos   whitespace-token index used as the map key
	 * @param value_pos whitespace-token index appended to the key's list
	 * @return table of cleaned key -> all cleaned values, in file order
	 */
	private static Hashtable<String, LinkedList<String>> loadListHash(
			String path, int key_pos, int value_pos) {

		Hashtable<String, LinkedList<String>> hash = new Hashtable<String, LinkedList<String>>();

		FileInput in = new FileInput(path);

		for (String line = in.readString(); line != null; line = in.readString()) {

			String[] t = line.split("\\s+");

			// BUG FIX: guard BEFORE indexing. The original dereferenced
			// t[key_pos]/t[value_pos] first and only then checked t.length,
			// so any short line threw ArrayIndexOutOfBoundsException.
			if (t.length <= key_pos || t.length <= value_pos) {
				continue;
			}

			String key = t[key_pos]
					.replace("<http://dbpedia.org/resource/", "")
					.replace(">", "").trim();
			String value = t[value_pos]
					.replace("<http://dbpedia.org/resource/", "")
					.replace("<http://www.w3.org/2002/07/owl#Thing", "")
					.replace("<http://dbpedia.org/ontology/", "")
					.replace(">", "").trim();

			System.out.println(key + "\t" + value);

			LinkedList<String> list = hash.get(key);
			if (list == null) {
				list = new LinkedList<String>();
				hash.put(key, list);
			}
			list.add(value);
		}

		return hash;
	}

	/**
	 * Loads the DBpedia labels file into a title -> list-of-labels table.
	 * Each line is split on '>'; token 0 is the resource URI (prefix
	 * stripped, used as key) and token 2 the literal label (quotes and the
	 * trailing {@code @en .} removed). Lines with fewer than three tokens
	 * are skipped.
	 *
	 * @param path path to labels_en.nt (read via {@link FileInput})
	 * @return table of resource name -> labels, in file order
	 */
	private static Hashtable<String, LinkedList<String>> loadWikiTitles(
			String path) {

		Hashtable<String, LinkedList<String>> hash = new Hashtable<String, LinkedList<String>>();

		FileInput in = new FileInput(path);

		final int key_pos = 0;
		final int value_pos = 2;

		for (String line = in.readString(); line != null; line = in.readString()) {

			String[] t = line.split(">");

			// BUG FIX: guard BEFORE indexing. The original dereferenced
			// t[value_pos] first and only then checked t.length, so any
			// short line threw ArrayIndexOutOfBoundsException.
			if (t.length <= value_pos) {
				continue;
			}

			String key = t[key_pos]
					.replace("<http://dbpedia.org/resource/", "")
					.replace(">", "").trim();
			String label = t[value_pos]
					.replace("\"", "")
					.replace("@en .", "").trim();

			System.out.println(key + "\t" + label);

			LinkedList<String> list = hash.get(key);
			if (list == null) {
				list = new LinkedList<String>();
				hash.put(key, list);
			}
			list.add(label);
		}

		return hash;
	}

	/**
	 * Builds an entity -> set-of-types table from a TAB-separated types
	 * file. Column 0 is the entity, column 2 the type URI; the URI is
	 * reduced to its local name and any type containing "Thing" is
	 * skipped, as are lines with fewer than three columns.
	 *
	 * @param path TAB-separated types file (read via {@link FileInput})
	 * @return table of entity -> distinct type names
	 */
	public static Hashtable<String, HashSet<String>> loadTypes(String path) {

		Hashtable<String, HashSet<String>> hash = new Hashtable<String, HashSet<String>>();

		FileInput in = new FileInput(path);

		final int entity_pos = 0;
		final int type_pos = 2;

		for (String line = in.readString(); line != null; line = in.readString()) {

			String[] cols = line.split("\t");
			if (cols.length <= type_pos) {
				continue;
			}

			String entity = cols[entity_pos];
			String type = getDBpediaType(cols[type_pos]);

			// owl#Thing carries no information — skip it.
			if (type.contains("Thing")) {
				continue;
			}

			HashSet<String> set = hash.get(entity);
			if (set == null) {
				set = new HashSet<String>();
				hash.put(entity, set);
			}
			set.add(type);
		}

		return hash;
	}

	/**
	 * Indexes entity -> types mappings (e.g. from {@link #loadTypes}) into
	 * a Lucene index at a hard-coded path. One document per entity: the
	 * entity name stored both un-analyzed ("entity_name") and analyzed
	 * ("entity"), plus all its types joined by tabs ("types").
	 *
	 * @param hash entity -> set of type names
	 * @throws CorruptIndexException     if an index segment is corrupt
	 * @throws LockObtainFailedException if the index write lock is already held
	 * @throws IOException               on any other index/file I/O error
	 */
	private static void indexHashtable(Hashtable<String, HashSet<String>> hash)
			throws CorruptIndexException, LockObtainFailedException,
			IOException {

		String index = "/home/sergio/projects/indexes/index_dbpedia_entityTypes_en.txt";

		writer = new IndexWriter(index, new StandardAnalyzer(), true);

		// try/finally so the writer (and its write lock) is released even if
		// addDocument throws — the original leaked it on any exception.
		try {
			Enumeration<String> en = hash.keys();
			while (en.hasMoreElements()) {

				String entity = en.nextElement();
				HashSet<String> types = hash.get(entity);

				// Tab-join the types; StringBuilder replaces the O(n^2) +=
				// concatenation, and an empty set no longer throws
				// NoSuchElementException from an unconditional iter.next().
				StringBuilder types_concat = new StringBuilder();
				for (Iterator<String> iter = types.iterator(); iter.hasNext();) {
					if (types_concat.length() > 0) {
						types_concat.append('\t');
					}
					types_concat.append(iter.next());
				}

				Document doc = new Document();
				doc.add(new Field("entity_name", entity, Field.Store.YES,
						Field.Index.NOT_ANALYZED));
				doc.add(new Field("entity", entity, Field.Store.YES,
						Field.Index.ANALYZED));
				doc.add(new Field("types", types_concat.toString(),
						Field.Store.YES, Field.Index.NOT_ANALYZED));

				System.out.println("adding " + doc.get("entity_name"));
				writer.addDocument(doc); // add docs unconditionally
			}

			writer.optimize();
		} finally {
			writer.close();
		}
	}

	/**
	 * Builds the main DBpedia Lucene index: one document per Wikipedia
	 * title, carrying the raw title, its decoded lowercase label, and
	 * (when present) the decoded labels of every page redirecting to it.
	 * All-uppercase keys (acronym-style titles) are skipped. Input and
	 * output paths are hard-coded.
	 *
	 * @throws CorruptIndexException     if an index segment is corrupt
	 * @throws LockObtainFailedException if the index write lock is already held
	 * @throws IOException               on any other index/file I/O error
	 */
	private static void indexHashtableList() throws CorruptIndexException,
			LockObtainFailedException, IOException {

		String index = "/media/sata/data/indexes/dbpedia1/";
		String wikiLabels = "/media/sata/data/dbpedia/labels_en.nt";
		String redirect = "/media/sata/data/dbpedia/redirects_en.nt";

		writer = new IndexWriter(index, new WhitespaceAnalyzer(), true);

		Hashtable<String, LinkedList<String>> hash_titles = loadWikiTitles(wikiLabels);

		// Redirect triples map source -> target; keyed here by position 2
		// (the target) so each title can look up the pages redirecting to it.
		Hashtable<String, LinkedList<String>> hash_redirects = loadListHash(
				redirect, 2, 0);

		// try/finally so the writer (and its write lock) is released even if
		// addDocument throws — the original leaked it on any exception.
		try {
			Enumeration<String> en = hash_titles.keys();
			while (en.hasMoreElements()) {

				String key = en.nextElement();

				// Skip all-uppercase titles (acronyms etc.).
				if (key.equals(key.toUpperCase())) {
					continue;
				}

				String title = decodeString(key).toLowerCase().replace(" ", "_");
				LinkedList<String> re = hash_redirects.get(key);

				Document doc = new Document();
				doc.add(new Field(Constants.WIKIPEDIA_TITLE, key,
						Field.Store.YES, Field.Index.NOT_ANALYZED));
				doc.add(new Field(Constants.WIKIPEDIA_LABEL, title,
						Field.Store.YES, Field.Index.NOT_ANALYZED));
				doc.add(new Field(Constants.WIKIPEDIA_LABEL_RAW, title,
						Field.Store.YES, Field.Index.ANALYZED));

				String temp;
				if (re != null) {
					// Tab-join the decoded, lowercased redirect labels;
					// StringBuilder replaces the O(n^2) += concatenation.
					StringBuilder sb = new StringBuilder();
					for (int i = 0; i < re.size(); i++) {
						sb.append(decodeString(re.get(i)).toLowerCase()
								.replace(" ", "_")).append('\t');
					}
					temp = sb.toString();

					System.out.println("String decoded: " + temp.toLowerCase());
					doc.add(new Field(Constants.WIKI_REDIRECT, temp,
							Field.Store.YES, Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKI_REDIRECT_RAW, temp,
							Field.Store.YES, Field.Index.NOT_ANALYZED));

					// Label + all redirect labels in one searchable field.
					temp = temp + title;
					doc.add(new Field(Constants.WIKIPEDIA_LABEL_REDIRECT, temp,
							Field.Store.YES, Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKIPEDIA_LABEL_REDIRECT_RAW,
							temp.toLowerCase(), Field.Store.YES,
							Field.Index.NOT_ANALYZED));
				} else {
					// No redirects: fall back to the title itself.
					temp = title + "\t";
					doc.add(new Field(Constants.WIKI_REDIRECT, temp,
							Field.Store.YES, Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKI_REDIRECT_RAW, temp,
							Field.Store.YES, Field.Index.NOT_ANALYZED));
					doc.add(new Field(Constants.WIKIPEDIA_LABEL_REDIRECT,
							temp.toLowerCase(), Field.Store.YES,
							Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKIPEDIA_LABEL_REDIRECT_RAW,
							temp.toLowerCase(), Field.Store.YES,
							Field.Index.NOT_ANALYZED));
				}

				// BUG FIX: was doc.get(key), which looks up a field NAMED by
				// the title — no such field exists, so it always printed null.
				System.out.println("adding "
						+ doc.get(Constants.WIKIPEDIA_TITLE));
				writer.addDocument(doc); // add docs unconditionally
			}

			writer.optimize();
		} finally {
			writer.close();
		}
	}

	/**
	 * Indexes DBpedia ontology types: one document per (non-all-uppercase)
	 * entity with its raw title, decoded lowercase label, and all its types
	 * tab-joined (analyzed and raw). Input and output paths are hard-coded.
	 *
	 * @throws CorruptIndexException     if an index segment is corrupt
	 * @throws LockObtainFailedException if the index write lock is already held
	 * @throws IOException               on any other index/file I/O error
	 */
	private static void indexHashtableList1() throws CorruptIndexException,
			LockObtainFailedException, IOException {

		String index = "/media/sata/data/indexes/dbpedia_types/";
		String dbpedia_types = "/media/sata/data/dbpedia/instance_types_en.nt";

		writer = new IndexWriter(index, new WhitespaceAnalyzer(), true);

		Hashtable<String, LinkedList<String>> hash_types = loadListHash(
				dbpedia_types, 0, 2);

		// try/finally so the writer (and its write lock) is released even if
		// addDocument throws — the original leaked it on any exception.
		try {
			Enumeration<String> en = hash_types.keys();
			while (en.hasMoreElements()) {

				String key = en.nextElement();

				// Skip all-uppercase titles (acronyms etc.).
				if (key.equals(key.toUpperCase())) {
					continue;
				}

				String title = decodeString(key).toLowerCase().replace(" ", "_");
				LinkedList<String> types = hash_types.get(key);

				Document doc = new Document();
				doc.add(new Field(Constants.WIKIPEDIA_TITLE, key,
						Field.Store.YES, Field.Index.NOT_ANALYZED));
				doc.add(new Field(Constants.WIKIPEDIA_LABEL, title,
						Field.Store.YES, Field.Index.NOT_ANALYZED));
				doc.add(new Field(Constants.WIKIPEDIA_LABEL_RAW, title,
						Field.Store.YES, Field.Index.ANALYZED));

				if (types != null) {
					// Tab-join the types; StringBuilder replaces the O(n^2)
					// += concatenation of the original.
					StringBuilder temp = new StringBuilder();
					for (int i = 0; i < types.size(); i++) {
						temp.append(types.get(i)).append('\t');
					}
					System.out.println(temp);

					doc.add(new Field(Constants.WIKI_TYPES, temp.toString(),
							Field.Store.YES, Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKI_TYPES_RAW,
							temp.toString(), Field.Store.YES,
							Field.Index.NOT_ANALYZED));
				}

				// BUG FIX: was doc.get(key), which looks up a field NAMED by
				// the title — no such field exists, so it always printed null.
				System.out.println("adding "
						+ doc.get(Constants.WIKIPEDIA_TITLE));
				writer.addDocument(doc); // add docs unconditionally
			}

			writer.optimize();
		} finally {
			writer.close();
		}
	}
	
	
	
	
	/**
	 * Indexes Wikipedia article categories: one document per
	 * (non-all-uppercase) article with its raw title, decoded lowercase
	 * label, and its categories tab-joined both raw and percent-decoded/
	 * lowercased. Input and output paths are hard-coded.
	 *
	 * @throws CorruptIndexException     if an index segment is corrupt
	 * @throws LockObtainFailedException if the index write lock is already held
	 * @throws IOException               on any other index/file I/O error
	 */
	private static void indexHashtableList2() throws CorruptIndexException,
			LockObtainFailedException, IOException {

		String index = "/media/sata/data/indexes/wiki_categories2/";
		String categories = "/media/sata/data/dbpedia/article_categories_en.nt";

		writer = new IndexWriter(index, new WhitespaceAnalyzer(), true);

		Hashtable<String, LinkedList<String>> hash_types = loadListHash(
				categories, 0, 2);

		// try/finally so the writer (and its write lock) is released even if
		// addDocument throws — the original leaked it on any exception.
		try {
			Enumeration<String> en = hash_types.keys();
			while (en.hasMoreElements()) {

				String key = en.nextElement();

				// Skip all-uppercase titles (acronyms etc.).
				if (key.equals(key.toUpperCase())) {
					continue;
				}

				String title = decodeString(key).toLowerCase().replace(" ", "_");
				LinkedList<String> types = hash_types.get(key);

				Document doc = new Document();
				doc.add(new Field(Constants.WIKIPEDIA_TITLE, key,
						Field.Store.YES, Field.Index.NOT_ANALYZED));
				doc.add(new Field(Constants.WIKIPEDIA_LABEL, title,
						Field.Store.YES, Field.Index.NOT_ANALYZED));

				if (types != null) {
					// Single pass builds both the raw and the decoded,
					// lowercased category strings (the original iterated
					// the same list twice).
					StringBuilder temp = new StringBuilder();
					StringBuilder temp_decoded = new StringBuilder();
					for (int i = 0; i < types.size(); i++) {
						String cat = types.get(i);
						temp.append(cat).append('\t');
						temp_decoded.append(decodeString(cat.toLowerCase()))
								.append('\t');
					}
					System.out.println(temp);
					System.out.println(temp_decoded);

					doc.add(new Field(Constants.WIKIPEDIA_CATEGORY,
							temp.toString(), Field.Store.YES,
							Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKIPEDIA_CATEGORY_RAW,
							temp.toString(), Field.Store.YES,
							Field.Index.NOT_ANALYZED));
					doc.add(new Field(Constants.WIKIPEDIA_CATEGORY_DECODED,
							temp_decoded.toString(), Field.Store.YES,
							Field.Index.ANALYZED));
					doc.add(new Field(Constants.WIKIPEDIA_CATEGORY_RAW_DECODED,
							temp_decoded.toString(), Field.Store.YES,
							Field.Index.NOT_ANALYZED));
				}

				// BUG FIX: was doc.get(key), which looks up a field NAMED by
				// the title — no such field exists, so it always printed null.
				System.out.println("adding "
						+ doc.get(Constants.WIKIPEDIA_TITLE));
				writer.addDocument(doc); // add docs unconditionally
			}

			writer.optimize();
		} finally {
			writer.close();
		}
	}

	/**
	 * Indexes the DBpedia type vocabulary itself: reads a TAB-separated
	 * file, extracts the local type name from column 2 of each line, and
	 * writes one document per non-"Thing" type (stored un-analyzed as
	 * "type_name" and analyzed as "type"). Output path is hard-coded.
	 *
	 * @param path TAB-separated types file (read via {@link FileInput})
	 * @throws CorruptIndexException     if an index segment is corrupt
	 * @throws LockObtainFailedException if the index write lock is already held
	 * @throws IOException               on any other index/file I/O error
	 */
	private static void indexDocs(String path) throws CorruptIndexException,
			LockObtainFailedException, IOException {

		String index = "/home/sergio/projects/indexes/index_dbpedia_types_en.txt";

		writer = new IndexWriter(index, new StandardAnalyzer(), true);

		FileInput input = new FileInput(path);
		final int field = 2;

		// try/finally so the writer (and its write lock) is released even if
		// addDocument throws — the original leaked it on any exception.
		try {
			for (String line = input.readString(); line != null; line = input
					.readString()) {

				String[] t = line.split("\t");

				// BUG FIX: guard before indexing — the original read t[field]
				// unconditionally, so any line with fewer than three columns
				// threw ArrayIndexOutOfBoundsException.
				if (t.length <= field) {
					continue;
				}

				String type = getDBpediaType(t[field]);

				// owl#Thing carries no information — skip it.
				if (type.contains("Thing")) {
					continue;
				}

				Document doc = new Document();
				doc.add(new Field("type_name", type, Field.Store.YES,
						Field.Index.NOT_ANALYZED));
				doc.add(new Field("type", type, Field.Store.YES,
						Field.Index.ANALYZED));

				System.out.println("adding " + doc.get("type_name"));
				writer.addDocument(doc); // add docs unconditionally
			}

			writer.optimize();
		} finally {
			writer.close();
		}
	}
}
