package document;

import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.regex.Pattern;

import query.Query;

/**
 * An inverted index mapping stemmed terms to the documents (and word
 * positions) in which they occur. Documents are identified internally by
 * sequentially assigned integer IDs; the dictionary maps tables translate
 * between IDs and document titles.
 */
public class InvertedIndex {
	// NOTE(review): ';' appears twice in this constant; value kept byte-identical
	// because it is public and may be read by external callers.
	public static final String DELIMITERS = " \t\n\r\f,;.:;-\"'?!()";

	// Tokenizer split pattern: whitespace/punctuation, plus a zero-width split
	// at digit->letter boundaries (so "12abc" tokenizes as "12", "abc").
	// Compiled once instead of on every indexDocument() call.
	private static final Pattern TOKEN_DELIMITER = Pattern
			.compile("[\\s.,;:\\-\\(\\)\\!\\?'\"]|(?<=[\\d])(?=[a-zA-Z])");

	// Matches tokens that consist solely of separator characters; such tokens
	// are skipped during indexing.
	private static final Pattern SEPARATORS_ONLY = Pattern
			.compile("^([\\s.,;:\\-\\(\\)\\!\\?'\"])+$");

	/** Minimum length a token must have to be indexed. */
	private static final int MIN_TOKEN_LENGTH = 1;

	/** Maps each stemmed term to the posting list of documents/positions where it occurs. */
	private Hashtable<String, PostingList> indexEntries;
	/** Maps document IDs to document names. */
	private Hashtable<Integer, String> dictionary;
	/** Maps document names to document IDs. */
	private Hashtable<String, Integer> inverseDictionary;
	/** All raw (unstemmed) tokens that passed the stop-word/length filter. */
	private HashSet<String> unstemmedInputs;
	/** Maps each document ID to the set of stemmed terms occurring in it. */
	private Hashtable<Integer, HashSet<String>> documentToTerms;

	/** Number of distinct documents indexed so far; doubles as the next document ID. */
	private int count;
	private Stemmer stemmer;

	public InvertedIndex() {
		indexEntries = new Hashtable<String, PostingList>();
		dictionary = new Hashtable<Integer, String>();
		inverseDictionary = new Hashtable<String, Integer>();
		unstemmedInputs = new HashSet<String>();
		documentToTerms = new Hashtable<Integer, HashSet<String>>();
		count = 0;
		stemmer = new Stemmer();
	}

	/** Indexes every document contained in the given library. */
	public void indexLibrary(DocumentLibrary library) {
		for (InputDocument doc : library.getInputDocuments()) {
			indexDocument(doc);
		}
	}

	/**
	 * Returns the IDs of all documents containing the given (stemmed) term.
	 * Never returns null: an unknown term yields an empty vector.
	 */
	public Vector<Integer> getPostingsDocId(String term) {
		PostingList list = indexEntries.get(term);
		Vector<Integer> result = (list == null) ? null : list.getAllDocumentIDs();
		if (result == null) {
			result = new Vector<Integer>();
		}
		return result;
	}

	/** Returns an iterator over all unstemmed tokens that were accepted into the index. */
	public Iterator<String> getAllIndexedTerms() {
		return unstemmedInputs.iterator();
	}

	/** Evaluates the query against this index and returns the matching document IDs. */
	public List<Integer> processQuery(Query q) {
		q.process(this);
		return q.getResult();
	}

	/**
	 * Tokenizes the document content and adds every non-separator token to the
	 * index, tracking the token's word position within the document.
	 */
	public void indexDocument(InputDocument document) {
		String content = document.getContent();
		int wordPosition = 0;
		for (String token : TOKEN_DELIMITER.split(content)) {
			if (token.length() > 0 && !SEPARATORS_ONLY.matcher(token).matches()) {
				addToIndex(token, document.getTitle(), wordPosition);
				wordPosition++;
			}
		}
	}

	/**
	 * Builds a Document holding one scored entry per term of the given
	 * document. Each term is weighted with tf-idf: log(N / df) * (1 + log(tf)).
	 * An unknown document ID yields an empty Document (previously this threw
	 * NullPointerException on the null term set).
	 */
	public Document getDocumentForId(int docId) {
		Document result = new Document();
		HashSet<String> terms = getTermsOfDocument(docId);
		if (terms == null) {
			return result; // docId was never indexed
		}
		for (String term : terms) {
			PostingList pl = indexEntries.get(term);
			if (pl == null) {
				continue;
			}
			Posting posting = pl.getPosting(docId);
			if (posting == null) {
				continue;
			}
			// inverse document frequency: log(N / df)
			double idf = Math.log(getNumberOfDocuments()
					/ ((double) getPostings(term).size()));
			// sublinear term-frequency weight: 1 + log(tf)
			double tfWeight = 1 + Math.log(posting.getTermFrequency());
			result.addEntry(new DocumentEntry(term, idf * tfWeight));
		}
		return result;
	}

	/** Returns the number of distinct documents indexed. */
	public int getNumberOfDocuments() {
		return count;
	}

	/**
	 * Stems the token and records it under the document named by title,
	 * assigning a fresh document ID the first time a title is seen. Stop words
	 * and tokens shorter than MIN_TOKEN_LENGTH are ignored.
	 */
	private void addToIndex(String token, String title, int wordPosition) {
		if (StopWordLexicon.getInstance().isStopWord(token)
				|| token.length() < MIN_TOKEN_LENGTH) {
			return;
		}
		unstemmedInputs.add(token);
		String term = stemmer.stem(token);

		Integer docId = inverseDictionary.get(title);
		if (docId == null) {
			docId = count;
			dictionary.put(docId, title);
			inverseDictionary.put(title, docId);
			count++;
		}

		PostingList list = indexEntries.get(term);
		if (list == null) {
			list = new PostingList();
			indexEntries.put(term, list);
		}
		list.addPosting(docId, wordPosition);

		HashSet<String> terms = documentToTerms.get(docId);
		if (terms == null) {
			terms = new HashSet<String>();
			documentToTerms.put(docId, terms);
		}
		terms.add(term);
	}

	/**
	 * Returns the set of stemmed terms occurring in the given document, or
	 * null if the document ID is unknown.
	 */
	public HashSet<String> getTermsOfDocument(Integer docId) {
		return documentToTerms.get(docId);
	}

	/** Prints every term together with the names of the documents it occurs in. */
	public void print() {
		Enumeration<String> e = indexEntries.keys();
		while (e.hasMoreElements()) {
			String key = e.nextElement();
			System.out.println(key
					+ toDocNames(indexEntries.get(key).getAllDocumentIDs())
							.toString());
		}
	}

	/**
	 * Resolves document IDs to document names. A null or empty input yields a
	 * single "no match" entry.
	 */
	public Vector<String> toDocNames(List<Integer> indices) {
		Vector<String> result = new Vector<String>();
		if (indices == null || indices.isEmpty()) {
			result.add("no match");
		} else {
			for (Integer i : indices) {
				result.add(dictionary.get(i));
			}
		}
		return result;
	}

	/** Returns a human-readable summary of the index dimensions and posting-list sizes. */
	public String getStatistics(int numberOfDocuments) {
		StringBuilder s = new StringBuilder();
		s.append("Size of the term-document-matrix: ").append(indexEntries.size())
				.append(" X ").append(numberOfDocuments).append("\n");
		s.append("number of 1s in this matrix: ").append(getOnes())
				.append("\n");
		s.append("longest posting list: ").append(getSizeOfLongestPostinList())
				.append("\n");
		s.append("shortest posting list: ").append(
				getSizeOfShortestPostinList()).append("\n");
		return s.toString();
	}

	/** Counts the non-zero cells of the term-document matrix. */
	private int getOnes() {
		int result = 0;
		for (PostingList list : indexEntries.values()) {
			result += list.getAllDocumentIDs().size();
		}
		return result;
	}

	/** Size of the shortest posting list; 0 for an empty index. */
	private int getSizeOfShortestPostinList() {
		if (indexEntries.isEmpty()) {
			return 0; // previously reported Integer.MAX_VALUE for an empty index
		}
		int min = Integer.MAX_VALUE;
		for (PostingList list : indexEntries.values()) {
			min = Math.min(min, list.getAllDocumentIDs().size());
		}
		return min;
	}

	/** Size of the longest posting list; 0 for an empty index. */
	private int getSizeOfLongestPostinList() {
		int max = 0;
		for (PostingList list : indexEntries.values()) {
			max = Math.max(max, list.getAllDocumentIDs().size());
		}
		return max;
	}

	/**
	 * Returns the posting of the given document for the given term, or null
	 * if the term is not in the index (previously this threw
	 * NullPointerException for unknown terms, unlike getPostings).
	 */
	public Posting getPosting(Integer docId, String subTerm) {
		PostingList list = indexEntries.get(subTerm);
		return (list == null) ? null : list.getPosting(docId);
	}

	/**
	 * Returns all (docId, posting) pairs for the given term, or null if the
	 * term is not in the index.
	 */
	public Set<Map.Entry<Integer, Posting>> getPostings(String subTerm) {
		PostingList list = indexEntries.get(subTerm);
		return (list == null) ? null : list.getPostings();
	}
}
