package com.core.indexer;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;

/**
 * One document of the corpus. {@link #analyze(Set)} cleans the raw text
 * (HTML stripping, lower-casing, stop-word and punctuation removal),
 * computes per-term frequencies and the document's vector-space weight,
 * and pushes the counts into the global {@code IndexData} inverted index.
 */
public class Document {

	/** Runs of the '*' placeholder act as word separators after cleaning. */
	private static final Pattern WORD_SEPARATOR = Pattern.compile("\\*+");
	/** Everything outside a-z; applied only after lower-casing. */
	private static final Pattern NON_LETTER = Pattern.compile("[^a-z]");

	protected String mDocNo;
	protected String mDocHeadLine;
	protected String mDocContent;
	protected int mSequenceNum;
	// Headline + content merged into the single working buffer that the
	// cleaning steps transform in place.
	private String mDocHeadPlusContent;
	// Byte length (UTF-8) of the cleaned text; valid only after analyze().
	private int mDocSize;
	// sqrt(sum over terms of (1 + ln tf)^2); valid only after analyze().
	private double mDocWeight;

	/** @return UTF-8 byte length of the cleaned text (0 before analyze()). */
	public int GetDocSize()
	{
		return mDocSize;
	}

	/** @return the document's vector-space weight (0 before analyze()). */
	public double GetDocWeight()
	{
		return mDocWeight;
	}

	/**
	 * @return the document's vector-space weight.
	 * @deprecated misspelled legacy name; use {@link #GetDocWeight()}.
	 */
	@Deprecated
	public double GetDocWight()
	{
		return GetDocWeight();
	}

	/** @return the corpus-assigned document number. */
	public String GetDocNo() {
		return mDocNo;
	}

	/**
	 * @param SequenceNum position of this document in the indexing sequence
	 * @param docNo       corpus-assigned document number
	 * @param docHeadLine raw (possibly HTML) headline
	 * @param docContent  raw (possibly HTML) body text
	 */
	public Document(int SequenceNum, String docNo, String docHeadLine, String docContent) {
		mSequenceNum = SequenceNum;
		mDocNo = docNo;
		mDocHeadLine = docHeadLine;
		mDocContent = docContent;
		mDocSize = 0;
		mDocWeight = 0;
	}

	/**
	 * Runs the full cleaning/weighting pipeline and registers this document's
	 * term counts with the global index.
	 *
	 * @param stopList words to remove before counting (expected lower-case,
	 *                 to match the lower-cased buffer)
	 * @return always {@code true}
	 */
	public boolean analyze(Set<String> stopList) {

		// Step 1: Strip HTML tags from headline and content.
		RemoveHTMLTags();

		// Step 2: Lower-case first so stop-word matching is case-insensitive.
		ConvertToLowerCase();

		// Step 3: Remove stop words via whole-word regex matches.
		RemoveStopWords(stopList);

		// Step 4: Replace every non-letter with '*' so a single split yields words.
		RemovePunctuation();
		// Explicit UTF-8 keeps the size platform-independent (default charset
		// varies per JVM before Java 18).
		mDocSize = mDocHeadPlusContent.getBytes(StandardCharsets.UTF_8).length;

		// Step 5: Split the cleaned string into individual words.
		String[] listOfWords = StringToList();

		// Steps 6-7: Per-term frequency count for this document.
		// (Sequence number was initialized at construction time.)
		HashMap<String, IndexData.Lexicon> tempHashMap = new HashMap<String, IndexData.Lexicon>();
		for (String word : listOfWords) {
			if (word.isEmpty()) {
				continue; // split() can yield a leading empty token
			}
			// Single lookup instead of containsKey()+get().
			IndexData.Lexicon existing = tempHashMap.get(word);
			if (existing != null) {
				existing.IncRef();
			}
			else {
				tempHashMap.put(word, IndexData.Get().GetNewLexicon(word, 1, 0, 0));
			}
		}

		// Step 8: Document weight = sqrt(sum over terms of (1 + ln tf)^2).
		for (IndexData.Lexicon lexicon : tempHashMap.values())
		{
			mDocWeight += Math.pow((1 + Math.log(lexicon.GetCount())), 2);
		}

		mDocWeight = Math.sqrt(mDocWeight);

		// Step 9: Merge each lexicon's frequency into the global inverted list.
		IndexData.Get().FillInData(tempHashMap, mSequenceNum);

		return true;
	}

	/** Strips HTML from both fields and builds the combined working buffer. */
	private void RemoveHTMLTags() {
		mDocHeadLine = Jsoup.parse(mDocHeadLine).text();
		mDocContent = Jsoup.parse(mDocContent).text();
		mDocHeadPlusContent = mDocHeadLine + " " + mDocContent;
	}

	/** Lower-cases the buffer. */
	private void ConvertToLowerCase() {
		// Locale.ROOT avoids locale-sensitive case mapping (e.g. Turkish
		// dotless i); downstream filtering keeps only a-z anyway.
		mDocHeadPlusContent = mDocHeadPlusContent.toLowerCase(Locale.ROOT);
	}

	/** Deletes each stop word wherever it appears as a whole word. */
	private void RemoveStopWords(Set<String> stopList) {
		for (String strStop : stopList) {
			// Pattern.quote() keeps regex metacharacters in a stop word from
			// matching unintended text (e.g. "a.b" would otherwise match "axb").
			String rego = "\\b" + Pattern.quote(strStop) + "\\b";
			mDocHeadPlusContent = mDocHeadPlusContent.replaceAll(rego, "");
		}
	}

	/* All non-word characters are replaced with * for convenience */
	private void RemovePunctuation() {
		mDocHeadPlusContent = NON_LETTER.matcher(mDocHeadPlusContent).replaceAll("*");
	}

	/** Splits the cleaned buffer on runs of the '*' separator. */
	private String[] StringToList() {
		return WORD_SEPARATOR.split(mDocHeadPlusContent);
	}
}
