package de.uniwue.cs.ir.vsr;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class HTMLDocument extends DocumentImpl {

	/** Matches every character that is not a letter, digit or space. */
	private static final Pattern NON_ALPHANUMERIC = Pattern.compile("[^A-Za-z0-9 ]");

	/** Raw HTML of this document, kept for link and attribute extraction. */
	private String html;

	/**
	 * Creates an HTML document with the given name.
	 *
	 * @param name document name
	 */
	public HTMLDocument(String name) {
		super(name);
	}

	/**
	 * Creates an HTML document named after the given URL.
	 *
	 * @param url source URL; its string form becomes the document name
	 */
	public HTMLDocument(URL url) {
		super(url.toString());
	}

	/**
	 * Reads raw HTML from the stream and feeds the normalized plain text to
	 * {@link DocumentImpl#read(InputStream)} for tokenization. Normalization:
	 * HTML-to-text extraction, appending "alt"/"title" attribute values,
	 * stripping non-alphanumeric characters, lower-casing, stop-word removal
	 * and Porter stemming.
	 *
	 * @param input stream with the raw HTML; not closed by this method
	 * @throws IOException if the stream or the stop-word list cannot be read
	 */
	@Override
	public void read(InputStream input) throws IOException {
		// Read in chunks rather than byte-by-byte; let IOException propagate
		// (the previous version swallowed it although the method declares it).
		ByteArrayOutputStream buf = new ByteArrayOutputStream();
		byte[] chunk = new byte[8192];
		int n;
		while ((n = input.read(chunk)) != -1) {
			buf.write(chunk, 0, n);
		}
		// Decode with an explicit charset instead of the platform default.
		html = buf.toString(StandardCharsets.UTF_8.name());

		// extract the plain text and append the relevant attribute values
		String textOnly = html2text(html) + getTags("alt") + getTags("title");

		// strip the non-alphanumeric characters
		String textNonAlphaNum = removeTextNonAlphaNum(textOnly);

		// locale-independent lower-casing (avoids e.g. the Turkish dotless i)
		String text = textNonAlphaNum.toLowerCase(Locale.ROOT);

		// drop stop words, then apply the Porter stemming algorithm
		text = removeStopWords(text);
		String textStems = stemString(text);

		// hand the normalized text to the superclass to tokenize and parse
		super.read(new ByteArrayInputStream(textStems.getBytes(StandardCharsets.UTF_8)));
	}

	/**
	 * Returns all absolute links embedded in the HTML file. Elements whose
	 * "href" value is not a well-formed absolute URL (e.g. relative links)
	 * are skipped.
	 *
	 * @return set of extracted URLs, possibly empty
	 */
	public Set<URL> extractLinks() {
		Set<URL> urls = new HashSet<URL>();
		for (Element element : Jsoup.parse(html).getElementsByAttribute("href")) {
			try {
				urls.add(new URL(element.attr("href")));
			} catch (MalformedURLException ignored) {
				// relative or malformed href values cannot be turned into a
				// java.net.URL without a base URL; skip them
			}
		}
		return urls;
	}

	/**
	 * Collects the values of the given attribute from all elements carrying it.
	 *
	 * @param tag attribute name, e.g. "alt" or "title"
	 * @return space-separated attribute values, starting with a single space
	 */
	private String getTags(String tag) {
		Elements elements = Jsoup.parse(html).getElementsByAttribute(tag);
		StringBuilder tags = new StringBuilder(" ");
		for (Element element : elements) {
			tags.append(element.attr(tag)).append(' ');
		}
		return tags.toString();
	}

	/**
	 * Replaces every non-alphanumeric character in the text with a space.
	 *
	 * @param str input text
	 * @return text containing only letters, digits and spaces
	 */
	private String removeTextNonAlphaNum(String str) {
		// use the precompiled pattern instead of recompiling on every call
		return NON_ALPHANUMERIC.matcher(str).replaceAll(" ");
	}

	/**
	 * Extracts the plain text from an HTML string.
	 *
	 * @param html the HTML markup
	 * @return plain text without tags
	 */
	private String html2text(String html) {
		return Jsoup.parse(html).text();
	}

	/**
	 * Stems every token of the given string with the Porter stemmer.
	 *
	 * @param str whitespace-separated tokens to stem
	 * @return stemmed tokens, each preceded by a single space
	 */
	private String stemString(String str) {
		Stemmer s = new Stemmer();
		StringBuilder stemmed = new StringBuilder();
		StringTokenizer st = new StringTokenizer(str);
		while (st.hasMoreTokens()) {
			String tok = st.nextToken();
			s.add(tok.toCharArray(), tok.length());
			s.stem();
			stemmed.append(' ').append(s.toString());
		}
		return stemmed.toString();
	}

	/**
	 * Removes stop words from the given string, based on the
	 * "resources/englishST.txt" word list.
	 *
	 * @param str whitespace-separated, lower-cased tokens
	 * @return tokens that are not stop words, each preceded by a single space
	 * @throws IOException if the stop-word list cannot be read
	 */
	private String removeStopWords(String str) throws IOException {
		IDocument stopWords = new DocumentImpl("englishST.txt");
		// try-with-resources closes the stream; the previous version leaked it
		// and silently swallowed I/O errors, returning an empty result instead
		try (FileInputStream stream = new FileInputStream(new File("resources/englishST.txt"))) {
			stopWords.read(stream);
		}
		StringBuilder noStopWords = new StringBuilder();
		StringTokenizer st = new StringTokenizer(str);
		while (st.hasMoreTokens()) {
			String tok = st.nextToken();
			if (stopWords.getTermCount(tok) == 0) { // not a stop word
				noStopWords.append(' ').append(tok);
			}
		}
		return noStopWords.toString();
	}
}
