package hebClustering.nlp;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Scanner;
import java.util.Set;

import org.jsoup.Jsoup;

import hebClustering.ClusterSearch;
import hebClustering.documentTypes.Document;
import hebClustering.searchEngines.WebsiteInformation;
import hebmorph.*;
import hebmorph.Token;


/**
 * A natural language processor for Hebrew text, backed by the hebmorph
 * lemmatizer and the hspell data files.
 */
public class HebrewNLP implements INLP {
	// Shared hebmorph lemmatizer; used by every textToDocument() call,
	// including the ones made from the downloader threads.
	StreamLemmatizer m_lemmatizer;

	// Hand-rolled mutual-exclusion flag guarding m_lemmatizer during
	// tokenization (see textToDocument).
	// NOTE(review): a plain non-volatile boolean checked and then set is not a
	// safe lock between threads — the check-then-set is not atomic.
	private boolean threadSemaphoreFlag = false;

	/**
	 * Hebrew stop words filtered out of documents before indexing: common
	 * prepositions, pronouns, conjunctions and question words. Membership is
	 * checked by a linear scan in isStopWord(). Declared final so the shared
	 * table cannot be reassigned. The table contains a few duplicate entries;
	 * they are harmless for a membership scan.
	 */
	private static final String[] BasicStopWordsArr = { "אם","כי","בתוך","לתוך","הוא","היא","הם","הן","לא","היכן","יש",
		"כן","או","היה","היו","יהיה","יהיו","להיות","תהיינה","למה","מדוע","האם","אבל","ע\"י","עבור","זה","זאת",
		"בשביל","מה","גם","הם","אז","כלומר", "רק", "בגלל", "מכיוון", "עד", "כמו", "מאד", "של", "את",
		"בעיקר", "זו","הזה","מלבד","בלבד","בין", "ובין","לבין","למשל","שבהם","על","אחד", "כך", "אך","למרות",
		"אני","את","אתה","אנחנו","אתן","אתם","הם","הן","היא","הוא","שלי","שלו","שלך","שלה","שלנו","שלכם",
		"שלכן","שלהם","שלהן","לי","לו","לה","לנו","לכם","לכן","להם","להן","אותה","אותו","זה","זאת",
		"אלה","אלו","תחת","מתחת","מעל","בין","עם","עד","נגר","על","אל","מול","של","אצל","כמו","אחר",
		"אותו","בלי","לפני","אחרי","מאחורי","עלי","עליו","עליה","עליך","עלינו","עליכם","לעיכן","עליהם",
		"עליהן","כל","כולם","כולן","כך","ככה","כזה","זה","זות","אותי","אותה","אותם","אותך","אותו","אותן",
		"אותנו","ואת","את","אתכם","אתכן","איתי","איתו","איתך","איתה","איתם","איתן","איתנו","איתכם",
		"איתכן","יהיה","תהיה","היתי","היתה","היה","להיות","עצמי","עצמו","עצמה","עצמם","עצמן","עצמנו",
		"עצמהם","עצמהן","מי","מה","איפה","היכן","במקום שבו","אם","לאן","למקום שבו","מקום בו","איזה",
		"מהיכן","איך","כיצד","באיזו מידה","מתי","בשעה ש","כאשר","כש","למרות","לפני","אחרי","למה",
		"מדוע","כי","יש","אין","אך","מנין","מאין","מאיפה","יכל","יכלה","יכלו","יכול","יכולה",
		"יכולים","יכולות","יוכלו","יוכל","מסוגל","לא","רק","אולי","אין","לאו","אי","כלל","נגד","אם",
		"עם","אל","אלה","אלו","אף","על","מעל","מתחת","מצד","בשביל","לבין","באמצע","בתוך","דרך","מבעד",
		"באמצעות","למעלה","למטה","מחוץ","מן","לעבר","מכאן","כאן","הנה","הרי","פה","שם","אך","ברם",
		"שוב","אבל","מבלי","בלי","מלבד","רק","בגלל","מכיוון","עד","אשר","ואילו","למרות","אס","כמו",
		"כפי","אז","אחרי","כן","לכן","לפיכך","מאד","עז","מעט","מעטים","במידה","שוב","יותר","מדי","גם"
		,"כן","נו","אחר","אחרת","אחרים","אחרות","אשר","או","כדי","אגב"};

	/**
	 * Creates a new NLP and initializes the lemmatizer from the hspell data
	 * files.
	 */
	public HebrewNLP(){

		m_lemmatizer=new StreamLemmatizer();
		try {
			// NOTE(review): the hspell path is relative to the working
			// directory — confirm "../hspell-data-files" exists wherever the
			// application is launched from.
			m_lemmatizer.initFromHSpellFolder("../hspell-data-files", true, false);
		} catch (IOException e) {
			// NOTE(review): the failure is only logged; the lemmatizer is left
			// partially initialized and later tokenization calls may fail.
			e.printStackTrace();
		}
	}

	/**
	 * Adds the lemma (or surface text) of a token to the token map, unless it
	 * is a stop word.
	 *
	 * @param tokens - A list of alternative lemmatizations ("הטיות") for a single
	 *                 surface word; the first entry must be a HebrewToken.
	 * @param tokenMap - The token map, contains tokens and their repetition
	 *                   number in the current document.
	 */
	private void processTokens(List<Token> tokens,HashMap<String, Integer> tokenMap){

		HebrewToken ht = (HebrewToken) tokens.get(0);

		if( !isStopWord(ht.getLemma()) && !isStopWord(ht.getText())){
			// "שונות" is hebmorph's catch-all lemma; in that case keep the
			// surface text rather than the uninformative lemma.
			if( !ht.getLemma().equals("שונות"))
				addToken(tokenMap,bestLemma(tokens));
			else 
				addToken(tokenMap,ht.getText());
		}
	}
	
	/**
	 * Increments the repetition count of a token in a token map, inserting it
	 * with a count of 1 on first occurrence.
	 *
	 * @param numOfRepetitions - Map from token to its number of occurrences.
	 * @param token - The token to count.
	 */
	private void addToken(HashMap<String,Integer>numOfRepetitions,String token){
		// A single get() replaces the original containsKey()+get() pair,
		// halving the number of map lookups.
		Integer current = numOfRepetitions.get(token);
		numOfRepetitions.put(token, current == null ? 1 : current + 1);
	}

	/**
	 * Get the best possible lemma of a token's "הטיות" list.
	 *
	 * A lemma that is identical to its surface text is preferred; otherwise
	 * the lemma of the first alternative is used.
	 *
	 * @param tokens The token's "הטיות" list; assumed non-empty with
	 *               HebrewToken elements.
	 *
	 * @return Best lemma.
	 */
	private String bestLemma(List<Token> tokens) {
		for (int i = 0; i < tokens.size(); i++) {
			HebrewToken candidate = (HebrewToken) tokens.get(i);
			String lemma = candidate.getLemma();
			if (lemma.equals(candidate.getText()))
				return lemma;
		}
		// No exact match — fall back to the first alternative's lemma.
		return ((HebrewToken) tokens.get(0)).getLemma();
	}

	/**
	 * Tells if a word is a stop word.
	 *
	 * @param text - A word to check.
	 *
	 * @return true if the word is a stop word.
	 */
	private boolean isStopWord(String text) {
		// Linear scan over the stop-word table; the table is small enough
		// that this is not a bottleneck.
		boolean found = false;
		for (int i = 0; !found && i < BasicStopWordsArr.length; i++) {
			found = BasicStopWordsArr[i].equals(text);
		}
		return found;
	}

	/**
	 * Gets a string representation of the file's text.
	 *
	 * Tokens are read whitespace-delimited and re-joined with single spaces,
	 * so the original whitespace layout is not preserved.
	 *
	 * @param path - The path to the desired file.
	 *
	 * @return A string representation of the file's text, or an empty string
	 *         if the file could not be read.
	 */
	private String fileToString(String path){
		// StringBuilder avoids the O(n^2) cost of repeated string
		// concatenation, and try-with-resources closes the Scanner (and the
		// underlying file handle) — the original leaked it.
		StringBuilder text = new StringBuilder();
		try (Scanner s = new Scanner(new File(path), "UTF-8")) {
			while (s.hasNext()) {
				text.append(s.next()).append(' ');
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
		return text.toString();
	}

	/**
	 * Creates a new document from a file.
	 *
	 * @param path - The path to the desired file.
	 *
	 * @return A new document, or null when the file yields too few tokens.
	 */
	public Document rawFileToDocument(String path){
		// Read the raw file contents and delegate to the text-based factory.
		return textToDocument(fileToString(path), path);
	}

	/**
	 * Creates a new document using text and a path.
	 *
	 * @param text - The text of the document.
	 * @param path - The path of the file from which the text was taken from.
	 *
	 * @return A new Document.
	 */
	public Document textToDocument(String text,String path){
		// Wrap the path in a WebsiteInformation carrier and delegate.
		return textToDocument(text, new WebsiteInformation(path));
	}
	

	/**
	 * Creates a new document from raw text: tokenizes and lemmatizes it,
	 * skips stop words and tokens that are neither nouns nor verbs, and keeps
	 * only tokens that occur more than once.
	 *
	 * @param text - The text of the document.
	 * @param info - Information about the document's origin.
	 *
	 * @return A new Document, or null if it has 10 or fewer valid tokens.
	 */
	public Document textToDocument(String text,WebsiteInformation info){
		Document d=new Document(info);

		HashMap<String,Integer> numOfRepetitions= new HashMap<String,Integer>();

		// m_lemmatizer is shared by the downloader threads (see
		// urlsToDocuments), so tokenization must be mutually exclusive. The
		// original busy-wait on a non-volatile boolean flag was racy (its
		// check-then-set is not atomic, so two threads could both pass) and
		// burned CPU sleeping in a loop; a synchronized block provides the
		// intended mutual exclusion correctly.
		synchronized (m_lemmatizer) {
			m_lemmatizer.SetStream(new StringReader(text));

			String word = "";
			List<Token> tokens = new ArrayList<Token>();
			try{
				while (m_lemmatizer.getLemmatizeNextToken(new Reference<String>(word), tokens) > 0)
				{
					//skip this token if its not a hebrew token, and if its not a verb or noun
					if (((tokens.size() == 1) && !(tokens.get(0) instanceof HebrewToken))
							||tokens.size() == 0 ||((!tokens.get(0).toString().contains("Verb") && (!tokens.get(0).toString().contains("Noun")))))
						continue;

					processTokens(tokens,numOfRepetitions);
				}
			}catch(IOException e){e.printStackTrace();}
		}

		// Keep only tokens that repeat; iterating over entrySet avoids a
		// second lookup per key.
		for(java.util.Map.Entry<String,Integer> entry : numOfRepetitions.entrySet()){
			int num = entry.getValue();
			if(num>1)
				d.addToken(entry.getKey(),num);
		}

		//don't add the document if it has less than 10 valid tokens
		if (d.getTotalNumOfTokens() <= 10)
			return null;

		return d;
	}

	/**
	 * Iterates over a folder and creates new documents for all of it's subfiles.<br>
	 * Used in case of premade search results.
	 *
	 * @param outputFolderPath - Folder path.
	 *
	 * @return A document set (documents with too few tokens are skipped).
	 */
	public Set<Document> rawFilesToDocuments(String outputFolderPath){
		Set<Document> documentSet = new HashSet<Document>();
		File rawFilesFolder = new File(outputFolderPath);

		String subFiles[] = rawFilesFolder.list();
		if (subFiles == null){
			// NOTE(review): exiting with status 0 despite an error is
			// suspicious; kept for compatibility, but a non-zero status (or an
			// exception) would be more conventional.
			System.err.println("No files in dataset folder!");
			System.exit(0);
		}

		for (int i = 0; i < subFiles.length ; i++){
			if (subFiles[i].equals(".svn")) continue;
			// Build the child path with the File(parent, child) constructor
			// instead of the original hard-coded Windows "\\" separator, so the
			// path is correct on all platforms.
			Document currentDocument = rawFileToDocument(
					new File(rawFilesFolder, subFiles[i]).getPath());
			if (currentDocument != null)
				documentSet.add(currentDocument);
		}

		return documentSet;
	}

	//	UN-THREADED
	//	public Set<Document> urlsToDocuments(Set<String> urls) {
	//		Set<Document> documentSet = new HashSet<Document>();
	//		long currentTime = System.currentTimeMillis();
	//
	//		List<MyThread> threadList = new LinkedList<MyThread>();
	//		int index;
	//
	//		for (String url : urls){
	//			Document result = urlToDocument(url);
	//			if (result != null)
	//				documentSet.add(result);	
	//			
	//		}
	//		System.out.println("total time for jsouping: " + (System.currentTimeMillis() - currentTime));
	//
	//		return documentSet;
	//	}

	/**
	 * Downloads a set of websites concurrently (in batches of up to 30
	 * threads) and converts each to a document.
	 *
	 * @param urls - Information records (including URLs) of the websites.
	 *
	 * @return A set of successfully downloaded, non-trivial documents.
	 */
	public Set<Document> urlsToDocuments(Set<WebsiteInformation> urls) {
		Set<Document> documentSet = new HashSet<Document>();

		List<MyThread> threadList = new LinkedList<MyThread>();
		int index = 0;

		for (WebsiteInformation url : urls){
			if (index >= Math.min(30, urls.size() - 1)){
				// Batch is full: drain it before starting the next thread.
				index = 0;
				collectFinishedThreads(threadList, documentSet);
			}
			MyThread thread = new MyThread(index++, url);
			thread.setDaemon(true);
			thread.setPriority(Thread.MAX_PRIORITY);
			threadList.add(thread);
			thread.start();
		}
		// BUG FIX: the original never joined the final (partial) batch, so the
		// documents produced by those threads were silently dropped.
		collectFinishedThreads(threadList, documentSet);

		System.err.println("time for jsouping: " + (System.currentTimeMillis() - ClusterSearch.currentTime));

		return documentSet;
	}

	/**
	 * Waits for every thread in the list, moves the non-null results into the
	 * document set, and clears the list for the next batch.
	 */
	private void collectFinishedThreads(List<MyThread> threadList, Set<Document> documentSet) {
		waitForThreads(threadList);
		for (MyThread t : threadList){
			Document result = t.getDoc();
			if (result != null)
				documentSet.add(result);
		}
		threadList.clear();
	}

	/**
	 * Stop and wait for all of the current threads to finish.
	 *
	 * @param threadList - A list of threads.
	 */
	private void waitForThreads(List<MyThread> threadList){
		for (MyThread t : threadList){
			try {
				t.join();
			} catch (InterruptedException e) {
				System.err.println("Thread was interrupted!");
				e.printStackTrace();
				// Restore the interrupt status so callers higher up the stack
				// can still observe the interruption.
				Thread.currentThread().interrupt();
			}
		}
	}

	/**
	 * A private internal class that implements a thread that downloads a site's content.
	 * Non-static so it can call urlToDocument() on the enclosing HebrewNLP.
	 */
	private class MyThread extends Thread{

		// The resulting document; null until run() completes, and stays null
		// when the download fails. Read via getDoc() after join().
		private Document d;
		// The website this thread is responsible for downloading.
		private WebsiteInformation info;

		/**
		 * @param index - Used only as the thread's name.
		 * @param info - The website to download.
		 */
		public MyThread(int index,WebsiteInformation info){
			super(Integer.toString(index));
			this.info = info;
		}

		// Downloads and parses the site via the enclosing HebrewNLP instance.
		public void run(){
			d = urlToDocument(info);
		}

		/**
		 * @return The produced document; only meaningful after this thread has
		 *         been join()ed. May be null on failure.
		 */
		public Document getDoc(){
			return d;
		}

	}

	/**
	 * Creates a single document from a website.
	 *
	 * @param info - Information about the website, including it's URL.
	 *
	 * @return A new document, or null if the page could not be fetched or
	 *         yields too few valid tokens.
	 */
	public Document urlToDocument(WebsiteInformation info){

		org.jsoup.nodes.Document doc;

		try {
			doc = Jsoup.connect(info.getUrl()).get();
		}catch (java.nio.charset.UnsupportedCharsetException e){
			// Unknown/unsupported page encoding — skip this site.
			return null;
		}catch (IOException e) {
			// Covers all network failures, including SocketTimeoutException
			// (an IOException subclass the original caught redundantly with
			// identical handling).
			return null;
		}
		return textToDocument(doc.text(), info);
	}

}
