package org.NooLab.itexx.app.controller;

import java.util.ArrayList;
import java.util.Observable;
import java.util.Observer;

import org.NooLab.itexx.ITexxWindowCommons;
import org.NooLab.itexx.app.controller.results.SearchEngineResultItem;
import org.NooLab.itexx.app.controller.results.SearchEngineResults;
import org.NooLab.itexx.app.processes.ProcessAdmin;
import org.NooLab.nativebrowser.WebBrowserPublic;
import org.NooLab.nativebrowser.components.WebBrowserPublicIntf;
import org.NooLab.utilities.logging.PrintLog;
import org.NooLab.utilities.net.pages.WebRetriever;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

/** 
 *
 * this "simply" retrieves web pages for a given url, or a list of them.
 * 
 * it also accepts "SearchEngineResults", which contains items, which in turn
 * provide the url and the field for storing the retrieved page
 * 
 * loading of web pages takes place in its own thread
 * 	         
 */
public class PageLoader extends Observable implements Observer,PageLoaderIntf{

	
	/** shared application context: logger, gui, session manager, process admin */
	ITexxWindowCommons appCommons;
	
	/** queue of incoming result items; filled by callers, drained by the digester thread */
	SearchEngineResults seInResultsBuffer ;
	
	// loop flag of the digester thread; volatile because close() clears it from
	// a different thread than the one spinning on it in SeResultsDigester.run()
	volatile boolean resultsDigesterIsRunning=false;
	
	/** background worker that loads pages for the buffered result items */
	SeResultsDigester seDigester;
	
	/** the session manager's observer; the digester reports completed items to it */
	private Observer observer;
	
	private WebBrowserPublicIntf nativeBrowser;
	
	PrintLog out;

	// 1 = plain http retrieval via WebRetriever, any other value = native browser
	private int infrastructureForRetrieval=2;

	private ProcessAdmin processAdmin;

	/** whether the external native browser process could be started */
	private boolean nativeBrowserAvailable;

	
	
	// ========================================================================
	/**
	 * Prepares the item buffer, starts the digester thread, and launches the
	 * external native browser process; all browser responses arrive in update().
	 * 
	 * @param commons shared application context (must provide out, session manager, process admin)
	 */
	public PageLoader(ITexxWindowCommons commons){
		
		appCommons = commons;
		out = appCommons.out ;
		
		// buffer for incoming items
		seInResultsBuffer = new SearchEngineResults(appCommons) ; // the list is inside there 
		// note that prepared  
		
		observer = appCommons.getSessionManager().getObserver();
		// starts its own thread immediately (see SeResultsDigester constructor)
		seDigester = new SeResultsDigester();
		
		try {
			// we can NOT open it directly
			// nativeBrowser = WebBrowserPublic.open();
			
			appCommons.setNativeBrowserPort(this) ;
			// instead, we have to use it as a process
			processAdmin = appCommons.getProcessAdmin();
			processAdmin.startExternalNativeBrowserProcess();
			// all browser responses will arrive in the update method !
			nativeBrowserAvailable = true;
			
		} catch (Exception e) {
			out.printErr(1, "Problems to create an instance of the native browser while preparing the PageLoader object.");
			// e.printStackTrace();
			nativeBrowserAvailable = false;
		}
		
	}
	// ========================================================================	
	
	/**
	 * Signals the digester thread to stop; it leaves its loop on the next cycle.
	 * NOTE(review): the external native browser process is not terminated here —
	 * presumably the ProcessAdmin owns its lifecycle; confirm.
	 */
	public void close(){
		// close all processes
		resultsDigesterIsRunning = false;
	}
	
	/**
	 * Receives notifications (e.g. responses from the external native browser process).
	 * 
	 * @param sender  the notifying Observable
	 * @param dataobj the payload; may be null per the Observable contract
	 */
	@Override
	public void update(Observable sender, Object dataobj) {
		// String.valueOf() instead of dataobj.toString(): notifyObservers() may
		// legally deliver a null argument, which would have thrown an NPE here
		out.print(2, "PageLoader received an update from "+sender.toString()+",  data : "+String.valueOf(dataobj)) ;
		
		
	}
	

	/**
	 * Enqueues all non-null items of the given results into the buffer;
	 * the digester thread retrieves the pages asynchronously.
	 * 
	 * @param seResults container of result items; null or empty is tolerated
	 */
	public void getPagesForSearchEngineResults(SearchEngineResults seResults){
		
		// nothing to enqueue; avoids an NPE for absent results
		if (seResults==null){
			return;
		}
		
		// put it to a queue, on which we will work async
		int z=0;
		 
		
		for (int i=0;i<seResults.size();i++){
			
			SearchEngineResultItem item = seResults.get(i);
			if (item!=null){
				seInResultsBuffer.add( item) ;
				z++;
			}
			
		}// i->
		
		out.print(2, "result items added to PageLoader's buffer list, n="+z);
	}
	
	// this is working on the result items buffer ,...
	// note that the result items are objects that contain fields url and text !
	class SeResultsDigester extends Observable implements Runnable{

		SearchEngineResultItem serItem;
		Thread srdThrd;
		
		/** registers the session manager's observer and starts the worker thread at once */
		public SeResultsDigester (){
			
			this.addObserver(observer) ;
			srdThrd = new Thread(this,"srdThrd");
			srdThrd.start();
		}
		
		/**
		 * Polls the buffer: takes the first item, loads its page, extracts the
		 * plain text via jsoup, and notifies the observer (session manager).
		 * Runs until resultsDigesterIsRunning is cleared by close().
		 */
		@Override
		public void run() {
			// 
			boolean isWorking=false;
			
			resultsDigesterIsRunning = true;
			
			while (resultsDigesterIsRunning){
				
				if ((isWorking==false) && (seInResultsBuffer.size()>0)){
					isWorking=true;
					
					// take-and-remove head of the buffer
					// NOTE(review): the backing list does not appear to be synchronized,
					// while producers add() from another thread — confirm thread-safety
					serItem = seInResultsBuffer.get(0) ;
					seInResultsBuffer.getItems().remove(0);
					
					String url = serItem.getTargetDocUrl() ;
											out.print(2, "loading page for url (remaining:"+seInResultsBuffer.size()+") : "+ url); 
					String html ;
					// blocks until the page is retrieved (getPage() yields "" on failure)
					html = loadPage(url);
					
					serItem.setTargetDocHtml(html) ;
					
					Document soupdoc = Jsoup.parse(html);
					
					serItem.setTargetDocContentText( soupdoc.text() ) ; // FIXME: this should be content aware
					// we need a class that knows about wiki, CMS, blogs, typical news portals
					
					
					// send to session manager
					setChanged();
					this.notifyObservers(serItem) ;
					
					if (seInResultsBuffer.size()==0){
						appCommons.getWindowGui().getMainStatusBar().showBusyIndicator(false);
						
						// send a signal to SessionManager that this raid has been finished
						// yet, the packages may still be on the way... as it runs decoupled
						appCommons.getSessionManager().setStateOnProcess( 1, serItem.getGuid(), SessionManagerIntf._REQUEST_RAID_COMPLETED);
					}												// serItem.getRequestguid() ??
					isWorking=false;
					
				}else{
					out.delay(50);
				}
				
			} // -> 
			out.print(2, "\nPageLoader's sub-class for feeding results to the SessionManager stopped working.\n") ;
			resultsDigesterIsRunning = false;
		}
		
	}
	
	
	/**
	 * Loads the pages for a list of urls, in order, blocking until done.
	 * (Previously an unimplemented stub that always returned an empty list.)
	 * 
	 * @param urlStrList the urls to retrieve; null is tolerated
	 * @return one html string per url, in the same order ("" for failed retrievals);
	 *         empty list for null input
	 */
	public ArrayList<String> loadPages(ArrayList<String> urlStrList){
		ArrayList<String> htmlList = new ArrayList<String> ();
		
		if (urlStrList!=null){
			for (String urlStr : urlStrList){
				htmlList.add( loadPage(urlStr) );
			}
		}
		return htmlList;
	}
	
	/**
	 * organizing the loading (facade for threaded process)
	 *  
	 * @param urlStr the url to retrieve
	 * @return the page html; "" if the retrieval failed
	 */
	public String loadPage(String urlStr){
		// the helper runs in its own thread, but getHtml() waits for completion;
		// (the former unused local variable "loader" has been removed)
		return new internalLoaderProcess(urlStr).getHtml();
	}
	
	
	
	/** physical loading
	 * 
	 * @param urlStr the url to retrieve
	 * @return the page html; "" if the retrieval failed (exceptions are logged and swallowed)
	 */
	protected String getPage(String urlStr){
		String html="";
		WebBrowserPublicIntf browser ;

		// simple: 
		try{
		
			if (infrastructureForRetrieval==1){

				// retrieve the page
				WebRetriever webloader = new WebRetriever();
				
				webloader.setUrlStr( urlStr, "");
				webloader.setUserAgent("Mozilla/5.0 (Windows; U; Windows NT 7.1; en; rv:1.9.0.12) Gecko/2012042316 Firefox/15.0.10") ;
				
				html = webloader.getHtml() ;

			}else{
				browser = WebBrowserPublic.open();
				html = browser.navigate( urlStr );
			}

		}catch(Exception e){
			// best-effort: callers rely on "" being returned on failure
			e.printStackTrace();
		}
		
		
		// check the html: is it empty, except lots of java script ?
		// then we use the native browser ...
		// NativeBrowser nBrowser = new NativeBrowser(); 
		
		return html;
	}
	
	
	
	// this is volatile, it serves just as a thread provider!
	// processing = loading is defined outside of it.
	class internalLoaderProcess implements Runnable{
		
		Thread ilpThrd;
		// isNotCompleted must be volatile: the caller's thread spins on it in
		// getHtml() while the worker thread clears it in run(); without volatile
		// the caller may never observe the write (and the volatile write/read
		// pair also makes the worker's htmlstr assignment visible to the caller)
		String htmlstr="", urlStr="" ;
		volatile boolean isNotCompleted;
		
		public internalLoaderProcess(String urlstr){
			this.urlStr = urlstr;
			isNotCompleted = true;
			
			ilpThrd = new Thread(this,"ilpThrd") ;
			
		}

		/**
		 * Starts the worker thread and busy-waits (50 Hz polling) until the
		 * page has been retrieved.
		 * 
		 * @return the retrieved html; "" on failure
		 */
		public String getHtml(){
			
			isNotCompleted = true;
			ilpThrd.start();
			
			while (isNotCompleted){
				out.delay(5);
			}
			return htmlstr;
		}
		
		@Override
		public void run() {
			// fetch first, then release the waiting caller
			htmlstr = getPage(urlStr) ;
			isNotCompleted = false ;
		}
		
		
		
	}


	/** @return the retrieval mode: 1 = WebRetriever (plain http), otherwise native browser */
	public int getInfrastructureForRetrieval() {
		return infrastructureForRetrieval;
	}

	/** @param infrastructureForRetrieval 1 = WebRetriever (plain http), otherwise native browser */
	public void setInfrastructureForRetrieval(int infrastructureForRetrieval) {
		this.infrastructureForRetrieval = infrastructureForRetrieval;
	}
}

