package nl.han.ica.ap.searchengine.server.crawling;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Observable;
import java.util.Observer;

import nl.han.ica.ap.searchengine.shared.sdo.Webpage;

/**
  * This class is responsible for creating several UrlCrawlers to index
  * a given list of urls. These urls are found inside a .txt file located
  * in src/main/resources with its filename as given in the constructor.
  * 
  * <p>These UrlCrawler objects run in separate threads. This class will 
  * wait for all the crawling to finish and then return a List of 
  * {@link Webpage Webpages}.
  * 
  * <p>The collecting of created Webpage objects is managed with the
  * Observer/Observable design pattern.
  * 
  * @author Laptop Willem
  */
public class WebpageIndexer implements Observer {

	/** Classpath name of the .txt file holding the urls to index. */
	private final String indexFile;
	/** Collects the Webpage objects reported back by the UrlCrawlers. */
	private final List<Webpage> indexed;
	
	/**
	  * Creates a new WebpageIndexer object. 
	  * 
	  * @param file The filename of the .txt file containing the links
	  * to index.
	  */
	public WebpageIndexer(final String file) {
		this.indexFile = file;
		this.indexed = new ArrayList<Webpage>();
	}
	
	/**
	  * Starts the process of creating UrlCrawler objects for each url
	  * to index. This results in a List of Webpage objects. These Webpage
	  * objects have their url and content attributes defined, the pagerank
	  * attribute is omitted as we need the entire index in order to calculate
	  * that property.
	  * 
	  * @return Returns a list of indexed Webpage objects.
	  * @throws SourceFileException Thrown when the index text file cannot be
	  * found or an IO error occurs while reading it.
	  */
	public List<Webpage> startIndexing() throws SourceFileException {
		List<String> sources = this.getUrlsToIndex();
		List<Thread> workers = new ArrayList<Thread>(sources.size());
		
		for (String source : sources) {
			UrlCrawler uc = new UrlCrawler(source);
			uc.addObserver(this);
			
			Thread worker = new Thread(uc);
			workers.add(worker);
			worker.start();
		}
		
		for (Thread worker : workers) {
			try {
				worker.join();
			} catch (InterruptedException e) {
				// Restore the interrupt flag so callers further up the
				// stack can still observe the interruption; swallowing
				// it silently would hide the request to stop.
				Thread.currentThread().interrupt();
			}
		}
		
		return this.indexed;
	}

	/**
	  * This method is called by the observable UrlCrawlers. These UrlCrawlers send a 
	  * Webpage object that <i>can</i> be null. Only non-null Webpage objects are
	  * added to the list of indexed Webpages (the instanceof check also rejects null).
	  * 
	  * <p>This method is synchronised as each UrlCrawler object runs in a separate
	  * thread, this is to prevent concurrent modification of the list.
	  */
	@Override
	public final synchronized void update(final Observable o, final Object message) {
		if (message instanceof Webpage) {
			this.indexed.add((Webpage) message);
		}
	}
	
	/**
	  * Reads the index file from the classpath and returns its lines, one url
	  * per line.
	  * 
	  * @return The urls listed in the index file.
	  * @throws SourceFileException Thrown when the resource is missing or an
	  * IO error occurs while reading it.
	  */
	private List<String> getUrlsToIndex() throws SourceFileException {
		InputStream stream = this.getClass().getResourceAsStream(this.indexFile);
		if (stream == null) {
			// A missing resource used to surface as a NullPointerException;
			// report it as the documented source-file failure instead.
			throw new SourceFileException();
		}
		
		List<String> index = new ArrayList<String>();
		// try-with-resources closes the reader (the original leaked it);
		// an explicit charset avoids platform-dependent decoding.
		try (BufferedReader fileContents = new BufferedReader(
				new InputStreamReader(stream, StandardCharsets.UTF_8))) {
			String line;
			while ((line = fileContents.readLine()) != null) {
				index.add(line);
			}
		} catch (IOException e) {
			throw new SourceFileException();
		}
		
		return index;
	}
	
}
