/**
 * Processing of raw crawled html files: reading, indexing and ranking.
 */
package org.dse.proc;

import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.dse.beans.HtmlDocument;
import org.dse.dao.HtmlDocumentDAO;
import org.dse.html.HtmlParser;
import org.dse.util.Constants;
import org.dse.web.PageRank;
import org.dse.web.WebGraph;
import org.dse.web.WebGraphDelegate;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
import org.springframework.transaction.support.TransactionTemplate;

/**
 * This class implements the algorithm to read the raw files and process them.
 * 
 * The object of this class will be a singleton in one system running. It is
 * configged in the application context.
 * 
 * The algorithm will read html documents from the raw files one by one, and
 * process them one by one. If the content is not new, which means it is an
 * update, the algorithm will perform a proper update action.
 * 
 * @author zhangchen
 * 
 */
public class RawHtmlDocumentManagerImpl implements RawHtmlDocumentManager {

	// Markers identifying the header fields of a document inside a raw file.
	private final static String URL_MARK = "URL:";

	private final static String LENGTH_MARK = "Character-Length=";

	private final static String CONTENT_TYPE_MARK = "Content-Type=";

	private static final Log log = LogFactory
			.getLog(RawHtmlDocumentManagerImpl.class);

	/**
	 * This method contains the algorithm. Read the raw files and create keyword
	 * indexes. In addition, calculate some useful data like pagerank, which
	 * will be used in the query subsystem. All outputs of this algorithm will
	 * flow to database.
	 * 
	 * @param taskFile
	 *            the file containing the raw files which will be processed and
	 *            the urls which will be deleted
	 */
	public void indexFiles(String taskFile) {
		TaskGetter taskGetter = new TaskGetter(taskFile);
		taskGetter.getTaskFromFile();

		List<String> deleteUrls = taskGetter.getDeleteUrls();

		// delete non-existed documents from database
		transactionTemplate.execute(new TransactionDoDelete(deleteUrls));

		// the raw files containing the new and updated documents
		List<String> rawFiles = taskGetter.getUpdateFiles();

		// rebuild the web graph from the database, inside a transaction
		WebGraph graph = (WebGraph) transactionTemplate
				.execute(new TransactionCallback() {
					public Object doInTransaction(TransactionStatus status) {
						return webGraphDelegate.rebuildWebGraph();
					}
				});

		// FIX: the pagerank sums were dumped with System.out.println for
		// debugging; emit them through the logger at debug level instead,
		// and only compute them when debug logging is enabled.
		if (log.isDebugEnabled())
			log.debug("pagerank sum before reindexing: " + pagerankSum(graph));

		// delete the non-existed urls from the in-memory graph as well
		for (String url : deleteUrls) {
			graph.removeLink(url);
		}

		// process each raw file: parse, index and extend the web graph
		for (String fileName : rawFiles) {
			indexRawFile(fileName, graph);
		}

		transactionTemplate.execute(new TransactionDoSaveWebGraph(graph));
		graph.finishConstruction();

		if (log.isDebugEnabled())
			log.debug("pagerank sum after reindexing: " + pagerankSum(graph));

		calculatePagerank(graph);
	}

	/**
	 * Sum the pagerank scores of every url in the graph. Used only for debug
	 * logging; replaces the inline System.out debug dumps previously embedded
	 * in indexFiles.
	 * 
	 * @param graph
	 *            the web graph to rank
	 * @return the sum of all pagerank scores
	 */
	private double pagerankSum(WebGraph graph) {
		double sum = 0;
		PageRank rank = new PageRank(graph);
		for (Double score : rank.pageRank().values()) {
			sum += score.doubleValue();
		}
		return sum;
	}

	/**
	 * Read one raw file, build a HtmlDocument for every record in it, index
	 * each document inside a transaction and record its out-links in the web
	 * graph.
	 * 
	 * @param fileName
	 *            the raw file to process
	 * @param graph
	 *            the web graph to extend with the links of each document
	 */
	private void indexRawFile(String fileName, WebGraph graph) {
		RawHtmlFilesReader rawFileReader = new RawHtmlFilesReader(fileName);

		// FIX: close the reader even when parsing or indexing throws
		try {
			HtmlDocument document = null;

			int offset = rawFileReader.getOffset();
			String strLine = null;
			while ((strLine = rawFileReader.readLine()) != null) {
				// skip blank lines and comment lines
				if (strLine.length() == 0) {
					offset = rawFileReader.getOffset();
					continue;
				}
				char firstCh = strLine.charAt(0);
				if (firstCh == '#' || firstCh == '\n') {
					offset = rawFileReader.getOffset();
					continue;
				}

				if (strLine.startsWith(URL_MARK)) {// start of a document
					document = new HtmlDocument();
					String url = strLine.substring(URL_MARK.length());
					// normalize: every stored url ends with a slash
					if (!url.endsWith("/"))
						url += "/";
					document.setDocUrl(url);

					// scan forward for the character length of the content;
					// NOTE(review): if the mark is missing the length stays at
					// its default and the content read below will be empty
					while ((strLine = rawFileReader.readLine()) != null) {
						if (strLine.startsWith(LENGTH_MARK)) {
							String s_length = strLine.substring(LENGTH_MARK
									.length());
							document.setDocLength(Integer.parseInt(s_length));
							break;
						}
					}

					// scan forward for the mime type; an empty line terminates
					// the header section
					while ((strLine = rawFileReader.readLine()) != null) {
						if (strLine.startsWith(CONTENT_TYPE_MARK)) {
							int ind = strLine.indexOf(";");

							// strip a trailing ";charset=..." part if present
							if (ind != -1)
								document.setMimeType(strLine.substring(
										CONTENT_TYPE_MARK.length(), ind));
							else
								document.setMimeType(strLine
										.substring(CONTENT_TYPE_MARK.length()));
						}
						if (strLine.equals(""))
							break;
					}

					document.setDocOffset(offset);
					document.setRawFileName(fileName);
					document.setPagerank(0.0);

					char[] content = new char[document.getDocLength()];
					// read the content of the document and call other modules
					// to process it
					if (rawFileReader.read(content) != -1) {
						HtmlParser parser = new HtmlParser(new String(content),
								document.getMimeType());

						HtmlContentProcessor contentProcessor = new HtmlContentProcessor(
								parser, document);
						document.setTitle(parser.getTitle());
						document.setText(parser.getPlainText());

						// the stored length is that of the plain text, not of
						// the raw html
						document.setDocLength(parser.getPlainText().length());

						transactionTemplate.execute(new TransactionDoIndex(
								document, contentProcessor));

						addToWebGraph(graph, document.getDocUrl(), parser);
					}
				}

				offset = rawFileReader.getOffset();
			}
		} finally {
			rawFileReader.close();
		}
	}

	/**
	 * Record the out-links of a document in the web graph. Relative links are
	 * resolved against the host part of the document url, and every link is
	 * normalized to end with a slash.
	 * 
	 * @param graph
	 *            the web graph to update
	 * @param url
	 *            the (trailing-slash normalized) url of the document
	 * @param parser
	 *            the parser holding the links found in the document
	 */
	private void addToWebGraph(WebGraph graph, String url, HtmlParser parser) {
		// an updated document is re-added, so drop its old out-links first
		if (graph.availableLink().contains(url)) {
			graph.removeLink(url);
		}

		Set<String> links = parser.getLinks();
		if (links.isEmpty()) {
			graph.addLink(url);
			return;
		}

		final String prefix = "://";
		int prefixIndex = url.indexOf(prefix) + prefix.length();
		// the url always ends with "/", so a host boundary always exists
		int hostEnd = url.indexOf("/", prefixIndex);

		for (String link : links) {
			// FIX: guard against empty link strings before charAt(0)
			if (link.length() == 0)
				continue;

			// links without a scheme are local: resolve against the host
			if (link.indexOf(prefix) == -1) {
				if (link.charAt(0) != '/')
					link = "/" + link;
				link = url.substring(0, hostEnd) + link;
			}

			// normalize: every link ends with a slash
			if (!link.endsWith("/"))
				link += "/";
			graph.addLink(url, link);
		}
	}

	/**
	 * Compute the pagerank of every url in the graph and persist the scores in
	 * a transaction.
	 * 
	 * @param graph
	 *            the fully constructed web graph
	 */
	private void calculatePagerank(WebGraph graph) {
		PageRank pagerank = new PageRank(graph);
		Map<String, Double> scores = pagerank.pageRank();

		transactionTemplate.execute(new TransactionDoPagerank(scores));
	}

	/**
	 * Get the url string of a html document according to the startpos of the
	 * html document in the raw file.
	 * 
	 * @param rawFileName
	 *            the raw file containing the document
	 * @param offset
	 *            the start pos in the raw file
	 * @return a String representing the url, or null if no url header is found
	 */
	public String getUrl(String rawFileName, int offset) {
		RawHtmlFilesReader rawFileReader = new RawHtmlFilesReader(rawFileName);

		// FIX: close the reader on every path, including exceptions
		try {
			rawFileReader.setOffset(offset);

			String strLine = rawFileReader.readLine();
			if (strLine != null && strLine.startsWith(URL_MARK))
				return strLine.substring(URL_MARK.length());

			return null;
		} finally {
			rawFileReader.close();
		}
	}

	/**
	 * Get the full content of a html document according to the startpos of the
	 * html document in the raw file.
	 * 
	 * @param rawFileName
	 *            the raw file containing the document
	 * @param offset
	 *            the start pos in the raw file
	 * @return a String storing the content, or null if the record is malformed
	 */
	public String getHtmlContent(String rawFileName, int offset) {
		RawHtmlFilesReader rawFileReader = new RawHtmlFilesReader(rawFileName);

		// FIX: the original leaked the reader on the early "return null" path
		// and on exceptions; close it on every path.
		try {
			rawFileReader.setOffset(offset);

			String strLine = rawFileReader.readLine();
			if (strLine == null || !strLine.startsWith(URL_MARK))
				return null;

			// scan forward for the character length of the content
			int length = 0;
			while ((strLine = rawFileReader.readLine()) != null) {
				if (strLine.startsWith(LENGTH_MARK)) {
					String s_length = strLine.substring(LENGTH_MARK.length());
					length = Integer.parseInt(s_length);
					break;
				}
			}

			// skip the rest of the header; an empty line terminates it
			while ((strLine = rawFileReader.readLine()) != null) {
				if (strLine.equals(""))
					break;
			}

			String result = null;

			char[] content = new char[length];
			if (rawFileReader.read(content) != -1)
				result = new String(content);

			return result;
		} finally {
			rawFileReader.close();
		}
	}

	private HtmlDocumentDAO htmlDocumentDao;

	private WebGraphDelegate webGraphDelegate;

	private TransactionTemplate transactionTemplate;

	/**
	 * Spring framework IOC
	 */
	public void setHtmlDocumentDao(HtmlDocumentDAO htmlDocumentDao) {
		this.htmlDocumentDao = htmlDocumentDao;
	}

	/**
	 * Spring framework IOC
	 */
	public void setWebGraphDelegate(WebGraphDelegate webGraphDelegate) {
		this.webGraphDelegate = webGraphDelegate;
	}

	/**
	 * Spring framework IOC
	 */
	public void setTransactionTemplate(TransactionTemplate transactionTemplate) {
		this.transactionTemplate = transactionTemplate;
	}

	/**
	 * The create indexes operation done in a transaction. Use Spring
	 * TransactionTemplate and TransactionCallback to specify the range of the
	 * transaction.
	 */
	private class TransactionDoIndex implements TransactionCallback {
		HtmlDocument document;

		HtmlContentProcessor contentProcessor;

		/**
		 * The constructor.
		 * 
		 * @param document
		 *            the current processing HtmlDocument
		 * @param contentProcessor
		 *            the HtmlContentProcessor associated with the document
		 */
		public TransactionDoIndex(HtmlDocument document,
				HtmlContentProcessor contentProcessor) {
			this.document = document;
			this.contentProcessor = contentProcessor;
		}

		/**
		 * The range of this transaction is save or update the document to
		 * database. Then, use content processor to segment the plain text and
		 * create index. At last, save all indexes in batch mode.
		 * 
		 * @param ts
		 *            the TransactionStatus object
		 * @return always null
		 */
		public Object doInTransaction(TransactionStatus ts) {
			try {
				// an existing document with the same url is replaced
				HtmlDocument temp = htmlDocumentDao.findByUrl(document
						.getDocUrl());
				if (temp != null) {
					htmlDocumentDao.makeTransient(temp);
					htmlDocumentDao.flush();
				}
				htmlDocumentDao.makePersistent(document);
				htmlDocumentDao.flush();
				htmlDocumentDao.clear();

				contentProcessor.processContent();
			} catch (Exception e) {
				ts.setRollbackOnly();
				// FIX: log.error already records the stack trace; the
				// duplicate System.err/printStackTrace output is removed
				log.error("Error when creating index.", e);
			}
			return null;
		}
	}

	/**
	 * The calculate pagerank operation done in a transaction. Use Spring
	 * TransactionTemplate and TransactionCallback to specify the range of the
	 * transaction.
	 */
	private class TransactionDoPagerank implements TransactionCallback {
		Map<String, Double> scores;

		/**
		 * The constructor.
		 * 
		 * @param scores
		 *            a map containing the url and its pagerank
		 */
		public TransactionDoPagerank(Map<String, Double> scores) {
			this.scores = scores;
		}

		/**
		 * The range of this transaction is save all pagerank to database, in
		 * batch mode.
		 * 
		 * @param ts
		 *            the TransactionStatus object
		 * @return always null
		 */
		public Object doInTransaction(TransactionStatus ts) {
			try {
				int persisted = 0;

				// iterate entries to avoid a second map lookup per url
				for (Map.Entry<String, Double> entry : scores.entrySet()) {
					HtmlDocument document = htmlDocumentDao.findByUrl(entry
							.getKey());
					if (document != null) {
						document.setPagerank(entry.getValue());
						htmlDocumentDao.makePersistent(document);
						++persisted;

						// FIX: flush after every BATCH_SIZE persisted
						// documents; the original counted skipped urls too and
						// flushed the very first one-element "batch"
						if (persisted % Constants.BATCH_SIZE == 0) {
							htmlDocumentDao.flush();
							htmlDocumentDao.clear();
						}
					}
				}

				// flush the final partial batch
				htmlDocumentDao.flush();
				htmlDocumentDao.clear();
			} catch (Exception e) {
				ts.setRollbackOnly();
				log.error("Error when calculating pagerank.", e);
			}
			return null;
		}
	}

	/**
	 * The delete urls operation done in a transaction. Use Spring
	 * TransactionTemplate and TransactionCallback to specify the range of the
	 * transaction.
	 */
	private class TransactionDoDelete implements TransactionCallback {
		List<String> urls;

		/**
		 * The constructor.
		 * 
		 * @param urls
		 *            the list containing all urls to be deleted
		 */
		public TransactionDoDelete(List<String> urls) {
			this.urls = urls;
		}

		/**
		 * The range of this transaction is delete all the urls from database.
		 * This means delete the associated indexes, too.
		 * 
		 * @param ts
		 *            the TransactionStatus object
		 * @return always null
		 */
		public Object doInTransaction(TransactionStatus ts) {
			try {
				for (String url : urls) {
					HtmlDocument document = htmlDocumentDao.findByUrl(url);
					if (document != null) {
						htmlDocumentDao.makeTransient(document);
					}
				}
			} catch (Exception e) {
				ts.setRollbackOnly();
				log.error("Error when deleting urls.", e);
			}
			return null;
		}
	}

	/**
	 * The save web graph operation done in a transaction. Use Spring
	 * TransactionTemplate and TransactionCallback to specify the range of the
	 * transaction.
	 */
	private class TransactionDoSaveWebGraph implements TransactionCallback {
		WebGraph graph;

		/**
		 * The constructor.
		 * 
		 * @param graph
		 *            the WebGraph data structure
		 */
		public TransactionDoSaveWebGraph(WebGraph graph) {
			this.graph = graph;
		}

		/**
		 * The range of this transaction is save the whole web graph to
		 * database.
		 * 
		 * @param ts
		 *            the TransactionStatus object
		 * @return always null
		 */
		public Object doInTransaction(TransactionStatus ts) {
			try {
				webGraphDelegate.saveWebGraph(graph);
			} catch (Exception e) {
				ts.setRollbackOnly();
				log.error("Error when saving web graph.", e);
			}
			return null;
		}
	}
}
