/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.openness.crawler.crawler;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.openness.crawler.fetcher.PageFetcher;
import com.openness.crawler.frontier.DocIDServer;
import com.openness.crawler.frontier.Frontier;
import com.openness.crawler.hbase.HBaseTablePool;
import com.openness.crawler.hbase.HBaseUtil;
import com.openness.crawler.robotstxt.RobotstxtServer;
import com.openness.crawler.url.URLCanonicalizer;
import com.openness.crawler.url.WebURL;
import com.openness.crawler.util.Constants;
import com.openness.crawler.zookeeper.ZKConnectionLatch;

/**
 * The controller that manages a crawling session. This class creates the
 * crawler threads and monitors their progress.
 * 
 */
public class CrawlController extends Configurable {

	private static final Logger LOGGER = LoggerFactory
			.getLogger(CrawlController.class);

	/**
	 * The 'customData' object can be used for passing custom crawl-related
	 * configurations to different components of the crawler.
	 */
	private Object customData;

	/**
	 * Once the crawling session finishes the controller collects the local data
	 * of the crawler threads and stores them in this List.
	 */
	private List<Object> crawlersLocalData = new ArrayList<Object>();

	/**
	 * Is the crawling of this session finished? Written by the monitor thread
	 * and read by other threads (e.g. {@link #waitUntilFinish()}), hence
	 * volatile for cross-thread visibility.
	 */
	private volatile boolean finished;

	/**
	 * Is the crawling session set to 'shutdown'. Crawler threads monitor this
	 * flag and when it is set they will no longer process new pages. Written
	 * from the thread calling {@link #shutdown()} and read from crawler
	 * threads, hence volatile for cross-thread visibility.
	 */
	private volatile boolean shuttingDown;

	/** Shared ZooKeeper session used to coordinate multiple crawl servers. */
	private ZooKeeper zk;

	private PageFetcher pageFetcher;

	private RobotstxtServer robotstxtServer;

	private Frontier frontier;

	private DocIDServer docIdServer;

	/**
	 * Path of this server's ephemeral registration node under the ZK servers
	 * dir; set by {@link #register()}.
	 */
	private String server;

	/** The monitor thread signals session completion on this lock. */
	private final Object waitingLock = new Object();

	/**
	 * Initializes the shared HBase table pool from the crawl configuration.
	 *
	 * @throws IOException if the pool cannot be created
	 */
	private void initializeHBaseTablePool() throws IOException {
		LOGGER.info("HBaseTablePool initialize");

		HBaseTablePool.initialize(config);

		LOGGER.info("HBaseTablePool initialize success");
	}

	/**
	 * Releases the shared HBase table pool.
	 *
	 * @throws IOException if closing pooled resources fails
	 */
	private void shutdownHBaseTablePool() throws IOException {
		LOGGER.info("HBaseTablePool shutdown");

		HBaseTablePool.shutdown();

		LOGGER.info("HBaseTablePool shutdown success");
	}

	/**
	 * Creates {@code tableName} if it is absent. If the table exists and the
	 * crawl is NOT resumable, the table is truncated (disable + delete +
	 * recreate); if it exists and the crawl is resumable, it is left as-is so
	 * the previous crawl state survives.
	 *
	 * @param admin     open HBase admin handle (owned by the caller)
	 * @param tableName name of the table to create or reset
	 * @throws IOException on any HBase admin failure
	 */
	private void createOrResetTable(HBaseAdmin admin, String tableName)
			throws IOException {
		HTableDescriptor descriptor = HBaseUtil.getHTableDescriptor(tableName);

		if (admin.tableExists(tableName)) {
			if (!config.isResumableCrawling()) {
				LOGGER.info(tableName
						+ ": exists and crawler is not resumable, truncate");

				admin.disableTable(tableName);

				admin.deleteTable(tableName);

				admin.createTable(descriptor);
			} else {
				LOGGER.info(tableName
						+ ": exists and crawler is resumable, skip");
			}
		} else {
			LOGGER.info(tableName + ": not exists, create");

			admin.createTable(descriptor);
		}
	}

	/**
	 * Sets up the four HBase tables used by the crawler (workqueue,
	 * in-process pages, docid server, counters). A ZK status node is used as a
	 * cluster-wide "already initialized" marker: the first server to create it
	 * performs the table setup, later servers skip it.
	 *
	 * @throws IOException if table creation fails
	 */
	private void initializeHBaseTable() throws IOException {
		LOGGER.info("HBase Table initialize");

		boolean alreadyInitialized = false;

		try {
			// Winning this create means we are the first server up and are
			// responsible for preparing the tables.
			String path = zk.create(Constants.ZK_HBASE_TABLE_STATUS,
					Constants.EMPTY_BYTEARRAY, Ids.OPEN_ACL_UNSAFE,
					CreateMode.PERSISTENT);

			if (path != null && !path.isEmpty()) {
				alreadyInitialized = false;
			}
		} catch (Exception e) {
			// Node already exists (or create failed): another server has
			// claimed/done the initialization, so we skip it.
			alreadyInitialized = true;
		}

		if (!alreadyInitialized) {
			HBaseAdmin admin = null;

			try {
				admin = new HBaseAdmin(HBaseTablePool.getConf());

				createOrResetTable(admin, Constants.HBASE_TABLE_WORKQUEUE);

				createOrResetTable(admin,
						Constants.HBASE_TABLE_INPROCESSPAGESDB);

				createOrResetTable(admin, Constants.HBASE_TABLE_DOCIDSERVER);

				createOrResetTable(admin, Constants.HBASE_TABLE_COUNTERS);

				LOGGER.info("HBase Table initialize success");
			} catch (IOException e) {
				// Preserve the original failure as the cause.
				throw new IOException("HBase Table initialize failure", e);
			} finally {
				if (admin != null) {
					admin.close();
				}
			}

		} else {
			LOGGER.info("HBase Table initialized, skip");
		}
	}

	/**
	 * Removes the cluster-wide "HBase tables initialized" marker znode, but
	 * only when this is the last registered server (so a restarting cluster
	 * re-runs table initialization).
	 *
	 * @throws IOException declared for symmetry with the other shutdown steps
	 */
	private void shutdownHBaseTable() throws IOException {
		LOGGER.info("HBase Table shutdown");

		try {
			if (zk.getChildren(Constants.ZK_SERVERS, false).size() == 0) {
				deleteIfExists(Constants.ZK_HBASE_TABLE_STATUS);
			}
		} catch (Exception e) {
			LOGGER.error("delete zk node hbasestatus error", e);
		}

		LOGGER.info("HBase Table shutdown success");
	}

	/**
	 * Creates a persistent znode at {@code path} with {@code data} unless it
	 * already exists.
	 *
	 * NOTE(review): exists-then-create is not atomic; if two servers race, the
	 * loser's create throws and is handled by the caller's catch block (same
	 * behavior as the original inline code).
	 *
	 * @param path znode path to create
	 * @param data initial node payload
	 * @throws Exception on ZooKeeper errors
	 */
	private void createIfAbsent(String path, byte[] data) throws Exception {
		if (zk.exists(path, false) == null) {
			zk.create(path, data, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
		}
	}

	/**
	 * Deletes the znode at {@code path} (any version) if it exists.
	 *
	 * @param path znode path to delete
	 * @throws Exception on ZooKeeper errors
	 */
	private void deleteIfExists(String path) throws Exception {
		if (zk.exists(path, false) != null) {
			zk.delete(path, -1);
		}
	}

	/**
	 * Connects to ZooKeeper and lays out the znode tree the crawler relies on
	 * (root, servers, crawling-thread, counters, docid, locks). Counter nodes
	 * start from a zero payload, structural nodes from an empty payload.
	 *
	 * @throws Exception if the ZooKeeper connection cannot be established
	 */
	private void initializeZooKeeper() throws Exception {
		LOGGER.info("ZooKeeper initialize");

		ZKConnectionLatch connectionLatch = new ZKConnectionLatch();

		zk = new ZooKeeper(
				config.getHbaseZooKeeperQuorum(),
				config.getZookeeperSessionTimeout(), connectionLatch);

		// Block until the session is actually connected before using it.
		connectionLatch.await();

		try {
			// zk root dir: /crawler
			createIfAbsent(Constants.ZK_ROOT_DIR, Constants.EMPTY_BYTEARRAY);

			// zk node servers: /crawler/servers
			createIfAbsent(Constants.ZK_SERVERS, Constants.EMPTY_BYTEARRAY);

			// zk node crawlingthread: /crawler/crawlingthread
			createIfAbsent(Constants.ZK_CRAWLING_THREAD,
					Constants.EMPTY_BYTEARRAY);

			// zk counters dir: /crawler/counters
			createIfAbsent(Constants.ZK_COUNTERS_DIR,
					Constants.EMPTY_BYTEARRAY);

			// zk node counters scheduledpages: /crawler/counters/scheduledpages
			createIfAbsent(Constants.ZK_NODE_COUNTERS_SCHEDULEDPAGES,
					Constants.ZERO_BYTEARRAY);

			// zk node counters processedpages: /crawler/counters/processedpages
			createIfAbsent(Constants.ZK_NODE_COUNTERS_PROCESSEDPAGES,
					Constants.ZERO_BYTEARRAY);

			// zk node lastdocid: /crawler/lastdocid
			createIfAbsent(Constants.ZK_NODE_LASTDOCID,
					Constants.ZERO_BYTEARRAY);

			// zk node scheduledPages: /crawler/scheduledpages
			createIfAbsent(Constants.ZK_NODE_SCHEDULEDPAGES,
					Constants.ZERO_BYTEARRAY);

			// zk lock dir: /crawler/lock
			createIfAbsent(Constants.ZK_LOCK_DIR, Constants.EMPTY_BYTEARRAY);

			// zk lock docid: /crawler/lock/docid
			createIfAbsent(Constants.ZK_LOCK_DOCID, Constants.EMPTY_BYTEARRAY);

			// zk lock frontier: /crawler/lock/frontier
			createIfAbsent(Constants.ZK_LOCK_FRONTIER,
					Constants.EMPTY_BYTEARRAY);

			// zk lock counters: /crawler/lock/counters
			createIfAbsent(Constants.ZK_LOCK_COUNTERS,
					Constants.EMPTY_BYTEARRAY);

			LOGGER.info("ZooKeeper initialize success");
		} catch (Exception e) {
			LOGGER.error("CrawlController constructor zookeeper error", e);
		}
	}

	/**
	 * Tears down the shared znode tree, but only when no server remains
	 * registered. Children are removed before their parents (locks and
	 * counters before their dirs, everything before the root).
	 *
	 * @throws Exception declared for symmetry; errors are logged, not thrown
	 */
	private void shutdownZooKeeper() throws Exception {
		LOGGER.info("ZooKeeper shutdown");

		try {
			if (zk.getChildren(Constants.ZK_SERVERS, false).size() == 0) {
				try {
					deleteIfExists(Constants.ZK_CRAWLING_THREAD);

					deleteIfExists(Constants.ZK_SERVERS);

					deleteIfExists(Constants.ZK_LOCK_COUNTERS);

					deleteIfExists(Constants.ZK_NODE_LASTDOCID);

					deleteIfExists(Constants.ZK_NODE_SCHEDULEDPAGES);

					deleteIfExists(Constants.ZK_LOCK_DOCID);

					deleteIfExists(Constants.ZK_LOCK_FRONTIER);

					deleteIfExists(Constants.ZK_LOCK_DIR);

					deleteIfExists(Constants.ZK_NODE_COUNTERS_SCHEDULEDPAGES);

					deleteIfExists(Constants.ZK_NODE_COUNTERS_PROCESSEDPAGES);

					deleteIfExists(Constants.ZK_COUNTERS_DIR);

					deleteIfExists(Constants.ZK_ROOT_DIR);

					LOGGER.info("ZooKeeper shutdown success");
				} catch (Exception e) {
					LOGGER.error("CrawlController shutdown error", e);
				}
			} else {
				LOGGER.info("ZooKeeper shutdown success");
			}
		} catch (Exception e) {
			// Was silently swallowed before; at least record the failure.
			LOGGER.error("ZooKeeper shutdown error", e);
		}
	}

	/**
	 * Registers this server by creating an ephemeral sequential znode under
	 * the servers dir; the node disappears automatically when the ZK session
	 * ends, so crashed servers deregister themselves.
	 */
	private void register() {
		try {
			server = zk.create(Constants.ZK_SERVERS + Constants.ZK_SEPARATOR
					+ Constants.ZK_SERVER_PREFIX, Constants.EMPTY_BYTEARRAY,
					Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
		} catch (Exception e) {
			throw new ExceptionInInitializerError("register error: "
					+ e.toString());
		}
	}

	/**
	 * Removes this server's registration znode, if it is still present.
	 *
	 * @throws RuntimeException wrapping the underlying ZooKeeper failure
	 */
	private void removeRegister() {
		try {
			if (zk.exists(server, false) != null) {
				zk.delete(server, -1);
			}
		} catch (Exception e) {
			// Keep the original exception as the cause for diagnosis.
			throw new RuntimeException("removeRegister error: " + e.toString(),
					e);
		}
	}

	/**
	 * Creates a crawl controller: validates the configuration, sets up
	 * ZooKeeper and HBase, registers this server, and wires the docid server
	 * and frontier.
	 *
	 * @param config          validated crawl configuration
	 * @param pageFetcher     fetcher shared by all crawler threads
	 * @param robotstxtServer robots.txt policy service
	 * @throws Exception if any initialization step fails
	 */
	public CrawlController(CrawlConfig config, PageFetcher pageFetcher,
			RobotstxtServer robotstxtServer) throws Exception {
		super(config);

		config.validate();

		initializeZooKeeper();

		initializeHBaseTablePool();

		initializeHBaseTable();

		register();

		docIdServer = new DocIDServer(zk, config);

		frontier = new Frontier(zk, docIdServer, config);

		this.pageFetcher = pageFetcher;

		this.robotstxtServer = robotstxtServer;

		finished = false;

		shuttingDown = false;
	}

	/**
	 * Start the crawling session and wait for it to finish. The number of
	 * crawler threads is taken from the crawl configuration.
	 *
	 * @param _c
	 *            the class that implements the logic for crawler threads
	 */
	public <T extends WebCrawler> void start(final Class<T> _c) {
		this.start(_c, true);
	}

	/**
	 * Start the crawling session and return immediately. The number of
	 * crawler threads is taken from the crawl configuration.
	 *
	 * @param _c
	 *            the class that implements the logic for crawler threads
	 */
	public <T extends WebCrawler> void startNonBlocking(final Class<T> _c) {
		this.start(_c, false);
	}

	/**
	 * Spawns the configured number of crawler threads plus a monitor thread
	 * that polls ZooKeeper and the frontier; when no crawler is active and the
	 * workqueue is empty, the monitor asks all crawlers to finish, tears the
	 * session down, and notifies waiters.
	 *
	 * @param _c         the class that implements the logic for crawler threads
	 * @param isBlocking whether to block until the session finishes
	 */
	protected <T extends WebCrawler> void start(final Class<T> _c,
			boolean isBlocking) {
		try {
			finished = false;

			crawlersLocalData.clear();

			final List<Thread> threads = new ArrayList<Thread>();

			final List<T> crawlers = new ArrayList<T>();

			final int numberOfCrawlers = config.getNumberOfCrawlers();

			for (int i = 1; i <= numberOfCrawlers; i++) {
				try {
					T crawler = _c.newInstance();

					Thread thread = new Thread(crawler, "Crawler " + i);

					crawler.setThread(thread);

					crawler.init(i, this, zk);

					thread.start();

					crawlers.add(crawler);

					threads.add(thread);

					LOGGER.info("Crawler " + i + " started.");
				} catch (Exception e) {
					LOGGER.error("Crawler " + i + " initialize error", e);
				}
			}

			Thread monitorThread = new Thread(new Runnable() {

				@Override
				public void run() {
					try {
						synchronized (waitingLock) {
							while (true) {
								// Poll once a minute.
								sleep(60);

								boolean finish = false;

								try {
									// Done when no crawler holds a crawling
									// marker AND the workqueue is drained.
									if (zk.getChildren(
											Constants.ZK_CRAWLING_THREAD, false)
											.size() == 0
											&& frontier.getQueueLength() == 0) {

										LOGGER.info("no crawler is crawling, and workqueue size is 0, request crawler finish");

										for (T t : crawlers) {
											t.requestFinish();
										}

										// Wait for every crawler to wind down.
										for (T t : crawlers) {
											while (!t.isFinished()) {
												LOGGER.info("wait crawler "
														+ t.getThread()
																.getName()
														+ " finish");

												Thread.sleep(5000);
											}
										}

										finish = true;
									}
								} catch (Exception e) {
									LOGGER.error("zk getChildren or frontier getQueueLength error", e);
								}

								if (finish) {
									// Tear down in dependency order, then wake
									// waitUntilFinish() callers.
									frontier.finish();

									frontier.close();

									docIdServer.close();

									pageFetcher.shutDown();

									finished = true;

									removeRegister();

									shutdownHBaseTable();

									shutdownHBaseTablePool();

									shutdownZooKeeper();

									waitingLock.notifyAll();

									return;
								}
							}
						}
					} catch (Exception e) {
						LOGGER.error("monitor thread error", e);
					}
				}
			});

			monitorThread.start();

			if (isBlocking) {
				waitUntilFinish();
			}
		} catch (Exception e) {
			LOGGER.error("start error", e);
		}
	}

	/**
	 * Blocks the calling thread until the monitor thread marks this crawling
	 * session as finished. If interrupted, restores the thread's interrupt
	 * flag and returns early (possibly before the session is finished).
	 */
	public void waitUntilFinish() {
		while (!finished) {
			synchronized (waitingLock) {
				if (finished) {
					return;
				}

				try {
					waitingLock.wait();
				} catch (InterruptedException e) {
					// Preserve the interrupt for the caller and stop waiting;
					// retrying the wait with the flag set would spin.
					Thread.currentThread().interrupt();
					return;
				}
			}
		}
	}

	/**
	 * Once the crawling session finishes the controller collects the local data
	 * of the crawler threads and stores them in a List. This function returns
	 * the reference to this list.
	 */
	public List<Object> getCrawlersLocalData() {
		return crawlersLocalData;
	}

	/**
	 * Sleeps for the given number of seconds. Restores the interrupt flag if
	 * interrupted so callers can observe the interruption.
	 *
	 * @param seconds how long to sleep
	 */
	protected static void sleep(int seconds) {
		try {
			// 1000L avoids int overflow for very large second counts.
			Thread.sleep(seconds * 1000L);
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Adds a new seed URL. A seed URL is a URL that is fetched by the crawler
	 * to extract new URLs in it and follow them for crawling.
	 * 
	 * depth is 0
	 * 
	 * @param pageUrl
	 *            the URL of the seed
	 */
	public void addSeed(String pageUrl) {
		// -1 means "assign a new doc id automatically".
		addSeed(pageUrl, -1);
	}

	/**
	 * Adds a new seed URL. A seed URL is a URL that is fetched by the crawler
	 * to extract new URLs in it and follow them for crawling. You can also
	 * specify a specific document id to be assigned to this seed URL. This
	 * document id needs to be unique. Also, note that if you add three seeds
	 * with document ids 1,2, and 7. Then the next URL that is found during the
	 * crawl will get a doc id of 8. Also you need to ensure to add seeds in
	 * increasing order of document ids.
	 * 
	 * Specifying doc ids is mainly useful when you have had a previous crawl
	 * and have stored the results and want to start a new crawl with seeds
	 * which get the same document ids as the previous crawl.
	 * 
	 * @param pageUrl
	 *            the URL of the seed
	 * @param docId
	 *            the document id that you want to be assigned to this seed URL;
	 *            a negative value requests automatic assignment
	 */
	public void addSeed(String pageUrl, long docId) {
		String canonicalUrl = URLCanonicalizer.getCanonicalURL(pageUrl);

		if (canonicalUrl == null) {
			LOGGER.error("Invalid seed URL: " + pageUrl);

			return;
		}

		if (docId < 0) {
			docId = docIdServer.getDocId(canonicalUrl);

			if (docId > 0) {
				// This URL is already seen.
				return;
			}

			docId = docIdServer.getNewDocID(canonicalUrl);
		} else {
			try {
				docIdServer.addUrlAndDocId(canonicalUrl, docId);
			} catch (Exception e) {
				LOGGER.error("Could not add seed: " + e.getMessage());
			}
		}

		WebURL webUrl = new WebURL();

		webUrl.setURL(canonicalUrl);
		webUrl.setDocid(docId);
		webUrl.setDepth(0);

		if (!robotstxtServer.allows(webUrl)) {
			LOGGER.info("Robots.txt does not allow this seed: " + pageUrl);
		} else {
			frontier.schedule(webUrl);
		}
	}

	/**
	 * This function can called to assign a specific document id to a url. This
	 * feature is useful when you have had a previous crawl and have stored the
	 * Urls and their associated document ids and want to have a new crawl which
	 * is aware of the previously seen Urls and won't re-crawl them.
	 * 
	 * Note that if you add three seen Urls with document ids 1,2, and 7. Then
	 * the next URL that is found during the crawl will get a doc id of 8. Also
	 * you need to ensure to add seen Urls in increasing order of document ids.
	 * 
	 * @param url
	 *            the URL of the page
	 * @param docId
	 *            the document id that you want to be assigned to this URL.
	 * 
	 */
	public void addSeenUrl(String url, int docId) {
		String canonicalUrl = URLCanonicalizer.getCanonicalURL(url);
		if (canonicalUrl == null) {
			LOGGER.error("Invalid Url: " + url);
			return;
		}
		try {
			docIdServer.addUrlAndDocId(canonicalUrl, docId);
		} catch (Exception e) {
			LOGGER.error("Could not add seen url: " + e.getMessage());
		}
	}

	public PageFetcher getPageFetcher() {
		return pageFetcher;
	}

	public void setPageFetcher(PageFetcher pageFetcher) {
		this.pageFetcher = pageFetcher;
	}

	public RobotstxtServer getRobotstxtServer() {
		return robotstxtServer;
	}

	public void setRobotstxtServer(RobotstxtServer robotstxtServer) {
		this.robotstxtServer = robotstxtServer;
	}

	public Frontier getFrontier() {
		return frontier;
	}

	public void setFrontier(Frontier frontier) {
		this.frontier = frontier;
	}

	public DocIDServer getDocIdServer() {
		return docIdServer;
	}

	public void setDocIdServer(DocIDServer docIdServer) {
		this.docIdServer = docIdServer;
	}

	public Object getCustomData() {
		return customData;
	}

	public void setCustomData(Object customData) {
		this.customData = customData;
	}

	public boolean isFinished() {
		return this.finished;
	}

	public boolean isShuttingDown() {
		return shuttingDown;
	}

	/**
	 * Set the current crawling session set to 'shutdown'. Crawler threads
	 * monitor the shutdown flag and when it is set to true, they will no longer
	 * process new pages.
	 * 
	 * @throws Exception
	 */
	public void shutdown() throws Exception {
		LOGGER.info("Shutting down...");

		this.shuttingDown = true;

		frontier.finish();

		removeRegister();

		shutdownHBaseTable();

		shutdownHBaseTablePool();

		shutdownZooKeeper();
	}

}
