/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.openness.crawler.frontier;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.openness.crawler.crawler.Configurable;
import com.openness.crawler.crawler.CrawlConfig;
import com.openness.crawler.lock.DistributedLock;
import com.openness.crawler.url.WebURL;
import com.openness.crawler.util.Constants;

/**
 * Manages the crawl frontier: the set of URLs scheduled to be crawled, the
 * pages currently being processed, and the ZooKeeper-backed counters that
 * track them. Mutating operations are serialized across crawler instances
 * through a {@link DistributedLock}.
 */
public class Frontier extends Configurable {

	private static final Logger LOGGER = LoggerFactory
			.getLogger(Frontier.class);

	private ZooKeeper zk;

	private DistributedLock lock;

	private WorkQueue workQueue;

	private InProcessPagesDB inProcessPages;

	// Written by finish() and read by getNextURLs()/isFinished() without
	// holding the lock; volatile guarantees cross-thread visibility.
	private volatile boolean isFinished = false;

	private DocIDServer docIdServer;

	private Counters counters;

	/**
	 * Creates the frontier and, when resumable crawling is enabled,
	 * reschedules any URLs that were still in process when the previous
	 * crawl stopped.
	 *
	 * @param zk connected ZooKeeper client used for locking and counters
	 * @param docIdServer document-id server, synced together with this frontier
	 * @param config crawl configuration (resumability, page limit, ...)
	 * @throws IOException if the work queue or in-process-pages database
	 *             cannot be initialized
	 */
	public Frontier(ZooKeeper zk, DocIDServer docIdServer, CrawlConfig config)
			throws IOException {
		super(config);

		this.zk = zk;

		this.lock = new DistributedLock(zk, Constants.ZK_LOCK_FRONTIER,
				Constants.ZK_LOCK_PREFIX);

		this.counters = new Counters(zk, config);

		this.docIdServer = docIdServer;

		try {
			workQueue = new WorkQueue();

			inProcessPages = new InProcessPagesDB();

			if (config.isResumableCrawling()) {
				setScheduledPages(counters
						.getValue(Counters.ReservedCounterNames.SCHEDULED_PAGES));

				long numPreviouslyInProcessPages = inProcessPages.getLength();

				if (numPreviouslyInProcessPages > 0) {
					LOGGER.info("Rescheduling {} URLs from previous crawl.",
							numPreviouslyInProcessPages);

					// The rescheduled pages are counted again by
					// scheduleAll() below, so deduct them first.
					setScheduledPages(getScheduledPages()
							- numPreviouslyInProcessPages);

					while (true) {
						List<WebURL> urls = inProcessPages.get(100);

						if (urls.isEmpty()) {
							break;
						}

						scheduleAll(urls);

						inProcessPages.delete(urls);
					}
				}
			} else {
				setScheduledPages(0);
			}
		} catch (IOException e) {
			// Propagate instead of leaving workQueue null, which would cause
			// NullPointerExceptions on every subsequent frontier operation.
			LOGGER.error("Error while initializing the Frontier", e);

			throw e;
		}
	}

	/**
	 * Reads the current scheduled-pages count from ZooKeeper.
	 *
	 * @return the scheduled-pages counter value
	 * @throws RuntimeException if the ZooKeeper node cannot be read
	 */
	private long getScheduledPages() {
		try {
			byte[] data = zk.getData(Constants.ZK_NODE_SCHEDULEDPAGES, false,
					null);

			return Bytes.toLong(data);
		} catch (Exception e) {
			// Chain the cause; the original e.toString() concatenation
			// discarded the stack trace.
			throw new RuntimeException("Frontier getScheduledPages error", e);
		}
	}

	/**
	 * Writes the scheduled-pages count to ZooKeeper. Negative values are
	 * silently ignored.
	 *
	 * @param scheduledPages new counter value; must be &gt;= 0 to take effect
	 * @throws RuntimeException if the ZooKeeper node cannot be written
	 */
	private void setScheduledPages(long scheduledPages) {
		if (scheduledPages < 0) {
			return;
		}

		try {
			zk.setData(Constants.ZK_NODE_SCHEDULEDPAGES,
					Bytes.toBytes(scheduledPages), -1);
		} catch (Exception e) {
			throw new RuntimeException("Frontier setScheduledPages error: "
					+ scheduledPages, e);
		}
	}

	/**
	 * Schedules a batch of URLs for crawling, honoring the configured
	 * maximum number of pages to fetch (a limit &lt;= 0 means unlimited).
	 *
	 * @param urls URLs to add to the work queue
	 */
	public void scheduleAll(List<WebURL> urls) {
		long maxPagesToFetch = config.getMaxPagesToFetch();

		lock.lock();

		try {
			long newScheduledPages = 0;

			long scheduledPages = getScheduledPages();

			List<WebURL> workUrls = new ArrayList<WebURL>();

			for (WebURL url : urls) {
				if (maxPagesToFetch > 0
						&& (scheduledPages + newScheduledPages) >= maxPagesToFetch) {
					break;
				}

				workUrls.add(url);

				newScheduledPages++;
			}

			try {
				workQueue.puts(workUrls);
			} catch (IOException e) {
				LOGGER.error("Error while putting the urls in the work queue.",
						e);

				// Nothing was queued, so do not count these pages as
				// scheduled (the original incremented the counters anyway).
				return;
			}

			if (newScheduledPages > 0) {
				setScheduledPages(scheduledPages + newScheduledPages);

				counters.increment(
						Counters.ReservedCounterNames.SCHEDULED_PAGES,
						newScheduledPages);
			}
		} catch (Exception e) {
			LOGGER.error("Frontier scheduleAll error", e);
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Schedules a single URL for crawling, honoring the configured maximum
	 * number of pages to fetch (a limit &lt;= 0 means unlimited).
	 *
	 * @param url the URL to add to the work queue
	 */
	public void schedule(WebURL url) {
		long maxPagesToFetch = config.getMaxPagesToFetch();

		lock.lock();

		try {
			// Treat a non-positive limit as unlimited, consistent with
			// scheduleAll(); the previous "< 0" check made a limit of 0
			// schedule nothing here while scheduleAll() treated 0 as
			// unlimited.
			if (maxPagesToFetch <= 0 || getScheduledPages() < maxPagesToFetch) {
				workQueue.put(url);

				setScheduledPages(getScheduledPages() + 1);

				counters.increment(Counters.ReservedCounterNames.SCHEDULED_PAGES);
			}
		} catch (IOException e) {
			LOGGER.error("Error while putting the url in the work queue.", e);
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Moves up to {@code max} URLs from the work queue into {@code result}
	 * and records them as in-process. No-op once the crawl is finished.
	 *
	 * @param max maximum number of URLs to fetch
	 * @param result output list the fetched URLs are appended to
	 */
	public void getNextURLs(int max, List<WebURL> result) {
		if (isFinished) {
			return;
		}

		lock.lock();

		try {
			try {
				List<WebURL> curResults = workQueue.get(max);

				workQueue.delete(curResults);

				if (inProcessPages != null) {
					inProcessPages.puts(curResults);
				}

				result.addAll(curResults);
			} catch (IOException e) {
				LOGGER.error("Error while getting next urls", e);
			}
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Marks a single URL as processed: increments the processed-pages
	 * counter and removes the URL from the in-process database.
	 *
	 * @param webURL the URL that finished processing
	 */
	public void setProcessed(WebURL webURL) {
		lock.lock();

		try {
			counters.increment(Counters.ReservedCounterNames.PROCESSED_PAGES);

			if (inProcessPages != null) {
				if (!inProcessPages.removeURL(webURL)) {
					LOGGER.error("Could not remove: " + webURL.getURL()
							+ " from list of processed pages.");
				}
			}
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Marks a batch of URLs as processed: increments the processed-pages
	 * counter and removes the URLs from the in-process database.
	 *
	 * @param urls the URLs that finished processing; null or empty is a no-op
	 */
	public void setProcessed(List<WebURL> urls) {
		if (urls == null || urls.isEmpty()) {
			return;
		}

		lock.lock();

		try {
			counters.increment(Counters.ReservedCounterNames.PROCESSED_PAGES,
					urls.size());

			if (inProcessPages != null) {
				inProcessPages.delete(urls);
			}
		} catch (IOException e) {
			LOGGER.error("Could not remove {} urls from list of processed pages.",
					urls.size(), e);
		} finally {
			lock.unlock();
		}
	}

	/** @return the number of URLs currently waiting in the work queue */
	public long getQueueLength() {
		return workQueue.getLength();
	}

	/** @return the number of URLs currently assigned (in process) */
	public long getNumberOfAssignedPages() {
		return inProcessPages.getLength();
	}

	/** @return the total number of pages processed so far */
	public long getNumberOfProcessedPages() {
		return counters.getValue(Counters.ReservedCounterNames.PROCESSED_PAGES);
	}

	/** Flushes the work queue, doc-id server, and counters to storage. */
	public void sync() {
		workQueue.sync();

		docIdServer.sync();

		counters.sync();
	}

	/** @return whether {@link #finish()} has been called */
	public boolean isFinished() {
		return isFinished;
	}

	/** Syncs all state and releases the work queue and counters. */
	public void close() {
		sync();

		workQueue.close();

		counters.close();
	}

	/** Signals that the crawl is done; getNextURLs() returns nothing after. */
	public void finish() {
		isFinished = true;
	}

}
