package com.openness.crawler.frontier;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;

import com.openness.crawler.url.WebURL;


/**
 * @author Rain
 * @version 2013-5-22 4:59:34 PM
 */

public abstract class CrawlerQueue {

	/**
	 * Retrieves URLs from the queue, at most {@code max} of them.
	 *
	 * @param max upper bound on the number of URLs returned
	 * @return the retrieved URLs
	 * @throws IOException if the backing store cannot be read
	 */
	public abstract List<WebURL> get(int max) throws IOException;

	/**
	 * Removes a single URL from the queue.
	 *
	 * @param webUrl the URL to remove
	 * @return {@code true} on success (exact semantics are implementation-defined)
	 */
	public abstract boolean removeURL(WebURL webUrl);

	/**
	 * Deletes a batch of URLs from the queue.
	 *
	 * @param urls the URLs to delete
	 * @throws IOException if the backing store cannot be written
	 */
	public abstract void delete(List<WebURL> urls) throws IOException;

	/**
	 * Builds the row key under which a URL is stored.
	 *
	 * <p>The key that is used for storing URLs determines the order they
	 * are crawled: lower key values result in earlier crawling. Hence URLs
	 * with lower priority numbers are crawled earlier; ties on priority are
	 * broken by depth (shallower first), and ties on both are broken by
	 * docid (earlier discovery, therefore smaller docid, first).
	 *
	 * @param url the URL to build a key for
	 * @return the priority bytes, followed by the depth bytes, followed by
	 *         the docid bytes
	 */
	protected byte[] getRowKey(WebURL url) {
		final byte[] priorityBytes = Bytes.toBytes(url.getPriority());
		final byte[] depthBytes = Bytes.toBytes(url.getDepth());
		final byte[] docidBytes = Bytes.toBytes(url.getDocid());
		return Bytes.add(priorityBytes, depthBytes, docidBytes);
	}

	/**
	 * Adds a URL to the queue.
	 *
	 * @param url the URL to enqueue
	 * @throws IOException if the backing store cannot be written
	 */
	public abstract void put(WebURL url) throws IOException;

	/**
	 * @return the number of URLs currently in the queue
	 */
	public abstract long getLength();

	/** Default no-op; subclasses may override to flush pending state. */
	public void sync() {
	}

	/** Default no-op; subclasses may override to release resources. */
	public void close() {
	}

}
