package com.openness.spider;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.openness.spider.commons.GeneralBuffer;
import com.openness.spider.commons.Statics;
import com.openness.spider.hbase.HBaseUtil;
import com.openness.spider.zk.lock.DistributedLock;

/**
 * @author Rain
 * @version 2013-6-21 上午9:46:57
 */

/**
 * Buffer of discovered URLs. On flush, URLs are de-duplicated against the
 * processqueue, workqueue and history HBase tables under a cluster-wide
 * ZooKeeper lock, and the remaining (never-seen) URLs are written to the
 * workqueue table.
 */
public class UrlBuffer extends GeneralBuffer<Url> {

	private static final Logger LOGGER = LoggerFactory
			.getLogger(UrlBuffer.class);

	/** Cluster-wide lock serializing flushes across spider nodes. */
	private final DistributedLock lock;

	/**
	 * @param config spider configuration (buffer size / flush timing)
	 * @param zk     live ZooKeeper session used for the distributed lock
	 */
	public UrlBuffer(SpiderConfig config, ZooKeeper zk) {
		super(config);

		lock = new DistributedLock(zk, Statics.ZK_LOCK_NODE);
	}

	@Override
	public void onStart() throws Exception {
		LOGGER.info("{}\t{}\turl buffer start", Statics.HOSTNAME,
				Thread.currentThread().getName());
	}

	@Override
	public void onStop() throws Exception {
		LOGGER.info("{}\t{}\turl buffer stop", Statics.HOSTNAME,
				Thread.currentThread().getName());
	}

	/**
	 * Flushes the buffered URLs:
	 * <ol>
	 * <li>filters out URLs already present in the processqueue, workqueue
	 * or history tables;</li>
	 * <li>stores the remaining URLs in the workqueue table.</li>
	 * </ol>
	 * The whole filter+store sequence runs under the distributed lock so
	 * concurrent spider nodes cannot both enqueue the same URL.
	 */
	@Override
	public void flush(Set<Url> buffer) throws IOException {
		// Snapshot and clear the shared buffer up front so producers can keep
		// adding while we flush. NOTE(review): if lock acquisition fails below,
		// this snapshot is dropped -- behavior preserved from the original.
		List<Url> urls = new ArrayList<Url>(buffer);

		buffer.clear();

		try {
			lock.lock();

			try {
				removeExisting(urls, Statics.HBASE_TABLE_PROCESSQUEUE);
				removeExisting(urls, Statics.HBASE_TABLE_WORKQUEUE);
				removeExisting(urls, Statics.HBASE_TABLE_HISTORY);

				if (!urls.isEmpty()) {
					// Store the surviving (never-seen) URLs in the workqueue.
					List<Put> puts = new ArrayList<Put>(urls.size());

					for (Url url : urls) {
						puts.add(url.put());
					}

					HBaseUtil.put(Statics.HBASE_TABLE_WORKQUEUE, puts);

					LOGGER.info("urlbuffer flush size: {}", urls.size());
				}
			} catch (Exception e) {
				// Log with the throwable so the stack trace is not lost.
				LOGGER.error("urlbuffer flush error", e);
			} finally {
				lock.unlock();
			}
		} catch (Exception e) {
			LOGGER.error("lock error", e);
		}
	}

	/**
	 * Removes from {@code urls} every URL that already has a non-empty row
	 * in the given HBase table, leaving only never-seen URLs.
	 *
	 * @param urls  candidate URLs; modified in place
	 * @param table HBase table name to check against
	 * @throws IOException if the HBase lookup fails
	 */
	private void removeExisting(List<Url> urls, String table)
			throws IOException {
		if (urls.isEmpty()) {
			// Nothing left to filter -- skip the HBase round-trip.
			return;
		}

		List<Get> gets = new ArrayList<Get>(urls.size());

		for (Url url : urls) {
			gets.add(url.get());
		}

		// Results come back in the same order as the gets, one per URL.
		List<Result> results = HBaseUtil.get(table, gets);

		// Iterate backwards so removal does not invalidate pending indexes.
		for (int index = urls.size() - 1; index >= 0; index--) {
			if (!results.get(index).isEmpty()) {
				urls.remove(index);
			}
		}
	}

	@Override
	public long getBufferSize() {
		return config.getUrlBufferSize();
	}

	@Override
	public long getFlushInternal() {
		return config.getUrlBufferFlushInternal();
	}

	@Override
	public long getSleepTime() {
		return config.getUrlBufferSleepTime();
	}

}
