package com.flute.icrawler.policy.updatepolicy.framework;

import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.log4j.Logger;

import com.flute.icrawler.app.entity.CrawlResult;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.config.CrawlConfigUtil;
import com.flute.icrawler.config.Policy;
import com.flute.icrawler.framework.autoadjust.IntervalCrawlUrlPool;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.policy.bdb.storage.BdbStorageUtil;
import com.flute.icrawler.policy.bdb.storage.IStorage;
import com.flute.icrawler.policy.updatepolicy.adjustment.PolicyAdjuster;
import com.flute.icrawler.policy.updatepolicy.bean.PolicyURL;

/**
 * Singleton controller for the update-policy module.
 *
 * <p>It owns the per-granularity BDB storages, the per-granularity scheduled
 * tasks, the thread pools that drive them, and the queue of crawled URLs
 * awaiting post-processing. State is held in static fields; the instance
 * exists only to expose a conventional object API via {@link #getInstance()}.
 */
public class UpdatePolicyController implements java.io.Serializable {

	private static final long serialVersionUID = -5687475124387660924L;

	private static final Logger LOGGER = Logger.getLogger(UpdatePolicyController.class.getName());

	// Singleton instance of the update-policy controller.
	private static UpdatePolicyController controller = new UpdatePolicyController();

	// Executor driving the periodic per-granularity scheduling tasks.
	private static ScheduledThreadPoolExecutor scheduledexecutor = null;

	// Executor running granularity-adjustment tasks.
	private static ThreadPoolExecutor threadexecutor = null;

	// Per-granularity in-memory (BDB-backed) storage.
	private static HashMap<Integer, IStorage> storageMap = new HashMap<Integer, IStorage>();

	// Per-granularity scheduled task.
	private static HashMap<Integer, IScheduled> scheduledMap = new HashMap<Integer, IScheduled>();

	// Configured update granularities, sorted ascending after init.
	private static LinkedList<Policy> policylist = new LinkedList<Policy>();

	// Crawled URLs awaiting post-processing (producer: submitUrl, consumer: pollUrl).
	private static ConcurrentLinkedQueue<PolicyURL> urlQueue = new ConcurrentLinkedQueue<PolicyURL>();

	// Pool that receives URLs due for re-crawl.
	private static IntervalCrawlUrlPool crawlurlspool = null;

	// Whether the update module finished starting up.
	// volatile: written by the starter thread, read by worker threads via isStart().
	private static volatile boolean isStart = false;

	// Total number of pages updated so far (guarded by the synchronized accessors).
	private long pageUpdateCount = 0;

	private UpdatePolicyController() {
		// Singleton: obtain the instance via getInstance().
	}

	/**
	 * @return the singleton controller instance
	 */
	public static UpdatePolicyController getInstance() {
		return controller;
	}

	/**
	 * Starts the update-policy module: initializes the storages, scheduled
	 * tasks and thread pools, then launches scheduling and adjustment.
	 *
	 * @param crawlpool pool that receives URLs due for re-crawl; must not be null
	 * @throws IllegalArgumentException if crawlpool is null
	 */
	public void start(IntervalCrawlUrlPool crawlpool) {
		LOGGER.info("startup update policy system");
		if (crawlpool == null) {
			// IllegalArgumentException is a RuntimeException, so existing
			// callers that catch RuntimeException are unaffected.
			throw new IllegalArgumentException("IntervalCrawlUrlPool is null!");
		}
		crawlurlspool = crawlpool;
		try {
			this.initialize();
			this.executeScheduled();
			this.executeAdjust();
		} catch (Exception e) {
			// FIX: log the actual cause (the original dropped it) and bail out.
			// The original fell through here, set isStart = true and logged
			// "success" even though startup had failed.
			LOGGER.error("startup update policy exception", e);
			return;
		}
		isStart = true;
		LOGGER.info("startup update system success!");
	}

	/**
	 * Shuts down the update-policy module: stops the thread pools, flushes
	 * every BDB storage to disk and closes them.
	 */
	public void shutdown() {
		this.shutdownThreadPool();
		this.syncBdb();
		this.closeBdb();
		LOGGER.info("shutdown updatepolicy success");
	}

	/**
	 * Initializes the module: granularity list, storages, scheduled tasks,
	 * thread pools — in dependency order.
	 */
	private void initialize() {
		this.initPolicyList();
		this.setGranularityStorages();
		this.setGranularityScheduled();
		this.initThreadPool();
	}

	/**
	 * Creates the scheduling and adjustment thread pools. Both use
	 * CallerRunsPolicy so submissions degrade to synchronous execution
	 * instead of being rejected when the pool is saturated.
	 */
	private void initThreadPool() {
		scheduledexecutor = new ScheduledThreadPoolExecutor(5, new ScheduledThreadPoolExecutor.CallerRunsPolicy());
		threadexecutor = new ThreadPoolExecutor(5, 200, 5, TimeUnit.MINUTES, new ArrayBlockingQueue<Runnable>(100),
				new ThreadPoolExecutor.CallerRunsPolicy());
	}

	/**
	 * Schedules a task at a fixed rate on the scheduling pool.
	 *
	 * @param command      the task to run
	 * @param initialDelay delay before the first execution
	 * @param period       period between successive executions
	 * @param unit         time unit of initialDelay and period
	 * @return the ScheduledFuture for the task
	 * @throws Exception if scheduling fails
	 */
	public ScheduledFuture<?> execute(Runnable command, long initialDelay, long period, TimeUnit unit) throws Exception {
		// FIX: the original labels were swapped/misleading
		// ("Policy=" printed initialDelay, "Scheduler=" printed period).
		LOGGER.info("execute initialDelay=" + initialDelay + " period=" + period);
		return scheduledexecutor.scheduleAtFixedRate(command, initialDelay, period, unit);
	}

	/**
	 * Loads the configured granularity list and sorts it ascending.
	 * At least five granularities must be configured; there is no built-in
	 * default (the original comment claimed one, but the code throws).
	 *
	 * @throws RuntimeException if the configuration is missing or too small
	 */
	private void initPolicyList() {
		List<Policy> policys = CrawlConfigUtil.getBaseConfig().getUpdate().getPolicys().getPolicys();
		if (policys == null || policys.size() <= 4) {
			throw new RuntimeException("update policy config error!");
		}
		policylist.addAll(policys);
		Collections.sort(policylist);
		StringBuilder info = new StringBuilder("policylist[");
		for (Policy p : policylist) {
			info.append(p.getGranularity()).append(",");
		}
		info.append("]");
		LOGGER.info(info.toString());
	}

	/**
	 * Opens one BDB storage per configured granularity, keyed by the
	 * granularity value. Storage names follow the "update.<granularity>" scheme.
	 */
	private void setGranularityStorages() {
		for (Policy p : policylist) {
			IStorage storage = BdbStorageUtil.getStorage("update." + String.valueOf(p.getGranularity()));
			storageMap.put(Integer.valueOf(p.getGranularity()), storage);
			LOGGER.info("Policy=" + p.getGranularity() + " IStorage=" + storage.getName());
		}
		LOGGER.info("Init granularity's stroage success");
	}

	/**
	 * Creates one scheduled task per positive granularity, wired to that
	 * granularity's storage. Non-positive granularities are skipped.
	 */
	private void setGranularityScheduled() {
		for (Policy p : policylist) {
			if (p.getGranularity() <= 0) {
				continue;
			}
			IStorage storage = storageMap.get(Integer.valueOf(p.getGranularity()));
			IScheduled scheduled = new PolicyScheduled(p.getGranularity(), p.getScheduler(), storage);
			scheduledMap.put(p.getGranularity(), scheduled);
		}
	}

	/**
	 * Starts every per-granularity scheduled task.
	 */
	private void executeScheduled() {
		for (IScheduled scheduled : scheduledMap.values()) {
			scheduled.execute();
		}
	}

	/**
	 * Starts ten adjuster threads that continuously re-evaluate update
	 * frequencies. (Renamed from the typo'd private "excuteAdjust".)
	 */
	private void executeAdjust() {
		for (int i = 0; i < 10; i++) {
			new Thread(new PolicyAdjuster(this), "AdjustThread-" + i).start();
		}
	}

	/**
	 * Submits a frequency-adjustment task to the adjustment pool.
	 *
	 * @param command the adjustment task to run
	 */
	public void executeAdjuester(Runnable command) {
		threadexecutor.execute(command);
	}

	/**
	 * Index of the policy with the given granularity in policylist, or -1.
	 */
	private int indexOfGranularity(Integer granularity) {
		for (int i = 0; i < policylist.size(); i++) {
			if (policylist.get(i).getGranularity() == granularity) {
				return i;
			}
		}
		return -1;
	}

	/**
	 * Returns the next-larger granularity after the given one.
	 *
	 * @param granularity the current granularity
	 * @return the next granularity, or null if the given one is the largest
	 *         or is not in the configured list
	 */
	public Integer getNextGranularity(Integer granularity) {
		int index = indexOfGranularity(granularity);
		// FIX: the original returned the FIRST (smallest) granularity when the
		// argument was unknown (index stayed -1, so get(0) was returned);
		// return null instead, consistent with getPreGranularity.
		if (index < 0 || index + 1 >= policylist.size()) {
			return null;
		}
		return policylist.get(index + 1).getGranularity();
	}

	/**
	 * Returns the next-smaller granularity before the given one.
	 *
	 * @param granularity the current granularity
	 * @return the previous granularity, or null if the given one is the
	 *         smallest or is not in the configured list
	 */
	public Integer getPreGranularity(Integer granularity) {
		int index = indexOfGranularity(granularity);
		if (index <= 0) {
			return null;
		}
		return policylist.get(index - 1).getGranularity();
	}

	/**
	 * Finds the granularity whose storage currently contains the URL.
	 *
	 * @param url the URL to look up
	 * @return the granularity the URL belongs to, or null if no storage has it
	 */
	public Integer getGranularity(String url) {
		for (Entry<Integer, IStorage> entry : storageMap.entrySet()) {
			if (entry.getValue().isContains(url)) {
				return entry.getKey();
			}
		}
		return null;
	}

	/**
	 * Returns the granularity value at the given position of the sorted list.
	 *
	 * @param index zero-based position into the granularity list
	 * @return the granularity at that position
	 * @throws RuntimeException if index is out of range
	 */
	public Integer getGranularity(int index) {
		if (index > -1 && index < policylist.size()) {
			return policylist.get(index).getGranularity();
		}
		LOGGER.info("getGranularity index = " + index);
		throw new RuntimeException("getGranularity:granularity is null!");
	}

	/**
	 * Looks up the Policy configured for a granularity value.
	 *
	 * @param granularity the granularity to look up
	 * @return the matching Policy
	 * @throws RuntimeException if no policy has that granularity
	 */
	public Policy getPolicy(Integer granularity) {
		for (Policy p : policylist) {
			if (p.getGranularity() == granularity) {
				return p;
			}
		}
		throw new RuntimeException("getPolicy: data is error!");
	}

	/**
	 * Returns the storage backing a granularity.
	 *
	 * @param granularity the granularity key
	 * @return the storage, or null if the granularity is unknown
	 */
	public IStorage getStorage(Integer granularity) {
		return storageMap.get(granularity);
	}

	/**
	 * Flushes every storage to disk. Best-effort: a failure on one storage is
	 * logged (the original swallowed it silently) and the rest are still synced.
	 */
	private void syncBdb() {
		for (Entry<Integer, IStorage> entry : storageMap.entrySet()) {
			try {
				entry.getValue().sync();
			} catch (Exception e) {
				LOGGER.error("sync bdb failed, granularity=" + entry.getKey(), e);
			}
		}
	}

	/**
	 * Closes every storage and then the shared BDB environment. Best-effort:
	 * a failure on one storage is logged (the original swallowed it silently)
	 * and the remaining storages are still closed.
	 * (Renamed from the typo'd private "colseBdb".)
	 */
	private void closeBdb() {
		for (Entry<Integer, IStorage> entry : storageMap.entrySet()) {
			try {
				entry.getValue().close();
			} catch (Exception e) {
				LOGGER.error("close bdb failed, granularity=" + entry.getKey(), e);
			}
		}
		BdbStorageUtil.closeBdbEnvironment();
	}

	/**
	 * Initiates an orderly shutdown of both thread pools. Null-safe so it can
	 * run even if start() never completed; failures are logged rather than
	 * silently swallowed.
	 */
	private void shutdownThreadPool() {
		try {
			if (scheduledexecutor != null) {
				scheduledexecutor.shutdown();
			}
			if (threadexecutor != null) {
				threadexecutor.shutdown();
			}
		} catch (Exception e) {
			LOGGER.error("shutdown thread pool failed", e);
		}
	}

	/**
	 * Hands a URL that is due for update over to the crawl pool.
	 *
	 * @param url the policy URL wrapper whose crawl URL is re-queued
	 */
	public void addUrl(PolicyURL url) {
		crawlurlspool.addUpdateUrl(url.getCrawlurl());
	}

	/**
	 * @return the number of configured granularities
	 */
	public int getGranularitysSize() {
		return policylist.size();
	}

	/**
	 * @return true once the update module has fully started
	 */
	public boolean isStart() {
		return isStart;
	}

	/**
	 * Increments the total count of updated pages.
	 */
	public synchronized void IncrUpdateCount() {
		pageUpdateCount++;
	}

	/**
	 * @return the total number of pages updated so far
	 */
	public synchronized long getUpdateConut() {
		return pageUpdateCount;
	}

	/**
	 * Submits a crawled URL for follow-up processing by the update module.
	 * The page content is decoded with the charset detected during the crawl;
	 * decoding/queueing failures are logged and the URL is dropped.
	 *
	 * @param url the crawled URL carrying its crawl result
	 */
	public void submitUrl(CrawlUrl url) {
		CrawlResult crawlResult = (CrawlResult) url.getResultParameter(ParameterKey.RESULT_CRAWL).getValue();
		try {
			urlQueue.add(new PolicyURL(url.getUrl(), url.getDeep(), new String(crawlResult.getContent(), crawlResult
					.getCharSet()), url));
		} catch (Exception e) {
			LOGGER.error(e);
		}
		LOGGER.info("提交URL进行后续处理" + url.getUrl());
	}

	/**
	 * Takes the next URL awaiting follow-up processing, blocking (polling
	 * every 500 ms) until one is available.
	 *
	 * @return the next policy URL; never null
	 */
	public PolicyURL pollUrl() {
		PolicyURL url;
		while ((url = urlQueue.poll()) == null) {
			try {
				Thread.sleep(500);
			} catch (InterruptedException ignored) {
				// Deliberately keep waiting: callers rely on the never-null
				// blocking contract, so returning on interrupt would break them.
				// NOTE(review): consider re-interrupting and propagating a stop
				// signal instead of swallowing — confirm against PolicyAdjuster.
			}
		}
		return url;
	}

}
