/**
 * 
 */
package com.yufei.infoExtractor.extractor;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.yufei.infoExtractor.extractor.fetchdecision.DecideByIsHadFetched;
import com.yufei.infoExtractor.extractor.fetchdecision.DecideByUrlSimilarity;
import com.yufei.infoExtractor.listener.EventType;
import com.yufei.infoExtractor.listener.InfoExtractorEvent;
import com.yufei.infoExtractor.listener.InfoExtractorListener;
import com.yufei.infoExtractor.parallel.ParallelThreadExecutor;
import com.yufei.infoExtractor.parallel.executor.UrlFetchParallelThreadExecutor;
import com.yufei.infoExtractor.pfw.entity.PaginationRule;
import com.yufei.infoExtractor.pfw.entity.Seedsite;
import com.yufei.infoExtractor.pfw.entity.UrlParameter;
import com.yufei.infoExtractor.task.InfoExtractorCommonTask;
import com.yufei.infoExtractor.util.CommonUtil;
import com.yufei.infoExtractor.util.HtmlUtil;

/**
 * Extracts target URLs for a seed site and de-duplicates them so the same URL
 * is not fetched twice. During the recursive walk it also triggers content
 * extraction / persistence for each fully-expanded depth level (via the
 * registered {@link InfoExtractorListener}s).
 *
 * <p>Two strategies are supported: a pagination-template expansion (preferred
 * when the seed has a {@link PaginationRule}) and a classic recursive
 * depth-first crawl.
 *
 * @author zhaoyufei zhao-0244@qq.com
 * created at 2012-8-14 下午1:31:13
 */
public class UrlExtractor extends Extractor implements ExtractorAction {

	private static final Log mLog = LogFactory.getLog(UrlExtractor.class);

	// The task this extractor belongs to. Only stored, never read here —
	// NOTE(review): confirm whether it is still needed.
	private InfoExtractorCommonTask task = null;

	// Listeners notified (EventType.CONTENTEXTRACTOR) whenever a depth level
	// is ready for content extraction.
	private List<InfoExtractorListener> eventListeners = new ArrayList<InfoExtractorListener>();

	/**
	 * @param task the owning extraction task
	 */
	public UrlExtractor(InfoExtractorCommonTask task) {
		super();
		this.task = task;
		// A ContentExtractor is always registered so that content parsing and
		// data persistence run after every depth level of link extraction.
		this.eventListeners.add(new ContentExtractor());
	}

	/**
	 * Registers an additional listener.
	 *
	 * @param eventListener the listener to add
	 * @throws IllegalArgumentException if {@code eventListener} is null
	 */
	public void addEventListener(InfoExtractorListener eventListener) {
		if (eventListener == null) {
			throw new IllegalArgumentException("eventListener must not be null");
		}
		this.eventListeners.add(eventListener);
	}

	// Delivers the event to every registered listener, in registration order.
	private void fireEvent(InfoExtractorEvent event) {
		for (InfoExtractorListener infoExtractorListener : eventListeners) {
			infoExtractorListener.actionPerformed(event);
		}
	}

	/* (non-Javadoc)
	 * @see com.rzx.crawler.extractor.ExtractorAction#extract(java.util.Map)
	 *
	 * Collects all target URLs into the context's link sets. Two strategies:
	 *   1. classic recursive depth crawl
	 *   2. pagination-template expansion
	 * The pagination template, when configured on the seed, takes precedence.
	 */
	@Override
	public void extract(Map context) {
		Seedsite seedsite = (Seedsite) context.get("seed");
		if (seedsite.getPaginationRule() == null) {
			mLog.info("用户有没有配置分页模板,将使用递归方式提取目标连接");
			this.extractUrlByDepth(context);
		} else {
			mLog.info("用户配置了分页模板，将使用分页模板进行目标连接提取");
			mLog.debug("分页模板详细信息：" + seedsite.getPaginationRule().toString());
			this.extractUrlByPaginationTemplate(context);
		}
	}

	/**
	 * Expands the seed's pagination template into concrete URLs, de-duplicates
	 * them into depth-0 of {@code linkSet}, then continues with the normal
	 * recursive depth crawl.
	 */
	private void extractUrlByPaginationTemplate(Map context) {
		mLog.info("开始根据分页模板进行目标链接抽取。。。。");
		Seedsite seedsite = (Seedsite) context.get("seed");
		Set[] linkSet = (Set[]) context.get("linkSet");

		PaginationRule paginationRule = seedsite.getPaginationRule();
		if (!(paginationRule == null || paginationRule.getPaginationTemplate() == null
				|| paginationRule.getUrlParameters().size() == 0)) {
			List<String> urls = UrlExtractor.generateUrlsByPaginationRule(paginationRule);
			mLog.info("根据分页模板进行目标链接抽取结束，共抽取到'" + urls.size() + "'条链接");
			for (String url : urls) {
				// De-duplication: both deciders must approve the URL.
				// BUG FIX: the original overwrote the DecideByIsHadFetched
				// result with the DecideByUrlSimilarity result, so the
				// already-fetched check was effectively ignored. Both deciders
				// are still invoked unconditionally in case they maintain
				// internal state as a side effect.
				boolean notFetchedYet = new DecideByIsHadFetched().makeDecision(context, url);
				boolean similarityOk = new DecideByUrlSimilarity().makeDecision(context, url);
				if (!(notFetchedYet && similarityOk)) {
					continue;
				}
				linkSet[0].add(url);
			}
		}
		// Depth 0 is seeded; hand over to the recursive walk.
		extractUrlByDepth(context);
	}

	/**
	 * Recursively processes one depth level: extracts child links from the
	 * current level's URLs, fires content extraction for the level, then
	 * recurses into the next level until {@code depth} is reached.
	 */
	private void extractUrlByDepth(Map context) {
		InfoExtractorEvent infoExtractorEvent = new InfoExtractorEvent(context);
		infoExtractorEvent.setEventType(EventType.CONTENTEXTRACTOR);
		Integer currentDepth = (Integer) context.get("currentDepth");
		Integer depth = (Integer) context.get("depth");

		// Last configured level: no further link expansion, just extract
		// content for the URLs gathered at this depth and stop the recursion.
		if (currentDepth + 1 >= depth) {
			mLog.info("对深度为'" + currentDepth + "'上的链接的二次链接提取结束，开始对当前深度上的链接进行具体信息抽取");
			fireEvent(infoExtractorEvent);
			mLog.info("对深度为'" + currentDepth + "'上的链接具体信息抽取结束，整个递归深度信息采集结束");
			return;
		}

		mLog.info("对深度为'" + currentDepth + "'上采集到的链接进行二次链接提取");
		parseLinksFormHtml(context);
		// Extract content / persist data for the fully-expanded current depth;
		// per the original note this also allows the level's bookkeeping to be
		// cleared afterwards for efficiency.
		mLog.info("对深度为'" + currentDepth + "'上的链接的二次链接提取结束，开始对当前深度上的链接进行具体信息抽取");
		fireEvent(infoExtractorEvent);
		mLog.info("对深度为'" + currentDepth + "'上的链接具体信息抽取结束，继续下层处理");

		context.put("currentDepth", currentDepth + 1);
		extractUrlByDepth(context);
	}

	/**
	 * Extracts qualifying child links from every URL of the current depth and
	 * places them in the next depth's set. Fetch/parse work is delegated to a
	 * parallel executor; cached page content is reused where available
	 * (handled inside the executor — NOTE(review): confirm).
	 */
	public void parseLinksFormHtml(Map context) {
		// Number of URLs handed to each parallel work unit — presumably a
		// batch size; TODO confirm against UrlFetchParallelThreadExecutor.
		final int blockSize = 50;
		Set[] linkSet = (Set[]) context.get("linkSet");
		Integer currentDepth = (Integer) context.get("currentDepth");

		final Set<String> readyUrlList = linkSet[currentDepth];

		ParallelThreadExecutor parallelThreadExecutor = new UrlFetchParallelThreadExecutor();
		parallelThreadExecutor.parallelExecuteThreadsForCollectionData(readyUrlList, blockSize, context);
	}

	/**
	 * Expands a pagination rule into the full list of concrete URLs: each
	 * {@link UrlParameter} yields an array of candidate values, the Cartesian
	 * product of those arrays is computed, and each combination is substituted
	 * into the template's {@code {}} placeholders in parameter-index order.
	 *
	 * @param paginationRule the rule to expand; may be null
	 * @return the generated URLs; an empty list when the rule or its template
	 *         is missing (the original returned null here — empty is safer
	 *         and behaviorally compatible with null-checking callers)
	 */
	public static List<String> generateUrlsByPaginationRule(
			PaginationRule paginationRule) {
		if (paginationRule == null
				|| paginationRule.getPaginationTemplate() == null) {
			return Collections.emptyList();
		}
		List<String> urls = new ArrayList<String>();
		String paginationTemplate = paginationRule.getPaginationTemplate();
		List<UrlParameter> urlParameters = paginationRule.getUrlParameters();
		// Sort parameters by their placeholder position in the template.
		// BUG FIX: the original comparator never returned 0 for equal indexes,
		// violating the Comparator contract; this three-way compare does.
		Collections.sort(urlParameters, new Comparator<UrlParameter>() {
			@Override
			public int compare(UrlParameter o1, UrlParameter o2) {
				if (o1.getParameterIndex() > o2.getParameterIndex()) {
					return 1;
				}
				if (o1.getParameterIndex() < o2.getParameterIndex()) {
					return -1;
				}
				return 0;
			}
		});

		// One value array per UrlParameter, in parameter order.
		List<String[]> valueArrays = new ArrayList<String[]>();

		for (UrlParameter urlParameter : urlParameters) {
			// BUG FIX: declared per-iteration — the original declared this
			// outside the loop, so an unrecognized parameterType silently
			// reused the previous parameter's values.
			String[] temp = null;
			if (urlParameter.getParameterType().equals("String")) {
				// Comma-separated literal values.
				temp = urlParameter.getParameterValue().split(",");
			}
			if (urlParameter.getParameterType().equals("Integer")) {
				// parameterValue holds the page count; build a page-number array.
				String maxpageNumber = urlParameter.getParameterValue();
				int pages = Integer.valueOf(maxpageNumber);
				temp = new String[pages];
				if (urlParameter.getBegainPagNumber() == null && urlParameter.getPagNumberSpace() == null) {
					// Plain 1..N page numbers.
					for (int i = 1; i <= pages; i++) {
						temp[i - 1] = String.valueOf(i);
					}
				} else {
					// Arithmetic progression: begin + (i-1) * space.
					// NOTE(review): as in the original, this NPEs if only ONE
					// of begin/space is null — confirm they are always
					// configured together.
					for (int i = 1; i <= pages; i++) {
						temp[i - 1] = String.valueOf(
								urlParameter.getBegainPagNumber() + (i - 1) * urlParameter.getPagNumberSpace());
					}
				}
			}
			if (temp == null) {
				// Unknown parameter type: skip it instead of reusing stale data.
				mLog.warn("未知的参数类型：" + urlParameter.getParameterType());
				continue;
			}
			// URL-encode every candidate value before substitution.
			for (int i = 0; i < temp.length; i++) {
				temp[i] = HtmlUtil.encoderUrl(temp[i]);
			}
			valueArrays.add(temp);
		}
		// Cartesian product of all value arrays, then substitute each
		// combination into the {} placeholders of the template.
		List<String[]> combinations = CommonUtil.getDKL(valueArrays);
		for (String[] combination : combinations) {
			urls.add(CommonUtil.replaceByIndex(paginationTemplate, "\\{\\}", combination));
		}
		return urls;
	}

}
