package com.terren.spider.core.html.util;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.commons.collections.CollectionUtils;

import com.terren.spider.core.html.configuration.Configuration;
import com.terren.spider.core.html.downloader.AbuyunHttpClientDownloader;
import com.terren.spider.core.html.downloader.NormalHttpClientDownloader;
import com.terren.spider.core.html.pipeline.BasicHtmlCollectorPipeline;
import com.terren.spider.core.html.pipeline.HtmlArticleSearchDBCollectorPipeline;
import com.terren.spider.core.html.pipeline.HtmlConsolePipeline;
import com.terren.spider.core.html.pipeline.HtmlDBCollectorPipeline;
import com.terren.spider.core.html.processor.BasicHtmlProcessor;
import com.terren.spider.core.html.processor.HtmlArticleSearchPageProcessor;
import com.terren.spider.core.html.processor.HtmlArticleSearchUrlProcessor;
import com.terren.spider.core.html.processor.HtmlPageProcessor;
import com.terren.spider.core.html.processor.HtmlUrlProcessor;
import com.terren.spider.core.html.scheduler.SpiderFileCacheQueueScheduler;
import com.terren.spider.core.html.scheduler.SpiderQueueScheduler;
import com.terren.spider.core.html.scheme.ICustomScheme;
import com.terren.spider.entity.biz.ArticleSearch;
import com.terren.spider.entity.config.SysConfig;
import com.terren.spider.entity.core.CatagoryType;
import com.terren.spider.entity.core.Entry;
import com.terren.spider.entity.core.EntryFormat;
import com.terren.spider.util.BasicUtil;
import com.terren.spider.util.common.BeanUtil;
import com.terren.spider.util.common.FileUtil;
import com.terren.spider.util.common.StringUtil;

import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.scheduler.BloomFilterDuplicateRemover;

/**
 * Orchestrates one crawl for a single entry: first collects target-page URLs
 * with a url-processor spider, then fetches and parses those pages with a
 * page-processor spider, persisting results through the configured pipelines.
 *
 * @param <T> entity type the pipelines collect / persist
 */
public class HtmlFetchUtil<T> extends BasicUtil {
	protected BasicHtmlProcessor pageProcessor;// (default) processor that parses target pages
	protected BasicHtmlProcessor urlProcessor;// (default) processor that extracts target-page links

	private BasicHtmlCollectorPipeline<T> dbCollectorPipeline;// page-analysis pipeline [to db]
	private HtmlConsolePipeline<T> consolePipeline;// console-output pipeline, used for test mode etc.

	private Spider spider;
	private Configuration configuration;// crawler configuration
	private Class<T> clazz;
	private Set<String> targetUrls;// collected target-page URLs


	public HtmlFetchUtil(Configuration configuration, Class<T> clazz) throws Exception {
		super();
		init(configuration, clazz);
	}

	/**
	 * Initialization: stores the configuration, normalizes the entry URL regexes
	 * (WebMagic group workaround) and wires up processors and pipelines.
	 *
	 * @param configuration crawler configuration; must not be null
	 * @param clazz         entity class handed to the pipelines
	 * @throws Exception if processor/pipeline construction fails
	 */
	private void init(Configuration configuration, Class<T> clazz) throws Exception {
		// Fail fast with a clear message instead of NPE-ing a few lines below
		// on configuration.getEntry() with the field left unset.
		if (null == configuration) {
			throw new NullPointerException("configuration must not be null");
		}
		this.clazz = clazz;
		this.targetUrls = new HashSet<String>();
		this.configuration = configuration;

		// WebMagic group bug: each alternative in the regex must be wrapped in
		// its own group, e.g. (https://www.google.com/.*), (http://www.google.com/.*)
		configuration.getEntry().setUrlRegex(StringUtil.addGroupRegex(configuration.getEntry().getUrlRegex(), SysConfig.URL_SPLIT));
		configuration.getEntry().setExcludeUrlRegex(StringUtil.addGroupRegex(configuration.getEntry().getExcludeUrlRegex(), SysConfig.URL_SPLIT));

		initProcessorAndPipeline();
	}


	/**
	 * Chooses processor and pipeline implementations based on the entry's
	 * category and, for article search, the concrete search-engine "act".
	 * Unknown categories/acts fall back to the generic HTML implementations.
	 *
	 * @throws Exception if a processor or pipeline constructor fails
	 */
	protected void initProcessorAndPipeline() throws Exception {
		CatagoryType catagoryType = configuration.getEntry().getCatagoryId() != null
				? CatagoryType.valueOfType(configuration.getEntry().getCatagoryId())
				: CatagoryType.NEWS;
		if (null != catagoryType) {
			switch (catagoryType) {
			case ARTICLE_SEARCH:
				String act = configuration.getEntry().getAct();
				switch (act) {
				// All supported search engines share the same article-search
				// processors and pipeline, so the cases fall through.
				case "baiduNews":
				case "sogouNews":
				case "googleNews":
				case "baiduSearch":
				case "sogouSearch":
					this.pageProcessor = new HtmlArticleSearchPageProcessor(configuration);
					this.urlProcessor = new HtmlArticleSearchUrlProcessor(configuration);
					this.dbCollectorPipeline = new HtmlArticleSearchDBCollectorPipeline<>(configuration, clazz);
					break;
				default:
					this.pageProcessor = new HtmlPageProcessor(configuration);
					this.urlProcessor = new HtmlUrlProcessor(configuration);
					this.dbCollectorPipeline = new HtmlDBCollectorPipeline<T>(configuration, clazz);
					break;
				}
				break;
			case TIEBA:
				this.pageProcessor = new HtmlArticleSearchPageProcessor(configuration);
				this.urlProcessor = new HtmlArticleSearchUrlProcessor(configuration);
				this.dbCollectorPipeline = new HtmlArticleSearchDBCollectorPipeline<>(configuration, clazz);
				break;
			default:
				this.pageProcessor = new HtmlPageProcessor(configuration);
				this.urlProcessor = new HtmlUrlProcessor(configuration);
				this.dbCollectorPipeline = new HtmlDBCollectorPipeline<T>(configuration, clazz);
				break;
			}
		}
		this.consolePipeline = new HtmlConsolePipeline<T>(configuration, clazz);
	}


	/**
	 * Crawls target-page links starting from the given URLs plus any URLs
	 * contributed by custom schemes.
	 *
	 * @param urls additional entry URLs (may be empty)
	 * @return the collected target-page URLs, or null when there was nothing to crawl
	 * @throws Exception if a custom scheme or the crawl fails
	 */
	public String[] findUrl(String... urls) throws Exception {
		/**
		 * Collect custom entry URLs contributed by the configured schemes.
		 */
		Set<String> total = new HashSet<>();
		for (ICustomScheme scheme : configuration.getSchemes()) {// e.g. custom list-page schemes
			if (BeanUtil.includeMethod("addEntryUrl", scheme.getClass())) {
				Set<String> customUrls = scheme.addEntryUrl(configuration);
				log.info("[entry:" + configuration.getEntry().getEntryId() + "]" + "[scheme:" + scheme.getClass().getName() + "]");
				if (null != customUrls) {
					total.addAll(customUrls);
				}
				log.info("[method:addEntryUrl]" + "[size:" + total.size() + "]");
			}
		}

		if (null != urls) {
			Set<String> urlsSet = BeanUtil.array2Set(urls);
			total.addAll(urlsSet);
		}
		total.remove("");
		total.remove(null);
		if (total.isEmpty()) {
			return null;
		}
		createUrlSpider(BeanUtil.set2Array(total)).run();// run the link crawl
		cleanCache();// clear the file cache
		clearSpider();

		/**
		 * Collect the discovered links.
		 */
		String[] urlList = initTargetUrls();
		return urlList;
	}

	/**
	 * Crawls target-page links using the entry URLs derived from the
	 * configured entry formats.
	 *
	 * @return the collected target-page URLs, or null when there was nothing to crawl
	 * @throws Exception if the crawl fails
	 */
	public String[] findUrl() throws Exception {
		String[] urls = formatEntryUrl(configuration.getEntry());
		return findUrl(urls);
	}


	/**
	 * Fetches and parses the given target pages, flushing any leftover
	 * collected items afterwards.
	 *
	 * @param urls target-page URLs (may be null/empty)
	 * @return the number of URLs that were submitted
	 * @throws Exception if the crawl or persistence fails
	 */
	public Integer parsePage(String[] urls) throws Exception {

		spider = createParsePageSpider(urls);// fetch target pages, parse and persist
		if (null != spider) {
			spider.run();
			saveCollectItems();
			cleanCache();// clear the file cache
		}
		clearSpider();
		return urls != null ? urls.length : 0;
	}


	/**
	 * Closes the current spider (if any) and detaches its pipelines.
	 */
	public void clearSpider() {
		if (null != spider) {
			spider.clearPipeline();
			spider.close();
			log.debug("spider is closed.[entry " + configuration.getEntry().getEntryId() + "]");

		}
	}

	/**
	 * Persists any items still buffered in the db pipeline. The pipeline
	 * already saves in batches, but 404 pages make the number of processed
	 * pages differ from the number of links, so a final flush is required to
	 * avoid losing the tail of the batch.
	 *
	 * @throws Exception if the batch save fails
	 */
	public void saveCollectItems() throws Exception {
		// Guard: the pipeline may be absent for unrecognized category types.
		if (null != dbCollectorPipeline && CollectionUtils.isNotEmpty(dbCollectorPipeline.getCollectItems())) {
			dbCollectorPipeline.getSaveUtil().saveBatch(dbCollectorPipeline.getCollectItems(), configuration.getEntry());
		}
	}

	/**
	 * Creates the spider that crawls target-page links.
	 *
	 * @param urls entry URLs to seed the spider with
	 * @return the configured (not yet running) spider
	 */
	public Spider createUrlSpider(String... urls) {
		clearSpider();
		spider = Spider.create(urlProcessor);//
		spider.setExitWhenComplete(true);
		addScheduler();
		addDownloader();
		if (null != urls && urls.length > 0) {
			spider.addUrl(urls);
		}
		spider.thread(configuration.getThreadNum());
		return spider;
	}

	/**
	 * Creates the spider that fetches and parses target pages.
	 *
	 * @param urls target-page URLs
	 * @return the configured spider, or null when there are no URLs
	 */
	public Spider createParsePageSpider(String[] urls) {
		if (urls != null && urls.length > 0) {
			spider = Spider.create(pageProcessor);// processor that parses target pages
			spider.setExitWhenComplete(true);
			addPipeline();
			addScheduler();
			addDownloader();
			spider.addUrl(urls);
			spider.thread(configuration.getThreadNum());
			return spider;
		}
		return null;
	}

	/**
	 * Initializes the target-URL set from the url processor and hands shared
	 * state (search-result metadata) over to the page processor.
	 *
	 * @return the collected target-page URLs
	 */
	private String[] initTargetUrls() {
		String[] urls = null;
		targetUrls.addAll(urlProcessor.getTargetUrls());
		targetUrls.remove(null);
		pageProcessor.setTargetUrls(targetUrls);
		urls = BeanUtil.set2Array(targetUrls);


		if (urlProcessor instanceof HtmlArticleSearchUrlProcessor) {
			List<ArticleSearch> list = ((HtmlArticleSearchUrlProcessor) urlProcessor).getArticleList();
			if (null != list) {
				((HtmlArticleSearchPageProcessor) pageProcessor)
				.setArticleList(list);// carries title/link/publish-time info from the search pages
			}
		}

		return urls;
	}

	/**
	 * Clears the scheduler's on-disk URL cache when the file-cache scheduler
	 * is in use and the spider has stopped.
	 */
	private void cleanCache() {
		switch (configuration.getSchedulerType()) {
		case FileCacheQueueScheduler:// URLs cached on disk under /temp/cache/ + entryId
			if (spider != null && spider.getStatus() == Spider.Status.Stopped) {
				String filepath = SysConfig.FILE_CACHE_SCHEDULER_PATH + configuration.getEntry().getEntryId();
				FileUtil.delDirectory(filepath);
				log.debug("clean cache complete !");
			}
			break;
		default:
			break;
		}
	}

	/**
	 * Attaches the pipelines appropriate for the current mode:
	 * console output in test mode, db persistence otherwise.
	 */
	private void addPipeline() {
		if (configuration.getTestMode()) {// test mode: print to console
			spider.addPipeline(consolePipeline);
		} else {// production mode
			if (configuration.getSaveToDb()) {// db-persistence pipeline
				spider.addPipeline(dbCollectorPipeline);
			}
			if (configuration.getSaveToHtml()) {// save-to-html pipeline (not implemented yet)

			}
			if (configuration.getSaveToJson()) {// save-to-json pipeline (not implemented yet)

			}
		}
	}

	/**
	 * Attaches the scheduler selected in the configuration.
	 * PriorityScheduler / RedisScheduler are not implemented yet.
	 */
	private void addScheduler() {
		switch (configuration.getSchedulerType()) {
		case QueueScheduler:// default: in-memory queue of pending URLs
			spider.setScheduler(
					new SpiderQueueScheduler().setDuplicateRemover(new BloomFilterDuplicateRemover(10000000))); // 10000000 is the estimated page count
			break;
		case FileCacheQueueScheduler:// pending URLs cached on disk
			SpiderFileCacheQueueScheduler scheduler = new SpiderFileCacheQueueScheduler(
					SysConfig.FILE_CACHE_SCHEDULER_PATH + configuration.getEntry().getEntryId());
			spider.setScheduler(scheduler);
			break;
		case PriorityScheduler:

			break;
		case RedisScheduler:

			break;
		default:
			break;
		}
	}


	/**
	 * Formats entry-page URLs from the `t_entry_format` configuration.
	 * Formats without a template fall back to the raw entry URL.
	 *
	 * @param entry the entry whose URL(s) are formatted
	 * @return the de-duplicated set of entry URLs as an array
	 */
	public String[] formatEntryUrl(Entry entry) {
		Set<String> urlset = new HashSet<>();
		if (CollectionUtils.isNotEmpty(entry.getFormats())) {
			for (EntryFormat format : entry.getFormats()) {
				String url_format = format.getEntryurlFormat();
				if (null != url_format && !"".equals(url_format)) {
					Object[] args = { entry.getEntryUrl() };
					String url = StringUtil.messageFormat(url_format, args);
					urlset.add(url);
				} else {
					urlset.add(entry.getEntryUrl());
				}
			}
		} else {
			urlset.add(entry.getEntryUrl());
		}
		return BeanUtil.set2Array(urlset);

	}


	/**
	 * Attaches the downloader: domains on the abuyun white list go through the
	 * abuyun proxy downloader, everything else uses the default downloader.
	 */
	protected void addDownloader() {

	    String domain = configuration.getDomain();
	    /** abuyun white-list strategy */
	    if (null != domain && ProxyDomainStrategyUtil.getAbuyunWhiteListProxy().requiredToHandle(domain)) {// white-listed domains must use the abuyun proxy
	    	spider.setDownloader(new AbuyunHttpClientDownloader());// abuyun proxy downloader
		}
	    else {// no proxy required: default downloader
	    	spider.setDownloader(new NormalHttpClientDownloader(configuration));// default
	    }
	}


	public Spider getSpider() {
		return spider;
	}

	public void setSpider(Spider spider) {
		this.spider = spider;
	}

	public BasicHtmlProcessor getPageProcessor() {
		return pageProcessor;
	}

	public void setPageProcessor(BasicHtmlProcessor pageProcessor) {
		this.pageProcessor = pageProcessor;
	}

	public BasicHtmlProcessor getUrlProcessor() {
		return urlProcessor;
	}

	public void setUrlProcessor(BasicHtmlProcessor urlProcessor) {
		this.urlProcessor = urlProcessor;
	}



	public Set<String> getTargetUrls() {
		return targetUrls;
	}

	public void setTargetUrls(Set<String> targetUrls) {
		this.targetUrls = targetUrls;
	}

	public Class<T> getClazz() {
		return clazz;
	}

	/**
	 * Sets the entity class and propagates it to the pipelines.
	 */
	public void setClazz(Class<T> clazz) {
		this.clazz = clazz;
		if (null != this.dbCollectorPipeline) {
			this.dbCollectorPipeline.setClazz(clazz);
		}
		if (null != this.consolePipeline) {
			this.consolePipeline.setClazz(clazz);
		}
	}

	public BasicHtmlCollectorPipeline<T> getDbCollectorPipeline() {
		return dbCollectorPipeline;
	}

	public void setDbCollectorPipeline(BasicHtmlCollectorPipeline<T> dbCollectorPipeline) {
		this.dbCollectorPipeline = dbCollectorPipeline;
	}

	public Configuration getConfiguration() {
		return configuration;
	}

	public HtmlConsolePipeline<T> getConsolePipeline() {
		return consolePipeline;
	}

	public void setConsolePipeline(HtmlConsolePipeline<T> consolePipeline) {
		this.consolePipeline = consolePipeline;
	}

	/**
	 * Sets the configuration and propagates it to the processors and pipelines.
	 *
	 * @param configuration new crawler configuration
	 */
	public void setConfiguration(Configuration configuration) {
		this.configuration = configuration;
		if (null != this.getPageProcessor()) {
			this.pageProcessor.setConfiguration(configuration);
		}
		if (null != this.getUrlProcessor()) {
			this.urlProcessor.setConfiguration(configuration);
		}
		if (null != this.getDbCollectorPipeline()) {
			this.dbCollectorPipeline.setConfiguration(configuration);
		}
		if (null != this.getConsolePipeline()) {
			this.consolePipeline.setConfiguration(configuration);
		}
	}
}
