package com.yishuifengxiao.common.crawler.link;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.commons.lang3.StringUtils;

import com.yishuifengxiao.common.crawler.domain.entity.Page;
import com.yishuifengxiao.common.crawler.domain.model.LinkRule;
import com.yishuifengxiao.common.crawler.extractor.ExtractorFactory;
import com.yishuifengxiao.common.crawler.extractor.links.LinkExtractor;
import com.yishuifengxiao.common.crawler.link.filter.BaseLinkFilter;
import com.yishuifengxiao.common.crawler.link.filter.impl.AbsoluteLinkFilter;
import com.yishuifengxiao.common.crawler.link.filter.impl.HashLinkFilter;
import com.yishuifengxiao.common.crawler.link.filter.impl.HttpLinkFilter;
import com.yishuifengxiao.common.crawler.link.filter.impl.IllegalLinkFilter;
import com.yishuifengxiao.common.crawler.link.filter.impl.RelativeLinkFilter;
import com.yishuifengxiao.common.crawler.link.filter.impl.ShortLinkFilter;
import com.yishuifengxiao.common.tool.exception.ServiceException;

/**
 * Simple link extract decorator.<br/>
 * Responsibilities:<br/>
 * 1. Normalize every raw link found in the page text into an absolute network
 * address (via the link-filter chain).<br/>
 * 2. From the normalized addresses, extract all links that match the
 * configured {@link LinkRule} rules.
 *
 * @author yishui
 * @date 2019年11月26日
 * @version 1.0.0
 */
public class LinkExtractDecorator implements LinkExtract {

	/**
	 * Factory that produces a {@link LinkExtractor} for each configured rule.
	 */
	private final ExtractorFactory factory = new ExtractorFactory();

	/**
	 * Head of the link-filter chain used to normalize raw links.
	 */
	private final BaseLinkFilter linkFilter = this.createLinkFilter();

	/**
	 * The real link parser, responsible for pulling every raw hyperlink out of
	 * the page.
	 */
	private final LinkExtractProxy linkExtractProxy = new LinkExtractProxy();

	/**
	 * Optional user-supplied extractor; may be {@code null}, in which case only
	 * the built-in proxy runs.
	 */
	private final LinkExtract linkExtract;

	/**
	 * Link extraction rules; {@code rule.getRules()} drives which extractors
	 * are built.
	 */
	private final LinkRule rule;

	/**
	 * Constructs the decorator.
	 *
	 * @param rule        link extraction rules to apply
	 * @param linkExtract optional additional extractor, may be {@code null}
	 */
	public LinkExtractDecorator(LinkRule rule, LinkExtract linkExtract) {
		this.linkExtract = linkExtract;
		this.rule = rule;
	}

	/**
	 * Extracts all raw links from the page, normalizes them, filters them
	 * against the configured rules, and stores the result back on the page.
	 *
	 * @param page the page being processed
	 * @throws ServiceException if a delegate extractor fails
	 */
	@Override
	public void extract(final Page page) throws ServiceException {
		// Delegate to the real parser to collect raw hyperlinks
		this.linkExtractProxy.extract(page);

		// Run the optional user-supplied extractor, if any
		if (this.linkExtract != null) {
			this.linkExtract.extract(page);
		}

		// Resolve relative links against the redirect URL when present,
		// otherwise against the originally requested URL
		String path = StringUtils.isNotBlank(page.getRedirectUrl()) ? page.getRedirectUrl()
				: page.getRequest().getUrl();

		// Normalize and rule-filter the collected links, then store them back
		page.setLinks(this.filter(path, new HashSet<>(page.getLinks())));
	}

	/**
	 * From all collected hyperlinks, keeps only those matching the configured
	 * rules.
	 *
	 * @param path the address of the page currently being parsed
	 * @param urls the set of links collected from that page
	 * @return the de-duplicated list of links that match the rules
	 */
	private List<String> filter(final String path, Set<String> urls) {
		// Normalize every raw link into an absolute network address.
		// Locale.ROOT avoids locale-sensitive lower-casing (e.g. Turkish-I).
		// Filters may return null for rejected links, so drop nulls here
		// before handing the list to the extractors.
		List<String> links = urls.stream()
				.filter(Objects::nonNull)
				.map(t -> linkFilter.doFilter(path, t.toLowerCase(Locale.ROOT)))
				.filter(Objects::nonNull)
				.distinct()
				.collect(Collectors.toList());

		// Build one extractor per configured rule, run each over its own copy
		// of the normalized links, and merge the results side-effect-free.
		// (The previous implementation mutated a shared HashSet from a
		// parallel stream, which is a data race.)
		return this.rule.getRules().stream()
				.map(factory::getLinkExtractor)
				.filter(Objects::nonNull)
				.flatMap(t -> t.extract(new ArrayList<>(links)).stream())
				.filter(Objects::nonNull)
				.distinct()
				.collect(Collectors.toList());
	}

	/**
	 * Builds the link-filter chain used to normalize raw links.
	 *
	 * @return the head of the filter chain
	 */
	private BaseLinkFilter createLinkFilter() {
		// Chain order matters: each filter wraps the previous one and the
		// head (IllegalLinkFilter) is consulted first.
		RelativeLinkFilter relativeLinkFilter = new RelativeLinkFilter(null);
		HashLinkFilter hashLinkFilter = new HashLinkFilter(relativeLinkFilter);
		AbsoluteLinkFilter absoluteLinkFilter = new AbsoluteLinkFilter(hashLinkFilter);
		HttpLinkFilter httpLinkFilter = new HttpLinkFilter(absoluteLinkFilter);
		ShortLinkFilter shortLinkFilter = new ShortLinkFilter(httpLinkFilter);
		return new IllegalLinkFilter(shortLinkFilter);
	}

}
