package com.cybertron.ironhide.spider.plugin.impl;

import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.cybertron.ironhide.spider.SpiderListener;
import com.cybertron.ironhide.spider.domain.TaskUrl;
import com.cybertron.ironhide.spider.fetcher.FetchResult;
import com.cybertron.ironhide.spider.fetcher.Page;
import com.cybertron.ironhide.spider.plugin.DigPoint;
import com.cybertron.ironhide.spider.plugin.util.DefaultLinkNormalizer;
import com.cybertron.ironhide.spider.plugin.util.LinkNormalizer;
import com.cybertron.ironhide.spider.plugin.util.UrlUtils;
import com.cybertron.ironhide.spider.task.Task;
import com.cybertron.ironhide.spider.xml.Model;
import com.cybertron.ironhide.spider.xml.Rule;
import com.cybertron.ironhide.spider.xml.Site;
import com.cybertron.ironhide.spider.xml.Target;

public class DigPointImpl implements DigPoint {

	private SpiderListener listener;

	public void init(Site site, SpiderListener listener) {
		this.listener = listener;
	}

	public void destroy() {
	}

	public List<TaskUrl> digNewUrls(FetchResult result, Task task)
			throws Exception {
		if (result == null)
			return null;

		List<TaskUrl> urls = new ArrayList<TaskUrl>();

		// 如果当前URL里没有任何页面内容，就无需进一步解析内容里的URL了
		if (result.getPage() == null)
			return urls;
		String html = result.getPage().getContent();
		if (html == null)
			return urls;

		Rule rule = task.site.getTargets().getSourceRule();
		if (rule != null) {

			Model digModel = rule.getDigUrls();
			Model nextPage = rule.getNextPage();
			Map<String, Object> finalFields = new HashMap<String, Object>();
			// 判断是否定义了digUrls
			if (digModel != null) {
				// 构造一个目标
				Target tgt = new Target();
				tgt.setName("dig_urls");
				tgt.setModel(digModel);
				Collection<TaskUrl> newUrls = UrlUtils.digUrls(
						result.getPage(), task, rule, tgt, listener,
						finalFields, false);
				// 解析Model获得urls
				urls.addAll(newUrls);
			}

			// 如果配置了下一页，则进入递归解析
			if (nextPage != null) {
				parseNextPage(rule, task, result.getPage(), urls, finalFields);
			}

		}

		// 修复URL
		URL URL = new URL(task.site.getUrl());
		String port = "";
		if (URL.getPort() != -1) {
			port = ":" + URL.getPort();
		}

		String hostUrl = new StringBuilder("http://").append(URL.getHost())
				.append(port).append("/").toString();

		for (TaskUrl taskUrl : urls) {
			LinkNormalizer ln = new DefaultLinkNormalizer(hostUrl);
			String newUrl = ln.normalize(taskUrl.getUrl());
			if (newUrl.startsWith("mailto:"))
				continue;
			// 去重复
			if (urls.contains(newUrl))
				continue;
			taskUrl.setUrl(newUrl);
		}
		return urls;
	}

	// 递归的额关键是 Page
	public void parseNextPage(Rule rule, Task task, Page page,
			Collection<TaskUrl> urls, Map<String, Object> finalFields)
			throws Exception {
		Model mdl = rule.getNextPage();
		if (mdl == null)
			return;

		Target tgt = new Target();
		tgt.setName("dig_urls");
		tgt.setModel(mdl);
		Collection<TaskUrl> nextUrls = UrlUtils.digUrls(page, task, rule, tgt,
				listener, finalFields, true);
		if (nextUrls == null || nextUrls.isEmpty())
			return;
		urls.addAll(nextUrls);
	}
}
