package crawler.script;

import java.util.ArrayList;
import java.util.List;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import crawler.client.sdk.task.java.CrawlerExtractInfo;
import crawler.client.sdk.task.java.CrawlerTaskInfo;
import crawler.utility.http.HttpClientResponse;
import crawler.utility.http.HttpClientUtil;

public class Exam2 {

	/**
	 * Example 2: a case where the URL passed in is not used.
	 *
	 * <p>The parent URL in this situation is still the URL that was passed in;
	 * the page actually fetched is a fixed Amazon site-directory URL.
	 *
	 * @param crawlerTaskInfo the incoming task (its task URL is intentionally
	 *                        ignored here — a hard-coded URL is crawled instead)
	 * @return a {@link CrawlerExtractInfo} wrapping one child task per link
	 *         extracted from the directory page
	 * @throws Exception if the HTTP request or page parsing fails
	 */
	public static CrawlerExtractInfo crawler(CrawlerTaskInfo crawlerTaskInfo) throws Exception {
		String url = "https://www.amazon.com/gp/site-directory/ref=nav_shopall_btn";

		HttpClientUtil httpClientUtil = new HttpClientUtil();
		httpClientUtil.setUrl(url);

		HttpClientResponse responseBean = httpClientUtil.get();

		String html = responseBean.getContent();
		// Supply the base URI at parse time (one-call jsoup overload) so that
		// relative hrefs can be resolved to absolute URLs via absUrl() below.
		Document doc = Jsoup.parse(html, url);
		Elements elements = doc.select("div.a-column > div  > div.fsdDeptCol > a");

		List<CrawlerTaskInfo> crawlOutput = new ArrayList<>();
		for (Element element : elements) {
			CrawlerTaskInfo crawlerURL = new CrawlerTaskInfo();
			// absUrl resolves the href against the base URI set at parse time.
			// attr("href") would return the raw attribute value instead, for
			// callers that prefer to resolve relative paths manually.
			crawlerURL.setChildUrl(element.absUrl("href"));

			crawlOutput.add(crawlerURL);
		}

		return new CrawlerExtractInfo(crawlOutput);
	}

	/**
	 * Smoke-test entry point: runs {@link #crawler(CrawlerTaskInfo)} once and
	 * prints the number of extracted child tasks followed by each task.
	 */
	public static void main(String[] args) throws Exception {
		CrawlerTaskInfo crawlerTaskInfo = new CrawlerTaskInfo();
		crawlerTaskInfo.setTaskUrl("https://www.baidu.com/");
		CrawlerExtractInfo crawlerExtractInfo = crawler(crawlerTaskInfo);
		List<CrawlerTaskInfo> crawlerTaskInfos = crawlerExtractInfo.getCrawlerTaskInfos();
		System.out.println(crawlerTaskInfos.size());
		for (CrawlerTaskInfo taskInfo : crawlerTaskInfos) {
			System.out.println(taskInfo);
		}
	}
}
