package com.terren.spider.core.html.scheme.impl;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import com.terren.spider.entity.biz.ArticleSearch;
import com.terren.spider.entity.core.Entry;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.selector.Html;

/**
 * Extraction scheme for Baidu search-result pages: pulls article metadata
 * (title, news source, publish time, target URL) out of each result item and
 * discovers pagination links.
 */
public class BaiduSearchScheme extends BasicCustomScheme {

	/**
	 * Parses each search-result item on the page into an {@link ArticleSearch}.
	 * <p>
	 * Each Baidu result is rendered as a {@code <div class="result c-container">}
	 * fragment; the fragment is re-parsed standalone and probed with two XPath
	 * layouts (Baidu varies the nesting of source/time elements between items).
	 * Items whose target URL cannot be resolved are dropped.
	 *
	 * @param page  the fetched search-result page
	 * @param entry crawl-task context; its source/task/entry ids are copied onto
	 *              every extracted article
	 * @return the articles found on this page (possibly empty, never null)
	 */
	@Override
	public List<ArticleSearch> findTitleInfo(Page page, Entry entry) {
		List<ArticleSearch> articleList = new ArrayList<>();
		// Each result item lives in its own <div class="result c-container">.
		List<String> partList = page.getHtml().xpath("//div[@class='result c-container']").all();
		for (String part : partList) {
			Html childHtml = Html.create(part); // re-parse the item fragment as standalone HTML
			ArticleSearch temp = new ArticleSearch();
			temp.setTitle(childHtml.xpath("//h3[@class='t']/a/allText()").get());
			// Primary layout: source and time nested two <div> levels deep.
			temp.setNewsSource(childHtml.xpath("//div[@class='result c-container']/div/div[2]/div[2]/a[1]/text()").get());
			temp.setPubTimeStr(childHtml.xpath("//div[@class='result c-container']/div/div[2]/div[1]/span/text()").get());
			// Fallback layout: some items place time/source one level higher.
			if (temp.getPubTimeStr() == null) {
				temp.setPubTimeStr(childHtml.xpath("//div[@class='result c-container']/div[1]/span/text()").get());
			}
			if (temp.getNewsSource() == null) {
				temp.setNewsSource(childHtml.xpath("//div[@class='result c-container']/div[2]/a[1]/text()").get());
			}
			// Normalize dash-separated dates (e.g. "2020-01-02") by stripping
			// dashes and double-space runs. NOTE(review): presumably the target
			// format is a compact "yyyyMMdd" string — confirm against consumers.
			if (temp.getPubTimeStr() != null && temp.getPubTimeStr().contains("-")) {
				String normalized = temp.getPubTimeStr().replace("-", "");
				temp.setPubTimeStr(normalized.replace("  ", ""));
			}
			temp.setSourceId(entry.getSourceId());
			temp.setTaskId(entry.getTaskId());
			temp.setEntryId(entry.getEntryId());
			String url = childHtml.xpath("//div[@class='result c-container']/h3/a").links().get();
			temp.setUrl(url);
			// Keep only items that actually resolved to a target URL.
			// (Original checked ""/null in the reverse order; null-check first
			// is the conventional and equivalent form.)
			if (url != null && !url.isEmpty()) {
				articleList.add(temp);
			}
		}
		return articleList;
	}

	/**
	 * Collects pagination URLs from the {@code #page} navigation block.
	 * <p>
	 * If a "next page" link ({@code rsv_page=1}) is present, only that link is
	 * returned. If neither a "previous" ({@code rsv_page=-1}) nor a "next" link
	 * exists — i.e. the pager exposes only numbered pages — all pager links are
	 * returned instead.
	 *
	 * @param page  the fetched search-result page
	 * @param entry crawl-task context (unused here, required by the override)
	 * @return the set of pagination URLs to follow (possibly empty, never null)
	 */
	@Override
	public Set<String> findHelpPaginationUrl(Page page, Entry entry) {
		Set<String> resultSet = new HashSet<>();
		boolean hasPrevOrNext = false;
		List<String> pageUrls = page.getHtml().$("#page").links().all();
		for (String pageUrl : pageUrls) {
			if (pageUrl.contains("rsv_page=1")) { // "next page" link
				resultSet.add(pageUrl);
			}
			if (pageUrl.contains("rsv_page=-1") || pageUrl.contains("rsv_page=1")) {
				hasPrevOrNext = true;
			}
		}
		// No prev/next marker at all: take every pager link.
		if (!hasPrevOrNext) {
			resultSet.addAll(pageUrls);
		}
		return resultSet;
	}

}
