package design.graduation.crawler.task;

import design.graduation.crawler.model.entity.CrawlerBook;
import org.jsoup.Jsoup;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.scheduler.BloomFilterDuplicateRemover;
import us.codecraft.webmagic.scheduler.QueueScheduler;
import us.codecraft.webmagic.selector.Html;
import us.codecraft.webmagic.selector.Selectable;

import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

/**
 * @author: 洪少尉
 * @Date: 2020/3/29 11:12
 * @Description:
 */
@Component
public class Crawler implements PageProcessor {
	/** Entry URL of the crawl: the zxcs.me home page. */
	String url = "http://www.zxcs.me";

	/**
	 * Dispatches a fetched page to the parser matching its type.
	 * <p>
	 * Detection rules (all site-specific, derived from zxcs.me markup):
	 * <ul>
	 *   <li>download page — {@code div.top_left} is present (non-null text);</li>
	 *   <li>home page — exactly 8 {@code div.title a} links (brittle if the site layout changes);</li>
	 *   <li>category listing — no {@code div.title a} links but {@code dl#plist dt} links exist;</li>
	 *   <li>anything else (detail pages) — ignored.</li>
	 * </ul>
	 *
	 * @param page the fetched page supplied by the WebMagic spider
	 */
	@Override
	public void process(Page page) {
		Html html = page.getHtml();
		// "div.top_left" exists only on download pages; non-null text marks one.
		String downloadPage = html.css("div.top_left", "text").toString();
		if (downloadPage != null) {
			this.downloadPage(page, html);
			return;
		}
		List<Selectable> href = html.css("div.title a").links().nodes();
		// Home page carries exactly 8 "div.title a" links.
		if (href.size() == 8) {
			this.homePage(page, href);
			return;
		}
		if (href.size() == 0) {
			// No title links: either a category listing (has "dl#plist dt" links)
			// or a detail page (has none, and is ignored).
			href = html.css("dl#plist dt").links().nodes();
			if (href.size() > 0) {
				this.sortPage(page, html, href);
			}
		}
	}

	/**
	 * Parses a download page into a {@link CrawlerBook} and publishes it to the
	 * pipeline under the {@code "book_download"} key.
	 *
	 * @param page the current page (receives the result field)
	 * @param html the page's parsed HTML
	 */
	private void downloadPage(Page page, Html html) {
		CrawlerBook crawlerBook = new CrawlerBook();
		// Title heading holds both the book name (《…）) and the author (作者…).
		crawlerBook.setBookName(html.css("div.content h2", "text").regex("《.*）").toString());
		crawlerBook.setAuthor(html.css("div.content h2", "text").regex("作者.*").toString());
		crawlerBook.setDownloadUrl(html.css("span.downfile").nodes().get(0).links().toString());
		// 0 = not yet downloaded.
		crawlerBook.setDownloadStatus(0);
		page.putField("book_download", crawlerBook);
	}

	/**
	 * Parses a category listing page: extracts one {@link CrawlerBook} per entry,
	 * publishes the batch under the {@code "books"} key, queues each entry's
	 * download page, and queues the next listing page while one remains.
	 *
	 * @param page the current page (receives result fields and target requests)
	 * @param html the page's parsed HTML
	 * @param href the per-entry detail links ({@code dl#plist dt} links)
	 */
	private void sortPage(Page page, Html html, List<Selectable> href) {
		List<CrawlerBook> crawlerBooks = new ArrayList<>(href.size());
		// Hoisted loop invariants: these selections are identical on every iteration.
		List<Selectable> titleNodes = html.css("dl#plist dt a", "text").nodes();
		List<Selectable> entryNodes = html.css("dl#plist").nodes();
		for (int i = 0; i < href.size(); i++) {
			CrawlerBook crawlerBook = new CrawlerBook();
			String bookName = titleNodes.get(i).regex("《.*）").toString();
			String author = titleNodes.get(i).regex("作者.*").toString();
			// Second <dd> holds the category links: [0] = site category, [1] = sub-tag.
			String classification = Jsoup.parse(entryNodes.get(i).css("dd").nodes().get(1)
					.css("a").nodes().get(0).toString()).text();
			Integer[] classIds = mapClassification(classification);
			Integer primaryClassification = classIds == null ? null : classIds[0];
			Integer secondaryClassification = classIds == null ? null : classIds[1];
			String tertiaryClassification = Jsoup.parse(entryNodes.get(i).css("dd").nodes().get(1)
					.css("a").nodes().get(1).toString()).text();
			String introduction;
			try {
				// Most entries wrap the synopsis heading in 【】 brackets...
				introduction = Jsoup.parse(entryNodes.get(i).css("dd").nodes().get(0)
						.regex("【内容简介】.*").toString()).text();
			} catch (Exception e) {
				// ...a few omit the brackets; fall back to the bare heading.
				introduction = Jsoup.parse(entryNodes.get(i).css("dd").nodes().get(0)
						.regex("内容简介.*").toString()).text();
			}
			// Trim trailing promotional text starting at the last "优质" marker, if any.
			int cut = introduction.lastIndexOf("优质");
			if (cut >= 0) {
				introduction = introduction.substring(0, cut);
			}
			crawlerBook.setBookName(bookName)
					.setAuthor(author)
					.setPrimaryClassification(primaryClassification)
					.setSecondaryClassification(secondaryClassification)
					.setTertiaryClassification(tertiaryClassification)
					.setIntroduction(introduction)
					.setSaveTime(new Date());
			crawlerBooks.add(crawlerBook);
			// Book id is the last path segment of the detail link; queue its download page.
			String link = href.get(i).toString();
			String idNum = link.substring(link.lastIndexOf('/') + 1);
			page.addTargetRequest("http://www.zxcs.me/download.php?id=" + idNum);
		}
		page.putField("books", crawlerBooks);
		// Pagination: queue page (current + 1) while the current number is below the last page's.
		List<Selectable> pageLinks = html.css("div#pagenavi").links().nodes();
		// Current page number is the lone <span> inside the navigator.
		String currentNum = html.css("div#pagenavi span", "text").toString();
		// Last link in the navigator points at the final page; its last path segment is the number.
		String tail = pageLinks.get(pageLinks.size() - 1).toString();
		String tailNum = tail.substring(tail.lastIndexOf('/') + 1);
		if (Integer.parseInt(tailNum) > Integer.parseInt(currentNum)) {
			String nextPage = tail.substring(0, tail.lastIndexOf('/')) + "/" + (Integer.parseInt(currentNum) + 1);
			page.addTargetRequest(nextPage);
		}
	}

	/**
	 * Maps a site category label to its internal ids.
	 *
	 * @param classification the category text scraped from the listing page
	 * @return a two-element array {primaryClassification, secondaryClassification},
	 *         or {@code null} when the label is unknown (callers store null ids)
	 */
	private static Integer[] mapClassification(String classification) {
		switch (classification) {
			case "都市·娱乐":
				return new Integer[]{1, 1};
			case "精校武侠":
				return new Integer[]{2, 8};
			case "精校仙侠":
				return new Integer[]{2, 9};
			case "精校奇幻":
				return new Integer[]{3, 10};
			case "精校玄幻":
				return new Integer[]{3, 11};
			case "精校科幻":
				return new Integer[]{4, 12};
			case "精校灵异":
				return new Integer[]{4, 13};
			case "精校历史":
				return new Integer[]{5, 14};
			case "精校军事":
				return new Integer[]{5, 15};
			case "精校竞技":
				return new Integer[]{6, 16};
			case "精校游戏":
				return new Integer[]{6, 17};
			case "二次元":
				return new Integer[]{7, 7};
			default:
				return null;
		}
	}

	/**
	 * Handles the home page: the first link points back to the home page itself
	 * and is dropped; the remaining category-page links are queued for crawling.
	 *
	 * @param page the current page (receives the target requests)
	 * @param href the {@code div.title a} links found on the home page
	 */
	private void homePage(Page page, List<Selectable> href) {
		// Drop the self-referencing home-page link.
		href.remove(0);
		for (Selectable s : href) {
			page.addTargetRequest(s.toString());
		}
	}

	/**
	 * Site configuration: UTF-8 charset, 60s connect timeout,
	 * 5 retries spaced 5s apart. Tuned for the target site.
	 */
	private Site site = Site.me()
			.setCharset("UTF-8")
			.setTimeOut(60 * 1000)
			.setRetrySleepTime(5 * 1000)
			.setRetryTimes(5);

	@Override
	public Site getSite() {
		return site;
	}

	/** Pipeline that persists the parsed books (injected by Spring). */
	@Resource
	SpringDataPipeline springDataPipeline;

	/**
	 * Scheduled crawl entry point: starts 1s after boot and repeats every 24h.
	 * Uses this Spring-managed bean as the processor (the previous
	 * {@code new Crawler()} created a detached, non-managed instance).
	 */
	@Scheduled(initialDelay = 1000, fixedDelay = 24 * 60 * 60 * 1000)
	public void process() {
		Spider.create(this)
				.addUrl(url)
				// Bloom-filter de-duplication sized for ~100k URLs.
				.setScheduler(new QueueScheduler().setDuplicateRemover(new BloomFilterDuplicateRemover(100000)))
				.addPipeline(springDataPipeline)
				.thread(10)
				.run();
	}
}
