package com.zyx.crawlerdemo.webcollector.hfutnews;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.ram.RamCrawler;
import org.jsoup.select.Elements;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;

import org.jsoup.nodes.Element;

/**
 * @author Yaxi.Zhang
 * @since 2021/8/12 14:48
 * desc: Crawls news pages using RamCrawler.
 *      RamCrawler keeps all crawl state in memory (no file-system or database
 *      dependency), which makes it suitable for one-off crawl tasks.
 */
public class HfutNewsCrawler extends RamCrawler {

    // Output file for the first layer (list pages: url / title / time).
    String fileFirstLayerOutPut = "data/hfut_newsUrl.txt";
    // Output file for the second layer (article detail pages).
    String contentOutPut = "data/hfut_newsContent.txt";
    // Charset name used when writing the output files.
    String code = "utf-8";
    // Accumulated output buffers. visit() runs on multiple crawler threads
    // (see setThreads(10) in main), so every append/snapshot of these
    // builders is done inside a synchronized block on the builder itself.
    StringBuilder sbFirst = new StringBuilder();
    StringBuilder sbContent = new StringBuilder();

    /**
     * Seeds the crawler with {@code pageNum} news-list pages.
     *
     * @param pageNum number of list pages to crawl (1-based)
     * @throws Exception if a seed cannot be added
     */
    public HfutNewsCrawler(int pageNum) throws Exception {
        // Add one seed per list page.
        for (int pageIndex = 1; pageIndex <= pageNum; pageIndex++) {
            String url = "http://news.****.edu.cn/list-1-" + pageIndex + ".html";
            CrawlDatum datum = new CrawlDatum(url)
                    // First layer: the news-list page
                    .type("firstLayer")
                    // Remember which list page this is
                    .meta("pageIndex", pageIndex)
                    // Depth of the first layer
                    .meta("depth", 1);
            this.addSeed(datum);
        }
    }

    /**
     * Callback invoked for every fetched page. Dispatches on the datum type:
     * "firstLayer" pages yield article links (queued as "content" datums),
     * "content" pages yield the article body.
     *
     * <p>NOTE(review): this method is called concurrently by the crawler's
     * worker threads, so shared buffers are guarded below.
     *
     * @param page the fetched page
     * @param next collector for follow-up URLs to crawl
     */
    @Override
    public void visit(Page page, CrawlDatums next) {
        int pageIndex = page.metaAsInt("pageIndex");
        int depth = page.metaAsInt("depth");
        if (page.matchType("firstLayer")) {
            // Parse the news-list page: each <li> holds one article entry.
            Elements results = page.select("div.col-lg-8 > ul").select("li");
            for (int rank = 0; rank < results.size(); rank++) {
                Element result = results.get(rank);
                String href = "http://news.****.edu.cn" +
                        result.select("a").attr("href");
                String title = result.select("a").text();
                String time = result.select("span[class=rt]").text();
                if (title.length() != 0) {
                    // Record the first-layer entry; guard the shared buffer
                    // and take a consistent snapshot for the file rewrite.
                    synchronized (sbFirst) {
                        sbFirst.append("url:").append(href).append("\ttitle:").append(title).append("\ttime:").append(time).append("\n");
                        try {
                            writeFile(fileFirstLayerOutPut, sbFirst.toString(), code);
                        } catch (IOException e) {
                            // Best-effort output: log and keep crawling.
                            e.printStackTrace();
                        }
                    }
                    // Queue the article link; type "content" routes it to the
                    // detail-page branch below.
                    next
                            // Add the URL to a CrawlDatum to be fetched
                            .addAndReturn(href)
                            // Detail (content) page
                            .type("content")
                            // Which list page the article came from
                            .meta("pageIndex", pageIndex)
                            // Position of the article on that list page
                            .meta("rank", rank);
                }
            }
        }

        // All queued follow-ups are one level deeper than this page.
        next.meta("depth", depth + 1);
        // Article detail page.
        if (page.matchType("content")) {
            String url = page.url();
            // List page the article came from.
            int index = page.metaAsInt("pageIndex");
            // Position of the article on that list page.
            int rank = page.metaAsInt("rank");
            // Article body text.
            String content = page.select("div[id=artibody]").text();
            // Record the second-layer entry; same locking discipline as above.
            synchronized (sbContent) {
                sbContent.append("url:").append(url).append("\tIndex:").append(index)
                        .append("\trank:").append(rank).append("\tcontent:").append(content).append("\n");
                try {
                    writeFile(contentOutPut, sbContent.toString(), code);
                } catch (IOException e) {
                    // Best-effort output: log and keep crawling.
                    e.printStackTrace();
                }
            }
        }

    }

    /**
     * Overwrites the given file with the given content.
     *
     * @param file    target file path
     * @param content text to write (replaces any existing content)
     * @param code    charset name used for encoding
     * @throws IOException if the file cannot be created or written
     */
    public static void writeFile(String file, String content, String code)
            throws IOException {
        File result = new File(file);
        // Create the parent directory (e.g. "data/") if missing, so the
        // first run does not fail with FileNotFoundException.
        File parent = result.getParentFile();
        if (parent != null && !parent.exists()) {
            parent.mkdirs();
        }
        // try-with-resources guarantees the stream/writer are closed (and
        // flushed) even if write() throws — the original leaked on failure.
        try (BufferedWriter bw = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(result, false), code))) {
            bw.write(content);
        }
    }

    public static void main(String[] args) throws Exception {
        // Number of list pages to crawl.
        HfutNewsCrawler crawler = new HfutNewsCrawler(3);
        // Worker thread count (visit() runs concurrently on these).
        crawler.setThreads(10);
        // Start crawling.
        crawler.start();
    }
}
