package crawler01;

import com.alibaba.excel.EasyExcel;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.junit.Test;
import util.PropertyUtil;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * @author raok
 * @create 2022-04-23-20:57
 **/
public class CrawlerTest01 {
    // Crawler configuration loaded from the properties file. The three
    // "|"-separated lists (codes, display names, cut-off dates) are parallel
    // arrays: entry i of each describes the same major.
    public static String baseUrl0 = PropertyUtil.getPropertiesValue("baseUrl0");
    public static String baseUrl1 = PropertyUtil.getPropertiesValue("baseUrl1");
    public static String[] majorCodes = PropertyUtil.getPropertiesValue("majorCodes").split("\\|");
    public static String[] majorNames = PropertyUtil.getPropertiesValue("majorNames").split("\\|");
    public static String[] stopTimes = PropertyUtil.getPropertiesValue("stopTimes").split("\\|");

    // Paging flag: cleared once an article older than the cut-off date is seen,
    // the list runs out, or a page cannot be fetched.
    public static boolean shouldContinue = true;
    // 1-based article index written to the Excel sheet. Fix: was initialized to 0
    // but reset to 1 between majors, so the first major's indices were off by one.
    public static int count = 1;
    // Rows accumulated for the major currently being crawled.
    public static List<ExcelClass> listd = new ArrayList<>();

    // Compiled-regex cache so the same pattern is not recompiled for every <td>.
    private static final Map<String, Pattern> PATTERN_CACHE = new HashMap<>();

    public static void main(String[] args) throws IOException {
        // Fail fast on a broken configuration instead of an
        // ArrayIndexOutOfBoundsException mid-crawl.
        if (majorNames.length < majorCodes.length || stopTimes.length < majorCodes.length) {
            throw new IllegalStateException(
                    "majorCodes, majorNames and stopTimes must have the same number of entries");
        }
        // Crawl each configured major and write its rows to a separate Excel file.
        for (int i = 0; i < majorCodes.length; i++) {
            mainTest(baseUrl1, majorNames[i], majorCodes[i], stopTimes[i]);
        }
    }

    /**
     * Crawls every list page of one major until an article older than
     * {@code stopTime} is reached, then writes the collected rows to
     * an Excel file named after the major.
     *
     * @param baseUrl1  list-page base URL (shadows the static field of the same name)
     * @param majorName display name of the major; used for the file and sheet name
     * @param majorCode site code of the major, appended to {@code baseUrl1}
     * @param stopTime  cut-off date as digits (e.g. "20220101"); older articles end the crawl
     */
    public static void mainTest(String baseUrl1, String majorName, String majorCode, String stopTime) {
        String baseUrl2 = baseUrl1 + majorCode;
        // The first list page has a special name; later pages are 2.html, 3.html, ...
        getArticleListFromUrl(baseUrl2 + "/about.html", stopTime);
        int page = 2;
        while (shouldContinue) {
            getArticleListFromUrl(baseUrl2 + "/" + page + ".html", stopTime);
            page++;
        }
        EasyExcel.write("爬虫数据-" + majorName + ".xlsx", ExcelClass.class).sheet(majorName).doWrite(listd);
        // Reset the shared state before the next major is crawled.
        listd.clear();
        count = 1;
        shouldContinue = true;
    }

    /**
     * Fetches one list page and processes every article entry on it.
     * Clears {@link #shouldContinue} when an article older than {@code stopTime}
     * is found, when the page has no entries, or when the page cannot be
     * fetched — previously a fetch failure or empty page left the flag set and
     * the paging loop in {@link #mainTest} ran forever.
     *
     * @param url      absolute URL of the list page
     * @param stopTime cut-off date as digits, see {@link #mainTest}
     */
    public static void getArticleListFromUrl(String url, String stopTime) {
        try {
            Document doc = Jsoup.connect(url).userAgent("Mozilla/5.0").timeout(3000).get();
            Elements lists = doc.getElementsByClass("wb-data-list"); // one element per article row
            if (lists.isEmpty()) {
                // No entries: assume we paged past the last list page.
                shouldContinue = false;
                return;
            }
            for (int j = 0; j < lists.size(); j++, count++) {
                Elements link = lists.get(j).getElementsByTag("a"); // article link (attr of first match)
                Elements date = lists.get(j).getElementsByClass("wb-data-date"); // publication date
                String relHref = link.attr("href"); // article URL relative to baseUrl0
                String linkHref = link.text();
                String curTime = date.text();
                if (shouldStop(curTime, stopTime)) return;
                ExcelClass excelClass = new ExcelClass();
                excelClass.setHref(baseUrl0 + relHref);
                excelClass.setIndex(count);
                excelClass.setTitle(linkHref + "time: " + curTime);
                System.out.println("\n" + "文章" + count + "标题：" + linkHref + "time: " + curTime);
                // Parallel "|"-separated lists: regExps[i] extracts the value labeled msgs[i].
                String regExp = "(已经由)(.+)(备案)|(领取时间：)(.+)( 2)|(开标时间: )(.+)|(投标保证金或投标担保金额:)(.+)";
                String msg = "备案地：|招标文件发布时间：|开标时间:|保证金金额:";
                getArticleFromUrl(relHref, regExp, msg, excelClass);
                // Queue the row for the Excel write in mainTest.
                listd.add(excelClass);
            }
        } catch (IOException e) {
            // Stop paging on a fetch failure instead of retrying forever.
            e.printStackTrace();
            shouldContinue = false;
        }
    }

    /**
     * Decides whether the crawl for the current major should stop.
     *
     * @param time     publication date of the current article, e.g. "2022-04-23"
     * @param stopTime cut-off date as digits, e.g. "20220101"
     * @return {@code true} (after clearing {@link #shouldContinue}) when the
     *         article is older than the cut-off date
     */
    private static boolean shouldStop(String time, String stopTime) {
        try {
            if (Integer.parseInt(time.replace("-", "")) < Integer.parseInt(stopTime)) {
                shouldContinue = false;
                return true;
            }
        } catch (NumberFormatException e) {
            // An unparseable date on the page should not abort the whole run;
            // keep crawling and let the operator see the stack trace.
            e.printStackTrace();
        }
        return false;
    }

    /**
     * Fetches the article detail page and extracts the configured fields from
     * its &lt;td&gt; cells into {@code excelClass}.
     *
     * @param relHref    article URL relative to {@link #baseUrl0}
     * @param regExp     "|"-separated extraction regexes; capture group 2 holds the value
     * @param msg        "|"-separated field labels, parallel to {@code regExp}
     * @param excelClass row object that receives the extracted values
     */
    private static void getArticleFromUrl(String relHref, String regExp, String msg, ExcelClass excelClass) {
        String url = baseUrl0 + relHref;
        // Split once per article, not once per <td> as before.
        String[] regExps = regExp.split("\\|");
        String[] msgs = msg.split("\\|");
        try {
            Document doc = Jsoup.connect(url).userAgent("Mozilla/5.0").timeout(4000).get();
            for (Element ele : doc.getElementsByTag("td")) {
                String text = ele.text();
                for (int i = 0; i < regExps.length && i < msgs.length; i++) {
                    getContentFromUrl(text, regExps[i], msgs[i], excelClass);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Applies one extraction regex to the cell text and stores capture group 2
     * into the {@code ExcelClass} field selected by {@code msg}.
     *
     * @param substring  text content of one table cell
     * @param regExp     regex whose capture group 2 is the value to extract
     * @param msg        field label that selects the target setter
     * @param excelClass row object that receives the value
     */
    private static void getContentFromUrl(String substring, String regExp, String msg, ExcelClass excelClass) {
        Pattern pattern = PATTERN_CACHE.computeIfAbsent(regExp, Pattern::compile);
        Matcher matcher = pattern.matcher(substring);
        if (matcher.find()) {
            System.out.println(msg + matcher.group(2));
            switch (msg) {
                case "备案地：":
                    excelClass.setLocation(matcher.group(2));
                    break;
                case "招标文件发布时间：":
                    excelClass.setOpentime(matcher.group(2));
                    break;
                case "开标时间:":
                    excelClass.setSendtime(matcher.group(2));
                    break;
                case "保证金金额:":
                    excelClass.setPrice(matcher.group(2));
                    break;
                default:
                    // Unknown label: value is printed above but not stored.
                    break;
            }
        }
    }
}
