package com.xcms.test.crawl;

import com.xcms.test.news.News;
import com.xcms.test.news.NewsDao;

import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NewsCrawl {

    /** Matches a complete anchor tag (open tag through closing {@code </a>}) on one line. */
    private static final Pattern ANCHOR_PATTERN = Pattern.compile("<a .*href=.+</a>");

    /** Matches any HTML tag; used to strip markup and keep only the anchor's text. */
    private static final Pattern TAG_PATTERN = Pattern.compile("<[^>]+>", Pattern.CASE_INSENSITIVE);

    public static void main(String[] args) {
        List<String> urls = getNewsList();
    }

    /**
     * Crawls the zhiwuwang.com news search page for the keyword "多肉" (succulents),
     * extracts every article link inside the result list, persists each link as a
     * {@link News} row via {@link NewsDao#insert}, and returns the extracted URLs.
     *
     * <p>Raw HTML of the result-list section is also dumped to {@code 1.txt} for
     * debugging, matching the original behavior.
     *
     * @return the article URLs found; empty if the request fails or nothing matches
     */
    private static List<String> getNewsList() {
        List<String> urls = new ArrayList<>();
        // Target search URL (keyword "多肉", already URL-encoded).
        String strurl = "https://www.zhiwuwang.com/news/search.php?kw=%E5%A4%9A%E8%82%89";
        HttpURLConnection con = null;
        try {
            URL url = new URL(strurl);
            con = (HttpURLConnection) url.openConnection();
            // BUG FIX: the original called setDoOutput(true), which makes
            // HttpURLConnection switch the request method to POST. This is a plain
            // page fetch, so the flag was removed and the request stays a GET.
            // Browser-like request headers so the site does not reject the crawler.
            con.setRequestProperty("Accept","text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8");
            con.setRequestProperty("Accept-Language","zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2");
            con.setRequestProperty("Cookie","ipPy=zhengzhou; UM_distinctid=172b2ba37f7249-04eabb4524a6298-4c302c7d-144000-172b2ba37f81b5; CNZZDATA1277635480=278818577-1592131986-https%253A%252F%252Fwww.zhiwuwang.com%252F%7C1592191386; Hm_lvt_86f43783acc56b0c8abb5bb039edc763=1592136636,1592191973,1592192001,1592192196; Hm_lpvt_86f43783acc56b0c8abb5bb039edc763=1592192196");
            // BUG FIX: the Host header previously said "i.tianqi.com" — copied from a
            // different crawler. It must name the server actually being contacted.
            con.setRequestProperty("Host","www.zhiwuwang.com");
            con.setRequestProperty("Referer","zhiwuwang.com/news/list-1187.html/");
            con.setRequestProperty("Upgrade-Insecure-Requests","1");
            con.setRequestProperty("User-Agent","Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0");
            con.setRequestProperty("Connection","keep-alive");
            con.setUseCaches(false);
            if (con.getResponseCode() == 200) {
                // try-with-resources guarantees the reader and the debug dump are
                // closed (and the PrintWriter flushed) even when parsing throws.
                try (BufferedReader br = new BufferedReader(
                             new InputStreamReader(con.getInputStream(), StandardCharsets.UTF_8));
                     PrintWriter pw = new PrintWriter(new File("1.txt"))) {
                    String line;
                    boolean inResultList = false;  // true while between the sort-option marker and </ul>
                    while ((line = br.readLine()) != null) {
                        if (!inResultList) {
                            // The result list starts right after this sort-order <option>.
                            if (line.contains("<option value=\"3\">按浏览人气排序</option>")) {
                                inResultList = true;
                            }
                            continue;
                        }
                        if (line.contains("</ul>")) {
                            break;  // end of the result list — stop scanning
                        }
                        pw.println(line);  // raw dump of the list section for debugging
                        Matcher m = ANCHOR_PATTERN.matcher(line);
                        while (m.find()) {
                            String anchor = m.group();
                            // Step 1: the anchor's visible text = anchor with all tags stripped.
                            String title = TAG_PATTERN.matcher(anchor).replaceAll("");
                            // Step 2: the href target, if it is a keyword-matching http link.
                            String href = extractHref(anchor);
                            if (href != null) {
                                System.out.println("地址：" + href + ",标题：" + title);
                                urls.add(href);
                                News news = new News();
                                news.setTitle(title);
                                news.setUrl(href);
                                NewsDao.insert(news);
                            }
                        }
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (con != null) {
                con.disconnect();  // release the underlying connection
            }
        }
        return urls;
    }

    /**
     * Extracts the href target from a full anchor tag.
     *
     * <p>Mirrors the original filter: the portion of the tag from {@code href=}
     * onward must contain both {@code "http"} and the keyword {@code "多肉"}
     * (keyword may appear in the URL or in the anchor text), otherwise the
     * anchor is rejected.
     *
     * @param anchor a complete {@code <a ...>...</a>} tag
     * @return the URL, or {@code null} when the anchor has no usable link
     */
    private static String extractHref(String anchor) {
        int at = anchor.indexOf("href=");
        if (at < 0) {
            return null;
        }
        String tail = anchor.substring(at);
        // Reject non-links and off-topic entries (same predicate as the original).
        if (!tail.contains("http") || !tail.contains("多肉")) {
            return null;
        }
        // Skip "href=" plus an optional opening double quote.
        String value = tail.substring(tail.length() > 5 && tail.charAt(5) == '"' ? 6 : 5);
        // The URL ends at the first quote, space, or '>' — whichever comes first.
        // (The original used substring + caught StringIndexOutOfBoundsException;
        // explicit delimiter search avoids exception-driven control flow.)
        int end = value.length();
        for (char delim : new char[] {'"', ' ', '>'}) {
            int i = value.indexOf(delim);
            if (i >= 0 && i < end) {
                end = i;
            }
        }
        return value.substring(0, end);
    }
}
