package servlet;

import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
 import servlet.news;
//import news.NewsDao;

import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@WebServlet(name = "newsServlet")
public class newsServlet extends HttpServlet {

    // Matches a single <a ...>...</a> element. Reluctant quantifiers (.*?, .+?)
    // so that several anchors on one line yield several matches instead of one
    // giant match spanning the whole line (the old greedy form lost news items).
    private static final Pattern ANCHOR_PATTERN = Pattern.compile("<a .*?href=.+?</a>");

    // Matches any HTML tag; used to strip markup and keep the anchor's text.
    // Compiled once instead of once per matched anchor.
    private static final Pattern HTML_TAG_PATTERN =
            Pattern.compile("<[^>]+>", Pattern.CASE_INSENSITIVE);

    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {

    }

    /**
     * Scrapes the "hot news" section of the Baidu news front page, writes each
     * discovered link and title to the response, stores them in {@code news}
     * beans (DAO insert currently commented out), and finally forwards to
     * {@code news.jsp}.
     *
     * Side effects: dumps the raw scraped section to a local file "1.txt"
     * (working-directory relative) for debugging.
     */
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        request.setAttribute("hello","你好");
        // Set the charset BEFORE obtaining the writer, otherwise the Chinese
        // text below is encoded with the container's default charset.
        response.setContentType("text/html;charset=UTF-8");
        PrintWriter outputStream= response.getWriter();

        List<String> urls=new ArrayList<>();
        // URL to crawl (alternative source: http://www.xinhuanet.com/)
        String strurl="http://news.baidu.com/";
        try {
            URL url=new URL(strurl);
            HttpURLConnection con=(HttpURLConnection)url.openConnection();
            con.setDoInput(true);
            // NOTE: do NOT call setDoOutput(true) here — on HttpURLConnection
            // that silently switches the request method from GET to POST.

            int code=con.getResponseCode();
            if(200==code) {
                // try-with-resources guarantees the reader is closed and the
                // debug dump is flushed — the old code never closed "1.txt",
                // so its buffered contents were usually lost.
                try (BufferedReader br = new BufferedReader(
                             new InputStreamReader(con.getInputStream(), "UTF-8"));
                     PrintWriter pw = new PrintWriter(new File("1.txt"))) {

                    boolean isStart = false;
                    String line;
                    // Single read loop so the section end marker actually
                    // stops the scrape (the old nested loop consumed the
                    // reader to EOF once the start marker was seen).
                    while ((line = br.readLine()) != null) {
                        if (line.contains("<div class=\"hotnews\" alog-group=\"focustop-hotnews\">"))
                            isStart = true;
                        if (line.contains("div id=\"pane-recommend\" class=\"mod-tab-pane pane-recommend \""))
                            isStart = false;
                        if (!isStart)
                            continue;

                        pw.println(line);
                        Matcher m = ANCHOR_PATTERN.matcher(line);
                        while (m.find()) {
                            String anchor = m.group();
                            // Anchor text = anchor with every HTML tag stripped.
                            String title = HTML_TAG_PATTERN.matcher(anchor).replaceAll("");
                            String href = extractHref(anchor);
                            if (href != null) {
                                outputStream.println("新闻地址：" + href + ",标题：" + title);
                                urls.add(href);
                                news news = new news();
                                news.setTitle(title);
                                news.setUrl(href);
                                // NewsDao.insert(news);
                            }
                        }
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        request.getRequestDispatcher("news.jsp").forward(request, response);
    }

    /**
     * Pulls the URL out of an {@code href=} attribute inside an anchor tag.
     *
     * @param anchor a full {@code <a ...>...</a>} element
     * @return the absolute URL ("http..."), or {@code null} when the anchor
     *         has no href or the href is not an http(s) link
     */
    private static String extractHref(String anchor) {
        int idx = anchor.indexOf("href=");
        if (idx < 0) {
            return null;
        }
        String href = anchor.substring(idx);
        if (!href.contains("http")) {
            return null;
        }
        // Skip "href=" and an optional opening quote.
        href = (href.length() > 5 && href.charAt(5) == '\"')
                ? href.substring(6)
                : href.substring(5);
        // The value ends at a closing quote, a space, or the end of the tag —
        // plain index checks instead of the old exception-driven control flow.
        int end = href.indexOf('\"');
        if (end < 0) end = href.indexOf(' ');
        if (end < 0) end = href.indexOf('>');
        return end >= 0 ? href.substring(0, end) : href;
    }


    }

