package com.cherrish.demo;

import java.io.*;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Toy web crawler for dangdang.com: fetches a page, saves its HTML to a local
 * file, extracts anchor links, and follows them up to a fixed depth. Contains a
 * single-threaded recursive variant and a wait/notify thread-based variant.
 *
 * @author cherrish
 * @time 2019-04-04 10:05
 * @name DangDangCrawler
 */
public class DangDangCrawler {
    public static void main(String[] args) {
        //crawler1();
        crawler2();
    }

    /**
     * Single-page demo: fetches one DangDang search-result page and dumps the
     * raw HTML to stdout.
     */
    private static void crawler1() {
        String strurl = "http://search.dangdang.com/?key=%BB%FA%D0%B5%B1%ED&act=input";
        try {
            URL url = new URL(strurl);
            URLConnection conn = url.openConnection();
            String charset = conn.getContentEncoding();
            System.out.println(charset);
            // Content-Encoding is usually absent (and is a compression token, not a
            // charset, when present) — fall back to UTF-8 when the header is empty.
            charset = null == charset ? "UTF-8" : charset;
            // try-with-resources: the original leaked the reader when readLine threw.
            try (BufferedReader br = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), charset))) {
                String line;
                while (null != (line = br.readLine())) {
                    System.out.println(line);
                }
            }
        } catch (IOException e) {
            // MalformedURLException is an IOException subclass, so one catch suffices.
            e.printStackTrace();
        }
    }

    /** Entry point for the recursive crawl, starting at the book front page. */
    private static void crawler2() {
        workurl("http://book.dangdang.com/", 1);
    }

    /** Directory where every fetched page is saved as &lt;timestamp&gt;.txt. */
    private static final String PATH = "D:\\workspace\\test\\001\\do-demo\\demo-spider\\target\\";
    /** Queue of URLs discovered but not yet crawled. */
    private static final List<String> ALLWAITURL = new ArrayList<>();
    /** URLs already crawled — dedup set so a page is fetched at most once. */
    private static final Set<String> ALLOVERURL = new HashSet<>();
    /** Crawl depth recorded for each discovered URL. */
    private static final Map<String, Integer> ALLURLDEPTH = new HashMap<>();
    /** Links deeper than this are not followed. */
    private static final int MAXDEPTH = 2;
    /** Matches anchor tags on a single line; href value is pulled out by extractHref(). */
    private static final String REGEX = "<a .*href=.+</a>";

    /**
     * Recursively crawls {@code strurl}: saves the page HTML to a timestamped file
     * under {@link #PATH}, queues every http(s) link found, then recurses on the
     * head of the queue until it is empty.
     *
     * @param strurl page to fetch
     * @param depth  current crawl depth; pages beyond {@link #MAXDEPTH} are skipped
     */
    public static void workurl(String strurl, int depth) {
        // Skip pages already crawled or beyond the depth limit.
        if (!(ALLOVERURL.contains(strurl) || depth > MAXDEPTH)) {
            try {
                URL url = new URL(strurl);
                URLConnection conn = url.openConnection();
                System.out.println(conn.getContentEncoding());
                Pattern p = Pattern.compile(REGEX);
                // try-with-resources: the original leaked both streams whenever an
                // exception was thrown mid-page.
                // NOTE(review): charset is hard-coded GBK — confirm against the site.
                try (BufferedReader br = new BufferedReader(
                            new InputStreamReader(conn.getInputStream(), "GBK"));
                     PrintWriter pw = new PrintWriter(
                            new File(PATH + System.currentTimeMillis() + ".txt"))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        pw.println(line);
                        Matcher m = p.matcher(line);
                        while (m.find()) {
                            String href = extractHref(m.group());
                            if (href != null
                                    && (href.startsWith("http:") || href.startsWith("https:"))) {
                                // Queue the link one level deeper than this page.
                                ALLWAITURL.add(href);
                                ALLURLDEPTH.put(href, depth + 1);
                            }
                        }
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
            ALLOVERURL.add(strurl);
            System.out.println(strurl + "网页爬取完成，已爬取数量：" + ALLOVERURL.size() + "，剩余爬取数量：" + ALLWAITURL.size());
        }
        // BUG FIX: the original called ALLWAITURL.get(0) unconditionally and threw
        // IndexOutOfBoundsException as soon as the queue drained; terminate cleanly.
        if (ALLWAITURL.isEmpty()) {
            return;
        }
        String nexturl = ALLWAITURL.remove(0);
        workurl(nexturl, ALLURLDEPTH.get(nexturl));
    }

    /**
     * Extracts the href value from an anchor-tag match.
     * Handles both quoted (href="...") and bare (href=...) attributes; the value
     * ends at the first closing quote, else the first space, else the first '>'.
     * Replaces the original's exception-driven substring(0, -1) control flow.
     *
     * @param anchor text matched by {@link #REGEX}; contains "href="
     * @return the raw href value, or null when no terminator is found
     */
    private static String extractHref(String anchor) {
        String href = anchor.substring(anchor.indexOf("href="));
        if (href.length() <= 5) {
            return null; // "href=" with nothing after it
        }
        href = href.substring(href.charAt(5) == '\"' ? 6 : 5);
        int end = href.indexOf('\"');
        if (end < 0) {
            end = href.indexOf(' ');
        }
        if (end < 0) {
            end = href.indexOf('>');
        }
        return end < 0 ? null : href.substring(0, end);
    }

    /** Lock on which idle crawler threads park until new work arrives. */
    private static final Object OBJ = new Object();
    /** Intended size of the crawler thread pool (not yet wired up here). */
    private static final int MAX_THREAD = 5;
    // Number of threads currently parked in OBJ.wait().
    // NOTE(review): updated without synchronization and never read — kept as-is.
    private static int idleCount = 0;

    /**
     * Queues a discovered URL for the threaded crawler. Depth is recorded only on
     * first sighting, so a shallow discovery is never overwritten by a deeper one.
     * Synchronized because multiple MyThread workers call it concurrently.
     */
    private static synchronized void addurl(String url, int depth) {
        ALLWAITURL.add(url);
        if (!ALLURLDEPTH.containsKey(url)) {
            ALLURLDEPTH.put(url, depth);
        }
    }

    /** Removes and returns the head of the wait queue; caller must ensure non-empty. */
    private static synchronized String geturl() {
        return ALLWAITURL.remove(0);
    }

    /**
     * Worker thread for the multi-threaded crawl: drains the wait queue, parking
     * on {@link #OBJ} when it is empty until workurl1 notifies.
     * NOTE(review): a non-static inner class holds a hidden reference to the
     * enclosing instance; kept non-static only to preserve the existing interface.
     */
    public class MyThread extends Thread {
        @Override
        public void run() {
            while (true) {
                if (ALLWAITURL.size() > 0) {
                    String url = geturl();
                    workurl1(url, ALLURLDEPTH.get(url));
                } else {
                    System.out.println("当前线程准备就绪，等待连接爬取：" + this.getName());
                    idleCount++;
                    // Park until workurl1 queues new URLs and notifies.
                    synchronized (OBJ) {
                        try {
                            OBJ.wait();
                        } catch (InterruptedException e) {
                            // Restore interrupt status instead of swallowing it.
                            Thread.currentThread().interrupt();
                        }
                    }
                    idleCount--;
                }
            }
        }
    }

    /**
     * Crawls one page for the thread-based crawler: saves the HTML, queues every
     * http(s) link via {@link #addurl}, then wakes one waiting worker.
     *
     * @param strurl page to fetch
     * @param depth  current crawl depth; pages beyond {@link #MAXDEPTH} are skipped
     */
    public static void workurl1(String strurl, int depth) {
        // Skip pages already crawled or beyond the depth limit.
        if (!(ALLOVERURL.contains(strurl) || depth > MAXDEPTH)) {
            System.out.println("当前执行：" + Thread.currentThread().getName() + " 爬取线程处理爬取：" + strurl);
            try {
                URL url = new URL(strurl);
                URLConnection conn = url.openConnection();
                System.out.println(conn.getContentEncoding());
                Pattern p = Pattern.compile(REGEX);
                // try-with-resources: the original leaked both streams on failure.
                // NOTE(review): charset is hard-coded GB2312 — confirm against the site.
                try (BufferedReader br = new BufferedReader(
                            new InputStreamReader(conn.getInputStream(), "GB2312"));
                     PrintWriter pw = new PrintWriter(
                            new File(PATH + System.currentTimeMillis() + ".txt"))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        pw.println(line);
                        Matcher m = p.matcher(line);
                        while (m.find()) {
                            String href = extractHref(m.group());
                            if (href != null
                                    && (href.startsWith("http:") || href.startsWith("https:"))) {
                                // BUG FIX: the original passed the parent's depth, so
                                // the crawl depth never increased and MAXDEPTH was
                                // never reached.
                                addurl(href, depth + 1);
                            }
                        }
                    }
                }
            } catch (Exception e) {
                // Was silently swallowed in the original; at least report it.
                e.printStackTrace();
            }
            ALLOVERURL.add(strurl);
            System.out.println(strurl + "网页爬取完成，已爬取数量：" + ALLOVERURL.size() + "，剩余爬取数量：" + ALLWAITURL.size());
        }
        if (ALLWAITURL.size() > 0) {
            // Wake one parked worker to pick up the next queued URL.
            synchronized (OBJ) {
                OBJ.notify();
            }
        } else {
            System.out.println("爬取结束.......");
        }
    }
}
