package com.jhwang;

import cn.hutool.core.bean.BeanUtil;
import cn.hutool.json.JSONUtil;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import us.codecraft.webmagic.*;
import us.codecraft.webmagic.downloader.HttpClientDownloader;
import us.codecraft.webmagic.pipeline.ConsolePipeline;
import us.codecraft.webmagic.pipeline.JsonFilePipeline;
import us.codecraft.webmagic.pipeline.Pipeline;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.scheduler.PriorityScheduler;
import us.codecraft.webmagic.selector.Html;
import us.codecraft.webmagic.selector.Selectable;
import us.codecraft.webmagic.utils.UrlUtils;

import java.io.*;
import java.lang.annotation.Documented;
import java.net.URL;
import java.util.*;


/**
 * Crawler page processor for China administrative-division pages.
 * @Author: jhwang
 * @Date: 2020/5/1 17:42
 * @Description:
 */
public class GithubRepoPageProcessor implements PageProcessor {
    /** Request-extra key holding the crawl depth of a request. */
    public static String _LEVEL = "_level";

    Spider spider;
    PriorityScheduler scheduler;

    public GithubRepoPageProcessor() {}

    /**
     * @param startUrl first page to crawl
     * @param path     local directory used when storing downloaded files
     */
    public GithubRepoPageProcessor(String startUrl, String path) {
        // Bean's constructor publishes both values into its static fields.
        new Bean(startUrl, path);
    }

    // Crawler configuration: the target site is GBK encoded.
    private Site site = Site.me().setCharset("GBK");

    public Site getSite() {
        return site;
    }

    /**
     * Extracts division data from one downloaded page.
     *
     * @param page the downloaded page / result container
     */
    public void process(Page page) {
        Html html = page.getHtml();
        Integer level = (Integer) page.getRequest().getExtra(_LEVEL);
        // Guard against a missing extra: unboxing a null Integer in
        // "level == 0" would throw NullPointerException.
        if (level != null && level == 0) {
            // Level 0 is the province-list page: rows use the .provincetr class.
            Selectable $ = html.$(".provincetr a");
            List<String> all = $.all();
            ArrayList<JSONCity> jsonCityArr = new ArrayList<JSONCity>();
            for (String a : all) {
                String id = new Html(a).getDocument().getElementsByTag("a").attr("href").replace(".html", "");
                String name = new Html(a).getDocument().getElementsByTag("a").text();
                JSONCity jsonCity = new JSONCity(id, "0", name, "0", "");
                jsonCityArr.add(jsonCity);
            }
            page.putField("jsonCityArr", jsonCityArr);
        }

        // Extract rows for all deeper levels (city/county/town/village).
        ProcessingFunction.setJsonCityArr(page);

        // Queue this page's links one level deeper.
        setLevelRequests(page);
    }


    /**
     * Queues every link found on the page as a new request whose depth is
     * the current request's depth plus one, enabling depth-limited crawling.
     *
     * @param page the page whose links are queued
     */
    private void setLevelRequests(Page page) {
        Selectable url = page.getUrl();
        Integer level = (Integer) page.getRequest().getExtra(_LEVEL);
        if (level == null) {
            level = 0;
        }
        for (String s : page.getHtml().links().all()) {
            // Skip empty anchors and javascript pseudo-links.
            if (!StringUtils.isBlank(s) && !s.equals("#") && !s.startsWith("javascript:")) {
                s = UrlUtils.canonicalizeUrl(s, url.toString());
                Request request = new Request(s);
                request.putExtra(_LEVEL, level + 1);
                page.getTargetRequests().add(request);
            }
        }
    }

    /**
     * webmagic is a vertical crawler: the URL set and the crawl depth must be
     * controlled by the caller. Configures and starts the spider.
     */
    public void run() {
        scheduler = new LevelLimitScheduler(4); // depth-limited scheduler
        spider = Spider.create(this)                 // reuse this processor instance
                .setScheduler(scheduler)             // limits crawl depth
                .addPipeline(new JSONPipeline())     // persists extracted rows as SQL
                .addPipeline(new ConsolePipeline())
                .setDownloader(new DownloadFile())   // downloader with retry handling
                .thread(10);                         // 10 worker threads
        Request request = new Request(Bean.startUrl);
        // For testing extraction at a specific depth:
        // request.putExtra(GithubRepoPageProcessor._LEVEL, 2);
        scheduler.push(request, spider);
        // Proxy configuration, should it ever be needed:
//        HttpClientDownloader httpClientDownloader = new HttpClientDownloader();
//        httpClientDownloader.setProxyProvider(SimpleProxyProvider.from(new Proxy("101.101.101.101",8888)));
//        spider.setDownloader(httpClientDownloader);
        spider.run();
    }
}

/**
 * Static holder for crawler configuration values.
 */
class Bean{
    // Root directory for stored files.
    public static String path;
    // URL the crawl starts from.
    public static String startUrl;

    /**
     * Publishes both settings to the static fields so every component
     * in this file can read them.
     *
     * @param startUrl URL the crawl starts from
     * @param path     root directory for stored files
     */
    public Bean(String startUrl, String path) {
        Bean.startUrl = startUrl;
        Bean.path = path;
    }
}

/**
 * Scheduler that limits how deep the crawl descends.
 */
class LevelLimitScheduler extends PriorityScheduler {
    // Deepest level that will still be scheduled.
    private int levelLimit = 3;

    public LevelLimitScheduler() {
    }

    /**
     * @param levelLimit deepest level that will still be scheduled
     */
    public LevelLimitScheduler(int levelLimit) {
        this.levelLimit = levelLimit;
    }

    /**
     * Schedules the request only if its depth is within the limit;
     * requests without a depth marker are treated as roots (level 0).
     */
    @Override
    public synchronized void push(Request request, Task task) {
        Integer level = (Integer) request.getExtra(GithubRepoPageProcessor._LEVEL);
        if (level == null) {
            level = 0;
            request.putExtra(GithubRepoPageProcessor._LEVEL, level);
        }
        if (level > levelLimit) {
            return; // too deep: silently drop the request
        }
        super.push(request, task);
    }
}


/**
 * Pipeline that persists the extracted administrative-division rows.
 */
class JSONPipeline  implements Pipeline{
    /**
     * @param resultItems
     * @param task
     */
    public void process(ResultItems resultItems, Task task) {

        ProcessingFunction.jsonCityArrFile(resultItems);
    }
}

/**
 * Data bean for one administrative-division row (serialized to JSON/SQL).
 */
class JSONCity {
    public String id;
    public String pid;      // parent division id
    public String name;
    public String level;    // crawl depth the row was found at
    public String cityType; // urban-rural classification code

    public JSONCity() {
    }

    public JSONCity(String id, String pid, String name, String level, String cityType) {
        this.id = id;
        this.pid = pid;
        this.name = name;
        this.level = level;
        this.cityType = cityType;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getPid() {
        return pid;
    }

    public void setPid(String pid) {
        this.pid = pid;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getLevel() {
        return level;
    }

    public void setLevel(String level) {
        this.level = level;
    }

    public String getCityType() {
        return cityType;
    }

    public void setCityType(String cityType) {
        this.cityType = cityType;
    }

    /**
     * Returns the named property single-quoted for use in a SQL VALUES list.
     * Replaces the previous reflective {@code BeanUtil.getProperty} lookup
     * with a direct switch over the five known fields (same results, no
     * reflection, no third-party dependency).
     *
     * NOTE(review): values are quoted but not escaped; an embedded single
     * quote would break the generated statement — acceptable only because
     * the crawled data is numeric codes and place names.
     *
     * @param fileName property name: id, pid, name, level or cityType
     * @return the property value wrapped in single quotes ("'value'")
     */
    public String getSqlBean(String fileName) {
        String value;
        switch (fileName) {
            case "id":       value = id;       break;
            case "pid":      value = pid;      break;
            case "name":     value = name;     break;
            case "level":    value = level;    break;
            case "cityType": value = cityType; break;
            default:         value = null;     break;
        }
        return "'" + value + "'";
    }

    @Override
    public String toString() {
        return "JSONCity{" +
                "id='" + id + '\'' +
                ", pid='" + pid + '\'' +
                ", name='" + name + '\'' +
                ", level='" + level + '\'' +
                ", cityType='" + cityType + '\'' +
                '}';
    }
}

/**
 * Downloader with custom error handling: a failed request is re-queued
 * for download; after three failed attempts the URL is added to a list
 * that is serialized to errorUrlList.json instead of being retried again.
 */
class DownloadFile extends HttpClientDownloader {
    // Per-URL failed-attempt counter.
    private LinkedHashMap<String, Integer> map = new LinkedHashMap<String, Integer>();

    /**
     * URLs that still failed after the retry limit.
     */
    public static ArrayList<String> urlList = new ArrayList<String>();

    /**
     * Called by webmagic when a request fails. Retries the download up to
     * three more times; after that the URL is recorded and the list is
     * serialized, and the request is not retried again.
     *
     * BUG FIX: the original incremented the counter with
     * {@code map.put(url, value + 1)} but then unconditionally executed
     * {@code map.put(url, value)}, writing the OLD count back — the counter
     * never advanced past 1, so failing URLs were retried forever.
     *
     * @param request the failed request
     */
    protected void onError(Request request) {
        String url = request.getUrl();
        Integer value = map.get(url);
        if (value == null) {
            value = 1;
        } else {
            if (value > 3) {
                // Retry limit reached: record the URL and persist the list.
                urlList.add(url);
                ProcessingFunction.JSONtoFile(JSONUtil.toJsonStr(urlList), "/errorUrlList.json");
                return;
            }
            value = value + 1;
        }
        map.put(url, value);
        Request req = new Request(url);
        Integer level = (Integer) request.getExtra(GithubRepoPageProcessor._LEVEL);
        req.putExtra(GithubRepoPageProcessor._LEVEL, level);
        // Re-download synchronously and run the normal extraction + persistence.
        Page page = this.download(req, Site.me().setCharset(null).toTask());
        ProcessingFunction.setJsonCityArr(page);
        ProcessingFunction.jsonCityArrFile(page.getResultItems());
        System.out.println("尝试连接："+ value + "次" + request.getUrl());
    }


}

/**
 * Pipeline that mirrors each crawled page to the local file system.
 */
class DownloadFilePipeline implements Pipeline {

    /**
     * Saves the page behind this result's request under Bean.path,
     * preserving the site's directory structure.
     *
     * @param resultItems results for one page (carries the source request)
     * @param task        the owning crawl task
     */
    public void process(ResultItems resultItems, Task task) {
        Request request = resultItems.getRequest();
        String downloadUrl = request.getUrl();
        String filePath = urlToPath(downloadUrl);
        downloadFile(downloadUrl, filePath);
    }

    /**
     * Maps a page URL to a local file path by stripping the crawl root and
     * replacing URL separators with the platform separator.
     *
     * BUG FIX: the original used {@code String.replaceAll}, which treats its
     * first argument as a regex — the dots in "index.html" and in the URL
     * prefix could match arbitrary characters. {@code String.replace} does
     * the intended literal substitution. The hard-coded "\\" separator was
     * also replaced with {@code File.separator} so paths work on all OSes.
     *
     * @param url page URL
     * @return absolute local file path under Bean.path
     */
    public String urlToPath(String url) {
        String root = Bean.startUrl.replace("index.html", "");
        String relative = url.replace(root, "");
        String localPart = String.join(File.separator, relative.split("/"));
        if (Bean.path.endsWith(File.separator)) {
            return Bean.path + localPart;
        }
        return Bean.path + File.separator + localPart;
    }

    /**
     * Downloads one file, creating parent directories as needed.
     *
     * BUG FIX: the original retried by calling itself recursively with no
     * limit, so a permanently unreachable URL ended in StackOverflowError.
     * Retries are now bounded.
     *
     * @param downloadUrl file download URL
     * @param filePath    full local destination path
     */
    public void downloadFile(String downloadUrl, String filePath) {
        final int maxAttempts = 3;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                File fileNew = new File(filePath);
                File fileParent = fileNew.getParentFile();
                if (fileParent != null && !fileParent.exists()) {
                    fileParent.mkdirs();
                }
                // copyURLToFile creates/overwrites the destination file itself.
                FileUtils.copyURLToFile(new URL(downloadUrl), fileNew);
                return;
            } catch (Exception e) {
                e.printStackTrace();
                System.out.println(e.getMessage());
                System.out.println("重新下载数据:");
            }
        }
        System.out.println("下载失败：" + downloadUrl);
    }
}

/**
 * Shared extraction and persistence helpers.
 */
class ProcessingFunction {
    /**
     * Accumulated rows. Kept for compatibility; currently unused by the
     * active persistence path (SQL files are written instead).
     */
    public static ArrayList<JSONCity> JSONArr = new ArrayList<JSONCity>();

    /**
     * Derives the parent-division id from a page URL: the last path
     * segment without its ".html" suffix.
     *
     * @param url page URL
     * @return parent id string
     */
    public static String getPid(String url) {
        int lastIndex = url.lastIndexOf("/");
        return url.substring(lastIndex + 1).replace(".html", "");
    }

    /**
     * Probes which row class the page uses. The site does not assign CSS
     * classes strictly by level, so each known class is tried in turn.
     *
     * @param page the downloaded page
     * @return {selectable, matched class name or null when nothing matched}
     */
    private static Object[] getSelectable(Page page) {
        Html html = page.getHtml();
        String[] classes = {"citytr", "countytr", "towntr", "villagetr"};
        Selectable $ = null;
        for (String cls : classes) {
            $ = html.$("." + cls);
            if ($.all().size() > 0) {
                return new Object[]{$, cls};
            }
        }
        // No row class matched: return the last (empty) selectable, as before.
        return new Object[]{$, null};
    }

    /**
     * Extracts division rows from the page and stores them in the
     * "jsonCityArr" result field (only when at least one row was found).
     *
     * FIX: the original called getSelectable(page) twice, probing the DOM
     * twice per page; it is now called once.
     *
     * @param page the downloaded page
     */
    public static void setJsonCityArr(Page page) {
        String url = page.getUrl().toString();
        // The pid is simply derived from the URL path; fetching the parent
        // page's own data would need an API not available here.
        Integer level = (Integer) page.getRequest().getExtra(GithubRepoPageProcessor._LEVEL);
        String pid = getPid(url);
        Object[] selected = getSelectable(page);
        Selectable $ = (Selectable) selected[0];
        String cls = (String) selected[1];
        List<String> all = $.all();
        ArrayList<JSONCity> jsonCityArr = new ArrayList<JSONCity>();

        if ("villagetr".equals(cls)) {
            // Village rows carry an extra urban-rural classification column
            // and contain no <a> tags.
            for (String row : all) {
                Html rowHtml = new Html("<table>" + row + "</table>");
                String id = rowHtml.getDocument().getElementsByTag("td").eq(0).text();
                String cityType = rowHtml.getDocument().getElementsByTag("td").eq(1).text();
                String name = rowHtml.getDocument().getElementsByTag("td").eq(2).text();
                jsonCityArr.add(new JSONCity(id, pid, name, level + "", cityType));
            }
        } else {
            for (String row : all) {
                Html rowHtml = new Html("<table>" + row + "</table>");
                Elements a = rowHtml.getDocument().getElementsByTag("a");
                JSONCity jsonCity;
                if (a != null && a.size() > 0) {
                    // Linked rows: id and name are the two anchor texts.
                    jsonCity = new JSONCity(a.eq(0).text(), pid, a.eq(1).text(), level + "", "");
                } else {
                    // Leaf rows without links: fall back to the cell texts.
                    String id = rowHtml.getDocument().getElementsByTag("td").eq(0).text();
                    String name = rowHtml.getDocument().getElementsByTag("td").eq(1).text();
                    jsonCity = new JSONCity(id, pid, name, level + "", "");
                }
                jsonCityArr.add(jsonCity);
            }
        }

        if (jsonCityArr.size() > 0) {
            page.putField("jsonCityArr", jsonCityArr);
        }
    }

    public static void main(String[] args) {
        String value = String.join(".", "mmm", "sssss");
        System.out.println(value);
    }

    /**
     * Persists one page's rows as INSERT statements, one SQL file per level
     * (a single file grew too large).
     *
     * BUG FIX: pages that produced no rows never set "jsonCityArr"; the
     * original iterated the resulting null list and threw NPE.
     *
     * @param resultItems extraction results for one page
     */
    public static void jsonCityArrFile(ResultItems resultItems) {
        Integer level = (Integer) resultItems.getRequest().getExtra(GithubRepoPageProcessor._LEVEL);
        if (level == null) {
            level = 0;
        }
        ArrayList<JSONCity> jsonCityArr = resultItems.get("jsonCityArr");
        if (jsonCityArr == null || jsonCityArr.isEmpty()) {
            return; // nothing was extracted from this page
        }
        SQLtoFile(conenteSql(jsonCityArr), "/city_table" + level + ".sql");
    }

    /**
     * Writes a JSON string to a file under the download directory,
     * overwriting any previous content (callers pass the full snapshot
     * each time, e.g. the error-URL list).
     *
     * FIX: the original left this method empty, silently discarding the data.
     *
     * @param JSONArr  JSON text to write
     * @param jsonPath file name, relative to GetPropertis.DOWNLOAD_PATH
     */
    public static void JSONtoFile(String JSONArr, String jsonPath) {
        File file = new File(GetPropertis.DOWNLOAD_PATH + jsonPath);
        File fileParent = file.getParentFile();
        if (fileParent != null && !fileParent.exists()) {
            fileParent.mkdirs();
        }
        try (FileWriter fileWriter = new FileWriter(file, false)) {
            fileWriter.write(JSONArr);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds one INSERT statement per row, newline-separated.
     *
     * NOTE(review): values come pre-quoted from getSqlBean but are NOT
     * escaped; a name containing a single quote would break the statement.
     *
     * @param jsonCityArr rows to serialize
     * @return SQL text
     */
    private static String conenteSql(ArrayList<JSONCity> jsonCityArr) {
        StringBuffer stringBuffer = new StringBuffer();
        for (JSONCity city : jsonCityArr) {
            String value = String.join(",", city.getSqlBean("id"), city.getSqlBean("pid"),
                    city.getSqlBean("name"), city.getSqlBean("level"), city.getSqlBean("cityType"));
            String sql = "INSERT INTO city_table (id , pid , name , level , cityType ) VALUES (" + value + ");";
            stringBuffer.append(sql).append(System.getProperty("line.separator"));
        }
        return stringBuffer.toString();
    }

    /**
     * Appends SQL text to a file under the download directory.
     *
     * BUG FIX: uses try-with-resources so the writers are always closed
     * (the original leaked them when println threw) and drops the unused
     * file-length read.
     *
     * @param sql      SQL text to append
     * @param jsonPath file name, relative to GetPropertis.DOWNLOAD_PATH
     */
    public static void SQLtoFile(String sql, String jsonPath) {
        File file = new File(GetPropertis.DOWNLOAD_PATH + jsonPath);
        File fileParent = file.getParentFile();
        if (fileParent != null && !fileParent.exists()) {
            fileParent.mkdirs();
        }
        try (FileWriter fileWriter = new FileWriter(file, true);            // append mode
             PrintWriter printWriter = new PrintWriter(fileWriter, true)) { // autoflush per line
            printWriter.println(sql);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}