package com.service;


import com.pojo.News;
import com.pojo.News1;
import com.pojo.NewsList;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.safety.Whitelist;
import org.jsoup.select.Elements;
import org.junit.Test;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.UUID;


/**
 * Crawls news data from the Changde Agriculture Bureau site
 * (nyncj.changde.gov.cn). Only used to seed the database the first time,
 * when no data has been fetched yet.
 *
 * @author 陆微腾
 * @since 2020/4/18
 */
@Service
public class CrawlerService {

    /** Site root; list pages emit relative hrefs that are resolved against it. */
    private static final String BASE_URL = "http://nyncj.changde.gov.cn";

    /** Randomness source for rotating the User-Agent header between requests. */
    final Random r = new Random();

    /** Pool of desktop-browser User-Agent strings; one is picked per request. */
    final String[] ua = {"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36 OPR/37.0.2178.32",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586",
            "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
            "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0)",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 BIDUBrowser/8.3 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36 Core/1.47.277.400 QQBrowser/9.4.7658.400",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 UBrowser/5.6.12150.8 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36 TheWorld 7",
            // FIX: this entry was truncated/garbled ("Windows NT 6.1; W…"); repaired
            // into a well-formed Firefox 60 User-Agent string.
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0"};

    /** List-page URL prefixes; a 1-based page number is appended to each. */
    static final String[] strings = {
            "http://nyncj.changde.gov.cn/zhdt/gzdt/cddt_",
            // Other site sections, currently disabled:
//            "http://nyncj.changde.gov.cn/ywgz/xczx_",
//            "http://nyncj.changde.gov.cn/ywgz/nszd_",
//            "http://nyncj.changde.gov.cn/ywgz/xdny_",
//            "http://nyncj.changde.gov.cn/ywgz/zlaq_",
//            "http://nyncj.changde.gov.cn/ywgz/snsd/snlt_",
//            "http://nyncj.changde.gov.cn/ywgz/snsd/ldyl_"
    };

    /**
     * Picks a random User-Agent string for a single request.
     *
     * FIX: previously an index field was initialized once with
     * {@code r.nextInt(14)}, which pinned every request to the same UA for
     * the lifetime of the service and could never select the last of the
     * 15 pool entries.
     *
     * @return a randomly chosen entry of {@link #ua}
     */
    private String randomUserAgent() {
        return ua[r.nextInt(ua.length)];
    }

    /**
     * Collects article URLs from the first {@code account} pages of every
     * configured list-page prefix. The target site has no pagination widget,
     * so pages are enumerated explicitly.
     *
     * @param account number of pages to crawl per prefix
     * @return absolute article URLs in crawl order
     */
    public List<String> crawlerUrl(int account) {
        String[] pageUrls = getUrlss(strings, account);
        List<String> href = new ArrayList<>();
        for (String pageUrl : pageUrls) {
            getNewsUrl(href, pageUrl);
        }
        return href;
    }

    /**
     * Fetches one list page and appends the absolute URL of every linked
     * article to {@code href}. I/O failures are logged and skipped so one
     * bad page does not abort the whole crawl.
     *
     * @param href accumulator the discovered URLs are appended to
     * @param s    list-page URL to fetch
     */
    public void getNewsUrl(List<String> href, String s) {
        try {
            Document doc = Jsoup.connect(s)
                    .userAgent(randomUserAgent())
                    .get();
            Elements newsList = doc.getElementsByAttributeValue("class", "newsList");
            Elements li = Jsoup.parse(newsList.html()).getElementsByTag("li");
            Thread.sleep(1000); // crude politeness delay between list-page fetches
            for (Element item : li) {
                Elements anchor = item.getElementsByTag("a"); // article title link
                href.add(BASE_URL + anchor.attr("href"));     // relative -> absolute
            }
        } catch (IOException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    // NOTE(review): a JUnit test does not belong in a production @Service
    // class (and drags org.junit onto the runtime classpath) — it should be
    // moved under src/test. Kept as-is to avoid changing the public interface.
    @Test
    public void test() {
        News1 news1 = crawlerNews("http://nyncj.changde.gov.cn/zhdt/gzdt/cddt/content_735526");
        System.out.println(news1);
    }

    /**
     * Fetches a single article page and maps it onto a {@link News1}.
     * On any fetch/parse error the partially filled entity is returned
     * (type and nothing else is guaranteed to be set).
     *
     * @param url absolute article URL
     * @return populated news entity; fields may be unset on failure
     */
    public News1 crawlerNews(String url) {
        News1 news = new News1();
        news.setType(typefind(url));
        try {
            Document doc = Jsoup.connect(url)
                    .userAgent(randomUserAgent())
                    .get();
            Thread.sleep(2000); // politeness delay between article fetches
            // Headline.
            String title = doc.getElementsByAttributeValue("class", "title").text();
            // The "property" bar holds publish time and source in two <span>s.
            Elements property = doc.getElementsByAttributeValue("class", "property");
            Elements span = Jsoup.parse(property.html()).getElementsByTag("span");
            String pubtime = span.get(0).text().replace("发布时间：", "");
            String newsform = span.get(1).text().replace("信息来源：", "");
            // Article body ("conTxt"): strip markup down to plain text,
            // mapping paragraph/line-break tags to whitespace.
            Elements conTxt = doc.getElementsByAttributeValue("class", "conTxt");
            String content = Jsoup.clean(conTxt.first().html(), Whitelist.basic())
                    .replace("<br>", "\n")
                    .replace("&nbsp;", " ")
                    .replace("<p>", "\t")
                    .replace("</p>", "\n")
                    .replace("<span>", "")
                    .replace("</span>", "");
            news.setTitle(title); // FIX: duplicate setTitle call removed
            news.setContxt(content);
            news.setContxturl(url);
            news.setPubtime(pubtime);
            news.setNewsfrom(newsform);
            // Short unique business key, e.g. "News1-3F9A2C".
            news.setNewsnum("News1-" + UUID.randomUUID().toString().replace("-", "").toUpperCase().substring(5, 11));
        } catch (IOException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
        return news;
    }

    /**
     * Maps an article URL to a numeric category id by section keyword.
     *
     * @param url article URL
     * @return 1..7 for a known section keyword, 0 when none matches
     */
    private int typefind(String url) {
        // Ordered to match the original chain of independent ifs: when a URL
        // contains several keywords, the LAST matching one wins.
        String[] keywords = {"cddt", "xczx", "nszd", "xdny", "zlaq", "snlt", "ldyl"};
        int type = 0;
        for (int k = 0; k < keywords.length; k++) {
            if (url.contains(keywords[k])) {
                type = k + 1;
            }
        }
        return type;
    }

    /**
     * Crawls one {@link NewsList} entry per list-page URL.
     *
     * @param urls list-page URLs
     * @return one entry per URL (entries may be null on fetch failure)
     */
    public List<NewsList> crawlerNewsListString(String[] urls) {
        List<NewsList> newslists = new ArrayList<>(urls.length);
        for (String url : urls) {
            newslists.add(crawlerNewsList(url));
        }
        return newslists;
    }

    /**
     * Expands each list-page prefix into {@code account} concrete page URLs
     * by appending the page numbers 1..account.
     *
     * @param targetUrl list-page URL prefixes
     * @param account   number of pages per prefix
     * @return all page URLs, grouped by prefix in input order
     */
    public String[] getUrlss(String[] targetUrl, int account) {
        String[] pages = new String[targetUrl.length * account];
        int count = 0;
        for (String prefix : targetUrl) {
            for (int page = 1; page <= account; page++) {
                pages[count++] = prefix + page;
            }
        }
        return pages;
    }

    /**
     * Fetches a list page and builds a {@link NewsList} from it.
     *
     * NOTE(review): the loop overwrites {@code news} on every iteration, so
     * only the LAST list item survives — presumably the whole page was meant
     * to be collected; confirm before relying on this. Preserved as-is to
     * keep behavior unchanged.
     *
     * @param url list-page URL
     * @return entry for the last item on the page, or null on failure/empty page
     */
    public NewsList crawlerNewsList(String url) {
        NewsList news = null;
        try {
            Document doc = Jsoup.connect(url)
                    .userAgent(randomUserAgent())
                    .get();
            Elements newsList = doc.getElementsByAttributeValue("class", "newsList");
            Elements li = Jsoup.parse(newsList.html()).getElementsByTag("li");
            for (Element element : li) {
                Elements span = element.getElementsByTag("span"); // publish date, e.g. "2020-01-20"
                Elements attr = element.getElementsByTag("a");    // title link
                news = new NewsList(attr.text(), span.text(), attr.attr("href"));
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return news;
    }

    /**
     * Reads the canonical URL from a page's {@code <meta name="Url">} tag.
     *
     * @param document parsed page
     * @return the meta tag's {@code content} attribute, or null when absent
     */
    private String getUrl(Document document) {
        Elements meta = document.select("meta[name=Url]");
        // FIX: select() never returns null, so the old null check could not
        // prevent the IndexOutOfBoundsException thrown by get(0) when the
        // tag is missing; check for an empty result instead.
        if (!meta.isEmpty()) {
            return meta.first().attr("content");
        }
        return null;
    }
}

