package com.jms.crawler.service;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.jms.base.factory.BeanFactory;
import com.jms.crawler.context.DataCrawlContext;
import com.jms.crawler.util.CsvUtil;
import com.jms.crawler.util.DataPreprocess;
import com.jms.crawler.util.DateFormat;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.text.ParseException;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * Crawls Weibo search results for a set of keywords using a logged-in user's
 * cookie (pseudo-login), then fetches each matching post's content and comments
 * through the Weibo ajax API and appends them to per-keyword CSV files.
 *
 * <p>Producer/consumer layout: {@link #doDateCrawl} walks the search pages and
 * enqueues API URLs; two pool threads ({@link CrawlConnect}, {@link CrawlComment})
 * drain the queues and write the CSV rows.
 */
public class WeiboDataCrawlService extends DataCrawlService{

    private final static Logger LOGGER = LogManager.getLogger(WeiboDataCrawlService.class);

    private final DataCrawlContext dataCrawlContext = BeanFactory.getBean(DataCrawlContext.class);

    // Raw cookie header of the logged-in Weibo user; set once in doDateCrawl,
    // read by both consumer threads for the ajax requests.
    private String cookies;

    // Worker pool for the two consumer threads. NOTE: with an unbounded
    // LinkedBlockingQueue the pool never grows past its core size of 2, so at
    // most two consumer tasks run concurrently; later keywords' tasks queue up.
    private final ExecutorService executor = new ThreadPoolExecutor(2, 4, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());

    public WeiboDataCrawlService(Map<String, Object> taskContent) {
        this.uniqueId = (String) taskContent.get("taskName");
        this.taskContent = taskContent;
    }

    /**
     * Signals every crawl loop to stop (thread_end = 1) and shuts the pool
     * down, waiting up to 2 x 60 s before declaring the workers stuck.
     */
    @Override
    public void stopCrawl() {
        thread_end = 1;
        executor.shutdown();
        try {
            // Wait 60 s for queued/running tasks to drain.
            if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
                // Still running: cancel in-flight tasks, then wait another 60 s.
                executor.shutdownNow();
                if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
                    LOGGER.error("线程任务未正常执行结束");
                }
            }
        } catch (InterruptedException ie) {
            executor.shutdownNow();
            // FIX: restore the interrupt flag instead of swallowing it so the
            // caller can observe the interruption.
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Entry point: unpacks the task parameters and runs the crawl synchronously.
     */
    @Override
    @SuppressWarnings("unchecked")
    public void startCrawl() {
        Map<String, Object> params = (Map<String, Object>) taskContent.get("params");
        String[] important_codes = (String[]) params.get("important_codes");
        String cookies = (String) params.get("cookies");
        String[] connect_csv_filePaths = (String[]) params.get("content_csv_filePaths");
        String[] comment_csv_filePaths = (String[]) params.get("comment_csv_filePaths");
        Date start_date = (Date) params.get("start_date");
        Date end_date = (Date) params.get("end_date");
        this.doDateCrawl(important_codes, cookies, connect_csv_filePaths, comment_csv_filePaths, start_date, end_date);
    }

    /**
     * Pseudo-logs-in with the user's cookie, walks the Weibo search result
     * pages for every keyword, and enqueues the content/comment API URL of each
     * post inside the requested date window. Two pool threads consume the
     * queues and persist the rows to CSV.
     *
     * @param important_codes       search keywords
     * @param cookies               cookie header of a logged-in Weibo user
     * @param connect_csv_filePaths CSV output path for post content, one per keyword
     * @param comment_csv_filePaths CSV output path for comments, one per keyword
     * @param start_date            only posts strictly after this date are kept
     * @param end_date              only posts strictly before this date are kept
     */
    private void doDateCrawl(String[] important_codes, String cookies, String[] connect_csv_filePaths, String[] comment_csv_filePaths, Date start_date, Date end_date) {
        LOGGER.info(uniqueId + ": Data Crawl Start!");
        thread_end = 2;
        this.cookies = cookies;

        String url;
        String connect_url;
        String comments_url;

        Map<String, String> cookie_map;
        try {
            // Parse the raw cookie header into a map for Jsoup's pseudo-login.
            cookie_map = WeiboDataCrawlService.getCookieMap(cookies);
        } catch (Exception e) {
            LOGGER.error("cookie解析失败！", e);
            stopCrawl();
            // FIX: the original fell through with a null cookie map and crashed
            // on the first Jsoup request (skipping removeService); abort cleanly.
            dataCrawlContext.removeService(this.uniqueId);
            return;
        }

        Random random = new Random();
        for (int m = 0; m < important_codes.length; m++) {
            if (thread_end == 1) break;

            Queue<Map<String, String>> connect_urls = new LinkedList<>();
            Queue<Map<String, String>> comments_urls = new LinkedList<>();

            CrawlConnect connect_thread = new CrawlConnect(connect_urls, connect_csv_filePaths[m]);
            CrawlComment comment_thread = new CrawlComment(comments_urls, comment_csv_filePaths[m]);
            executor.execute(connect_thread);
            executor.execute(comment_thread);

            // Weibo search shows at most 50 result pages.
            for (int i = 1; i <= 50; i++) {
                if (thread_end == 1) break;
                url = "https://s.weibo.com/weibo?q=" + important_codes[m] + "&Refer=index&page=" + i;
                try {
                    // Fetch the search-result HTML with Jsoup, carrying our cookies.
                    Document document = Jsoup.connect(url)
                            .timeout(30000)
                            .cookies(cookie_map)
                            .get();

                    Elements card_wrap = document.getElementsByClass("card-wrap");

                    for (Element every_card : card_wrap) {
                        // Post id; non-post cards (ads, hints) carry no mid.
                        String mid = every_card.attr("mid");
                        if (mid.equals("")) continue;
                        // Publication date text shown under the post.
                        String date_str = every_card.getElementsByClass("from").select("a").get(0).text();
                        Date date = DateFormat.format_cn(date_str);
                        if (!(date.after(start_date) && date.before(end_date))) continue;
                        // href="//weibo.com/1749277070/MwC5ArHHV?refer_flag=..." — strip the query, split on '/'.
                        String[] from_a = every_card.getElementsByClass("from").select("a").get(0).attr("href").split("\\?")[0].split("/");
                        // Request id, e.g. MwC5ArHHV
                        String mblogid = from_a[from_a.length - 1];
                        // Author uid
                        String uid = from_a[from_a.length - 2];
                        connect_url = "https://weibo.com/ajax/statuses/show?id=" + mblogid;
                        Map<String, String> connect_map = new HashMap<>();
                        connect_map.put("url", connect_url);
                        connect_map.put("important_code", important_codes[m]);
                        connect_urls.offer(connect_map);
                        comments_url = "https://weibo.com/ajax/statuses/buildComments?is_reload=1&id=" + mid + "&is_show_bulletin=2&is_mix=0&count=10&uid=" + uid;
                        Map<String, String> comment_map = new HashMap<>();
                        comment_map.put("url", comments_url);
                        comment_map.put("important_code", important_codes[m]);
                        comments_urls.offer(comment_map);
                        // Random 3–6 s pause between requests to avoid an IP ban.
                        synchronized (this) {
                            this.wait(random.nextInt(3000) + 3000);
                        }
                    }

                    // No "next page" element means the results are exhausted.
                    if (document.getElementsByClass("next").size() == 0) break;
                } catch (IOException | ParseException e) {
                    // FIX: log through the logger instead of printStackTrace().
                    LOGGER.warn(uniqueId + ": search page crawl failed : " + url, e);
                } catch (InterruptedException e) {
                    // FIX: restore the interrupt flag so a stop request is not lost.
                    Thread.currentThread().interrupt();
                    LOGGER.warn(uniqueId + ": search page crawl interrupted", e);
                }
            }
            // Production finished for this keyword; consumers drain and exit.
            connect_thread.produce_can_end = 1;
            comment_thread.produce_can_end = 1;
        }
        /* The consumers may still be draining their queues after production
           ends, which would make the front end believe the crawl finished too
           early. Poll the pool's active count until every worker is done. */
        ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) executor;
        while (threadPoolExecutor.getActiveCount() != 0) {
            synchronized (this) {
                try {
                    this.wait(3000);
                } catch (InterruptedException e) {
                    // FIX: restore the interrupt flag and stop waiting.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
        // Shut the pool down after a normal (non-cancelled) run as well.
        if (thread_end == 2) stopCrawl();

        dataCrawlContext.removeService(this.uniqueId);
        LOGGER.info(uniqueId + ": Data Crawl Finish!");
    }

    /**
     * GETs the given URL with the user's cookie and returns the body decoded as
     * UTF-8, or null for any non-200 response.
     *
     * @throws IOException on connection or read failure
     */
    private String getResponse(String url, String cookie) throws IOException {
        HttpGet httpGet = createHttpGet(url, cookie);
        // FIX: try-with-resources — the original leaked the client and response
        // whenever execute() or entity reading threw.
        try (CloseableHttpClient httpClient = HttpClients.createDefault();
             CloseableHttpResponse httpResponse = httpClient.execute(httpGet)) {
            if (httpResponse.getStatusLine().getStatusCode() == 200) {
                HttpEntity entity = httpResponse.getEntity();
                return EntityUtils.toString(entity, "utf-8");
            }
            return null;
        }
    }

    // Build an HttpGet whose headers mimic a real browser request.
    private HttpGet createHttpGet(String url, String cookie) {
        HttpGet httpGet = new HttpGet(url);
        httpGet.setConfig(getRequestConfig());
        httpGet.setHeader("Cookie", cookie);
        httpGet.setHeader(HttpHeaders.ACCEPT, "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8");
        httpGet.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip, deflate, br");
        httpGet.setHeader(HttpHeaders.ACCEPT_LANGUAGE, "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2");
        httpGet.setHeader(HttpHeaders.CONNECTION, "keep-alive");
        httpGet.setHeader(HttpHeaders.HOST, "weibo.com");
        return httpGet;
    }

    // Request configuration: standard cookie spec, 10 s timeouts across the board.
    private RequestConfig getRequestConfig() {
        return RequestConfig.custom().setCookieSpec(CookieSpecs.STANDARD).setSocketTimeout(10000).setConnectTimeout(10000)
                .setConnectionRequestTimeout(10000)
                .build();
    }

    /**
     * Parses a raw "k1=v1; k2=v2" cookie header into a name-to-value map.
     */
    private static Map<String, String> getCookieMap(String cookies) {
        Map<String, String> map = new HashMap<>();
        String[] cookie = cookies.split(";");
        for (String every_cookie : cookie) {
            // FIX: limit the split to 2 so values containing '=' (e.g. base64
            // session tokens) stay intact, and skip malformed fragments instead
            // of throwing ArrayIndexOutOfBoundsException.
            String[] eve = every_cookie.split("=", 2);
            if (eve.length == 2) {
                map.put(eve[0].trim(), eve[1].trim());
            } else {
                LOGGER.warn("Skipping malformed cookie fragment: " + every_cookie);
            }
        }
        return map;
    }

    /**
     * Consumer thread: drains the post-URL queue, fetches each post's JSON from
     * the Weibo ajax API and appends one CSV row per post.
     */
    class CrawlConnect implements Runnable{

        Queue<Map<String, String>> connect_urls;
        // Set to 1 by the producer once no more URLs will be enqueued.
        int produce_can_end  = 0;
        String connect_csv_filePath;
        public CrawlConnect(Queue<Map<String, String>> queue, String filePath) {
            connect_urls = queue;
            connect_csv_filePath = filePath;
        }

        @Override
        public void run() {
            LOGGER.info(uniqueId + ": CrawlConnect Thread is Created!");
            Random random = new Random();
            String headLabel = "微博id,微博内容,发布时间,话题,发布工具,发布位置,博主id,博主昵称,转发数,评论数,点赞数";
            try {
                CsvUtil.writeCsv(connect_csv_filePath, null, headLabel);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            // Run until production has ended AND the queue has been drained.
            while (produce_can_end != 1 || connect_urls.size() > 0) {
                if (thread_end == 1) break;
                // Queue empty: park for 3 s and re-check.
                if (connect_urls.size() == 0) {
                    try {
                        synchronized (this) {
                            this.wait(3000);
                        }
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                    continue;
                }

                Map<String, String> map = connect_urls.poll();
                String connect_url = map.get("url");
                LOGGER.info(uniqueId + ": Start crawl connect url : " + connect_url);

                try {
                    // Fetch and parse the post JSON.
                    JSONObject connect_json = JSON.parseObject(getResponse(connect_url, cookies));
                    if (connect_json == null) {
                        LOGGER.info(uniqueId + ": Request False : " + connect_url);
                        continue;
                    }
                    if (connect_json.getIntValue("ok") == 0) {
                        LOGGER.info(uniqueId + ": No Html : " + connect_url);
                        continue;
                    }
                    // Raw (unformatted) creation date from the API.
                    String date_str = connect_json.getString("created_at");
                    Date create_time = DateFormat.format(date_str);
                    // Post id.
                    String mid = connect_json.getString("mid");
                    // Author object.
                    JSONObject user_json = connect_json.getJSONObject("user");
                    // Post body, cleaned by DataPreprocess.
                    String text = DataPreprocess.preprocess((String) connect_json.get("text_raw"));
                    if (text.equals("")) {
                        LOGGER.info(uniqueId + ": No Text : " + connect_url);
                        continue;
                    }
                    // Publishing client, optional.
                    String source = "";
                    if (connect_json.getString("source") != null)
                        source = DataPreprocess.preprocess(connect_json.getString("source"));
                    // Publishing location, optional.
                    String region_name = "";
                    if (connect_json.getString("region_name") != null)
                        region_name = connect_json.getString("region_name");
                    // Concatenated topic titles, optional.
                    String topic_title = "";
                    if (connect_json.get("topic_struct") != null) {
                        StringBuilder sb = new StringBuilder();
                        JSONArray topic_struct = connect_json.getJSONArray("topic_struct");
                        for (int i = 0; i < topic_struct.size(); i++)
                            sb.append(topic_struct.getJSONObject(i).getString("topic_title"));
                        topic_title = sb.toString();
                    }
                    // Author nickname and id.
                    String nickname = user_json.getString("screen_name");
                    String uid = user_json.getString("id");
                    // Repost / comment / like counters.
                    int reposts_count = connect_json.getIntValue("reposts_count");
                    int comments_count = connect_json.getIntValue("comments_count");
                    int attitudes_count = connect_json.getIntValue("attitudes_count");

                    // Column order must match headLabel above.
                    List<String> dataList = new ArrayList<>();
                    dataList.add(mid);
                    dataList.add(text);
                    dataList.add(create_time.toString());
                    dataList.add(topic_title);
                    dataList.add(source);
                    dataList.add(region_name);
                    dataList.add(uid);
                    dataList.add(nickname);
                    dataList.add(reposts_count + "");
                    dataList.add(comments_count + "");
                    dataList.add(attitudes_count + "");
                    CsvUtil.writeCsv(connect_csv_filePath, dataList, null);

                    LOGGER.info(uniqueId + ": Crawl finish : " + connect_url);

                    // Random 3–6 s pause between requests to avoid an IP ban.
                    synchronized (this) {
                        this.wait(random.nextInt(3000) + 3000);
                    }
                } catch (ParseException | IOException e) {
                    // FIX: logger with the full throwable instead of printStackTrace().
                    LOGGER.warn(uniqueId + ": connect crawl failed : " + connect_url, e);
                } catch (InterruptedException e) {
                    // FIX: restore the interrupt flag so a stop request is not lost.
                    Thread.currentThread().interrupt();
                    LOGGER.warn(uniqueId + ": connect crawl interrupted", e);
                }
            }
            LOGGER.info(uniqueId + ": CrawlConnect Thread is dead!");
        }
    }

    /**
     * Consumer thread: drains the comment-URL queue, pages through each post's
     * comments (and their nested replies) via max_id pagination, and appends
     * one CSV row per comment.
     */
    class CrawlComment implements Runnable {

        Queue<Map<String, String>> comments_urls;
        // Set to 1 by the producer once no more URLs will be enqueued.
        int produce_can_end = 0;
        String comment_csv_filePath;

        public CrawlComment(Queue<Map<String, String>> queue, String filePath) {
            comments_urls = queue;
            comment_csv_filePath = filePath;
        }

        @Override
        public void run() {
            LOGGER.info(uniqueId + ": CrawlComment Thread is created!");
            Random random = new Random();
            String headLabel = "评论id,微博id,评论内容,发布时间,评论人id,评论人昵称,来源";
            try {
                CsvUtil.writeCsv(comment_csv_filePath, null, headLabel);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            // Run until production has ended AND the queue has been drained.
            while (produce_can_end != 1 || comments_urls.size() > 0) {
                if (thread_end == 1) break;
                // Queue empty: park for 3 s and re-check.
                if (comments_urls.size() == 0) {
                    try {
                        synchronized (this) {
                            this.wait(3000);
                        }
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                    continue;
                }
                Map<String, String> map = comments_urls.poll();
                String comments_url = map.get("url");
                String comment_url;
                // Extract the post id from "...&id=<mid>&..." in the queued URL.
                String mid = comments_url.split("=")[2].split("&")[0];
                long max_id = 0;
                // max_id pagination: 0 on the first page; the API returns the
                // next page's max_id, and 0 again when there are no more pages.
                do {
                    if (thread_end == 1) break;
                    if (max_id != 0) comment_url = comments_url + "&max_id=" + max_id;
                    else comment_url = comments_url;
                    LOGGER.info(uniqueId + ": Start crawl comments url : " + comment_url);
                    try {
                        // Fetch and parse the comments JSON.
                        JSONObject comment_json = JSON.parseObject(getResponse(comment_url, cookies));
                        if (comment_json == null) {
                            LOGGER.info(uniqueId + ": Request False : " + comment_url);
                            break;
                        }
                        if (comment_json.getIntValue("ok") == 0) {
                            LOGGER.info(uniqueId + ": No Html : " + comment_url);
                            // FIX: was `continue`, which re-requested the same
                            // max_id forever once max_id != 0 — infinite loop.
                            break;
                        }
                        // Advance pagination.
                        max_id = comment_json.getLongValue("max_id");
                        JSONArray comment_arr = comment_json.getJSONArray("data");
                        // FIX: a missing "data" array was an uncaught NPE that
                        // killed this consumer thread; bail out of this post.
                        if (comment_arr == null) {
                            LOGGER.info(uniqueId + ": No Comments : " + comment_url);
                            break;
                        }
                        if (comment_arr.size() == 0) LOGGER.info(uniqueId + ": No Comments : " + comment_url);
                        for (int cn = 0; cn < comment_arr.size(); cn++) {
                            // Single comment object.
                            JSONObject comment_every = comment_arr.getJSONObject(cn);
                            // Comment id.
                            String cid = comment_every.getString("id");
                            // Comment body, cleaned by DataPreprocess.
                            String comment_text = DataPreprocess.preprocess(comment_every.getString("text"));
                            if (comment_text.equals("")) {
                                LOGGER.info(uniqueId + ": No Text : " + comment_url);
                                continue;
                            }
                            // Comment creation time.
                            Date comment_time = DateFormat.format(comment_every.getString("created_at"));
                            // Commenter object / id / nickname.
                            JSONObject comment_user = comment_every.getJSONObject("user");
                            String comment_uid = comment_user.getString("id");
                            String comment_nickname = comment_user.getString("screen_name");
                            // Comment source client, optional.
                            String source = "";
                            if (comment_every.get("source") != null) source = comment_every.getString("source");

                            // FIX: header order is 评论id,微博id — the original
                            // wrote mid before cid, swapping the two columns.
                            List<String> dataList = new ArrayList<>();
                            dataList.add(cid);
                            dataList.add(mid);
                            dataList.add(comment_text);
                            dataList.add(comment_time.toString());
                            dataList.add(comment_uid);
                            dataList.add(comment_nickname);
                            dataList.add(source);
                            CsvUtil.writeCsv(comment_csv_filePath, dataList, null);

                            // Nested replies to this comment, if any.
                            JSONArray comment_comment = comment_every.getJSONArray("comments");
                            if (comment_comment == null) continue;
                            for (int j = 0; j < comment_comment.size(); j++) {
                                JSONObject comment_every_2  = comment_comment.getJSONObject(j);
                                String cid_2 = comment_every_2.getString("id");
                                String comment_text_2 = DataPreprocess.preprocess(comment_every_2.getString("text"));
                                if (comment_text_2.trim().equals("")) continue;
                                Date comment_time_2 = DateFormat.format(comment_every_2.getString("created_at"));
                                JSONObject comment_user_2 = comment_every_2.getJSONObject("user");
                                String comment_uid_2 = comment_user_2.getString("id");
                                String comment_nickname_2 = comment_user_2.getString("screen_name");
                                String source_2 = "";
                                if (comment_every_2.get("source") != null) source_2 = comment_every_2.getString("source");

                                // FIX: same column swap as above — comment id first.
                                List<String> dataList_2 = new ArrayList<>();
                                dataList_2.add(cid_2);
                                dataList_2.add(mid);
                                dataList_2.add(comment_text_2);
                                dataList_2.add(comment_time_2.toString());
                                dataList_2.add(comment_uid_2);
                                dataList_2.add(comment_nickname_2);
                                dataList_2.add(source_2);
                                CsvUtil.writeCsv(comment_csv_filePath, dataList_2, null);
                            }
                        }
                        LOGGER.info(uniqueId + ": Crawl finish : " + comment_url);
                        // Random 3–6 s pause between requests to avoid an IP ban.
                        synchronized (this) {
                            this.wait(random.nextInt(3000) + 3000);
                        }
                    } catch (IOException | ParseException e) {
                        // FIX: logger with the full throwable instead of printStackTrace().
                        LOGGER.warn(uniqueId + ": comment crawl failed : " + comment_url, e);
                    } catch (InterruptedException e) {
                        // FIX: restore the interrupt flag so a stop request is not lost.
                        Thread.currentThread().interrupt();
                        LOGGER.warn(uniqueId + ": comment crawl interrupted", e);
                    }
                } while (max_id != 0);
            }
            LOGGER.info(uniqueId + ": CrawlComment Thread is dead!");

        }
    }
}
