package xyz.xiezc.spider.statistic;

import cn.hutool.bloomfilter.BloomFilter;
import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.thread.ThreadUtil;
import cn.hutool.core.util.CharsetUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.db.Entity;
import cn.hutool.db.Page;
import cn.hutool.db.PageResult;
import cn.hutool.db.Session;
import cn.hutool.setting.Setting;
import com.alibaba.fastjson.JSON;
import lombok.Data;
import lombok.Synchronized;
import lombok.extern.slf4j.Slf4j;
import xyz.xiezc.ioc.annotation.Component;
import xyz.xiezc.ioc.annotation.Inject;
import xyz.xiezc.spider.common.Request;
import xyz.xiezc.spider.common.XBean;
import xyz.xiezc.spider.control.ScheduleCore;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

/**
 * @author wb-xzc291800
 * @date 2019/04/25 17:09
 */
@Component
@Data
@Slf4j
public class StatisticImpl implements Statistic {

    /** Spider configuration ({@code data.dir}, {@code spider.stop}, {@code cache.maxTotals}, ...). */
    @Inject
    Setting setting;

    /**
     * Bloom filter used for URL de-duplication.
     * Sized to handle roughly 16 million entries.
     */
    @Inject
    BloomFilter bloomFilter;

    /** Site-level state: seed URLs, common headers, counters and the URL cache. */
    @Inject
    Site site;

    /** Scheduler that queues {@link XBean}s for downloading. */
    @Inject
    ScheduleCore scheduleCore;

    /** Single-threaded executor that serializes asynchronous state persistence. */
    ExecutorService executorService = ThreadUtil.newSingleExecutor();

    /**
     * Pushes every configured seed URL into the scheduler as an {@link XBean}
     * with status 1 (ready to download).
     *
     * @return this instance, for chaining
     */
    @Override
    public Statistic dealSeedUrls() {
        List<Request> seedUrls = site.getSeedUrls();
        for (Request seedUrl : seedUrls) {
            XBean xBean = new XBean(seedUrl);
            xBean.setStatus(1);
            scheduleCore.pushXBean(xBean);
        }
        return this;
    }

    /**
     * Registers a new seed URL, copying the site-wide common headers onto it.
     *
     * @param url the seed URL to add
     * @return this instance, for chaining
     */
    @Override
    public Statistic addSeedUrl(String url) {
        Request request = new Request();
        request.setUrl(url);
        // Each header value is wrapped in a fresh mutable list so the request
        // owns its own copy of the site-wide headers.
        Map<String, List<String>> header = new HashMap<>();
        site.getCommonHeader().forEach((k, v) ->
                header.put(k, CollUtil.newArrayList(v))
        );
        request.setHeader(header);
        site.getSeedUrls().add(request);
        return this;
    }

    /**
     * Adds a header that will be applied to every seed request.
     *
     * @param key header name
     * @param val header value
     * @return this instance, for chaining
     */
    @Override
    public Statistic addCommonHeader(String key, String val) {
        site.getCommonHeader().put(key, val);
        return this;
    }

    /**
     * Restores spider state from the cache files on startup.
     * <p>
     * NOTE: the three steps are order-sensitive — do not rearrange them:
     * <ol>
     *   <li>seed the bloom filter from URLs already stored in the database;</li>
     *   <li>re-queue the pending downloads, de-duplicating against step 1;</li>
     *   <li>replay the remaining bloom-filter URL cache file.</li>
     * </ol>
     *
     * @return this instance, for chaining
     */
    @Override
    public Statistic recover() {
        // Step 1: recover from the database first.
        recoverFromXsPhotoTable();

        // Step 2: recover pending downloads, skipping URLs already downloaded.
        String downloadUrlsFilePath = setting.getStr("data.dir") + "downloadUrls.txt";
        recoverDownUrls(scheduleCore, downloadUrlsFilePath);

        // Step 3: finally replay the bloom-filter cache file.
        String bloomFilterUrlsFilePath = setting.getStr("data.dir") + "bloomFilterUrls.txt";
        recoverFromCacheFile(bloomFilterUrlsFilePath);
        return this;
    }

    /**
     * Re-queues pending download URLs from the cache file.
     * Each non-blank line is a JSON-serialized {@link XBean}; a bean is only
     * re-queued when its URL was not already present in the bloom filter
     * (i.e. {@code bloomFilter.add} returns {@code true} for new entries).
     *
     * @param schedule             scheduler to push recovered beans into
     * @param downloadUrlsFilePath UTF-8 file of one JSON XBean per line
     */
    private void recoverDownUrls(ScheduleCore schedule, String downloadUrlsFilePath) {
        log.info("从缓存文件恢复下载器..........");
        try (BufferedReader utf8Reader = FileUtil.getUtf8Reader(downloadUrlsFilePath)) {
            String s = null;
            while ((s = utf8Reader.readLine()) != null) {
                try {
                    if (StrUtil.isBlank(s)) {
                        continue;
                    }
                    XBean xBean = JSON.parseObject(s, XBean.class);
                    if (bloomFilter.add(xBean.getRequest().getUrl())) {
                        schedule.pushXBean(xBean);
                    }
                } catch (Exception e) {
                    // FIX: pass the throwable so the stack trace is logged
                    // instead of only e.getMessage().
                    log.error("回复出现问题", e);
                    log.error("问题字符串:{}", s);
                }
            }
        } catch (Exception e) {
            log.error("回复出现问题1", e);
        }
    }

    /**
     * Replays the bloom-filter cache file: every non-blank line is a URL that
     * has already been seen and is re-added to the filter.
     *
     * @param bloomFilterUrlsFilePath UTF-8 file of one URL per line
     */
    private void recoverFromCacheFile(String bloomFilterUrlsFilePath) {
        log.info("从缓存文件恢复过滤器..........");
        try (BufferedReader utf8Reader = FileUtil.getUtf8Reader(bloomFilterUrlsFilePath)) {
            String s = null;
            while ((s = utf8Reader.readLine()) != null) {
                if (StrUtil.isBlank(s)) {
                    continue;
                }
                bloomFilter.add(s);
            }
        } catch (Exception e) {
            log.error("回复出现问题", e);
        }
    }

    /**
     * Restores the bloom filter from URLs already stored in the xs_photo table.
     * <p>
     * A local cache file ({@code cachePhoto.txt}, lines of {@code "url\tid"})
     * avoids re-querying rows that were already exported: first the file is
     * replayed to find the highest cached id, then only rows with a greater id
     * are fetched from the database and appended to the file.
     */
    private void recoverFromXsPhotoTable() {
        String cachePhotoDir = setting.getStr("data.dir") + "cachePhoto.txt";
        int lastId = 0;
        // Phase 1: replay the cache file, remembering the highest id seen.
        try (BufferedReader bufferedReader = FileUtil.getUtf8Reader(cachePhotoDir)) {
            String line = null;
            while ((line = bufferedReader.readLine()) != null) {
                String[] split = line.split("\t");
                if (split.length < 2) {
                    // FIX: skip malformed/truncated lines instead of letting a
                    // parse failure abort the whole replay.
                    continue;
                }
                bloomFilter.add(split[0]);
                lastId = Integer.parseInt(split[1]);
            }
        } catch (Exception e) {
            log.error("从文件恢复数据库url缓存出现问题,{}", e.getMessage());
        }

        // Phase 2: keyset-paginate the table after lastId, appending new rows
        // to the cache file as they are added to the filter.
        try (BufferedWriter writer = FileUtil.getWriter(cachePhotoDir, CharsetUtil.UTF_8, true)) {
            Session session = Session.create();
            while (true) {
                // FIX: the original query combined "id > ?" with a growing
                // OFFSET ("limit ?, 1000"), which skipped rows — the predicate
                // already excludes everything before lastId, so the offset
                // jumped past unread data. Pure keyset pagination ordered by
                // id is correct and also avoids the O(offset) scan.
                List<Entity> query = session.query(
                        "select * from xs_photo where id > ? order by id limit 1000", lastId);
                // FIX: null/empty check must come before any dereference (the
                // original logged query.size() first, risking an NPE).
                if (query == null || query.isEmpty()) {
                    break;
                }
                log.info("lastId:{} , size:{} ", lastId, query.size());
                for (Entity entity : query) {
                    String fetchUrl = entity.getStr("fetch_url");
                    Integer id = entity.getInt("id");
                    if (id != null) {
                        // Advance the keyset cursor for the next page.
                        lastId = id;
                    }
                    if (StrUtil.isNotBlank(fetchUrl)) {
                        bloomFilter.add(fetchUrl);
                        try {
                            writer.write(fetchUrl + "\t" + id);
                            writer.newLine();
                        } catch (IOException e) {
                            log.error(e.getMessage(), e);
                        }
                    }
                }
            }
        } catch (Exception e) {
            log.error("从数据库中回复数据出错", e);
        }
    }

    /**
     * Restores the bloom filter from the xs_photo_album table, paging through
     * all rows 1000 at a time.
     */
    private void recoverFromAlbumDb() {
        try {
            Session session = Session.create();
            int i = 1;
            while (true) {
                Page page = new Page(i, 1000);
                PageResult<Entity> query = session.page(Entity.create("xs_photo_album"), page);
                log.info("currentPage:{} ,  PageSize:{} , totalPage:{}", query.getPage(), query.getPageSize(), query.getTotalPage());
                query.forEach(entity -> {
                    String fetch_url = entity.getStr("fetch_url");
                    if (StrUtil.isNotBlank(fetch_url)) {
                        bloomFilter.add(fetch_url);
                    }
                });
                // FIX: the original "totalPage == i" never matched on an empty
                // table (totalPage == 0 while i starts at 1), looping forever;
                // "<=" plus the isEmpty() guard terminates in every case.
                if (query.isEmpty() || query.getTotalPage() <= i) {
                    break;
                }
                i++;
            }
        } catch (SQLException e) {
            log.error("从数据库中回复数据出错", e);
        }
    }

    /**
     * Buffers newly seen URLs and asynchronously persists the spider state so
     * a restart can resume where it left off.
     *
     * @param url URLs to append to the bloom-filter cache
     */
    private void saveStatus(Set<String> url) {
        site.getBloomFilterUrlsCache().addAll(url);
        // Persist on the single-threaded executor so writes never interleave.
        executorService.submit(() -> {
            try {
                saveReal();
            } catch (Exception e) {
                log.error("保存状态出现问题", e);
            }
        });
    }

    /**
     * Performs the actual persistence: appends the buffered bloom-filter URLs
     * to their cache file, then overwrites the pending-download file with the
     * current scheduler queue (one JSON XBean per line).
     * Synchronized because it is invoked both from the executor and directly
     * from {@link #checkStop(XBean)}.
     *
     * @return this instance, for chaining
     */
    @Synchronized
    private Statistic saveReal() {
        log.info("开始缓存状态信息.................");
        String bloomFilterUrlsFilePath = setting.getStr("data.dir") + "bloomFilterUrls.txt";
        String downloadUrlsFilePath = setting.getStr("data.dir") + "downloadUrls.txt";
        if (!site.getBloomFilterUrlsCache().isEmpty()) {
            FileUtil.appendUtf8Lines(site.getBloomFilterUrlsCache(), bloomFilterUrlsFilePath);
            site.getBloomFilterUrlsCache().clear();
        }
        // Snapshot the pending-download queue (overwrite, not append).
        try (PrintWriter printWriter = FileUtil.getPrintWriter(downloadUrlsFilePath, "utf8", false)) {
            scheduleCore.getQueueStream(1).map(JSON::toJSONString)
                    .forEach(obj -> printWriter.println(obj));
        }
        log.info("状态保存完毕......................................");
        return this;
    }

    /** Logs the current spider counters (filtered / processed / pending / errored). */
    private void logStatus() {
        Map<String, Object> satus = site.getSatus();
        log.info("过滤器过滤的总数:{}", satus.get("bloomFilterCount"));
        log.info("处理的总数XBean:{}", satus.get("dealXbeanCount"));
        log.info("等待下载的XBean:{}", satus.get("awaitDownUrlCount"));
        log.info("处理的异常XBean:{}", satus.get("errXBeanCount"));
    }

    /**
     * Updates crawl statistics and decides whether the spider has finished.
     * <p>
     * A {@code null} bean (or an active stop flag) with an empty download
     * queue means the crawl is complete: the state is persisted and the stop
     * flag is set. Otherwise the processed counter advances, and every
     * {@code cache.maxTotals} pages the status is logged and the newly
     * discovered URLs are flushed to disk.
     *
     * @param xBean the bean just processed; may be {@code null} when the
     *              queue has drained
     * @return this instance, for chaining
     */
    @Override
    public Statistic checkStop(XBean xBean) {
        AtomicInteger dealXbeanCount = site.getDealXbeanCount();
        // A null bean may mean the crawl has drained; confirm via the counter.
        if (xBean == null || setting.getBool("spider.stop")) {
            int downUrls = site.getAwaitDownUrlCount().get();
            int dealUrls = dealXbeanCount.get();
            if (downUrls == 0) {
                logStatus();
                log.info("已经爬取的历史记录有: {}条 ", dealUrls);
                log.info("爬虫请求已经全部完成了. 即将进入休眠状态...........................");
                setting.set("spider.stop", "true");
                this.saveReal();
            }
            return this;
        }
        dealXbeanCount.addAndGet(1);
        // FIX: guard against a missing (null → NPE on unboxing) or zero
        // (ArithmeticException on %) cache.maxTotals setting.
        Integer maxTotals = setting.getInt("cache.maxTotals");
        if (maxTotals != null && maxTotals > 0 && dealXbeanCount.get() % maxTotals == 0) {
            logStatus();
            // Periodically flush the newly discovered URLs to the cache file.
            Set<String> collect = xBean.getNextRequests().stream().map(Request::getUrl).collect(Collectors.toSet());
            this.saveStatus(collect);
        }
        return this;
    }


}
