package com.liu.parse;

import com.liu.utils.FileUtils;
import com.vladsch.flexmark.html2md.converter.FlexmarkHtmlConverter;
import net.htmlparser.jericho.Source;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.*;

/**
 * Template-method base class for crawling a CSDN-style blog and converting each
 * article into a Jekyll Markdown post under {@code ./_posts/}.
 *
 * <p>Subclasses supply site-specific details via {@link #getBaseUrl(String)} and
 * {@link #createJekyllHeader(String, String, Elements)}.
 *
 * <p>Not thread-safe for concurrent {@code crawl} calls: {@link #crawl(String)}
 * shuts down the shared executor, so each instance supports one crawl run.
 */
public abstract class AbstractCrawler {

    /** Path marker that precedes the article id in a detail-page URL. */
    private static final String DETAIL_PATH = "article/details/";

    /** Output directory (relative to the working directory) for generated posts. */
    private static final String POSTS_DIR = "./_posts/";

    /** Pool used to fetch the articles of one list page concurrently. */
    private final ExecutorService executorService = Executors.newFixedThreadPool(10);

    /**
     * Crawls every article of a blog by walking its paginated list pages
     * ({@code <base>/article/list/1}, {@code /2}, …) until a page without an
     * article list is returned.
     *
     * @param url any URL of the target blog; only its base part is used
     */
    public void crawl(String url) {
        createDirectory(POSTS_DIR);
        String baseUrl = getBaseUrl(url);
        String listUrl = baseUrl + "/article/list/";

        try {
            for (int i = 1; ; i++) {
                Document doc = fetchDocument(listUrl + i);
                if (doc == null) break;

                Element articleListElement = doc.select("div.article-list").first();
                if (articleListElement == null) break; // past the last page

                // Fetch all articles of this page in parallel, then wait for the
                // whole page before requesting the next one.
                List<CompletableFuture<Void>> futureList = new LinkedList<>();
                for (Element articleElement : articleListElement.children()) {
                    String articleId = articleElement.attr("data-articleid");
                    futureList.add(CompletableFuture.runAsync(
                            () -> crawlDetailById(baseUrl, articleId), executorService));
                }
                CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).join();
            }
        } finally {
            // FIX: shut the pool down even if a page fails mid-crawl; the
            // original leaked the executor threads on any exception.
            executorService.shutdown();
        }
    }

    /**
     * Crawls a single article identified by its detail-page URL.
     *
     * @param url a URL containing {@code article/details/<id>}
     */
    public void crawlOne(String url) {
        System.out.println("》》》》》》》爬虫开始《《《《《《《");
        String baseUrl = getBaseUrl(url);
        // FIX: the original hard-coded offset 16 and, when the marker was
        // missing, indexOf() returned -1 and silently produced a bogus id.
        int markerIndex = url.indexOf(DETAIL_PATH);
        if (markerIndex < 0) {
            System.err.println("Not an article detail URL: " + url);
            return;
        }
        String articleId = url.substring(markerIndex + DETAIL_PATH.length());
        createDirectory(POSTS_DIR);
        crawlDetailById(baseUrl, articleId);
        System.out.println("》》》》》》》爬虫结束《《《《《《《");
    }

    /**
     * Downloads one article, localizes its images, converts the HTML body to
     * Markdown and writes it as a Jekyll post. Failures are logged and skipped.
     *
     * @param baseUrl   blog base URL (no trailing slash)
     * @param articleId numeric article id from the list page
     */
    protected void crawlDetailById(String baseUrl, String articleId) {
        String startUrl = baseUrl + "/" + DETAIL_PATH + articleId;
        Document doc = fetchDocument(startUrl);
        if (doc == null) return;

        Element htmlElement = doc.selectFirst("div#content_views");
        Element titleElement = doc.selectFirst(".title-article");
        Element timeElement = doc.selectFirst("span.time");
        // FIX: the original dereferenced all three selections unconditionally
        // and threw NullPointerException on pages with an unexpected layout.
        if (htmlElement == null || titleElement == null || timeElement == null) {
            System.err.println("Unexpected page layout, skipping: " + startUrl);
            return;
        }
        String fileName = titleElement.text();
        System.out.println("正在爬取..." + fileName);
        // Drops a 5-char prefix of the time text — assumes CSDN's fixed
        // "于 yyyy-MM-dd HH:mm:ss" style label; TODO confirm against live pages.
        String time = timeElement.text().substring(5);
        String jekyllHeader = createJekyllHeader(fileName, time, doc.select("div.tags-box"));

        String html = htmlElement.html();
        Source source = new Source(html);
        source.fullSequentialParse();

        // Ensure the local image directory exists before downloading.
        String projectPath = System.getProperty("user.dir");
        String imagesDirPath = Paths.get(projectPath, "images").toString();
        File imagesDir = new File(imagesDirPath);
        if (!imagesDir.exists()) {
            imagesDir.mkdirs(); // FIX: mkdirs() — mkdir() fails for nested paths
        }
        // Replace each <img> tag with a Markdown tag pointing at the local copy.
        List<net.htmlparser.jericho.Element> imgElements = source.getAllElements("img");
        for (net.htmlparser.jericho.Element imgElement : imgElements) {
            String src = imgElement.getAttributeValue("src");
            try {
                String imageDir = FileUtils.downloadImage(src);
                String markdownImageTag = "![](" + imageDir.replace("\\", "/") + ")";
                html = html.replace(imgElement.toString(), markdownImageTag);
            } catch (Exception e) {
                // Best-effort: a failed image leaves the original tag in place.
                System.err.println("下载图片失败: " + e.getMessage());
            }
        }

        FlexmarkHtmlConverter converter = FlexmarkHtmlConverter.builder().build();
        // The converter escapes square brackets; un-escape so links stay readable.
        String markdown = converter.convert(html).replace("\\[", "[").replace("\\]", "]");
        String completeContent = jekyllHeader + markdown;
        saveToFile(fileName, time.split(" ")[0], completeContent);
    }

    /** Extracts the blog's base URL (scheme + host [+ user path]) from any of its URLs. */
    protected abstract String getBaseUrl(String url);

    /**
     * Builds the Jekyll front-matter block for one post.
     *
     * @param title        article title
     * @param date         publication timestamp text
     * @param tagsElements the article's {@code div.tags-box} elements
     * @return front-matter text to prepend to the Markdown body
     */
    protected abstract String createJekyllHeader(String title, String date, Elements tagsElements);

    /**
     * Fetches and parses a page, returning {@code null} (after logging) on I/O
     * failure so callers can treat it as "no more pages".
     */
    private Document fetchDocument(String url) {
        try {
            return Jsoup.connect(url).get();
        } catch (IOException e) {
            System.err.println("Fetching URL failed: " + e.getMessage());
            return null;
        }
    }

    /** Creates {@code path} (including missing parents) if it does not exist. */
    private void createDirectory(String path) {
        File file = new File(path);
        if (!file.exists()) {
            file.mkdirs(); // FIX: mkdirs() — mkdir() fails for nested paths
        }
    }

    /**
     * Writes one post as {@code ./_posts/<date>-<title>.markdown} in UTF-8.
     *
     * @param title   article title (sanitized for use as a file name)
     * @param date    date part ({@code yyyy-MM-dd}) used as the Jekyll prefix
     * @param content full post content (front matter + Markdown)
     */
    private void saveToFile(String title, String date, String content) {
        // FIX: strip characters that are illegal in file names (Windows set);
        // the original built the path straight from the title and failed — or
        // wrote outside _posts — when the title contained '/' etc.
        String safeTitle = title.replaceAll("[\\\\/:*?\"<>|]", "_");
        String mdFileName = POSTS_DIR + date + '-' + safeTitle + ".markdown";
        // FIX: FileWriter used the platform default charset, mangling the
        // Chinese content on non-UTF-8 systems; write UTF-8 explicitly.
        try (Writer writer = new OutputStreamWriter(
                new FileOutputStream(mdFileName), StandardCharsets.UTF_8)) {
            writer.write(content);
        } catch (IOException e) {
            System.err.println("Failed to write " + mdFileName + ": " + e.getMessage());
        }
    }
}