package indi.coffeenc.queuedownloader.plugin;

import indi.coffeenc.queuedownloader.TaskHolder;
import indi.coffeenc.queuedownloader.plugin.util.Filter;
import indi.coffeenc.queuedownloader.strategy.Task;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/**
 * Filter plugin for downloading a single comic from the 18comic (JM Comic) site.
 * The url passed in must be the comic's reading-page url; each page image found
 * on that page is queued as one download entry of a {@link Task}.
 */
public class Https18comicVip implements Filter {

    /** Browser-like User-Agent so the site serves the normal page instead of blocking us. */
    private static final String USER_AGENT =
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36";

    /**
     * Checks whether the given url belongs to one of the site's known mirror domains.
     *
     * @param url candidate url
     * @return true if this plugin is responsible for the url
     */
    public boolean matches(String url) {
        return url.startsWith("https://18comic4.biz") ||
                url.startsWith("https://18comic.vip") ||
                url.startsWith("https://18comic.bet");
    }

    /**
     * Fetches and parses the reading page, extracts every page-image url, and
     * registers a download {@link Task} with the {@link TaskHolder}.
     *
     * @param url the comic reading-page url
     * @return true if the url was accepted and a task was queued, false otherwise
     */
    public boolean doFilter(String url) {
        // Reject urls this plugin does not handle.
        if (!matches(url))
            return false;
        // getContent returns null on I/O failure; the original passed that
        // straight to Jsoup.parse and crashed with an NPE.
        String html = getContent(url);
        if (html == null)
            return false;
        Document body = Jsoup.parse(html);
        Task task = new Task();
        task.setSource(url);
        // The "pull-left hidden" element carries the comic title, used as the
        // download directory name. first() returns null (instead of throwing)
        // when the page layout changed or the fetch was blocked.
        Element title = body.getElementsByClass("pull-left hidden").first();
        if (title == null)
            return false;
        task.setDir(rootDir() + title.text());
        // Every page image carries a data-page attribute; the real image url is
        // in data-original (lazy-loaded) or, failing that, in src.
        for (Element element : body.select("img[data-page]")) {
            String imageUrl = element.attr("data-original");
            if (imageUrl.isEmpty())
                imageUrl = element.attr("src");
            if (imageUrl.isEmpty())
                continue;
            // Strip the query string if present. The original always called
            // substring(0, lastIndexOf("?")) and threw
            // StringIndexOutOfBoundsException on urls without a '?'.
            int query = imageUrl.lastIndexOf('?');
            if (query >= 0)
                imageUrl = imageUrl.substring(0, query);
            Task.URL pageUrl = new Task.URL(imageUrl);
            pageUrl.setTotal(getContentLength(imageUrl));
            task.add(pageUrl);
        }
        System.out.println(task);
        TaskHolder.add(task);
        return true;
    }

    /**
     * Opens a GET connection to the given url with a browser User-Agent.
     * Callers are responsible for disconnecting the returned connection.
     *
     * @param url target url
     * @return a connected {@link HttpURLConnection}
     * @throws IOException if the connection cannot be established
     */
    private HttpURLConnection getConnection(String url) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestMethod("GET");
        conn.setRequestProperty("user-agent", USER_AGENT);
        // Bounded timeouts so a dead mirror cannot hang the crawl forever.
        conn.setConnectTimeout(15_000);
        conn.setReadTimeout(30_000);
        conn.connect();
        return conn;
    }

    /**
     * Downloads the HTML content of the given url as a UTF-8 string.
     *
     * @param url page url
     * @return the page body, or null on I/O failure
     */
    private String getContent(String url) {
        try {
            HttpURLConnection connection = getConnection(url);
            // try-with-resources closes the reader (and underlying stream);
            // the original leaked both on every call.
            try (InputStreamReader reader =
                         new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8)) {
                StringBuilder text = new StringBuilder();
                char[] buffer = new char[1024];
                for (int size; (size = reader.read(buffer, 0, buffer.length)) != -1; ) {
                    text.append(buffer, 0, size);
                }
                return text.toString();
            } finally {
                connection.disconnect();
            }
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Queries the size in bytes of the remote file at the given url.
     *
     * @param url remote file url
     * @return the Content-Length, or 0 if it is unknown or the request failed
     */
    private long getContentLength(String url) {
        try {
            HttpURLConnection connection = getConnection(url);
            try {
                // -1 means the server sent no Content-Length; normalize to 0
                // to keep the original method's "0 on unknown" contract.
                long length = connection.getContentLengthLong();
                return length >= 0 ? length : 0;
            } finally {
                connection.disconnect();
            }
        } catch (IOException e) {
            e.printStackTrace();
            return 0;
        }
    }
}
