package com.undsf.comikami._77mh_

import com.undsf.comikami.Chapter
import com.undsf.comikami.Comic
import com.undsf.comikami.DownloadTask
import com.undsf.comikami.Page
import com.undsf.crawler.CachedCrawler
import com.undsf.crawler.ICrawler
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.jsoup.nodes.Element
import org.jsoup.select.Elements

import javax.script.ScriptEngine
import javax.script.ScriptEngineManager
import javax.script.SimpleBindings
import java.util.regex.Matcher
import java.util.regex.Pattern

/**
 * Created by Arathi on 2017/9/13.
 */
/**
 * Crawler for the 77mh / 177mh comic site.
 *
 * Scrapes a comic's index page for chapter links, evaluates the site's
 * obfuscated JavaScript (via Nashorn) to recover per-chapter image URLs,
 * then builds and executes a download-task tree.
 *
 * Created by Arathi on 2017/9/13.
 */
class Crawler77mh extends CachedCrawler {
    // Shared Nashorn engine + bindings used to evaluate the site's scripts,
    // which define the variables img_s, maxPage, link_z, msg and img_qianzso.
    // NOTE(review): Nashorn was removed in JDK 15 — getEngineByName("nashorn")
    // returns null on modern JDKs; verify the target runtime.
    static ScriptEngineManager scriptEngineManager = new ScriptEngineManager()
    static ScriptEngine jsEngine = scriptEngineManager.getEngineByName("nashorn")
    static SimpleBindings jsBindings = new SimpleBindings()

    // coid: numeric id in URLs shaped like "/foo_12345.html"
    static final Pattern pattern_coid = Pattern.compile("/\\w*_(\\d+)\\.html")
    // cid: numeric id in URLs shaped like "/12345.html"
    static final Pattern pattern_cid = Pattern.compile("/(\\d+)\\.html")

    Crawler77mh() {
        super()
    }

    /**
     * Loads the image-server list script. Currently disabled: the server
     * mapping is fetched per chapter inside crawlPageUrl instead.
     */
    def loadImageServerList() {
        // String url = "http://css.177mh.com/img_v1/cn_svr.aspx"
        // String script = getFromCache(url)
        // jsEngine.eval(script, jsBindings)
    }

    /**
     * Extracts the first capture group of {@code pattern} from {@code input}.
     *
     * @param pattern compiled pattern with at least one capture group
     * @param input   text to search
     * @return the captured text, or {@code null} when the pattern does not match
     */
    private static String extractGroup(Pattern pattern, String input) {
        Matcher matcher = pattern.matcher(input)
        return matcher.find() ? matcher.group(1) : null
    }

    /**
     * Parses the comic's index page and collects its chapter list.
     *
     * @param comic comic whose {@code url} points at the index page
     * @return list of Chapter objects (title + url), in page order
     * @throws IllegalStateException when the chapter list element is missing
     */
    def crawlComicInfo(Comic comic) {
        String indexContent = getFromCache(comic.url)
        Document indexDoc = Jsoup.parse(indexContent)
        Element ul = indexDoc.select("ul.ar_rlos_bor.ar_list_col").first()
        // Fail fast with context instead of an opaque NPE when the site layout changes.
        if (ul == null) {
            throw new IllegalStateException("Chapter list <ul.ar_rlos_bor.ar_list_col> not found at " + comic.url)
        }
        List<Chapter> chapterList = new ArrayList<>()
        for (Element link : ul.select("a")) {
            Chapter chapter = new Chapter()
            chapter.title = link.text()
            chapter.url = link.attr("href")
            chapterList.add(chapter)
            println(chapter)
        }
        return chapterList
    }

    /**
     * Resolves every page image URL of one chapter.
     *
     * The chapter page embeds a script (historically the third {@code <script>}
     * tag) defining img_s, maxPage, link_z and msg; a second server-side script
     * maps img_s to the image host prefix (img_qianzso).
     *
     * @param chapterUrl URL of the chapter page
     * @return list of Page objects with 1-based index and absolute image URL
     * @throws IllegalStateException when the metadata script tag is missing
     */
    def crawlPageUrl(String chapterUrl) {
        List<Page> pages = new ArrayList<>()
        String chapterContent = getFromCache(chapterUrl)
        Document chapterDoc = Jsoup.parse(chapterContent)
        Elements scripts = chapterDoc.select("script")
        // Fragile: relies on the metadata script being the third tag on the page.
        if (scripts.size() < 3) {
            throw new IllegalStateException("Metadata <script> (index 2) not found at " + chapterUrl)
        }
        String scriptUrl = scripts.get(2).attr("src")
        String script = getFromCache(scriptUrl)
        jsEngine.eval(script, jsBindings)

        // cid comes from the chapter URL itself; coid from the "link_z" JS variable.
        String cid = extractGroup(pattern_cid, chapterUrl)

        def img_s = jsEngine.eval("img_s", jsBindings)
        def maxPage = jsEngine.eval("maxPage", jsBindings)
        def link_z = jsEngine.eval("link_z", jsBindings)

        String coid = extractGroup(pattern_coid, (String) link_z)
        if (cid == null || coid == null) {
            // The request below would otherwise embed the literal "null" silently.
            System.err.println("WARN: failed to extract cid=" + cid + " / coid=" + coid + " for " + chapterUrl)
        }

        def imgServerUrl = "http://css.177mh.com/img_v1/cn_svr.aspx?s=" + img_s + "&cid=" + cid + "&coid=" + coid

        String imgServerScript = getFromCache(imgServerUrl)
        jsEngine.eval(imgServerScript, jsBindings)
        // Host prefix for this chapter's images, keyed by the img_s server index.
        def urlPrefix = jsEngine.eval("img_qianzso[img_s]", jsBindings)

        // "msg" is a '|'-separated list of relative image paths.
        def msg = jsEngine.eval("msg", jsBindings)
        String[] list = ((String) msg).split("\\|")
        if (list.length != maxPage) {
            // Was a silent TODO: declared page count and actual list disagree —
            // continue with what we have, but leave a trace for the operator.
            System.err.println("WARN: maxPage=" + maxPage + " but found " + list.length + " pages for " + chapterUrl)
        }

        int pageIndex = 0
        for (def pageResource : list) {
            Page page = new Page()
            page.index = ++pageIndex
            page.url = urlPrefix + pageResource
            pages.add(page)
        }
        return pages
    }

    /**
     * Builds the download-task tree: one directory task for the comic root,
     * one per chapter, and one file task per page.
     *
     * @param comic   comic whose chapters/pages have already been crawled
     * @param baseDir local root directory for the comic
     * @return the root DownloadTask of the tree
     */
    def buildTasks(Comic comic, String baseDir) {
        DownloadTask root = new DownloadTask()
        root.type = DownloadTask.TASK_TYPE_CREATE_DIRECTORY
        root.directory = baseDir
        root.filename = null
        root.children = new ArrayList<>()

        for (def chapter : comic.chapters) {
            DownloadTask chapterTask = new DownloadTask()
            chapterTask.type = DownloadTask.TASK_TYPE_CREATE_DIRECTORY
            // Was hard-coded "\\": use the platform separator so paths also
            // work outside Windows (download() previously used "/").
            chapterTask.directory = baseDir + File.separator + chapter.title
            chapterTask.children = new ArrayList<>()

            for (def page : chapter.pages) {
                DownloadTask pageTask = new DownloadTask()
                pageTask.type = DownloadTask.TASK_TYPE_DOWNLOAD_FILE
                pageTask.directory = chapterTask.directory
                pageTask.filename = page.index + "." + page.getExtensionName()
                pageTask.url = page.url
                pageTask.children = null

                chapterTask.children.add(pageTask)
            }

            root.children.add(chapterTask)
        }

        return root
    }

    /**
     * Recursively executes a download-task tree depth-first: creates
     * directories and downloads files.
     *
     * @param task root of the (sub)tree to execute
     */
    def download(DownloadTask task) {
        if (task.type == DownloadTask.TASK_TYPE_CREATE_DIRECTORY) {
            File dir = new File(task.directory)
            // mkdirs() returns false both when the directory already exists and
            // on failure; only a still-missing directory is an actual error.
            if (!dir.mkdirs() && !dir.isDirectory()) {
                System.err.println("WARN: could not create directory " + task.directory)
            }
        }
        else if (task.type == DownloadTask.TASK_TYPE_DOWNLOAD_FILE) {
            downloadFile(task.url, task.directory + File.separator + task.filename)
        }

        if (task.children != null) {
            for (def subTask : task.children) {
                download(subTask)
            }
        }
    }

    /**
     * Full pipeline for one (currently hard-coded) comic: crawl the chapter
     * list, resolve every page URL, build the task tree and download it.
     */
    @Override
    def crawl() {
        // Load the image server list (currently a no-op, see loadImageServerList).
        loadImageServerList()

        // TODO(review): comic URL and target directory are hard-coded demo values.
        def comic = new Comic("http://www.77mh.com/colist_231948.html")

        // Step 1: collect every chapter URL.
        def chapterList = crawlComicInfo(comic)
        comic.chapters = chapterList

        // Steps 2-3: resolve every page and image URL per chapter.
        for (def chapter : chapterList) {
            chapter.pages = crawlPageUrl(chapter.url)
        }

        // Step 4: build the download-task tree.
        def tasks = buildTasks(comic, "D:\\tmp\\comikami\\国立舰队幼儿园")

        // Step 5: download all images.
        download(tasks)

        println("下载完成")
    }

    static void main(String[] args) {
        ICrawler crawler = new Crawler77mh()
        crawler.crawl()
    }
}
