package main.kotlin.webMagic

import us.codecraft.webmagic.*
import us.codecraft.webmagic.processor.PageProcessor
import us.codecraft.webmagic.Site
import org.apache.commons.cli.*
import org.jsoup.Jsoup


/**
 * WebMagic [PageProcessor] that crawls board fid=16 of t66y.com.
 *
 * List pages are scanned for article links and pagination links; article pages
 * have their content, title and post date extracted for the pipeline.
 * Already-downloaded URLs (tracked in [DownloadLog]) and pinned announcement
 * threads are skipped.
 *
 * @param outdir output directory for downloaded pages; defaults to the
 *               current working directory.
 */
class CLSpiderDGE(val outdir: String = System.getProperty("user.dir")) : PageProcessor {
    // Site config: GBK charset (the board is GBK-encoded), 3s politeness delay,
    // fixed desktop UA so the server serves the normal HTML layout.
    private val site = Site.me()
            .setCharset("GBK")
            .setSleepTime(3000)
            .setDomain("t66y.com")
            .setUserAgent(
                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31")

    // Board list pages: fid=16, optionally with "&search=&page=NN" where NN is 0-59.
    private val listPagePattern = "thread0806\\.php\\?fid=16(&search=&page=[0-5]\\d?$){0,1}"
    // Individual article pages under board 16. (Renamed from the misspelled "articalePattern".)
    private val articlePattern = "htm_data/16/\\d+/\\d+\\.html"
    private val log = DownloadLog()

    // Pinned announcement threads ("公告") that must never be downloaded.
    private val gonggao = listOf(
            "http://t66y.com/htm_data/16/1106/524942.html",
            "http://t66y.com/htm_data/16/0805/136474.html",
            "http://t66y.com/htm_data/16/1802/344501.html",
            "http://t66y.com/htm_data/16/1110/622028.html",
            "http://t66y.com/htm_data/16/1109/594741.html",
            "http://t66y.com/htm_data/16/1706/2424348.html"
    )

    override fun process(page: Page) {
        println(page.url)

        // Skip pages already recorded in the download log, and announcements.
        // FIX: close the log connection even if check() throws.
        log.connect()
        try {
            if (log.check(page.url.toString()) || page.url.toString() in gonggao) {
                page.setSkip(true)
            }
        } finally {
            log.close()
        }

        if (page.url.regex(listPagePattern).match()) {
            // Pagination links plus every article link on this list page.
            val listPages = page.html.xpath("//div[@class=pages]").links().all()
            val articles = page.html
                    .xpath("//div[@class=t]/table[@id=ajaxtable]/tbody[2]/tr[@class='tr3 t_one tac']/td[@class=tal]/h3/a/@href")
                    .regex(articlePattern)
                    .all()

            // List pages themselves carry no extractable fields — never pipeline them.
            page.setSkip(true)

            page.addTargetRequests(listPages)
            page.addTargetRequests(articles)
        } else if (page.url.regex(articlePattern).match()) {
            val content = page.html.xpath("//div[@class='tpc_content do_not_catch']").get()

            // 设置skip之后，这个页面的结果不会被Pipeline处理
            // (after setSkip the pipeline will not process this page's result)
            if (content.isNullOrEmpty()) {
                page.setSkip(true)
            }

            page.putField("content", content)
            // NOTE(review): this stores the Selectable, not a String — presumably the
            // pipeline handles that; confirm before adding .get() here.
            page.putField("title", page.html.xpath("//title/text()"))
            page.putField("date", getPostDate(page.html.toString()))
        }
    }

    override fun getSite(): Site {
        return site
    }

    /**
     * 提取每个帖子的发帖时间 (extract the post date of a thread).
     *
     * @param html the full HTML of an article page
     * @return a normalized "yyyy-M-d" string such as "2018-4-19"
     * @throws IllegalStateException if the expected "Posted:" header block is missing
     */
    private fun getPostDate(html: String): String {
        val soup = Jsoup.parse(html)
        // FIX: first() returns null when the selector misses; fail with a clear
        // message instead of an opaque NullPointerException.
        val tipad = checkNotNull(soup.select("tr[class=tr1] > th > div[class=tipad]").first()) {
            "post header (tr.tr1 > th > div.tipad) not found in article HTML"
        }
        // Text after "Posted:" up to the first whitespace or '@' is the date.
        // FIX: the '|' inside the original character class [\s|@] was a literal pipe.
        val rawDate = tipad.text()
                .split("Posted:")[1]
                .split("[\\s@]".toRegex())[0]

        // The date may use one or more '-' separators; classify parts by shape:
        // a 4-digit part is the year, then the first remaining part is the month.
        var year = ""
        var month = ""
        var day = ""
        for (part in rawDate.split("-+".toRegex())) {
            when {
                part.length == 4 -> year = part
                month == "" -> month = part
                else -> day = part
            }
        }

        return "$year-$month-$day"
    }
}


/**
 * CLI entry point: initializes the download-log database, parses command-line
 * options (-t/--thread for worker count, -h/--help for usage), then starts the
 * crawler from the board's first list page.
 */
fun main(args: Array<String>) {
    val START_URL = "http://t66y.com/thread0806.php?fid=16&page1"

    // Ensure the download-log table exists before any worker touches it.
    val log = DownloadLog()
    log.connect()
    log.create()
    log.close()

    val options = Options()
    // FIX: register a help option — the hasOption("help") branch below was
    // unreachable because "help" was never added to Options.
    options.addOption(Option("h", "help", false, "显示帮助信息"))
    // FIX: renamed from the misleading "url" — this is the thread-count option.
    val threadOption = Option("t", "thread", true, "使用几个thread")
    options.addOption(threadOption)

    val parser: CommandLineParser = DefaultParser()
    val formatter = HelpFormatter()
    val cmd: CommandLine

    try {
        cmd = parser.parse(options, args)
    } catch (e: ParseException) {
        println(e.message)
        formatter.printHelp("下载器", options)
        return
    }

    if (cmd.hasOption("help")) {
        formatter.printHelp("下载器", options)
        return
    }

    // FIX: toIntOrNull guards against a non-numeric -t value, which previously
    // crashed with NumberFormatException; default is 2 worker threads.
    val threads = cmd.getOptionValue("thread")?.toIntOrNull() ?: 2

    Spider
            .create(CLSpiderDGE())
            .addUrl(START_URL)
            .thread(threads)
            .addPipeline(DGEPagePipeline())
            .run()
}



