package test

import cn.edu.hfut.dmic.webcollector.model.{CrawlDatums, Page}
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler
import implicits.Implicits._
import net.ruippeixotog.scalascraper.browser.JsoupBrowser
import net.ruippeixotog.scalascraper.dsl.DSL._
import net.ruippeixotog.scalascraper.dsl.DSL.Extract._
import net.ruippeixotog.scalascraper.dsl.DSL.Parse._
import net.ruippeixotog.scalascraper.model._

import java.io.File
import scala.util.control.NonFatal

object AssemblyCrawler {

  /** Root directory holding the input CSV and the downloaded reports. */
  val parentDir = new File("E:\\mtd_database\\test")

  /** Directory where each assembly's full-report HTML page is written. */
  val outDir = new File(parentDir, "assembly_full_report")

  // Number of pages visited so far. Kept as a public var for backward
  // compatibility, but all updates go through `AssemblyCrawler.synchronized`
  // because `visit` runs on 10 crawler threads (the original bare `i += 1`
  // was a data race).
  var i = 0

  /**
   * Breadth-first crawler that saves each visited NCBI assembly page to
   * `outDir` as `<assemblyId>.html`.
   *
   * @param crawlPath directory used by BreadthCrawler for its crawl state
   * @param autoParse whether BreadthCrawler should auto-extract links
   */
  class Crawler(crawlPath: String, autoParse: Boolean) extends BreadthCrawler(crawlPath, autoParse) {
    override def visit(page: Page, next: CrawlDatums): Unit = {
      // Seed URL shape: https://www.ncbi.nlm.nih.gov/assembly/<id>?report=full
      // -> path segment 4 is the assembly id; strip any query string.
      // `lift(4)` guards against malformed URLs instead of throwing
      // ArrayIndexOutOfBoundsException as the original indexing did.
      val url = page.url().split('/').lift(4).getOrElse("").replaceAll("\\?.*$", "")
      try {
        val pre = page.html()
        pre.toFile(new File(outDir, s"$url.html"))
      } catch {
        // NonFatal: log and continue — one bad page must not stop the
        // crawl, but fatal errors (OOM, etc.) still propagate.
        case NonFatal(e) =>
          println(e)
          println(url)
      } finally {
        // Increment and read under a lock so concurrent visits don't lose
        // counts; print the captured value to keep the output consistent.
        val count = AssemblyCrawler.synchronized { i += 1; i }
        println((count, url))
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val code = new Crawler("crawl", false)
    code.setThreads(10)

    // One browser instance is enough; the original constructed a fresh
    // JsoupBrowser for every already-downloaded file inside the closure.
    val browser = JsoupBrowser()

    // Ids already downloaded (file name without extension). Converted to a
    // Set: with the original Seq, `contains` inside filterNot was O(n*m).
    val existIds = outDir.myListFiles.map(_.getName.fileNamePrefix).toSet

    // Re-crawl every id whose report is missing OR whose saved page lacks
    // the summary <dl> (i.e. an incomplete or error page was stored).
    val needGetIds = new File(parentDir, "prokaryotes.csv").csvLines.lineSeqMap.map(_ ("Assembly")).filterNot { x =>
      existIds.contains(x) && {
        val doc = browser.parseFile(new File(outDir, s"${x}.html"))
        (doc >?> element("dl.assembly_summary_new")).isDefined
      }
    }
    println(needGetIds.size)
    needGetIds.foreach { x =>
      code.addSeed(s"https://www.ncbi.nlm.nih.gov/assembly/${x}?report=full")
    }
    code.start(1)
  }
}
