package test

import cn.edu.hfut.dmic.webcollector.model.{CrawlDatums, Page}
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler
import org.apache.commons.io.FileUtils
import implicits.Implicits._

import java.io.File

import scala.util.control.NonFatal

/**
 * Downloads NCBI BioSample full-text reports for a list of sample ids and
 * stores each one as `<id>.txt` under [[outDir]], skipping ids that were
 * already fetched in a previous run.
 */
object BioSampleCrawler {

  // Root working directory. NOTE(review): hard-coded Windows path — consider
  // reading it from args/config so the tool runs outside this one machine.
  val parentDir = new File("E:\\mtd_database\\test")
  // Destination directory for the fetched annotation text files.
  val outDir = new File(parentDir, "anno")

  /**
   * Breadth-first crawler that extracts the text of each page's `<pre>`
   * element and writes it to a per-sample file in [[outDir]].
   *
   * @param crawlPath directory WebCollector uses to persist crawl state
   * @param autoParse whether WebCollector should auto-extract follow-up links
   */
  class Crawler(crawlPath: String, autoParse: Boolean) extends BreadthCrawler(crawlPath, autoParse) {
    override def visit(page: Page, next: CrawlDatums): Unit = {
      // Seed URLs look like https://www.ncbi.nlm.nih.gov/biosample/<id>/?...,
      // so path segment 4 is the BioSample id.
      val id = page.url().split('/')(4)
      try {
        println(id)
        val pre = page.selectText("pre")
        pre.toFile(new File(outDir, s"$id.txt"))
      } catch {
        // Was: `case e: Exception => println(url)` — it swallowed the error
        // silently (printing only the id, indistinguishable from success).
        // Catch only non-fatal throwables and report the actual failure;
        // fatal errors (OOM, etc.) still propagate.
        case NonFatal(e) =>
          println(s"failed to save $id: ${e.getMessage}")
      }
    }
  }

  /** Entry point: seeds the crawler with every id not yet downloaded. */
  def main(args: Array[String]): Unit = {
    val crawler = new Crawler("crawl", false)
    crawler.setThreads(10)
    // Ids already present on disk. Materialize as a Set so the membership
    // test below is O(1) instead of a linear scan per input line.
    val existIds = outDir.myListFiles.map(_.getName.fileNamePrefix).toSet
    new File(parentDir, "list.txt").lines
      .filterNot(existIds.contains)
      .foreach { id =>
        crawler.addSeed(s"https://www.ncbi.nlm.nih.gov/biosample/$id/?report=full&format=text")
      }
    crawler.start(1)
  }
}
