package test

import cn.edu.hfut.dmic.webcollector.model.{CrawlDatums, Page}
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler

import java.io.File
import implicits.Implicits._
import org.apache.commons.io.FileUtils

/**
 * Created by yz on 13/12/2021
 */
object DataCrawler {

  // Root output directory for all crawl artifacts.
  // NOTE(review): Windows-specific absolute path — adjust per machine.
  val outDir = new File("E:\\snp_database\\test")
  // Downloaded pages land under rs/<id>/<id>.html inside outDir.
  val rsOutDir = new File(outDir, "rs")

  /**
   * Breadth-first crawler that saves the HTML of each visited dbSNP page to
   * `rsOutDir/<id>/<id>.html`, where `<id>` is the 5th path segment of the
   * page URL (e.g. "rs123" in https://www.ncbi.nlm.nih.gov/snp/rs123) with
   * any trailing query string stripped.
   *
   * @param crawlPath directory WebCollector uses to persist crawl state
   * @param autoParse whether WebCollector should auto-extract links
   */
  class Crawler(crawlPath: String, autoParse: Boolean) extends BreadthCrawler(crawlPath, autoParse) {
    override def visit(page: Page, next: CrawlDatums): Unit = {
      import scala.util.control.NonFatal
      val rawUrl = page.url()
      try {
        // Id extraction now lives inside the try: previously a URL with fewer
        // than 5 path segments threw ArrayIndexOutOfBoundsException before the
        // try was entered, bypassing the error reporting below.
        val id = rawUrl.split('/')(4).replaceAll("\\?.*$", "")
        val idDir = new File(rsOutDir, id)
        // `toFile` is the string-to-file writer from implicits.Implicits._
        page.html().toFile(new File(idDir, s"$id.html"))
      } catch {
        // NonFatal so OOM / thread interruption still propagate; the empty
        // finally block of the original is dropped.
        case NonFatal(e) =>
          println(e)
          println(rawUrl)
      }
    }
  }

  /**
   * Entry point: reads SNP ids (rs numbers) from the summary spreadsheet,
   * skips ids whose HTML page was already downloaded, and seeds the crawler
   * with the NCBI dbSNP URLs of the remaining ids.
   */
  def main(args: Array[String]): Unit = {

    val crawler = new Crawler("crawler", false)
    crawler.setThreads(10)

    // `xlsxLines().lineSeqMap` (from implicits.Implicits._) yields one
    // column-name -> cell-value map per spreadsheet row.
    val needGetIds = new File(outDir, "营养素相关SNP条目-汇总20211029.xlsx").xlsxLines().lineSeqMap.map { map =>
      map("SNP位点（rs号）（必填）")
    }.distinct.filter { id =>
      id.startsWith("rs")
    }.filterNot { id =>
      // Already-fetched check. Use File.length() — it returns 0 for a missing
      // file — instead of FileUtils.sizeOf, which throws
      // IllegalArgumentException when the file does not exist, i.e. for every
      // id that still needs crawling.
      val htmlFile = new File(new File(rsOutDir, id), s"$id.html")
      htmlFile.length() > 0
    }

    println(needGetIds.size)
    println(needGetIds)
    needGetIds.foreach { id =>
      crawler.addSeed(s"https://www.ncbi.nlm.nih.gov/snp/$id")
    }
    // Depth 1: fetch only the seeded pages, no link following.
    crawler.start(1)

  }


}
