package com.panovue.dataextraction;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import cn.hutool.core.io.FileUtil;
import cn.hutool.json.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.chrome.ChromeOptions;

import java.io.File;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

import static com.panovue.dataextraction.ReaderUtil.saveFile;

/**
 * Breadth-first crawler that walks proteinatlas.org starting from a search for
 * {@link #key}. The crawl is a fixed four-step pipeline: search results ->
 * protein summary (metadata written as JSON) -> tissue overview -> tissue
 * detail (images downloaded). Each step enqueues the next step's URL; the
 * queued {@link CrawlDatum}s in {@code crawlDatumList} are used to recognise
 * which step a visited page belongs to.
 */
@Slf4j
public class SearchCrawler extends BreadthCrawler {

    ChromeOptions options = new ChromeOptions();
    /** Search term / gene symbol that drives the whole crawl. */
    static String key = "CD8A";
    public static String baseDir = "C:\\Project\\dataExtraction\\temp\\";
    public static String baseUrl = "https://www.proteinatlas.org/";

    /** Only non-null when headless Selenium rendering is enabled; see afterStop(). */
    ChromeDriver driver;

    public SearchCrawler(String crawlPath, boolean autoParse) {
        super(crawlPath, autoParse);
        // Seed the crawl with the search-results page for the configured key.
        addSeed("https://www.proteinatlas.org/search/" + key);
    }

    /** Accumulates the extracted protein fields; flushed to disk in step 1. */
    JSONObject root = new JSONObject();

    // visit() may run on several worker threads (setThreads in main), so the
    // shared step list must be thread-safe.
    List<CrawlDatum> crawlDatumList = Collections.synchronizedList(new LinkedList<>());

    /** True when this page's URL matches the crawl step queued at {@code index}. */
    private boolean isStep(Page page, int index) {
        return crawlDatumList.size() > index && page.url().equals(crawlDatumList.get(index).url());
    }

    /** Queues {@code href} (resolved against {@link #baseUrl}) as the next crawl step. */
    private void queueNext(CrawlDatums next, String href) {
        crawlDatumList.add(next.addAndReturn(baseUrl + href));
    }

    @Override
    public void visit(Page page, CrawlDatums next) {
        if (page.url().contains("search")) {
            // Step 0: search results -> follow the first result row's link.
            Element table = page.select("table").first();
            Element link = table == null
                    ? null
                    : table.selectFirst("#searchResult > tbody.searchResult.no_styled_a > tr > td:nth-child(1) > a");
            if (link == null) {
                log.warn("No search result link found on {}", page.url());
                return;
            }
            queueNext(next, link.attr("href"));
        } else if (isStep(page, 0)) {
            // Step 1: protein summary -> extract metadata, persist it, follow the TISSUE link.
            extractSummary(page);
            FileUtil.writeUtf8String(root.toString(), baseDir + key + File.separator + key + ".json");
            Element tissue = page.select("p:contains(TISSUE)").first();
            if (tissue == null) {
                log.warn("TISSUE link not found on {}", page.url());
                return;
            }
            queueNext(next, tissue.parent().parent().attr("href"));
        } else if (isStep(page, 1)) {
            // Step 2: tissue overview -> follow the "Soft tissue" entry.
            log.info(page.selectText("title"));
            queueNext(next, page.select(".uberon:contains(Soft tissue)").attr("href"));
        } else if (isStep(page, 2)) {
            // Step 3: tissue detail -> download every image, then follow the PATHOLOGY link.
            log.info(page.selectText("title"));
            for (Element element : page.select("a[target=_blank].imid")) {
                // Image hrefs are protocol-relative ("//..."), hence the "https:" prefix.
                saveFile("https:" + element.attr("href"), key + File.separator + element.attr("id") + ".jpg");
            }
            Element pathology = page.select("p:contains(PATHOLOGY)").first();
            if (pathology == null) {
                log.warn("PATHOLOGY link not found on {}", page.url());
                return;
            }
            queueNext(next, pathology.parent().parent().attr("href"));
        }
    }

    /**
     * Pulls the protein's descriptive fields from the summary page into {@code root}.
     * Selector paths are position-based and tied to the current proteinatlas.org
     * layout; elements found via first()/last() are null-checked so a layout
     * change degrades to a missing field instead of an NPE.
     */
    private void extractSummary(Page page) {
        Element background = page.select("td.top").last();
        if (background != null) {
            root.set("marker背景", background.text());
        }
        root.set("marker别名", page.selectText("body > table > tbody > tr > td:nth-child(2) > div > table:nth-child(2) > tbody > tr:nth-child(3) > td"));
        root.set("细胞定位", page.selectText("body > table > tbody > tr > td:nth-child(2) > div > table:nth-child(3) > tbody > tr:nth-child(4) > td"));
        root.set("组织特异性", page.selectText("body > table > tbody > tr > td:nth-child(2) > div > table:nth-child(4) > tbody > tr:nth-child(2) > td:nth-child(2)"));
        root.set("组织表达簇", page.selectText("body > table > tbody > tr > td:nth-child(2) > div > table:nth-child(4) > tbody > tr:nth-child(3) > td"));
        root.set("细胞特异性", page.selectText("body > table > tbody > tr > td:nth-child(2) > div > table:nth-child(5) > tbody > tr:nth-child(2) > td:nth-child(2)"));
        Element immune = page.select("span:contains(Immune cell)").last();
        if (immune != null) {
            root.set("细胞表达簇", immune.parent().parent().select("td").text());
        }
    }

    @Override
    public void afterStop() {
        // Release the browser if headless rendering was used during the run.
        if (driver != null) {
            driver.quit();
        }
    }

    public static void main(String[] args) throws Exception {
        SearchCrawler crawler = new SearchCrawler("crawl", true);
        // Worker-thread count for fetching/visiting pages.
        crawler.setThreads(5);
        // Run the crawl for up to 100 breadth levels.
        crawler.start(100);
    }
}
