package top.jiangqiang.crawler.core.entities;

import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.dromara.hutool.core.collection.CollUtil;
import org.dromara.hutool.core.text.StrUtil;
import org.dromara.hutool.core.util.ObjUtil;
import top.jiangqiang.crawler.core.http.constants.HeaderName;
import top.jiangqiang.crawler.core.http.entities.HttpRequestEntity;
import top.jiangqiang.crawler.core.http.entities.QueryParameters;
import top.jiangqiang.crawler.core.http.entities.RequestHeader;
import top.jiangqiang.crawler.core.http.entities.RequestLine;
import top.jiangqiang.crawler.core.http.entities.body.HttpBody;

import java.io.Serializable;
import java.util.*;
import java.util.function.Supplier;

/**
 * A crawl seed: one URL together with the request settings used to fetch it,
 * the chain of ancestor URLs it was discovered through, and the child seeds
 * spawned from its page.
 *
 * @Author: JiangQiang
 * @Date: 2022年05月14日 10:43
 */
@Data
@Slf4j
@NoArgsConstructor
public class Crawler implements HttpRequestEntity, Serializable {
    /**
     * Chain of ancestor URLs this seed was discovered through (oldest first).
     */
    protected List<String> sourceList = new ArrayList<>();
    /**
     * Crawl depth; the initial seed is depth 1.
     */
    protected Integer depth = 1;
    /**
     * The URL of this seed.
     */
    protected String url;
    /**
     * Child seeds (the next generation) spawned from this one.
     */
    protected List<Crawler> crawlers = new ArrayList<>();
    private RequestLine requestLine;
    private QueryParameters queryParameters;
    private RequestHeader requestHeader;
    private HttpBody<?> requestBody;

    public Crawler(String url) {
        this.url = url;
    }

    /**
     * Extra user data, e.g. a shared tag for seeds of the same batch, useful for classification.
     */
    protected Map<String, Object> metaData = new HashMap<>();

    /**
     * Supplies the unique id of this seed, used to decide whether two seeds are the same
     * (deduplication). Defaults to the URL. Because the same URL can return different data
     * at different times, dedup cannot always rely on the URL alone — install a custom id
     * rule here when needed.
     * NOTE(review): lambdas/method references are not generally Serializable, so Java
     * serialization of a Crawler carrying this default supplier may fail — confirm whether
     * the framework actually serializes Crawler instances this way.
     */
    protected Supplier<String> idSupplier = this::getUrl;

    /**
     * Creates a child seed for {@code url}: depth is parent depth + 1, all request settings
     * and metadata are deep-copied from this crawler so the child can mutate them
     * independently, the source chain is extended with this crawler's URL, and the child's
     * Referer header is set to this crawler's URL.
     *
     * @param url the child URL; blank values are ignored
     * @return the new child crawler, or {@code null} when {@code url} is blank
     */
    public Crawler addSeed(String url) {
        if (StrUtil.isBlank(url)) {
            return null;
        }
        // "this" is the current crawler, "crawler" is the child being created.
        Crawler crawler = new Crawler();
        crawler.setUrl(url);
        crawler.setDepth(this.depth + 1);
        // Clone first and only then add the Referer: a freshly constructed Crawler has a
        // null requestHeader, and calling addHeader on the child's getter would NPE.
        RequestHeader clonedHeader = ObjUtil.cloneByStream(this.requestHeader);
        if (clonedHeader != null) {
            // Record where the child link was discovered.
            clonedHeader.addHeader(HeaderName.REFERER.getValue(), this.url);
        }
        crawler.setRequestHeader(clonedHeader);
        crawler.setRequestBody(ObjUtil.cloneByStream(this.requestBody));
        crawler.setRequestLine(ObjUtil.cloneByStream(this.requestLine));
        crawler.setQueryParameters(ObjUtil.cloneByStream(this.queryParameters));
        List<String> subSourceList = new ArrayList<>(ObjUtil.cloneByStream(this.sourceList));
        subSourceList.add(this.url);
        crawler.setSourceList(subSourceList);
        crawler.setMetaData(new HashMap<>(ObjUtil.cloneByStream(this.metaData)));
        crawlers.add(crawler);
        return crawler;
    }

    /**
     * Adds an already-built crawler to this seed's next generation as-is
     * (no cloning or depth adjustment — contrast with {@link #addSeed(String)}).
     *
     * @param crawler the child crawler to register
     */
    public void addCrawler(Crawler crawler) {
        crawlers.add(crawler);
    }

    /**
     * Creates one child seed per URL via {@link #addSeed(String)}.
     *
     * @param urlList candidate child URLs; null/empty lists and blank entries are ignored
     */
    public void addSeeds(List<String> urlList) {
        if (CollUtil.isNotEmpty(urlList)) {
            urlList.forEach(this::addSeed);
        }
    }

    /**
     * Unique id of this seed, produced by {@link #idSupplier}; equals the URL by default.
     *
     * @return the seed id (may be null when the URL is unset)
     */
    public String getId() {
        return idSupplier.get();
    }

    /**
     * Two crawlers are equal when their ids are equal (null-safe), matching the
     * dedup contract described on {@link #idSupplier}.
     */
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof Crawler crawler)) return false;
        return Objects.equals(getId(), crawler.getId());
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(getId());
    }

}
