package org.chen.service.base;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.chen.exception.CrawlerException;
import org.chen.mapper.ArticleRepository;
import org.chen.domain.Article;
import org.chen.domain.CrawlerTask;
import org.chen.domain.vo.CrawlerResult;
import org.chen.pipeline.ResultCollectorPipeline;
import org.chen.service.CrawlerService;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

@Slf4j
@RequiredArgsConstructor
public abstract class BaseWebMagicCrawlerService implements CrawlerService {

    /** Repository used by the result pipeline to persist crawled articles. */
    protected final ArticleRepository articleRepository;

    /**
     * Template method that runs a WebMagic crawl for the given task.
     *
     * <p>Validates the source and the task's start URL first (fail-fast, before any
     * pipeline/processor construction), then runs a blocking {@link Spider} with the
     * subclass-provided {@link PageProcessor}, collecting results through a
     * {@link ResultCollectorPipeline}, and finally assembles timing and counters
     * into a {@link CrawlerResult}.
     *
     * @param task the crawl task; must carry a non-empty start URL
     * @return aggregated crawl result (counts, articles, errors, timing)
     * @throws CrawlerException if the source is unavailable or the start URL is missing
     */
    @Override
    public CrawlerResult crawl(CrawlerTask task) {
        if (!isSourceAvailable()) {
            throw new CrawlerException(getSourceName() + " source is not available");
        }

        // Fail fast: validate the start URL before building the pipeline or invoking
        // the subclass hook getPageProcessor(task), which may perform real work.
        String startUrl = task.getStartUrl();
        if (startUrl == null || startUrl.isEmpty()) {
            throw new CrawlerException("Start URL is not set in the task");
        }

        LocalDateTime startTime = LocalDateTime.now();
        List<Article> articles = new ArrayList<>();
        List<String> errors = new ArrayList<>();
        AtomicInteger success = new AtomicInteger(0);
        AtomicInteger failed = new AtomicInteger(0);

        // Pipeline that persists articles and records per-page success/failure counts.
        ResultCollectorPipeline pipeline = new ResultCollectorPipeline(
                articleRepository,
                articles,
                errors,
                success,
                failed
        );

        // Subclass-specific page processor for this task.
        PageProcessor pageProcessor = getPageProcessor(task);

        log.info("Starting crawler for {} with URL: {}", getSourceName(), startUrl);

        // Create and run the spider; run() blocks until the crawl completes,
        // so the counters and collections are fully populated afterwards.
        Spider.create(pageProcessor)
                .addUrl(startUrl)
                .addPipeline(pipeline)
                .thread(getThreadCount())
                .run();

        LocalDateTime endTime = LocalDateTime.now();
        long duration = ChronoUnit.MILLIS.between(startTime, endTime);

        return CrawlerResult.builder()
                .taskId(task.getTaskId())
                .source(getSourceName())
                .total(success.get() + failed.get())
                .success(success.get())
                .failed(failed.get())
                .articles(articles)
                .errors(errors)
                .startTime(startTime)
                .endTime(endTime)
                .duration(duration)
                .build();
    }

    /**
     * Supplies the source-specific page processor for a task.
     *
     * @param task the crawl task being executed
     * @return a WebMagic {@link PageProcessor} that parses pages for this source
     */
    protected abstract PageProcessor getPageProcessor(CrawlerTask task);

    /**
     * Number of crawler threads the spider should use.
     * Subclasses may override to tune per-source concurrency.
     *
     * @return thread count (default 3)
     */
    protected int getThreadCount() {
        return 3;
    }
}