package com.cw.spider.parse.jxtv;

import cn.wanghaomiao.seimi.annotation.Crawler;
import cn.wanghaomiao.seimi.http.SeimiHttpType;
import cn.wanghaomiao.seimi.struct.Request;
import cn.wanghaomiao.seimi.struct.Response;
import com.cw.spider.parse.AbstractCrawler;
import com.cw.spider.queue.Myqueue;
import com.cw.spider.service.NewService;
import org.seimicrawler.xpath.JXDocument;
import org.springframework.beans.factory.annotation.Autowired;

import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;

@Crawler(name = "jx_content_basic", httpType = SeimiHttpType.OK_HTTP3, delay = 1,
        httpTimeOut = 3000, queue = Myqueue.class)
public class JxContentCrawler extends AbstractCrawler {

    private static final Logger LOGGER = Logger.getLogger(JxContentCrawler.class.getName());

    /** Persists parsed articles; field-injected by Spring. */
    @Autowired
    private NewService newService;

    /**
     * Crawler entry point: extracts every article link from the news-list page
     * ({@code ul.newslist > li > a/@href}) and enqueues a detail request for each,
     * handled by {@link #getDetail(Response)}.
     *
     * @param response list-page response delivered by the Seimi framework; may be null
     */
    @Override
    public void start(Response response) {
        if (response == null) {
            return;
        }
        JXDocument doc = response.document();
        if (doc == null) {
            return;
        }
        try {
            List<Object> urls = doc.sel("//ul[@class='newslist']/li/a/@href");
            for (Object url : urls) {
                push(Request.build(url.toString(), JxContentCrawler::getDetail));
            }
        } catch (Exception e) {
            // Best-effort crawl: log with the cause and keep the crawler alive
            // rather than letting one bad list page abort the run.
            LOGGER.log(Level.WARNING, "Failed to extract article links from list page", e);
        }
    }

    /**
     * Callback for article detail pages: delegates extraction and persistence
     * to the shared {@code getArticle} logic in {@link AbstractCrawler}.
     *
     * @param response detail-page response delivered by the Seimi framework
     */
    public void getDetail(Response response) {
        super.getArticle(response, newService);
    }
}
