import scrapy
from cuc_news.items import CucNewsItem

class CucNewsSpider(scrapy.Spider):
    """Spider for the CUC (Communication University of China) news list.

    Crawls the first four paginated list pages, follows each article
    link, and yields one ``CucNewsItem`` per article detail page.
    """

    name = "cuc_news"
    allowed_domains = ["cuc.edu.cn"]
    start_urls = [f"https://www.cuc.edu.cn/news/1901/list{i}.psp" for i in range(1, 5)]

    def parse(self, response):
        """Collect article links from a list page and schedule detail requests."""
        news_list = response.css("ul.news_list.list2")
        if not news_list:
            return
        for entry in news_list.css("li"):
            href = entry.css("a::attr(href)").get()
            if not href:
                # Skip list entries without a usable link.
                continue
            yield scrapy.Request(
                url=response.urljoin(href),
                callback=self.parse_article,
            )

    def parse_article(self, response):
        """Parse an article detail page into a CucNewsItem."""
        article = response.css("div.article")
        if not article:
            return
        item = CucNewsItem()
        # Title from the article heading; fallback string if missing.
        item['title'] = article.css("h1::text").get(default="未找到标题").strip()
        # Metadata paragraph(s) holding publisher / date spans.
        meta = article.css("p")
        item['publisher'] = meta.css("span.arti_publisher::text").get(default="未找到发布者").strip()
        # Combine the "views" label with the visit counter into one field.
        views_label = response.css("span.arti_viwe::text").get(default="未找到阅读信息").strip()
        visit_count = response.css("span.WP_VisitCount::text").get(default="0").strip()
        item['view_info'] = f"{views_label} {visit_count}"
        item['update_date'] = meta.css("span.arti_update::text").get(default="未找到日期").strip()
        yield item
