import re
from urllib.parse import urljoin

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from bs4 import BeautifulSoup

from scdaily.items import ScdailyItem


class SclsdailySpider(CrawlSpider):
    """Crawl Liangshan-economy articles from the Sichuan Daily e-paper.

    Starts from a Baidu site-search results page scoped to scdaily.cn,
    follows result pagination, and parses each matching article page
    into a ``ScdailyItem``.
    """

    name = "sclsdaily"
    # Baidu in-site search for "凉山州经济" restricted to scdaily.cn, page 0.
    start_urls = ["https://zhannei.baidu.com/cse/site?q=%E5%87%89%E5%B1%B1%E5%B7%9E%E7%BB%8F%E6%B5%8E&p=0&srt=lds"
                  "&entry=1&cc=scdaily.cn"]
    # Base URL used to absolutize relative image links found on article pages.
    head_url = "https://epaper.scdaily.cn"

    rules = (
        # Follow search-result pagination (p=0, p=1, ...); links are relative,
        # so rebuild them against the Baidu CSE base.
        Rule(LinkExtractor(
            allow=(r'site\?q=%E5%87%89%E5%B1%B1%E5%B7%9E%E7%BB%8F%E6%B5%8E&p=\d+'),
            process_value=lambda x: urljoin('https://zhannei.baidu.com/cse/', x)
        ), follow=True),
        # Article detail pages, e.g. .../shtml/scrb/YYYYMMDD/<id>.shtml
        Rule(LinkExtractor(
            allow=(r'https://epaper\.scdaily\.cn/shtml/scrb/\d{4}\d{2}\d{2}/\d+\.shtml$'),
            canonicalize=True  # normalize URLs before dedup/matching
        ), callback='parse_item', follow=False)
    )

    def parse_item(self, response):
        """Parse one article page into a ``ScdailyItem``.

        Extracts the headline, body text, publication date and image
        links; fields that cannot be located are left empty/None rather
        than aborting the item with an exception.
        """
        soup = BeautifulSoup(response.text, "html.parser")
        title = response.xpath("/html/body/section/div[1]/div[2]/ul/li[1]/h1/text()").extract_first()
        # The article body is rendered inside <font color="#231f1f"> tags;
        # join their text in document order.
        news = "".join(tag.text for tag in soup.find_all("font", attrs={"color": "#231f1f"}))
        # Publication date (e.g. "2023年01月02日") is embedded in an inline
        # <script>. Guard both a missing node and a non-matching pattern so a
        # layout change degrades to news_data=None instead of raising.
        news_data_text = response.xpath("/html/body/header/div/ul/li[1]/p[2]/script/text()").extract_first()
        news_data = None
        if news_data_text:
            date_match = re.search(r'(\d{4}年\d{2}月\d{2}日)', news_data_text)
            if date_match:
                news_data = date_match.group().strip()
        pic_tags = response.xpath("/html/body/section/div[1]/div[2]/ul/li[2]/div[@align='center']")
        pic_link_list = []
        for pic_tag in pic_tags:
            href = pic_tag.xpath("./a/@href").extract_first()
            # A centered div without an <a> yields None; skip it instead of
            # raising TypeError on concatenation.
            if href:
                pic_link_list.append(self.head_url + href)
        item = ScdailyItem()
        item["title"] = title
        item["news"] = news
        item["news_data"] = news_data
        item["pic_link_list"] = pic_link_list
        item["news_link"] = response.url
        yield item





