import scrapy
import os
from ..items import SinanewsItem


class NewsSpider(scrapy.Spider):
    """Crawl sina.com.cn: discover news categories/subcategories from the
    guide page, mirror them as local directories, then follow article links
    and emit one SinanewsItem per article.
    """

    name = "news"
    allowed_domains = ["sina.com.cn"]  # bare domain (no "news." prefix) so sub-sites also match
    start_urls = ["https://news.sina.com.cn/guide/"]

    def parse(self, response):
        """Parse the guide page.

        Extracts major-category and subcategory titles/links, creates the
        matching folder tree on disk, and schedules a request per subcategory
        carrying a SinanewsItem in ``meta``.
        """
        # Major category titles and links.
        category_titles = response.xpath('//h3[@class="tit02"]/a/text()').getall()
        category_urls = response.xpath('//h3[@class="tit02"]/a/@href').getall()
        # Subcategory titles and links.
        subcategory_titles = response.xpath('//ul[@class="list01"]/li/a/text()').getall()
        subcategory_urls = response.xpath('//ul[@class="list01"]/li/a/@href').getall()

        for category_title, category_url in zip(category_titles, category_urls):
            # Build the path from components; strip() so stray whitespace in
            # the scraped title doesn't create a differently-named folder.
            category_folder = os.path.join('新浪新闻', category_title.strip())
            os.makedirs(category_folder, exist_ok=True)

            # Pair each subcategory with its parent category.
            for subcategory_title, subcategory_url in zip(subcategory_titles, subcategory_urls):
                # Normalize the scheme so an https subcategory URL still
                # matches an http category prefix.
                if subcategory_url.replace('https', 'http').startswith(category_url):
                    # Subfolder built under the (stripped) category folder —
                    # the original used the unstripped title here, which could
                    # place it outside the directory created above.
                    sub_folder = os.path.join(category_folder, subcategory_title.strip())
                    os.makedirs(sub_folder, exist_ok=True)

                    item = SinanewsItem(
                        category_title=category_title,
                        category_url=category_url,
                        subcategory_title=subcategory_title,
                        subcategory_url=subcategory_url,
                        article_path=sub_folder
                    )
                    yield scrapy.Request(url=subcategory_url, meta={'item': item}, callback=self.subarticle_parse)

    def subarticle_parse(self, response):
        """Parse a subcategory page: follow links to individual articles.

        Keeps only URLs that (1) end in ``.shtml`` (article pages) and
        (2) live under this item's category site.
        """
        item = response.meta['item']

        urls = response.xpath('//a/@href').getall()

        for url in urls:
            if url.endswith('.shtml') and url.replace('https', 'http').startswith(item['category_url']):
                # Copy the item per article: Scrapy processes these requests
                # concurrently, so mutating one shared item via meta would let
                # later assignments clobber earlier articles' URLs.
                article_item = item.copy()
                article_item['article_url'] = url
                yield scrapy.Request(url=url, meta={'item': article_item}, callback=self.article_parse)

    def article_parse(self, response):
        """Parse an article page: extract its title and body text into the item."""
        item = response.meta['item']

        # Article title (may be None if the page uses a different layout).
        article_title = response.xpath('//h1[@class="main-title"]/text()').get()

        # string(.) yields each matched container's full text; getall() + join
        # concatenates across containers. (The original joined a single .get()
        # string, which inserts a space between every character and raises
        # TypeError when nothing matches.)
        article_content = " ".join(response.xpath(
            '//div[contains(@class,"article") or contains(@class,"article-content")]').xpath('string(.)').getall())

        item['article_title'] = article_title
        item['article_content'] = article_content
        yield item
