from pathlib import Path

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule


# Key points:
#
#     allowed_domains: restricts crawling to links under this domain only.
#
#     LinkExtractor(): extracts all <a href> links by default.
#
#     follow=True: follow extracted links recursively.

class QuotesSpider(scrapy.Spider):
    """Recursively crawl wot-design-uni.cn.

    Emits two kinds of items:
      * link-map items ({'url', 'links'}) from ``parse_item`` (unused while
        ``rules`` is inert, see NOTE below);
      * FilesPipeline items ({'file_urls', ...}) from ``parse`` for every
        static asset (CSS/JS/images/fonts/iframes) referenced by a page.

    Link-following is done manually in ``parse``; ``allowed_domains``
    keeps the crawl on-site.
    """

    name = "wotdesign"
    allowed_domains = ["wot-design-uni.cn"]

    # NOTE(review): ``rules`` is only processed by scrapy.spiders.CrawlSpider.
    # This class subclasses plain scrapy.Spider, so the rules below are INERT
    # — actual link-following happens in parse(). Simply switching the base
    # class to CrawlSpider would NOT be safe either: CrawlSpider reserves the
    # parse() method for its own rule machinery, which this class overrides.
    # Kept for reference until the crawl strategy is decided.
    rules = (
        # Page links (followed recursively).
        Rule(LinkExtractor(tags='a', attrs='href'), callback='parse_item', follow=True),
        # Static resources (fetched once, not followed).
        Rule(LinkExtractor(tags=['link', 'script', 'img'],
                           attrs=['href', 'src']),
             callback='parse_resource',
             follow=False),
    )

    def start_requests(self):
        """Seed the crawl with the site root and the demo section."""
        seed_urls = [
            "https://wot-design-uni.cn/",
            "https://wot-design-uni.cn/demo/",
        ]
        for url in seed_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse_page(self, response):
        # NOTE(review): never referenced — no rule or request uses this
        # callback (and ``rules`` is inert anyway). Kept for backward
        # compatibility; remove once confirmed dead.
        yield {'type': 'page', 'url': response.url}

    def parse_resource(self, response):
        """Classify a fetched resource by URL suffix and yield a typed item.

        URLs with an unrecognised suffix yield nothing.
        """
        # Suffix groups checked in order; first match wins.
        suffix_kinds = (
            (('.css',), 'css'),
            (('.js',), 'js'),
            (('.png', '.jpg', '.jpeg'), 'image'),
            (('.ttf', '.woff', '.woff2'), 'font'),
        )
        for suffixes, kind in suffix_kinds:
            if response.url.endswith(suffixes):
                yield {'type': kind, 'url': response.url}
                break

    def parse_item(self, response):
        """Yield the page URL plus all of its outbound links, absolutised."""
        hrefs = response.css('a::attr(href)').getall()
        yield {
            'url': response.url,
            'links': [response.urljoin(href) for href in hrefs],
        }

    def parse(self, response):
        """Follow every anchor on the page and submit all referenced static
        assets (CSS/JS/images/fonts/iframes) to the FilesPipeline.
        """
        hrefs = response.css("a::attr(href)").getall()
        self.log(hrefs)

        # Recurse into every anchor. ``allowed_domains`` filters off-site
        # requests and Scrapy's scheduler dedupes repeated URLs.
        for href in hrefs:
            yield scrapy.Request(url=response.urljoin(href), callback=self.parse)

        # Collect every flavour of static-asset reference on the page.
        css_links = response.css('link[rel="stylesheet"]::attr(href)').getall()
        css_links_2 = response.css('link[as="style"]::attr(href)').getall()
        font_links = response.css('link[as="font"]::attr(href)').getall()
        js_links = response.css('script[src]::attr(src)').getall()
        img_links = response.css('img[src]::attr(src)').getall()
        iframe_links = response.css('iframe[src]::attr(src)').getall()

        # One FilesPipeline item per asset URL.
        for url in css_links + js_links + img_links + css_links_2 + font_links + iframe_links:
            yield {
                'file_urls': [response.urljoin(url)],
                'page_url': response.url,  # source page of the asset
                'url': response.url,
                'html': response.text,

            }

        # NOTE(review): these CSS/JS URLs were already submitted to the
        # pipeline above, and their bodies are not HTML, so re-parsing them
        # with self.parse is likely unintended — confirm before relying on it.
        yield from response.follow_all(css_links + js_links, self.parse)

