import scrapy


class OsgeoSpider(scrapy.Spider):
    """Mirror the Scrapy documentation hosted at osgeo.cn.

    Fetches the index page, then follows every stylesheet (`<link>`),
    script (`<script>`), and sidebar navigation page, yielding one
    ``{"name": <relative url>, "content": <body text>}`` item per page.
    """

    name = 'osgeo'
    allowed_domains = ['osgeo.cn']
    start_urls = ['https://www.osgeo.cn/scrapy/index.html']

    # Overridden so the initial request carries explicit browser-like
    # headers (the site otherwise serves different/blocked content).
    def start_requests(self):
        headers = {
            "Host": "www.osgeo.cn",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.9171"
        }
        # Iterate start_urls instead of hard-coding index 0; identical
        # behavior for the single configured URL, robust if more are added.
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=headers)

    def parse(self, response):
        """Yield the index page item, then requests for its assets and sub-pages.

        :param response: the downloaded index page.
        """
        yield {
            "name": "index.html",
            "content": response.text,
        }
        # (xpath, attribute) pairs for each resource family we mirror:
        # head stylesheets, head scripts, and sidebar navigation links.
        targets = (
            ("//head/link", "href"),
            ("//head/script", "src"),
            ('//div[@class="wy-menu wy-menu-vertical"]//li/a', "href"),
        )
        for xpath, attr in targets:
            for node in response.xpath(xpath):
                url = node.xpath("./@" + attr).extract_first()
                if not url:
                    # Inline <script> blocks have no src (and a <link> may
                    # lack href); concatenating None would raise TypeError
                    # and abort the whole callback.
                    continue
                self.logger.debug("queueing %s", url)
                # urljoin resolves relative ("../_static/x.css") and
                # absolute URLs correctly, unlike the previous naive
                # 'https://www.osgeo.cn/scrapy/' + url concatenation.
                yield scrapy.Request(
                    url=response.urljoin(url),
                    callback=self.link_parse,
                    cb_kwargs={"filename": url},
                )

    def link_parse(self, response, filename):
        """Yield a fetched resource keyed by its original relative URL.

        :param response: the downloaded asset or sub-page.
        :param filename: the relative URL used as the item's name.
        """
        yield {
            "name": filename,
            "content": response.text,
        }
