# -*- coding: utf-8 -*-
import re

import scrapy


class GwSpider(scrapy.Spider):
    """Mirror the Scrapy documentation hosted at osgeo.cn.

    Yields the raw bytes of the index HTML page, then schedules a request
    for every linked CSS (``<link href>``) and JS (``<script src>``) asset
    and yields each asset's raw bytes tagged with its type and filename.
    """

    name = 'gw'
    allowed_domains = ['osgeo.cn']
    start_urls = ['https://www.osgeo.cn/scrapy/index.html']

    def parse(self, response):
        """Yield the index page body and follow its CSS/JS asset links.

        Args:
            response: the Scrapy Response for one of ``start_urls``.

        Yields:
            A dict with the raw HTML bytes, then ``scrapy.Request`` objects
            for each stylesheet and script referenced by the page.
        """
        # Raw bytes of the HTML page itself.
        yield {
            'type': 'html',
            'wy': response.body,
        }

        # Stylesheets referenced via <link href=...>.
        for href in response.xpath("//link/@href").getall():
            # urljoin resolves the href against the response URL; the
            # previous string concatenation broke on absolute URLs,
            # root-relative paths, and '../' segments.
            yield scrapy.Request(
                url=response.urljoin(href),
                callback=self.parse_css,
                cb_kwargs={"css_name": href.split('/')[-1]},
            )

        # Scripts referenced via <script src=...>.
        for src in response.xpath("//script/@src").getall():
            yield scrapy.Request(
                url=response.urljoin(src),
                callback=self.parse_js,
                cb_kwargs={"js_name": src.split('/')[-1]},
            )

        # TODO: follow links to the other HTML pages of the docs site.

    def parse_css(self, response, css_name):
        """Yield the raw bytes of a fetched CSS file.

        Args:
            response: the Scrapy Response for the stylesheet.
            css_name: filename extracted from the href (passed via cb_kwargs).
        """
        yield {
            'type': 'css',
            'wy': response.body,
            'css_name': css_name,
        }

    def parse_js(self, response, js_name):
        """Yield the raw bytes of a fetched JS file.

        Args:
            response: the Scrapy Response for the script.
            js_name: filename extracted from the src (passed via cb_kwargs).
        """
        yield {
            'type': 'js',
            'wy': response.body,
            'js_name': js_name,
        }