import scrapy


class ArticleSpider(scrapy.Spider):
    """Spider that crawls article pages from xiaohua.com.

    Scrapy looks the spider up by ``name``, restricts the crawl to
    ``allowed_domains`` (omit the attribute to allow every domain), and
    seeds the crawl with ``start_urls``.
    """

    name = "article"
    allowed_domains = ["xiaohua.com"]
    start_urls = ["https://www.xiaohua.com/article/"]

    def parse(self, response):
        """Parse one downloaded page into items and follow-up requests.

        Currently a stub: no extraction logic has been written yet.

        :param response: ``scrapy.http.Response`` for one of the
            ``start_urls`` (or a page reached from them).
        """
        # TODO: extract article fields from ``response`` and yield item
        # dicts and/or ``scrapy.Request`` objects for pagination.
