import scrapy


class WangyiSpider(scrapy.Spider):
    """Spider for www.163.com (NetEase).

    Collects the channel (module) links from the homepage navigation bar,
    then issues a request for each channel page so its items can be parsed
    by ``parse_model``.
    """

    name = "Wangyi"
    # allowed_domains = ["www.xxx.com"]
    start_urls = ["https://www.163.com/"]
    # Channel page URLs harvested from the homepage; filled by parse().
    moudle_urls = []

    def parse(self, response):
        """Extract channel URLs from the homepage nav and request each one.

        :param response: the homepage response for ``start_urls[0]``.
        :yields: a ``scrapy.Request`` per collected channel URL, dispatched
                 to :meth:`parse_model`.
        """
        li_list = response.xpath('//*[@id="js_index2017_wrap"]/div[3]/div[1]/div[2]/ul/li')
        for li in li_list:
            # BUG FIX: the original did str(selector), which produces the
            # Selector's repr ("<Selector xpath=... data=...>") rather than
            # the href text, so every Request got an invalid URL.
            # .getall() extracts the actual string values of the @href nodes.
            self.moudle_urls.extend(li.xpath('./a/@href').getall())

        # Request each channel (detail) page in turn.
        for url in self.moudle_urls:
            yield scrapy.Request(url=url, callback=self.parse_model)

    def parse_model(self, response):
        """Parse the title of each channel/detail page (not yet implemented)."""
        pass
