import scrapy


class NscSpider(scrapy.Spider):
    """Crawl selected section pages of news.163.com and yield headline items.

    Flow: `parse` receives the front page, follows the configured section
    links, and `parse_one` yields one ``{"title", "url"}`` dict per headline
    found on each section page.
    """

    name = "nsc"
    allowed_domains = ["news.163.com"]
    start_urls = ["https://news.163.com/"]

    # Indices (within the index_head nav link list) of the sections to
    # follow.  The original code hard-coded [1] inside parse(); lifting it
    # here keeps that behaviour but makes it extensible without editing code.
    SECTION_INDICES = (1,)

    def parse(self, response):
        """Handle the front-page response: schedule a request per section.

        Yields ``scrapy.Request`` objects whose responses are handled by
        :meth:`parse_one`.
        """
        # All section links in the front page's index_head navigation block.
        section_links = response.xpath('//div[@class="index_head"]//li/a')
        for idx in self.SECTION_INDICES:
            if idx >= len(section_links):
                # Page layout changed or index misconfigured — skip, don't crash.
                self.logger.warning("section index %d out of range (%d links)",
                                    idx, len(section_links))
                continue
            anchor = section_links[idx]
            link = anchor.xpath('./@href').extract_first()    # section URL
            genre = anchor.xpath('./text()').extract_first()  # section name
            self.logger.info("following section %r -> %s", genre, link)
            if not link:
                # Guard: scrapy.Request(url=None) raises ValueError and would
                # abort the whole callback.
                continue
            yield scrapy.Request(url=link, callback=self.parse_one)

    def parse_one(self, response):
        """Handle a section page: yield a title/url item per headline node."""
        news_nodes = response.xpath('//div[@class="news_title"]')
        self.logger.debug("found %d headline nodes on %s",
                          len(news_nodes), response.url)
        for node in news_nodes:
            yield {
                "title": node.xpath("./a/text()").extract_first(),
                "url": node.xpath("./a/@href").extract_first(),
            }

    # parse_two (disabled): would follow each article URL and join the
    # stripped text of every <p> under div.post_body into the article body.
    # def parse_two(self, response):
    #     paragraphs = response.xpath('//div[@class="post_body"]/p/text()').extract()
    #     return ''.join(p.strip() for p in paragraphs)
