import scrapy


class NewsSpider(scrapy.Spider):
    """Spider that crawls news.163.com: collects channel links from the
    front page, then extracts article titles from each channel page."""

    name = "news"
    allowed_domains = ["news.163.com"]
    start_urls = ["http://news.163.com/"]

    def parse(self, response):
        """Parse the front page and follow each channel link.

        Yields one scrapy.Request per extracted href, handled by
        parse_one.
        """
        # Fixed XPath: the attribute predicate was missing its closing ']',
        # which made Scrapy raise "Invalid XPath" at runtime.
        links = response.xpath('//div[@class="bd"]/div/ul/li[2]/a/@href').extract()
        for url in links:
            yield scrapy.Request(url=url, callback=self.parse_one)

    def parse_one(self, response):
        """Parse a channel page and yield a dict per news item.

        Each yielded item has a single "title" key; the value is None
        when the title node is absent.
        """
        news_node = response.xpath('//li/div[@class="ndi_main"]/div')
        for news in news_node:
            new_info = {}
            # Fixed API misuse: `.extract.first()` called `.first()` on the
            # bound method object (AttributeError). `extract_first()` is the
            # correct Scrapy selector call and returns None when empty.
            new_info["title"] = news.xpath("./div/div[1]/h3/a/text()").extract_first()
            yield new_info
