import scrapy
from scrapy import Request

from SpiderScrapy.items import NewsItem

class WangyiSpider(scrapy.Spider):
    """Spider for the NetEase news homepage (news.163.com).

    Collects headline title/URL pairs from the first ("要闻" / top news)
    tab of the homepage news panel, then follows each link to extract
    the article body from the detail page.
    """

    name = "wangyi"
    allowed_domains = ["news.163.com"]
    start_urls = ["https://news.163.com"]

    def parse(self, response):
        """Parse the homepage listing.

        Yields one `Request` per article, carrying a partially-filled
        `NewsItem` (title + detail_url) via `meta` to `parse_detail`.
        """
        # li[1] is the first ("要闻") tab of the news panel. NOTE(review):
        # this XPath is position-based and brittle against layout changes.
        li = response.xpath('//*[@id="index2016_wrap"]/div[3]/div[2]/div[3]/div[2]/div[5]/div/ul/li[1]')
        div_list = li.xpath("./div[2]/div")
        for div in div_list:
            detail_url = div.xpath("./a/@href").extract_first()
            title = div.xpath("./a/text()").extract_first()
            # Some child divs are ads/placeholders with no anchor; a None
            # URL would make Request() raise ValueError, so skip them.
            if not detail_url:
                continue
            # Resolve relative / protocol-relative hrefs against the
            # response URL so the scheduled request is always absolute.
            detail_url = response.urljoin(detail_url)
            item = NewsItem()
            item.update({"title": title, "detail_url": detail_url})
            yield Request(url=detail_url, callback=self.parse_detail, meta={"item": item})

    def parse_detail(self, response):
        """Extract the article body HTML and complete the item."""
        item = response.meta["item"]
        # .get() returns the full #endText element's HTML, or None if the
        # detail page uses a different template.
        content = response.xpath('//*[@id="endText"]').get()
        item.update({"content": content})
        yield item
