import scrapy
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver import ChromeOptions

from wangyi.items import WangyiItem


class WySpider(scrapy.Spider):
    """Spider for NetEase (163) news.

    Category pages load their article lists via JavaScript, so the URLs
    collected in ``selenium_url`` are expected to be rendered by a
    Selenium-backed downloader middleware before parsing.
    """

    name = "wy"

    # Headless Chrome: no browser window is shown while crawling.
    opt = Options()
    opt.add_argument("--headless")
    opt.add_argument("--disable-gpu")
    # Anti-detection: hide the "enable-automation" switch so the page is
    # less likely to detect a controlled browser.
    # BUG FIX: the original code set this on a *second* ChromeOptions
    # object that was never passed to webdriver.Chrome, so the setting
    # silently had no effect; it now lives on the options actually used.
    opt.add_experimental_option("excludeSwitches", ["enable-automation"])
    # Shared Selenium driver (created once at class-definition time).
    driver = webdriver.Chrome(options=opt)

    # allowed_domains = ["news.163.com"]
    start_urls = ["https://news.163.com/domestic/"]
    li_index = [1, 2]  # indexes of the menu tabs (e.g. domestic/world) to crawl
    selenium_url = []  # URLs that must be rendered by Selenium (read by middleware)

    def parse(self, response):
        """Pick the wanted category links from the top menu and request them."""
        menu = response.xpath('/html/body/div/div[3]/div[2]/div[2]/div/ul/li/a/@href').extract()
        for i, url in enumerate(menu):
            if i in self.li_index:
                # Record the URL so the middleware knows to render it
                # dynamically with Selenium.
                self.selenium_url.append(url)
                yield scrapy.Request(url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract every article URL from a (Selenium-rendered) category page."""
        news_url = response.xpath('//div[@class="ndi_main"]/div/a/@href').extract()
        for url in news_url:
            yield scrapy.Request(url, callback=self.parse_menu_detail)

    def parse_menu_detail(self, response):
        """Parse one article page into a WangyiItem (title + full text)."""
        item = WangyiItem()
        title = response.xpath('//*[@id="container"]/div[1]/h1/text()').extract_first()
        # Join all text fragments inside the content div into one string.
        content = ''.join(response.xpath('//div[@id="content"]//text()').extract())
        item['title'] = title
        item['content'] = content
        yield item

    def closed(self, reason):
        """Scrapy shutdown hook: quit Chrome so the browser process is not
        leaked when the spider finishes (missing in the original)."""
        self.driver.quit()
