import scrapy
from selenium import webdriver
from selenium.webdriver.firefox.webdriver import Options
from wangyipro.items import WangyiproItem

class WangyiSpider(scrapy.Spider):
    """Crawl NetEase news (news.163.com).

    Flow:
      1. ``parse`` reads the home page and picks a few section links by index.
      2. Each section page's article list is loaded dynamically by JS, so a
         Selenium-driven headless Firefox (``self.bro``, presumably consumed by
         a downloader middleware -- confirm against the project's middleware)
         is used to render it; ``modol_requests`` then extracts title + URL.
      3. ``ditle`` scrapes the article body text and yields the finished item.
    """

    name = 'wangyi'
    #allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']

    def __init__(self, *args, **kwargs):
        """Set up per-instance state and the headless Firefox driver."""
        # Forward Scrapy's spider kwargs (e.g. `scrapy crawl wangyi -a key=val`).
        super().__init__(*args, **kwargs)
        # Per-instance list of section URLs.  (Was a shared class attribute,
        # which would leak state between spider instances.)
        self.modol_list = []
        self.fox_option = Options()
        # BUG FIX: flag was misspelled '--handless'; Firefox silently ignored
        # it and opened a visible browser window instead of running headless.
        self.fox_option.add_argument('--headless')
        self.fox_option.add_argument('--disable-gpu')
        # `firefox_options`/`executable_path` are the selenium 3.x spellings
        # already used by this project; kept for compatibility.
        self.bro = webdriver.Firefox(
            executable_path='C:/py/Scripts/geckodriver.exe',
            firefox_options=self.fox_option,
        )

    def parse(self, response):
        """Parse the home page: collect selected section links and request
        each section page (``modol_requests`` handles the response)."""
        li_list = response.xpath('//*[@class="ns_area list"]/ul/li')
        # Positions of the wanted section <li> entries on the home page.
        # NOTE(review): comments elsewhere mention "five sections" but only
        # three indexes are listed -- confirm the intended set.
        section_indexes = [3, 4, 8]
        for idx in section_indexes:          # renamed: `list` shadowed the builtin
            modol = li_list[idx].xpath('./a/@href').extract_first()
            # Guard: extract_first() may return None; Request(None) would raise.
            if modol:
                self.modol_list.append(modol)
        # Manually schedule a request per section page.
        for url in self.modol_list:
            yield scrapy.Request(url, callback=self.modol_requests)

    def modol_requests(self, response):
        """Parse one section page: extract each article's title and its
        detail-page URL, then request the detail page with the item in meta.

        The article list on these pages is dynamically loaded, so this
        response is expected to be the Selenium-rendered page source.
        """
        conts = response.xpath('//div[@class="ndi_main"]/div')
        for div in conts:
            detail_url = div.xpath('.//div[@class="news_title"]/h3/a/@href').extract_first()
            title = div.xpath('.//div[@class="news_title"]/h3/a/text()').extract_first()
            # Guard: either field may be missing; the original crashed on
            # "".join(None) and could yield Request(None).
            if not detail_url or not title:
                continue
            item = WangyiproItem()
            item['aa'] = title.strip()
            # Carry the partially-filled item to the detail-page callback.
            yield scrapy.Request(detail_url, callback=self.ditle, meta={'item': item})

    def ditle(self, response):
        """Parse an article detail page: join all text nodes under the
        content div into one string and complete the item from meta."""
        item = response.meta['item']
        post_bodys = response.xpath('//*[@id="content"]/div[2]//text()').extract()
        item['bb'] = ''.join(post_bodys)
        yield item

    def closed(self, spider):
        """Scrapy calls this when the spider closes; shut down the browser
        so no orphaned Firefox process is left behind."""
        self.bro.quit()

