import scrapy
from urllib.parse import quote
from selenium import webdriver
import arrow

class templateSpider(scrapy.Spider):
    """Template spider: scrapes a Baidu News search-result page.

    Yields one dict per ``.result`` node with keys:
        title         -- headline text (may be None if the node has no text)
        url           -- article link with any query string stripped
        auth_update   -- raw "source\\xa0\\xa0date" author line ('' if absent)
        auth_format   -- source name (text before the '\\xa0\\xa0' separator)
        update_format -- publish time as a datetime, or None if unparseable
        abs           -- concatenated summary text
    """

    name = "template"
    # handle_httpstatus_list = [404]
    # download_delay = 2
    # start_urls = []

    def __init__(self, *args, **kwargs):
        """Plain pass-through init.

        The commented block below is optional scaffolding for driving the
        crawl through a headless, proxied Firefox via Selenium — kept for
        reference because this spider is meant to be copied as a template.
        """
        super().__init__(*args, **kwargs)
        # proxyIp = "localhost"
        # proxyPort = 18080
        # option = webdriver.FirefoxOptions()
        # option.add_argument('-headless')
        # # option.set_headless()
        # profile = webdriver.FirefoxProfile()
        #
        # profile.set_preference('network.proxy.type', 1)
        # profile.set_preference("network.proxy.http", proxyIp)
        # profile.set_preference("network.proxy.http_port", proxyPort)
        #
        # profile.set_preference("network.proxy.ssl", proxyIp)
        # profile.set_preference("network.proxy.ssl_port", proxyPort)
        #
        # profile.accept_untrusted_certs = True
        # ## Disable stylesheets
        # profile.set_preference('permissions.default.stylesheet', 2)
        # ## Disable images
        # profile.set_preference('permissions.default.image', 2)
        # ## Disable Flash
        # profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
        # profile.set_preference("general.useragent.override", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5")
        # self.driver = webdriver.Firefox(firefox_profile=profile, executable_path="drivers/geckodriver",
        #                            firefox_options=option)
        #

    def start_requests(self):
        """Issue the single Baidu News search request (50 results, page 0)."""
        # urls = [
        #     'http://quotes.toscrape.com/page/1/',
        #     'http://quotes.toscrape.com/page/2/',
        # ]
        # for url in urls:
        #     yield scrapy.Request(url=url, callback=self.parse)
        url = "http://news.baidu.com/ns?word=神州信息 (新兴市场战略)&pn=0&cl=2&ct=0&tn=news&rn=50&ie=utf-8&bt=0&et=0"
        yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Extract one item dict per search-result node on the page.

        Robust against malformed nodes: results without a title link are
        skipped, and a missing/unparseable author line or date no longer
        aborts the whole page (auth_update falls back to '', update_format
        to None).
        """
        for each in response.css(".result"):
            url = each.css(".c-title a::attr(href)").extract_first()
            if not url:
                # Malformed result block with no title link — skip it
                # instead of crashing on `None` below.
                continue
            dic = {}
            dic['title'] = each.css(".c-title a::text").extract_first()
            # Drop any tracking query string from the article link.
            dic['url'] = url.split('?')[0] if '?' in url else url
            # Author line looks like "<source>\xa0\xa0<date>"; may be absent.
            author_line = each.css(".c-author::text").extract_first() or ''
            dic['auth_update'] = author_line
            parts = author_line.split('\xa0\xa0')
            dic['auth_format'] = parts[0]
            try:
                # Date format e.g. "2018年01月02日 10:30".
                dic['update_format'] = arrow.get(parts[1], "YYYY年MM月DD日 HH:mm").datetime
            except (IndexError, ValueError):
                # No date segment, or it failed to parse (arrow's
                # ParserError subclasses ValueError).
                dic['update_format'] = None
            dic['abs'] = "".join(each.css(".c-summary::text").extract())
            yield dic
        # self.log('Saved file %s' % response.url)

    # def closed(self, reason):
    #     self.driver.close()