import os
import pickle

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from CnblogsSpider.items import LagouJobItem, LagouJobItemLoader
from selenium.webdriver.chrome.options import Options
from selenium import webdriver

from CnblogsSpider.utils.common import get_md5


class LagouSpider(CrawlSpider):
    """Crawl lagou.com job postings, authenticating with cookies that were
    previously captured by a Selenium login session and pickled to disk.
    """

    name = 'lagou'
    allowed_domains = ['www.lagou.com']
    # start_urls = ['https://www.lagou.com/']
    start_urls = ['https://www.lagou.com/resume/myresume.html/']

    rules = (
        Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def start_requests(self):
        """Load saved browser cookies (if present) and issue the first request.

        Yields:
            scrapy.Request: the initial request carrying the restored cookies.
        """
        cookie_path = 'D:/CnblogsSpider/CnblogsSpider/cookies/lagou.cookie'
        cookies = []
        if os.path.exists(cookie_path):
            # NOTE(review): unpickling is only safe because this file is
            # written locally by our own login script — never point this at
            # untrusted data.
            with open(cookie_path, 'rb') as f:
                cookies = pickle.load(f)
        # Selenium's get_cookies() format: a list of dicts with 'name'/'value'.
        cookie_dict = {cookie['name']: cookie['value'] for cookie in cookies}
        yield scrapy.Request(url=self.start_urls[0], dont_filter=True, cookies=cookie_dict)

    def parse_item(self, response):
        """Extract one job-posting page into a LagouJobItem.

        Args:
            response: the job-detail page response.

        Returns:
            A populated LagouJobItem.
        """
        item_loader = LagouJobItemLoader(item=LagouJobItem(), response=response)
        item_loader.add_css('title', '.job-name h1::text')
        # BUG FIX: url / url_object_id are literal values, not CSS selectors —
        # add_css() would have treated them as selectors and extracted nothing.
        item_loader.add_value('url', response.url)
        item_loader.add_value('url_object_id', get_md5(response.url))
        item_loader.add_css('salary', '.job_request .salary::text')
        item_loader.add_xpath('job_city', '//*[@class="job_request"]/h3/span[2]/text()')
        item_loader.add_xpath('work_years', '//*[@class="job_request"]/h3/span[3]/text()')
        return item_loader.load_item()
