import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.spiders import CrawlSpider, Rule
from selenium import webdriver
import pickle, time, os
from items import LagouJobItem, LagouJobItemLoader
from utils.connon import get_md5
from datetime import datetime


class LagouSpider(CrawlSpider):
    """Crawl lagou.com job listings and parse each job-detail page into a LagouJobItem.

    Link-following rules walk category ("zhaopin/...") and company ("gongsi/v1/j/...")
    index pages, and dispatch job-detail pages ("jobs/<id>.html") to ``parse_job``.
    """

    name = 'lagou'
    allowed_domains = ['www.lagou.com']
    start_urls = ['https://www.lagou.com/']

    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
    }

    custom_settings = {
        # Cookies are sent via the hard-coded header below, so Scrapy's own
        # cookie middleware is disabled to avoid the two fighting each other.
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 10,  # throttle requests to reduce the chance of an anti-crawler ban
        'DEFAULT_REQUEST_HEADERS': {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Connection': 'keep-alive',
            # NOTE(review): this is a captured login session cookie; it will
            # expire and must be refreshed before running the spider.
            # BUG FIX: the original literal was split across two physical lines
            # (a SyntaxError). Adjacent string literals are concatenated by the
            # parser, preserving the exact same header value on one logical line.
            'Cookie': 'RECOMMEND_TIP=true; user_trace_token=20200625224111-606d3a94-1861-46de-927b-40b89e624701; LGUID=20200625224111-74b17c1b-2ca8-4291-ad58-ca176140ff99; _ga=GA1.2.1856150522.1593096072; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1611891511; _gid=GA1.2.406314849.1611911126; LG_HAS_LOGIN=1; JSESSIONID=ABAAAECABIEACCA21DA80CFF07CC921F2105F14815AC56B; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; privacyPolicyPopup=false; index_location_city=%E6%B7%B1%E5%9C%B3; WEBTJ-ID=20210129%E4%B8%8B%E5%8D%885:06:17170617-1774d64a4081c9-0c1a71dcfe0bf8-13e3563-2073600-1774d64a409d06; sensorsdata2015session=%7B%7D; gate_login_token=04f27fe47715b2d2795c83245f0dabbc8bb50d58c67edf8fe2a2e9b2cad2983a; LG_LOGIN_USER_ID=831026493b350208600e733f62c401cb799fd764a5dbcd8bee2e1f0ec9d3f6ad; _putrc=9BC5133834F575FC123F89F2B170EADC; login=true; unick=%E4%BF%A1; TG-TRACK-CODE=index_search; X_MIDDLE_TOKEN=d7eb44c8f9b7fb08802e9c65039dca26; __lg_stoken__=e63cdf8c453eb8ad96681f1b3e492592041d35544bb15e4fdab8e752087e1fa8e2278a8cb7a6d77ebc3d6011b4f3ca4844a81b6b7967d0db7b3441a423f460227c576d65a1bf; PRE_UTM=; PRE_HOST=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fshenzhen-zhaopin%2Fyunweijingli%2F; LGSID=20210130113531-1c05a142-ca1d-4fa5-9be5-96b9d86e4075; PRE_SITE=https%3A%2F%2Fwww.lagou.com; _gat=1; SEARCH_ID=2752aa8e860046c7ac5659317fe63c22; X_HTTP_TOKEN=5f86626580fbf7519328791161cc3b1c566da3b8a3; '
                      'sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2216149355%22%2C%22first_id%22%3A%22172ebec7fc38c8-0d29aa8c5fecce-4353760-1327104-172ebec7fc489e%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24os%22%3A%22Windows%22%2C%22%24browser%22%3A%22Chrome%22%2C%22%24browser_version%22%3A%2288.0.4324.104%22%2C%22lagou_company_id%22%3A%22%22%7D%2C%22%24device_id%22%3A%22172ebec7fc38c8-0d29aa8c5fecce-4353760-1327104-172ebec7fc489e%22%7D; LGRID=20210130114359-95f8a88d-92c9-4724-a3c8-866a4ebec847; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1611978240',
            'Host': 'www.lagou.com',
            'Origin': 'https://www.lagou.com',
            'Referer': 'https://www.lagou.com/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
        }
    }

    rules = (
        # Follow category and company index pages without parsing them.
        Rule(LinkExtractor(allow=("zhaopin/.*",)), follow=True),
        Rule(LinkExtractor(allow=("gongsi/v1/j/.*",)), follow=True),
        # BUG FIX: the dot before "html" was unescaped and matched ANY
        # character; \. restricts the pattern to literal "jobs/<digits>.html".
        Rule(LinkExtractor(allow=r'jobs/\d+\.html'), callback='parse_job', follow=True),
    )

    # NOTE(review): a commented-out selenium login flow (start_requests) was
    # removed here — it contained a plaintext phone number and password.
    # Credentials must never be committed to source; if the selenium login is
    # needed again, load the credentials from environment variables or a
    # config file excluded from version control.

    def parse_job(self, response):
        """Extract one job posting from a job-detail page.

        :param response: Scrapy response for a ``jobs/<id>.html`` page.
        :return: a populated ``LagouJobItem``.
        """
        item_loader = LagouJobItemLoader(item=LagouJobItem(), response=response)
        item_loader.add_xpath("title", '//span[@class="position-head-wrap-name"]/text()')
        item_loader.add_value("url", response.url)
        # md5 of the URL serves as a stable primary key for de-duplication.
        item_loader.add_value("url_object_id", get_md5(response.url))
        item_loader.add_xpath("salary", '//dd[@class="job_request"]/h3/span[1]/text()')
        item_loader.add_xpath("job_city", '//dd[@class="job_request"]/h3/span[2]/text()')
        item_loader.add_xpath("work_year", '//dd[@class="job_request"]/h3/span[3]/text()')
        item_loader.add_xpath("degree_need", '//dd[@class="job_request"]/h3/span[4]/text()')
        item_loader.add_xpath("job_type", '//dd[@class="job_request"]/h3/span[5]/text()')
        item_loader.add_xpath("tags", '//ul[@class="position-label clearfix"]/li/text()')
        item_loader.add_xpath("publish_time", '//p[@class="publish_time"]/text()')
        item_loader.add_xpath("job_advantage", '//dd[@class="job-advantage"]/p/text()')
        item_loader.add_xpath("job_desc", '//div[@class="job-detail"]')
        item_loader.add_xpath("job_addr", '//div[@class="work_addr"]')
        item_loader.add_xpath("company_name", '//img[@class="b2"]/@alt')
        item_loader.add_xpath("company_url", '//dl[@class="job_company"]/dt/a/@href')
        item_loader.add_value("crawl_time", datetime.now())
        return item_loader.load_item()
