import time
from urllib import parse
import scrapy
import re
from scrapy.http import request
from scrapy.utils.trackref import NoneType
from selenium import webdriver
import requests
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# from urllib.request import quote, unquote
from selenium.webdriver.common.keys import Keys

class JobsSpider(scrapy.Spider):
    """Scrape software-engineer job listings in Michigan from indeed.com.

    A Selenium-driven Chrome instance is created in ``start_requests`` and
    configured to mask common automation fingerprints; each parsed job card
    is appended as one comma-separated row to a local CSV file.
    """

    name = 'jobs'
    allowed_domains = ['www.indeed.com']
    start_urls = ['https://www.indeed.com/jobs?q=software+engineer&l=Michigan&sort=date']

    def start_requests(self):
        """Create and configure the Chrome driver, then defer to the
        default Scrapy request flow for ``start_urls``."""
        print("init---------------")
        opt = Options()
        opt.add_argument('--no-sandbox')
        opt.add_argument('--disable-gpu')
        # Hide the "Chrome is being controlled by automated software" banner.
        opt.add_experimental_option('excludeSwitches', ['enable-automation'])
        # Fixed: original passed the non-existent flag '--disable-dev-usage'.
        opt.add_argument("--disable-dev-shm-usage")
        # Change the page-load strategy: driver calls return without waiting
        # for the full page load.
        desired_capabilities = DesiredCapabilities.CHROME
        desired_capabilities["pageLoadStrategy"] = "none"

        # opt.add_argument('--headless')
        self.chrome_driver = 'C:/Users/dd/Desktop/chromedriver.exe'
        # Fixed: desired_capabilities was built but never handed to Chrome,
        # so the page-load strategy never took effect.
        self.browser = webdriver.Chrome(
            self.chrome_driver,
            options=opt,
            desired_capabilities=desired_capabilities,
        )
        # Mask navigator.webdriver before any page script can read it.
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
            Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
            })
        """
        })
        self.browser.set_window_size(1920, 900)
        self.browser.set_page_load_timeout(30)
        return super().start_requests()

    def closed(self, reason):
        """Scrapy shutdown hook: quit the Chrome driver.

        Fixes a resource leak — the original never closed the browser, so a
        chromedriver/Chrome process pair outlived every crawl.
        """
        browser = getattr(self, 'browser', None)
        if browser is not None:
            browser.quit()

    @staticmethod
    def _clean(value, comma_repl=""):
        """Normalize one scraped field for CSV output: replace commas with
        *comma_repl* and strip surrounding whitespace."""
        return value.replace(",", comma_repl).strip()

    def parse(self, response):
        """Extract one CSV row per job card, then follow the next-page link.

        Missing fields default to the literal string "None" (kept from the
        original implementation so existing CSV consumers see the same data).
        """
        # Shared xpath prefixes; concatenation reproduces the original
        # selectors byte-for-byte.
        main = './/table[@class="jobCard_mainContent"]'
        loc = main + '//div[has-class("company_location")]'
        rows = []
        for card in response.xpath('//*[@id="mosaic-provider-jobcards"]/a'):
            item = {
                "job_title": self._clean(card.xpath(main + '//h2[has-class("jobTitle")]/span/text()').get("None")),
                "com_name": self._clean(card.xpath(loc + '/pre/span[1]/a/text()').get("None")),
                "job_location": self._clean(card.xpath(loc + '/pre/div[@class="companyLocation"]/text()').get("None")),
                "salary": self._clean(card.xpath(".//span[@class='salary-snippet']/text()").get("None")),
                # Description replaces commas with spaces (as before) so the
                # free-text snippet cannot split the CSV row.
                "desc": self._clean(card.xpath('.//div[@class="job-snippet"]/ul/li/text()').get("None"), " "),
                "date_of_posting": self._clean(card.xpath(".//span[@class='date']/text()").get("None")),
                "rating": self._clean(card.xpath(loc + '/pre//span[@class="ratingNumber"]/span/text()').get("None")),
            }
            print(item)
            rows.append(",".join(item.values()) + "\n")
        # Fixed: open the output file once per page instead of once per card.
        if rows:
            with open("./ama/csvhandle/jobs.csv", "a", encoding="utf-8") as f:
                f.writelines(rows)
        # Follow pagination; query the next-page href only once (the original
        # evaluated the same xpath twice).
        next_href = response.xpath('//*[@id="resultsCol"]/nav/div/ul/li[6]/a/@href').get()
        if next_href:
            # urljoin avoids the double slash produced by the old
            # "https://www.indeed.com/" + href concatenation.
            yield scrapy.Request(response.urljoin(next_href), callback=self.parse)

