import time
import scrapy
from BossZP.items import zhipinModel
from scrapy import Selector
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

class ZhipinComSpider(scrapy.Spider):
    """Crawl job listings from zhipin.com (BOSS直聘) for one city/position.

    Pagination is driven by :meth:`next_request`.  When the site serves its
    anti-bot "loading" interstitial, fresh cookies are harvested with a
    headless Chrome session (:meth:`Selenium_Cookies`) and the page Selenium
    saved is re-parsed.  Yields :class:`zhipinModel` items.
    """

    name = 'zhipin.com'
    allowed_domains = ['zhipin.com']
    startPage = 1                       # first results page to crawl
    start_urls = [
        "https://www.zhipin.com/c101210100-p100901/?ka=sel-city-101210100"
    ]
    positionURL = start_urls[0][0:42]   # base URL (city + position); pagination params are appended
    cookies = ''                        # refreshed by Selenium_Cookies() when blocked
    curPage = startPage                 # next page number to request
    next_URL = start_urls[0]            # most recently built request URL (for logging/Selenium)

    USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
    Chrome_Agent = 'user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'

    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'User-Agent': USER_AGENT,
        'Referer': 'https://www.zhipin.com/',
        "cookie": cookies
    }

    custom_settings = {
        "DEFAULT_REQUEST_HEADERS": header
    }

    # Headless Chrome configuration for the cookie-refresh fallback.
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip images for speed
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument(Chrome_Agent)

    def start_requests(self):
        """Start crawling with the first paginated request (page ``startPage``)."""
        return [self.next_request()]

    def parse(self, response):
        """Parse one listing page, yield one item per job, then queue the next page."""
        # Snapshot the raw page for offline debugging.
        with open("zhipin.html", "w", encoding="utf-8") as f:
            f.write(response.text)

        print("\nrequest ---------------> \n" + response.url)
        res = Selector(text=response.text)

        # Anti-bot interstitial: refresh cookies via headless Chrome and
        # re-parse the page Selenium saved to disk.
        status_continue = res.xpath("//p[@class='gray']/text()").extract_first()
        print('*' * 100)
        print(status_continue)
        if status_continue == "正在加载中...":
            time.sleep(90)
            self.cookies = self.Selenium_Cookies()
            # BUGFIX: the class-level header dict captured the old (empty)
            # cookie string; propagate the fresh cookies to later requests.
            self.header["cookie"] = self.cookies
            print("暂停30秒")
            time.sleep(30)
            with open('selenium.html', encoding="utf-8") as f:
                body = f.read()
            # Parse the Selenium-rendered page with scrapy's own Selector.
            res = Selector(text=body)

        # BUGFIX: xpath() returns a SelectorList, which never equals a string;
        # extract the text before comparing, otherwise the ban check is dead code.
        check = res.xpath("//h3[@class='gray']/text()").extract_first()
        if check == "当前IP地址可能存在异常访问行为，完成验证后即可正常使用":
            self.crawler.engine.close_spider(self, "IP地址被封禁")

        jobs = res.xpath("//div[@class='job-primary']")
        # BUGFIX: str(jobs) is never None; an empty SelectorList means no results.
        if not jobs:
            self.crawler.engine.close_spider(self, "已爬取全部页面")

        for job in jobs:
            item = zhipinModel()
            item['job_id'] = job.xpath(".//div[@class='primary-box']/@data-jobid").extract_first()
            item['job_name'] = job.xpath(".//span[@class='job-name']/a/text()").extract_first()
            item['job_href'] = job.xpath(".//div[@class='primary-box']/@href").extract_first()
            item['job_area'] = job.xpath(".//span[@class='job-area']/text()").extract_first()
            item['salary'] = job.xpath(".//span[@class='red']/text()").extract_first()

            # The limit block holds up to three fields: shift, experience, education.
            # Guard against short lists instead of raising IndexError.
            job_limit = job.xpath(".//div[@class='job-limit clearfix']/p/text()").extract()
            if len(job_limit) >= 3:
                item['work_daytime'], item['work_year'], item['edu'] = job_limit[:3]
            elif len(job_limit) == 2:
                item['work_daytime'] = "None"
                item['work_year'], item['edu'] = job_limit
            else:
                item['work_daytime'] = "None"
                item['work_year'] = job_limit[0] if job_limit else "None"
                item['edu'] = "None"

            item['company_name'] = job.xpath(".//div[@class='info-company']//h3[@class='name']/a/text()").extract_first()
            item['industry_field'] = job.xpath(".//div[@class='info-company']//a[@class='false-link']/text()").extract_first()
            # Evaluate the company-info xpath once; guard missing entries.
            company_info = job.xpath(".//div[@class='info-company']//p/text()").extract()
            item['finance_stage'] = company_info[0] if len(company_info) > 0 else "None"
            item['company_size'] = company_info[1] if len(company_info) > 1 else "None"

            # '/'-prefixed concatenation of every tag (e.g. "/python/爬虫").
            labels = job.xpath(".//div[@class='tags']//span/text()").extract()
            item['job_labels'] = ''.join('/' + str(label) for label in labels)

            welfares = job.xpath(".//div[@class='info-desc']/text()").extract_first()
            item['welfare'] = welfares.replace("，", "/") if welfares is not None else 'None'

            item['updated_at'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            item['platform'] = "zhipin"
            item['city'] = "杭州"

            yield item

        if self.curPage < 1000:
            yield self.next_request()

    def next_request(self):
        """Build the request for the current page, then advance the page counter.

        BUGFIX: the original incremented ``curPage`` before building the URL,
        so page ``startPage`` was never actually requested (crawl began at
        page 2).  Build first, increment after.
        """
        print("*" * 100)
        print("zhipin page:" + str(self.curPage))
        self.next_URL = self.positionURL + ("?page=%d&ka=page-%d" % (self.curPage, self.curPage))
        self.curPage += 1
        print("(60s)访问冷却中······")
        time.sleep(60)  # crude rate limit to avoid the anti-bot wall
        return scrapy.Request(self.next_URL, headers=self.header, callback=self.parse)

    def Selenium_Cookies(self):
        """Load ``next_URL`` in headless Chrome and return its cookies.

        Side effect: saves the rendered page to ``selenium.html`` so the
        caller can re-parse it.  Returns the cookies serialized as a single
        ``name=value; `` HTTP header string (in reversed collection order).
        """
        print(self.next_URL)
        print('>>>>>---Selenium----------')
        web = webdriver.Chrome('/usr/local/bin/chromedriver', options=self.chrome_options)
        try:
            web.get(self.next_URL)
            print("(20s)页面加载中······")
            time.sleep(20)  # give the JS challenge time to complete
            cookie_str = ""
            for cookie in reversed(web.get_cookies()):
                cookie_str += cookie.get('name') + "=" + cookie.get('value') + "; "
            with open("selenium.html", "w", encoding="utf-8") as f:
                f.write(web.page_source)
            print(cookie_str)
            print("Selenium get Cookies Done!")
            print('---------NEXT-------->>>>>')
        finally:
            # Always release the browser, even if the page load fails.
            web.quit()
        return cookie_str



