# -*- coding: utf-8 -*-
import scrapy
import time
import json
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
print('syspath += ' + os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from www_job_com.items import WwwJobComItem
from selenium import webdriver  # drives a real browser to execute the site's JavaScript
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
# API endpoint example: https://we.51job.com/api/job/search-pc?api_key=51job&timestamp=1718105974&keyword=Qt&searchType=2&function=&industry=&jobArea=110300&jobArea2=&landmark=&metro=&salary=&workYear=&degree=&companyType=&companySize=&jobType=&issueDate=&sortType=0&pageNum=2&requestId=8c6f4cdeb1385105b3c84db814bc7595&keywordType=&pageSize=20&source=1&accountId=&pageCode=sou%7Csou%7Csoulb&u_atoken=2411093d2a078aa2e11f4df75d89aeff&u_asession=01IsIw5W-9QS3NOvPfl6e1xUfA4Y8C-LJU-U9CHzLiUOB6TpeLOQWw5EkuO1PWxknVJB-YY_UqRErInTL5mMzm-GyPlBJUEqctiaTooWaXr7I&u_asig=05Ju4pRRMDauGGVqy-c_gQE7FxLMbfJy3d6_JVVRbfidgjAIPcV24TMCbBdIrj3Co6tYmluDj39MSETBif5VN8PvzNFigoJzvVoa7xCkt2V_-u18NE04XeCMJAIByPlmMn3ZYHbhtgBFlMnpbO7EXVKUtfscfboS4VrslPF88R_4fBzhvSc0Kr8URjOX9Xe4tkaFz52ZL0wSSeBMFpVEI75YxuDdwJdvTs3wo1QVyrNHhVQ0CgfAckO5j-QEuxuIrnewtyftWbfuxhWtuQjX92YSbo21IoqKumRO3QXK1wAo8MI7Z-80-lKwjxRoAdqb7gF-n1f-CA6GZoq9ddPTuuQA&u_aref=cD8WFSKdmXy1U7TQlHrzti34Ko0%3D
class Job51Spider(scrapy.Spider):
    """Crawl job listings from we.51job.com through its search-pc JSON API.

    The site is JavaScript-rendered and guarded by a slider captcha, so
    pages are loaded with Selenium; each parsed job record is yielded as a
    WwwJobComItem. Keywords in ``keywords`` are crawled one after another.
    """

    name = 'job51'
    allowed_domains = ['we.51job.com']
    start_urls = ['https://we.51job.com']
    # Search keywords, crawled sequentially.
    keywords = ['Qt', 'c#', 'java', 'python', 'c++']
    nKeywords = 0     # index into `keywords` of the keyword being crawled
    positionUrl = ''  # HTML search-page URL for the current keyword
    ajaxUrl = ''      # JSON API URL for the current keyword/page
    curPage = 0       # 1-based page counter within the current keyword
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.5735.289 Safari/537.36'}

    def __init__(self):
        # BUGFIX: call the scrapy base-class initializer so the spider is
        # wired up correctly (logger, name/start_urls bookkeeping).
        super().__init__()
        self.driver = webdriver.Chrome()  # or any other browser driver
        self.n = 1            # running position_id assigned to emitted items
        self.curPage = 1
        self.page_total = 0   # total pages, parsed from the pagination bar

    def start_requests(self):
        """Start crawling with the search page of the first keyword."""
        return [self.next_request()]

    def parse(self, response):
        """Render the search page, page through the JSON API, yield items.

        For each result page: replace the browser with a stealth Chrome
        instance, fetch the API URL, solve the slider captcha by dragging
        it, then parse the JSON shown in the page's <pre> element.
        """
        self.driver.get(response.url)
        # Wait for the dynamically rendered job list to appear.
        WebDriverWait(self.driver, 20).until(
            EC.presence_of_all_elements_located((By.CLASS_NAME, 'joblist-item'))
        )

        # The last numbered pagination button normally holds the max page.
        pagination_buttons = self.driver.find_elements(By.CLASS_NAME, 'number')
        last_button = pagination_buttons[-1]
        try:
            self.page_total = int(last_button.text)
        except ValueError:
            # Button text was not numeric; keep the previous page_total.
            pass
        print("request -> " + response.url)
        while self.curPage <= self.page_total:
            self.curPage += 1

            # BUGFIX: quit the previous driver before replacing it; the
            # original leaked one Chrome process per result page.
            self.driver.quit()
            options = webdriver.ChromeOptions()
            # Strip the automation fingerprint (navigator.webdriver=false)
            # so the site does not immediately flag us as a bot.
            options.add_argument("--disable-blink-features=AutomationControlled")
            self.driver = webdriver.Chrome(options=options)
            self.ajaxUrl = "https://we.51job.com/api/job/search-pc?api_key=51job&timestamp={}&keyword={}&searchType=2&function=&industry=&jobArea=110300&jobArea2=&landmark=&metro=&salary=&workYear=&degree=&companyType=&companySize=&jobType=&issueDate=&sortType=0&pageNum={}&requestId=8c6f4cdeb1385105b3c84db814bc7595&keywordType=&pageSize=20&source=1&accountId=&pageCode=sou%7Csou%7Csoulb&u_atoken=f989f698e5791bf26a53e97b97b9ad80&u_asession=01wDiKqxrDqN47zQHc34GFfDihHYp0Uj90fZ9qgvCgZXZG9xBO4eLWm28-oGt70qYcJB-YY_UqRErInTL5mMzm-GyPlBJUEqctiaTooWaXr7I&u_asig=05WFvKOct9DmHgJDMC2jUSYGSIgKykdcGi30q2l8RYNk4EDp6wqJZ3m5JpxgJ3RSnXD0wajBuUVeQEtm4k48B_US8_9T9iA06ThB4A7fEtVlkWbKSzO6FHKt-KjhXALsYlvSi_FFTlEitaH09ZFJdzyLx-RqbcxX5IqusbtdtkenjBzhvSc0Kr8URjOX9Xe4tkE7a_ZrK2dO-IukDhoF2_MUCqiQPO6w5jT0KYlPb5crSybbzu_pZbEFahP9ZTU2bz4bmBTXQDjC20ok2Me6zvMvwkMAJhptxDj95BQIouw7wMI7Z-80-lKwjxRoAdqb7gF-n1f-CA6GZoq9ddPTuuQA&u_aref=TqRhZLzAE53txwpx9wrIK44a9xY%3D".format(time.time(), self.keywords[self.nKeywords], self.curPage-1)
            self.driver.get(self.ajaxUrl)
            print('ajaxUrl = ' + self.ajaxUrl)
            time.sleep(5)
            # Solve the slider captcha. Element id and pixel offsets are
            # site-specific; drag in a few human-like steps with pauses.
            slider = self.driver.find_element(By.ID, 'nc_1_n1z')
            actions = ActionChains(self.driver)
            actions.click_and_hold(slider)  # grab the slider
            print("滑动滑块从左到右 start")
            actions.move_by_offset(100, 0).perform()
            time.sleep(0.5)
            actions.move_by_offset(100, 0).perform()
            time.sleep(0.2)
            actions.move_by_offset(52, 0).perform()
            print("滑动滑块从左到右 end")
            time.sleep(5)
            actions.release().perform()  # let go of the slider

            # The raw JSON response is rendered inside a <pre> element.
            json0 = self.driver.find_element(By.TAG_NAME, 'pre').text
            self.save_json_to_file(json0)
            json_obj = json.loads(json0)
            job_list = json_obj['resultbody']['job']['items']
            if (len(job_list) > 1):
                print("51job Nums:" + str(len(job_list)))
                for job in job_list:
                    # (Removed an unreachable header-row check: the
                    # original compared a hard-coded '' against "发布时间".)
                    item = WwwJobComItem()
                    item['position_id'] = self.n  # synthetic running id
                    self.n = self.n + 1
                    item["position_name"] = job['jobName']
                    item["salary"] = job['provideSalaryString']
                    item['city'] = job['jobAreaString']
                    item['work_year'] = job['workYearString']
                    item['education'] = job['degreeString']
                    item['company_name'] = job['fullCompanyName']
                    item['industry_field'] = job['industryType1Str']    # industry
                    item['finance_stage'] = job['companyTypeString']    # funding stage
                    item['company_size'] = job['companySizeString']
                    item['position_lables'] = str(job['jobTags'])       # item key spelled this way in the schema

                    item['time'] = ""
                    item['updated_at'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                    item['platform'] = "51job"
                    # Extra fields: `property` is a JSON string nested in
                    # the job record.
                    proJson = json.loads(job['property'])
                    item['keyword'] = proJson['keyword']   # search keyword
                    item['pagenum'] = proJson['pageNum']   # page number
                    item['x_bd'] = job['lon']              # longitude
                    item['y_bd'] = job['lat']              # latitude
                    item['jobDescribe'] = job['jobDescribe']    # job description
                    item['jobHref'] = job['jobHref']            # job detail link
                    item['companyHref'] = job['companyHref']    # company profile link

                    yield item
        self.curPage = 1  # reset paging before switching to the next keyword
        # BUGFIX: the original tested `nKeywords < len(keywords)` and then
        # incremented, so after the LAST keyword next_request() indexed past
        # the end of `keywords` and raised IndexError. Only advance when
        # another keyword actually remains.
        if self.nKeywords + 1 < len(self.keywords):
            self.nKeywords = self.nKeywords + 1
            yield self.next_request()
        else:
            self.driver.quit()  # all keywords done; release the browser

    def next_request(self):
        """Build the scrapy request for the current keyword's search page."""
        self.positionUrl = "https://we.51job.com/pc/search?jobArea=110300&keyword={}&searchType=2&keywordType=".format(self.keywords[self.nKeywords])
        print("51job page:" + str(self.curPage))
        return scrapy.http.FormRequest(self.positionUrl,
                                       headers=self.headers,
                                       callback=self.parse)

    def save_json_to_file(self, strjson):
        """Dump the raw API response to json_<keyword><page>.txt for debugging.

        BUGFIX: use a context manager and explicit UTF-8 encoding; the
        original relied on the platform default encoding and a manual
        close, which can raise UnicodeEncodeError on non-UTF-8 locales
        and leak the handle on write failure.
        """
        fname = "json_{}{}.txt".format(self.keywords[self.nKeywords], self.curPage - 1)
        with open(fname, "w", encoding="utf-8") as fo:
            fo.write(strjson)