# -*- coding: utf-8 -*-
import scrapy
import time
from www_job_com.items import WwwJobComItem
from selenium import webdriver#支持网页脚本
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
#接口：https://we.51job.com/api/job/search-pc?api_key=51job&timestamp=1718105974&keyword=Qt&searchType=2&function=&industry=&jobArea=110300&jobArea2=&landmark=&metro=&salary=&workYear=&degree=&companyType=&companySize=&jobType=&issueDate=&sortType=0&pageNum=2&requestId=8c6f4cdeb1385105b3c84db814bc7595&keywordType=&pageSize=20&source=1&accountId=&pageCode=sou%7Csou%7Csoulb&u_atoken=2411093d2a078aa2e11f4df75d89aeff&u_asession=01IsIw5W-9QS3NOvPfl6e1xUfA4Y8C-LJU-U9CHzLiUOB6TpeLOQWw5EkuO1PWxknVJB-YY_UqRErInTL5mMzm-GyPlBJUEqctiaTooWaXr7I&u_asig=05Ju4pRRMDauGGVqy-c_gQE7FxLMbfJy3d6_JVVRbfidgjAIPcV24TMCbBdIrj3Co6tYmluDj39MSETBif5VN8PvzNFigoJzvVoa7xCkt2V_-u18NE04XeCMJAIByPlmMn3ZYHbhtgBFlMnpbO7EXVKUtfscfboS4VrslPF88R_4fBzhvSc0Kr8URjOX9Xe4tkaFz52ZL0wSSeBMFpVEI75YxuDdwJdvTs3wo1QVyrNHhVQ0CgfAckO5j-QEuxuIrnewtyftWbfuxhWtuQjX92YSbo21IoqKumRO3QXK1wAo8MI7Z-80-lKwjxRoAdqb7gF-n1f-CA6GZoq9ddPTuuQA&u_aref=cD8WFSKdmXy1U7TQlHrzti34Ko0%3D
class Job51Spider(scrapy.Spider):
    """Spider that scrapes 51job search results for a list of keywords.

    The result list at we.51job.com is rendered client-side, so a Selenium
    Chrome driver loads each page and pagination is driven through the
    on-page "jump to page" widget.  For every job card an item is yielded;
    when all pages for one keyword are done, a request for the next
    keyword is queued.
    """
    name = 'job51'
    allowed_domains = ['we.51job.com']
    start_urls = ['https://we.51job.com']
    # Keywords are crawled in order; the first one is embedded in the search
    # URL, later ones are typed into the on-page search box.
    keywords = ['Qt', 'c++', 'c#', 'java', 'python']
    nKeywords = 0      # index of the NEXT keyword to request
    positionUrl = ''   # URL of the most recent search request
    curPage = 0        # 1-based number of the page currently being scraped
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.5735.289 Safari/537.36'}

    def __init__(self):
        # A real browser is needed because the job list is built by JS.
        self.driver = webdriver.Chrome()
        self.n = 1           # running counter used as position_id across all items
        self.curPage = 1
        self.page_total = 0  # total page count read from the pagination widget

    def start_requests(self):
        # next_request() returns None once every keyword has been used.
        first = self.next_request()
        return [first] if first is not None else []

    def closed(self, reason):
        """Scrapy shutdown hook: quit Chrome so no browser process leaks."""
        self.driver.quit()

    def parse(self, response):
        """Scrape every result page for the current keyword, yield items,
        then queue the request for the next keyword (if any)."""
        self.driver.get(response.url)
        # Wait until the JS-rendered job cards exist.
        WebDriverWait(self.driver, 20).until(
            EC.presence_of_all_elements_located((By.CLASS_NAME, 'joblist-item'))
        )
        if 1 < self.nKeywords <= len(self.keywords):
            # For every keyword after the first, type it into the search box
            # and re-run the search (the first keyword is already in the URL).
            keyword_input = self.driver.find_element(By.ID, 'keywordInput')
            keyword_input.send_keys(self.keywords[self.nKeywords - 1])
            self.driver.find_element(By.ID, 'search_btn').click()
            time.sleep(6)  # let the new result list render

        # The last numbered pagination button carries the total page count.
        # Reset to 1 first: with no pagination there is a single page, and a
        # stale total from the previous keyword must not leak through.
        self.page_total = 1
        pagination_buttons = self.driver.find_elements(By.CLASS_NAME, 'number')
        if pagination_buttons:
            try:
                self.page_total = int(pagination_buttons[-1].text)
            except ValueError:
                pass  # non-numeric button text: fall back to a single page

        print("request -> " + response.url)
        self.curPage = 1
        while self.curPage <= self.page_total:
            job_list = self.driver.find_elements(By.CLASS_NAME, 'joblist-item')
            if len(job_list) > 1:
                print("51job Nums:" + str(len(job_list)))
                for job in job_list:
                    item = WwwJobComItem()
                    item['position_id'] = self.n  # synthetic id: running counter
                    self.n += 1
                    item['position_name'] = job.find_element(By.CLASS_NAME, 'jname').text
                    item['salary'] = job.find_element(By.CLASS_NAME, 'sal').text
                    item['avg_salary'] = ''
                    item['city'] = job.find_element(By.CLASS_NAME, 'area').text
                    item['work_year'] = ""
                    item['education'] = ""
                    item['company_name'] = job.find_element(By.CLASS_NAME, 'cname').text
                    # The 'dc' spans hold [industry, funding stage, company size?];
                    # fetch the list once instead of three driver round-trips.
                    dc_fields = job.find_elements(By.CLASS_NAME, 'dc')
                    item['industry_field'] = dc_fields[0].text
                    item['finance_stage'] = dc_fields[1].text
                    item['company_size'] = dc_fields[2].text if len(dc_fields) > 2 else ''
                    tag_blocks = job.find_elements(By.CLASS_NAME, 'tags')
                    item['position_lables'] = (
                        tag_blocks[0].text.replace('\n', ',') if tag_blocks else ''
                    )
                    item['time'] = ""
                    item['updated_at'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                    item['platform'] = "51job"
                    # Custom extension fields.
                    item['keyword'] = self.keywords[self.nKeywords - 1]  # search keyword
                    item['pagenum'] = str(self.curPage)  # page number
                    item['x_bd'] = ''           # longitude
                    item['y_bd'] = ''           # latitude
                    item['jobDescribe'] = ''    # job description
                    item['jobHref'] = ''        # job detail link
                    item['companyHref'] = ''    # company profile link
                    yield item
                if self.curPage < self.page_total:
                    # Jump to the NEXT page.  (The previous code sent the
                    # current page number, which re-scraped page 1 and never
                    # reached the last page.)
                    page_input = self.driver.find_element(By.ID, 'jump_page')
                    page_input.clear()
                    page_input.send_keys(str(self.curPage + 1))
                    self.driver.find_element(By.CLASS_NAME, 'jumpPage').click()
                    time.sleep(6)  # let the next page render
            self.curPage += 1

        # Queue the next keyword; next_request() is None when all are done,
        # and yielding None from a callback is not useful.
        next_req = self.next_request()
        if next_req is not None:
            yield next_req

    def next_request(self):
        """Build the search request for the next keyword.

        Returns:
            scrapy.http.FormRequest for the next keyword, or None when every
            keyword in ``self.keywords`` has already been requested.
        """
        if self.nKeywords >= len(self.keywords):
            return None
        keyword = self.keywords[self.nKeywords]
        print("51job keyword : " + keyword)
        self.positionUrl = (
            "https://we.51job.com/pc/search?jobArea=110300"
            "&keyword={}&searchType=2&keywordType="
        ).format(keyword)
        self.nKeywords += 1
        time.sleep(10)  # throttle between keyword searches
        return scrapy.http.FormRequest(self.positionUrl,
                                       headers=self.headers,
                                       callback=self.parse)
