# coding: utf-8
# File: 拉钩_spider_selenium.py (Lagou job-listing spider, Selenium-based)
# Created: 2021/6/27 19:20

from selenium import webdriver
from lxml import etree
import time
import random
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import json

class LagouSpider:
    """Scrape job title / salary pairs from Lagou SEO search results.

    Loads a previously saved cookie file to restore a logged-in session,
    then walks every page of the result list, opening each job's detail
    page in a new tab and collecting title + salary into ``self.positions``.
    """

    # Path to the Edge WebDriver binary.
    # NOTE(review): `executable_path` was removed in Selenium >= 4.10;
    # if the installed selenium is that new, switch to webdriver.EdgeService.
    driver_path = r'D:\Edgedriver\edgedriver_win64\msedgedriver.exe'

    def __init__(self):
        self.driver = webdriver.Edge(executable_path=LagouSpider.driver_path)
        self.url = 'https://www.lagou.com/jobs/list_SEO?labelWords=&fromSearch=true&suginput='
        # Accumulates {'职位': title, '工资': salary} dicts, one per job.
        self.positions = []

    def run(self):
        """Restore cookies, then scrape every result page until the last one."""
        self.driver.get(self.url)
        # Clear whatever cookies the browser session already has before
        # injecting the saved ones.
        self.driver.delete_all_cookies()
        with open(r'D:\python\lagou_cookie.txt', 'r', encoding='utf-8') as f:
            cookie_list = json.load(f)
        for cookie in cookie_list:
            # Not every cookie carries 'expiry', hence dict.get();
            # Selenium rejects float expiries, so truncate to int.
            if isinstance(cookie.get('expiry'), float):
                cookie['expiry'] = int(cookie['expiry'])
            self.driver.add_cookie(cookie)
        # BUG FIX: reload the page after injecting cookies — without this
        # the first page_source is still the logged-out page.
        self.driver.get(self.url)
        time.sleep(5)

        while True:
            print('-------------')
            # Explicit wait: ensure the pager element has rendered BEFORE
            # grabbing page_source (the original captured the source first,
            # risking a half-loaded page).
            WebDriverWait(driver=self.driver, timeout=10).until(
                EC.presence_of_element_located(
                    (By.XPATH, '//div[@class="pager_container"]/span[last()]')
                )
            )
            self.parse_list_page(self.driver.page_source)
            # "Next page" button. find_element(By.XPATH, ...) replaces the
            # deprecated find_element_by_xpath (removed in Selenium 4).
            next_btn = self.driver.find_element(
                By.XPATH, '//div[@class="pager_container"]/span[last()]'
            )
            # Stop when the button is disabled — that is the last page.
            if "pager_next pager_next_disabled" in next_btn.get_attribute('class'):
                break
            next_btn.click()
            time.sleep(1)

    # Extract each detail-page URL from a result-list page.
    def parse_list_page(self, source):
        """Parse one list page and visit every job link found on it."""
        html = etree.HTML(source)
        links = html.xpath('//a[@class="position_link"]/@href')
        for link in links:
            self.request_detail_page(link)
            # Random 1-3s pause between detail requests to look less bot-like.
            time.sleep(random.randint(1, 3))

    # Visit one detail page in a new tab.
    def request_detail_page(self, url):
        """Open *url* in a new tab, scrape it, then return to the list tab."""
        self.driver.execute_script(f"window.open('{url}')")
        # switch_to.window replaces the removed switch_to_window (Selenium 4).
        self.driver.switch_to.window(self.driver.window_handles[1])
        try:
            self.parse_detail_page(self.driver.page_source)
        finally:
            # Always close the detail tab and return to the list tab, even
            # if parsing raised — otherwise tabs leak and window_handles[1]
            # points at the wrong tab on the next call.
            self.driver.close()
            self.driver.switch_to.window(self.driver.window_handles[0])

    # Parse one detail page.
    def parse_detail_page(self, source):
        """Pull the job title and salary out of a detail page's HTML."""
        html = etree.HTML(source)
        titles = html.xpath('//span[@class="ceil-job"]/text()')
        salaries = html.xpath('//span[@class="ceil-salary"]/text()')
        # Guard against layout changes or half-loaded pages instead of
        # raising a bare IndexError and killing the whole crawl.
        if not titles or not salaries:
            print('detail page missing title/salary, skipped')
            return
        position = {
            '职位': titles[0],
            '工资': salaries[0],
        }
        self.positions.append(position)
        print(position)
        print('=' * 40)





if __name__ == '__main__':
    # Script entry point: build the spider and start crawling.
    LagouSpider().run()

