# -*- coding:utf-8 -*-
"""
业务逻辑：
    1、打开目标网站
    2、构建翻页
    3、每次翻页的时候获取审查元素
    4、获取岗位详情页的地址
    5、打开详情页的获取数据
"""
import time
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


class FindJob(object):
    """Scrape sales-position resume cards from yupao.com with Selenium.

    Workflow:
        1. Open each paginated listing page.
        2. Wait for the resume cards to render, then parse the page with lxml.
        3. Extract each card's detail-page URL.
        4. Open every detail page and print the extracted fields.
    """

    # Listing URL template; keywords=%E9%94%80%E5%94%AE is the URL-encoded "销售" (sales).
    LIST_URL = "https://www.yupao.com/zhaohuo/a373c0/?page={}&keywords=%E9%94%80%E5%94%AE"
    # Class name of one resume card in the listing grid.
    CARD_CLASS = "ResumeCard_resume-card__nQknm"

    def __init__(self):
        # One shared browser session; the explicit wait replaces blind sleeps
        # (poll every 0.5 s, give up after 10 s).
        self.driver = webdriver.Chrome()
        self.wait = WebDriverWait(self.driver, 10, 0.5)

    def get_url_list(self, pages=3):
        """Crawl listing pages 1..pages and scrape every resume card found.

        :param pages: number of listing pages to visit (default 3, matching
            the original hard-coded ``range(1, 4)``).
        """
        for num in range(1, pages + 1):
            self.driver.get(self.LIST_URL.format(num))
            try:
                # Wait until at least one card is rendered instead of a fixed sleep.
                self.wait.until(EC.presence_of_element_located(
                    (By.CLASS_NAME, self.CARD_CLASS)))
            except TimeoutException:
                # No cards appeared in time (empty page or layout change) — skip it.
                continue
            html = etree.HTML(self.driver.page_source)
            card_list = html.xpath(
                '//*[@id="yp-main"]/div[3]/div[2]/div/div/div[1]/div/div[1]'
                '//div[@class="ResumeCard_resume-card__nQknm"]')
            for card in card_list:
                # xpath() returns a list; guard the [0] so one odd card
                # (ad slot, layout change) does not abort the whole crawl.
                href = card.xpath('./div[2]/div[1]/div[2]/div[1]/a/@href')
                if not href:
                    continue
                self.get_detail_index(href[0])
            time.sleep(1)  # polite delay between listing pages

    # Open a detail page and print the scraped fields.
    def get_detail_index(self, detail_url):
        """Open one resume detail page and print name, job types and position.

        :param detail_url: absolute URL of the resume detail page.
        """
        self.driver.get(detail_url)
        # NOTE(review): no explicit wait here — assumes the detail page is
        # server-rendered enough for these nodes to exist; confirm if fields
        # come back empty.
        html = etree.HTML(self.driver.page_source)
        name = html.xpath(
            '//*[@id="yp-main"]/div[3]/div[1]/div/div[1]/div[2]/div/div[1]/div[2]/p[1]/span/text()')
        # "type" shadowed the builtin in the original; renamed to job_type.
        job_type = "".join(html.xpath(
            '//*[@id="yp-main"]/div[3]/div[1]/div/div[1]/div[2]/div/div[3]/div/p//span/text()'))
        position = html.xpath(
            '//*[@id="yp-main"]/div[3]/div[1]/div/div[1]/div[2]/div/div[4]/div/text()')
        # Empty-list guards replace bare [0] indexing that raised IndexError
        # whenever a field was missing.
        print(name[0] if name else "", job_type, position[0] if position else "")



if __name__ == '__main__':
    spider = FindJob()
    try:
        spider.get_url_list()
    finally:
        # Always release the Chrome session — the original leaked the browser
        # process on both normal completion and on any scraping exception.
        spider.driver.quit()























