from selenium import webdriver
from lxml import etree
import re
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# req_url = 'https://www.baidu.com'
# driver = webdriver.Chrome(executable_path=driver_path)
# driver.get(req_url)

class LagouSpider():
  """Scrape python job postings from lagou.com with a Chrome webdriver.

  ``run()`` walks the listing pages, opens each job's detail page in a
  new tab, and accumulates parsed job dicts in ``self.positions``.
  """
  # Path to the local chromedriver binary.
  driver_path = r"D:\chromedriver\chromedriver.exe"

  def __init__(self):
    self.driver = webdriver.Chrome(executable_path=LagouSpider.driver_path)
    # Listing page: python jobs, city id 2, first result page.
    self.url = 'https://www.lagou.com/jobs/list_python/p-city_2?&cl=false&fromSearch=true&labelWords=&suginput='
    self.positions = []  # job dicts appended by parse_detail_page

  def run(self):
    """Open the listing page and crawl result pages.

    Crawling is capped at 3 pages (``pn > 3``) — presumably to stay
    under the site's anti-scraping limits.
    """
    self.driver.get(self.url)
    # The interstitial banner does not always appear; use find_elements
    # so a missing banner doesn't raise NoSuchElementException.
    banner = self.driver.find_elements(By.XPATH, "//div[@class='body-btn']")
    if banner:
      banner[0].click()
    pn = 0
    while True:
      pn += 1
      source = self.driver.page_source
      self.parse_list_page(source)
      # Wait for the pagination control to render before touching it.
      WebDriverWait(driver=self.driver, timeout=10).until(
        EC.presence_of_element_located((By.XPATH, "//span[@action='next']"))
      )
      next_btn = self.driver.find_element(By.XPATH, "//span[@action='next']")
      if pn > 3:
        print('end')
        break
      print(next_btn.text)
      next_btn.click()
      time.sleep(2)  # throttle between page loads

  def parse_list_page(self, source):
    """Extract every detail-page link from a listing page and visit it.

    NOTE: a leftover debugging ``break`` previously stopped after the
    first link on each page; removed so all links are followed.
    """
    html = etree.HTML(source)
    links = html.xpath("//a[@class='position_link']/@href")
    for link in links:
      self.request_detail_page(link)
      time.sleep(2)  # throttle between detail requests

  def request_detail_page(self, url):
    """Open *url* in a new tab, parse it, then return to the list tab."""
    self.driver.execute_script("window.open('%s')" % url)
    # switch_to.window replaces the deprecated switch_to_window helper.
    self.driver.switch_to.window(self.driver.window_handles[1])
    source = self.driver.page_source
    self.parse_detail_page(source)
    self.driver.close()
    self.driver.switch_to.window(self.driver.window_handles[0])

  def parse_detail_page(self, source):
    """Parse one job-detail page and append the result to self.positions."""
    html = etree.HTML(source)
    position_name = html.xpath("//span[@class='position-head-wrap-name']/text()")[0]
    job_request_spans = html.xpath("//dd[@class='job_request']//span")
    salary = job_request_spans[0].xpath(".//text()")[0].strip()
    # City / experience / education arrive wrapped in "/" separators and
    # whitespace (e.g. "北京 /"); strip both.
    city = re.sub(r"[\s/]", "", job_request_spans[1].xpath(".//text()")[0].strip())
    work_years = re.sub(r"[\s/]", "", job_request_spans[2].xpath(".//text()")[0].strip())
    education = re.sub(r"[\s/]", "", job_request_spans[3].xpath(".//text()")[0].strip())
    desc = "".join(html.xpath("//dd[@class='job_bt']//text()")).strip()
    position = {
      'name': position_name,
      'salary': salary,
      'city': city,
      'work_years': work_years,
      'education': education,
      'desc': desc,  # was computed but dropped from the dict before
    }
    self.positions.append(position)
    print(position)
    print("=" * 40)
    
def main():
  """Entry point: build the spider and start crawling."""
  LagouSpider().run()

if __name__ == '__main__':
  main()
