import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Scrape used-Tesla listings (name, purchase date, price) from che168.com,
# walking every page of the paginated result list.
url = "https://www.che168.com/china/tesla/#pvareaid=2042128"
driver = webdriver.Chrome()

try:
    driver.get(url)
    wait = WebDriverWait(driver, 10)

    # Wait for the listing <ul> to be present before touching the DOM.
    wait.until(EC.presence_of_element_located((By.XPATH, '//ul[@class="viewlist-ul"]')))

    # Total page count comes from pager text shaped like "1/12".
    # NOTE: Selenium 4 removed the find_element_by_* helpers; use
    # find_element(By...., ...) instead (the By import above is the 4.x style).
    total_pages = int(
        driver.find_element(By.CLASS_NAME, 'page-item-info').text.split('/')[1]
    )

    for page in range(1, total_pages + 1):
        print(f"Scraping page {page}...")
        if page != 1:
            # Click the pager link for this page number; scroll it into view
            # first so the click is not intercepted by fixed overlays.
            page_link = driver.find_element(
                By.XPATH, f'//li[@class="page-item"]/a[text()="{page}"]'
            )
            driver.execute_script("arguments[0].scrollIntoView();", page_link)
            page_link.click()
            time.sleep(2)  # crude settle time for the new page to render

        # Collect every listing card on the current page.
        car_list = driver.find_elements(By.XPATH, '//ul[@class="viewlist-ul"]/li')
        print(f"page {page} car list length: {len(car_list)}")
        for car in car_list:
            try:
                name = car.find_element(By.XPATH, './/h2/a').text
                buy_time = car.find_element(
                    By.XPATH, './/p[contains(text(), "购买时间")]/span'
                ).text
                price = car.find_element(By.XPATH, './/p[@class="priType-s"]/b[1]')
                print(name, buy_time, price.text)
            except NoSuchElementException:
                # Ad / placeholder <li> entries lack these fields; skip them
                # instead of aborting the whole scrape.
                continue
finally:
    # Always release the browser, even if the scrape fails midway.
    driver.quit()
