# # -*- coding: utf-8 -*-

# from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC

# from .base_crawler import BaseCrawler


# class XiaohongshuCrawler(BaseCrawler):
#     def __init__(self):
#         # 设置 Chrome 选项
#         chrome_options = Options()
#         # chrome_options.add_argument('--no-sandbox')
#         # chrome_options.add_argument('--disable-dev-shm-usage')
#         # chrome_options.add_argument('--headless')  # 如果需要无头模式
#         # chrome_options.add_argument('--disable-gpu')

#         # 使用 Remote WebDriver 连接到 Zalenium
#         self.driver = webdriver.Remote(
#             command_executor='http://192.168.222.129:4444',
#             options=chrome_options
#         )

#     def fetch_data(self):
#         try:
#             # 打开指定页面
#             url = 'https://car.autohome.com.cn/searchcar#pvareaid=6863968'
#             self.driver.get(url)

#             # 等待页面加载完成
#             wait = WebDriverWait(self.driver, 20)
#             wait.until(EC.presence_of_element_located((By.TAG_NAME, 'body')))

#             # 获取页面源代码
#             page_source = self.driver.page_source

#             # 打印页面的前几个字符
#             print(page_source[:100])  # 打印前100个字符

#             wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.findcar-list__content')))

#             car_items = self.driver.find_elements(By.CSS_SELECTOR, '.findcar-list__content .item')
#             # 定义要等待的元素的选择器
#             # 设置显式等待
#             self._extract_car_data(car_items)

#             # 等待分页元素加载完成
#             wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.findcar-page__num a')))

#             # 获取分页链接
#             page_links = self.driver.find_elements(By.CSS_SELECTOR, '.findcar-page__num a')

#             # 打印分页链接的数量
#             print(f"找到的分页链接数量: {len(page_links)}")

#             # 点击分页链接并获取内容
#             for link in page_links:
#                 link.click()
#                 wait.until(lambda driver: driver.execute_script('return document.readyState') == 'complete')
#                 wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.findcar-list__content .item')))

#                 # 获取目标元素
#                 car_items = self.driver.find_elements(By.CSS_SELECTOR, '.findcar-list__content .item')

#                 # 打印目标元素的数量
#                 print(f"当前页找到的元素数量: {len(car_items)}")

#                 self._extract_car_data(car_items)
#         finally:
#             # 关闭 WebDriver
#             self.driver.quit()

#     def _extract_car_data(self, car_items):
#         # 查找所有汽车条目
#         for item in car_items:
#             try:
#                 # 提取车名
#                 car_name = item.find_element(By.CSS_SELECTOR, 'h3').text

#                 # 提取价格范围
#                 price_range = item.find_element(By.CSS_SELECTOR, '.price').text

#                 # 提取车型及其指导价
#                 model_prices = item.find_elements(By.CSS_SELECTOR, 'dl dd a')
#                 print(f'Model Prices: {model_prices}')  # 打印 model_prices 的值
#                 # 提取车型名称和价格
#                 for model_price in model_prices:
#                     try:
#                         # 尝试获取 span 元素的文本
#                         model_span = model_price.find_element(By.CSS_SELECTOR, 'span')
#                         model_span2 = model_price.find_element(By.CSS_SELECTOR, 'span:nth-child(2)')
#                         model_text = model_span.get_attribute('textContent')
#                         model_text2 = model_span2.get_attribute('textContent')
#                         print(f'Model Span: {model_text}, Model Text: {model_text2}')  # 打印 model_span 和 model_text 的值
#                     except Exception as e:
#                         print(f'Error finding span within model_price: {e}')
#                 # 打印结果
#                 print(f'Car Name: {car_name}')
#                 print(f'Price Range: {price_range}')
#                 print('-' * 40)  # 分隔线
#             except Exception as e:
#                 print(f'Error extracting data from an item: {e}')


# if __name__ == '__main__':
#     extractor = XiaohongshuCrawler()
#     extractor.fetch_data()