import csv
import json
import time

from lxml import etree
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.edge.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


# Listing URL: used cars, all brands, sorted/filtered by the encoded path segment.
url='https://www.che168.com/china/a0_0msdgscncgpi1ltocsp1exx0/'

#   Configure the Edge driver options.
option=Options()
option.add_experimental_option("detach",True)   #   keep the browser window open after the script exits

#   Create the Edge browser driver.
driver=webdriver.Edge(options=option)

#   ActionChains instance used below for slow, incremental scrolling
#   (triggers the site's lazy loading).
actions=ActionChains(driver)

#   Open the listing page.
driver.get(url)
#   Maximize the window so the full listing layout renders.
driver.maximize_window()

#   todo 爬取50页数据
#   Sentinel so _first_value can distinguish "no default" from any real default.
_MISSING = object()


def _first_value(element, xpaths, attr=None, default=_MISSING):
    """Return the text (or attribute *attr*) of the first matching XPath.

    The listing uses two slightly different card layouts (fields sit under
    ./a/div[2] or ./a/div[3] depending on the card), so each field is probed
    at its alternative locations in order.  The matched value is printed,
    mirroring the script's per-field progress logging.

    Raises NoSuchElementException when nothing matches and no *default*
    was supplied; otherwise prints and returns *default*.
    """
    for xpath in xpaths:
        try:
            node = element.find_element(By.XPATH, xpath)
        except NoSuchElementException:
            continue
        value = node.text if attr is None else node.get_attribute(attr)
        print(value)
        return value
    if default is _MISSING:
        raise NoSuchElementException(f"no xpath matched: {xpaths}")
    print(default)
    return default


#   Scrape 50 pages of listings.
for page in range(1, 51):
    #   Rows collected from the current page.
    rows = []
    print("当前在", page, "页爬取数据")

    #   Scroll down gradually (10 steps of 500 px) so lazily loaded cards
    #   render; adjust the step count to control how far the page scrolls.
    for _ in range(10):
        actions.scroll_by_amount(0, 500).perform()
        time.sleep(1)  # throttle so each chunk of content can load
    time.sleep(5)  # final settle time before reading the DOM

    #   Each car card is an <li> inside the results <ul>.
    cards = driver.find_elements(By.XPATH, '//*[@id="goodStartSolrQuotePriceCore0"]/ul/li')

    #   Only the first 52 <li> entries are real listings; anything after
    #   that is advertising, so it is sliced off.
    for card in cards[:52]:
        #   Model name.
        name = _first_value(card, ('./a/div[2]/h4', './a/div[3]/h4'))
        #   Mileage / registration date / location / dealer membership info.
        info = _first_value(card, ('./a/div[2]/p', './a/div[3]/p'))
        #   Current asking price.
        price = _first_value(card, ('./a/div[3]/div[1]/span/em', './a/div[2]/div[1]/span'))
        #   Original (pre-discount) price.
        #   NOTE(review): the first XPath here is identical to the price
        #   XPath above — looks like a copy-paste slip in the original;
        #   verify the intended element (possibly './a/div[3]/div[1]/s').
        old_price = _first_value(card, ('./a/div[3]/div[1]/span/em', './a/div[2]/div[1]/s'))
        #   Thumbnail image URL.
        image_url = _first_value(card, ('./a/div[2]/img', './a/div[1]/img'), attr='src')
        #   Detail-page URL (always present on the card's <a>).
        detail_url = card.find_element(By.XPATH, './a').get_attribute('href')
        print(detail_url)
        #   Ownership-transfer info; "不知道" when the element is absent.
        guohu = _first_value(card, ('./a/div[2]/div[1]/span[2]/i[2]',), default='不知道')

        rows.append([name, info, price, old_price, image_url, detail_url, guohu])

    #   Dump the current page's rows for inspection.
    for row in rows:
        print(row)

    #   Persist the data (currently disabled).
    # with open('source.csv', 'a', newline='', encoding='utf-8') as file:
    #     writer = csv.writer(file)
    #     # Write the header once, on the first page only.
    #     if page == 1:
    #         writer.writerow([
    #             "name", "info", "price", "old_price", "image_url", "info_url", "guohu"
    #         ])
    #     writer.writerows(rows)

    #   Advance to the next results page.
    #   NOTE(review): a[9] is a positional index into the pagination bar and
    #   will break if the number of page links changes; matching the link
    #   by its "下一页" text would be more robust.
    driver.find_element(By.XPATH, '//*[@id="listpagination"]/a[9]').click()


#   The "detach" option above keeps the browser open; uncomment to close it.
# driver.quit()







