import json
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import csv
from selenium.webdriver.edge.options import Options
from selenium.webdriver.common.action_chains import ActionChains



# Damai (damai.cn) ticket search page used as the scrape entry point.
url='https://search.damai.cn/search.htm'

#   Build Edge driver options.
option=Options()
option.add_experimental_option("detach",True)   #   keep the browser window open after the script exits

#   Create the Edge browser driver with the options above.
driver=webdriver.Edge(options=option)

#   ActionChains instance used later for slow, stepwise page scrolling
#   (so lazily-loaded content has time to render).
actions=ActionChains(driver)

#   Open the search page.
driver.get(url)

def _xpath_first(html, paths, default=""):
    """Return the first non-empty (after strip) XPath text match, or *default*.

    The site renders some event cards with slightly different DOM layouts,
    so several candidate XPaths may describe the same field.  The original
    code indexed ``result[0]`` directly, which raised IndexError whenever a
    page had fewer than 30 cards or a card used the alternate layout.
    """
    for path in paths:
        hit = html.xpath(path)
        if hit and hit[0].strip():
            return hit[0]
    return default


# Shared XPath prefix of the x-th event card on the results page.
# NOTE(review): path copied from the original; brittle against site redesigns.
_CARD = "/html/body/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[{}]"

# Scrape up to 109 result pages; each page holds at most 30 event cards.
for t in range(1, 110):
    print("第", t, "页开始爬取数据")
    driver.maximize_window()
    time.sleep(3)  # let the page settle before scrolling

    # Scroll down in small steps so lazily-loaded cards render before parsing.
    for _ in range(15):
        actions.scroll_by_amount(0, 500).perform()
        time.sleep(0.3)  # throttle so content can load between steps
    time.sleep(1)

    # Parse the rendered page source with lxml for XPath extraction.
    html = etree.HTML(driver.page_source)

    rows = []
    for x in range(1, 31):
        card = _CARD.format(x)

        city = _xpath_first(html, [card + "/div/div[1]/span/text()"])
        title = _xpath_first(html, [card + "/div/div[1]/a/text()"])
        info_url = _xpath_first(html, [card + "/div/div[1]/a/@href"])
        user = _xpath_first(html, [card + "/div/div[2]/text()"])
        image_url = _xpath_first(html, [card + "/a/img/@src"])
        # Venue text may sit directly in div[3] or inside a nested <a>.
        place = _xpath_first(html, [card + "/div/div[3]/text()",
                                    card + "/div/div[3]/a/text()"])
        # The date column index shifts by one between card layouts.
        show_time = _xpath_first(html, [card + "/div/div[4]/text()",
                                        card + "/div/div[3]/text()"])
        # Price / status live in div[6] or div[5] depending on layout.
        price = _xpath_first(html, [card + "/div/div[6]/span/text()",
                                    card + "/div/div[5]/span/text()"])
        statu = _xpath_first(html, [card + "/div/div[6]/text()",
                                    card + "/div/div[5]/text()"])

        if not title:
            # Fewer than 30 cards on this page — stop instead of appending
            # empty rows (the original crashed with IndexError here).
            break

        rows.append([
            city.replace("【", "").replace("】", "").strip(),
            title.strip(),
            info_url.strip(),
            user.strip(),
            image_url.strip(),
            place.strip(),
            show_time.strip().replace("\n", ""),
            price.strip(),
            statu.strip(),
        ])

    # Persist this page's rows (the original left this commented out and
    # silently discarded everything it scraped).  Header row on page 1 only.
    with open('source.csv', 'a', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        if t == 1:
            writer.writerow([
                "city", "title", "info_url", "users", "image_url",
                "place", "time", "price", "statu",
            ])
        writer.writerows(rows)

    # Click the next-page control.  The outer <button> rejects clicks, so we
    # wait for and click the inner <i> element instead.
    try:
        next_icon = WebDriverWait(driver, 10).until(
            EC.element_to_be_clickable((By.XPATH, '//button[@class="btn-next"]/i'))
        )
        next_icon.click()
        time.sleep(1)
    except Exception as e:
        print(f"通过图标点击失败: {e}")
        # No next page (or it is unclickable): stop instead of re-scraping
        # the same page on every remaining iteration.
        break














