import requests
import urllib.request
from lxml import etree,html
import json
from selenium import webdriver
from PIL import Image
from selenium.webdriver.chrome.service import Service as ChromService
from selenium.webdriver.common.by import By
# service = ChromService(executable_path='chromedriver.exe')
# driver = webdriver.Chrome(service=service)

# Target endpoint: page 2 of the dashboard user list, 10 rows per page.
url = 'https://uieieuiwavceoei.uesonme.net/zh/dashboard/users?page=2&limit=10'
# Browser-like request headers captured from Chrome DevTools so the request
# looks like a normal navigation. HTTP/2 pseudo-headers (':authority' etc.)
# are left commented out because `requests` cannot send them.
# NOTE(review): the Cookie below embeds live session tokens — they are
# credentials, will expire, and should not be committed to version control.
headers = {
    # ':authority':'uyeiiejutefej.pict.life',
    # ':method':'GET',
    # ':path':'/zh/dashboard/users',
    # ':scheme':'https',
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    # 'Accept-Encoding':'gzip, deflate, br, zstd',
    'Accept-Language':'zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7',
    'Cache-Control':'no-cache',
    'Cookie':'token=0e13ae18d5037e1779c22a643445e5a443e0734d24af288b95c18ac4b033ecd7%7Cf049f35d5a822de963888fe6c9f3e21e286ec73071025b05dd813ab38646a97c; __Secure-next-auth.callback-url=https%3A%2F%2Fuyeiiejutefej.pict.life; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..OL7UkwmkvJCM8s7h.J_NKH4Ief45sF3gZmUpAwgei5hdXQw_zDHuHgLE3Dp5fyJmvAMby3Hn1HIIVxkLoXs2YllWeKrwOxZZdY19ULMhtI1ZoC-ko8XEPInAgyPuZJzoUaRdKsZfjuoHcK3Aql3tKe5p2M_k_CiwAz5WoujzqW5cHx8aH-ZpbXA7tqdU3nDUTt16q9SPZYg.rv0yyo2LDX2PQl2qt6lVLQ',
    'Pragma':'no-cache',
    'Priority':'u=0, i',
    # 'Referer:https':'//uyeiiejutefej.pict.life/zh/auth/sign?__cf_chl_tk=N501tn53.WJRm1G9CSL9IT27ilThMAfsu32WAz7c2as-1716084577-0.0.1.1-1557',
    'Sec-Ch-Ua':'"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
    'Sec-Ch-Ua-Arch':'"x86"',
    'Sec-Ch-Ua-Bitness':'"64"',
    'Sec-Ch-Ua-Full-Version':'"124.0.6367.208"',
    'Sec-Ch-Ua-Full-Version-List':'"Chromium";v="124.0.6367.208", "Google Chrome";v="124.0.6367.208", "Not-A.Brand";v="99.0.0.0"',
    'Sec-Ch-Ua-Mobile':'?0',
    'Sec-Ch-Ua-Model':'""',
    'Sec-Ch-Ua-Platform':'"Windows"',
    'Sec-Ch-Ua-Platform-Version':'"10.0.0"',
    'Sec-Fetch-Dest':'document',
    'Sec-Fetch-Mode':'navigate',
    'Sec-Fetch-Site':'same-origin',
    'Sec-Fetch-User':'?1',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
}

# request = urllib.request.Request(url=url,headers=headers)
# response = urllib.request.urlopen(request)
# print(response.read().decode('utf-8'))




# Fetch the user-list page and extract the target <div> nodes with XPath.
response = requests.get(url=url, headers=headers)
# Fail fast on HTTP errors (4xx/5xx) instead of XPath-ing an error page.
response.raise_for_status()
content = response.text

# BUG FIX: the previous code called etree.parse(content), but
# lxml.etree.parse() expects a *filename or file object* and raises
# OSError when handed raw HTML text. etree.HTML() is the correct entry
# point for parsing an in-memory HTML string; query that tree directly.
html_content = etree.HTML(content)
th_list = html_content.xpath("//div[@class='flex items-center']")
print(th_list)

# Alternative approach: scrape the page with Selenium (kept for reference,
# currently disabled).
# driver.get(url)
# # Inject JavaScript code. NOTE: each execute_script call runs in its own
# # scope, so the `xhr` variable from the first call is not visible to the
# # second — this header-injection loop would not work as written.
# for key, value in headers.items():
#     driver.execute_script("var xhr = new XMLHttpRequest();")
#     driver.execute_script(f"xhr.setRequestHeader('{key}', '{value}');")
#     # Refresh the page
# driver.refresh()
# th_list = driver.find_element(by=By.XPATH,value='//thead[@class="ant-table-thead"]//th')
# print(th_list)