# 详情页（爬取第三页的表格）

# pip install requests 用来发送请求的工作
import requests
# pip install lxml 用来解析网页数据的模块
from lxml import etree

# Target URL: odds-change detail page (match id and company id are baked
# into the query string).
url = 'https://vip.titan007.com/changeDetail/overunder.aspx?id=2804329&companyid=1&l=0'

# Browser-like headers; the site checks Referer/Cookie before serving data
# (values captured from a real browser session — the Cookie will expire).
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.5845.97 Safari/537.36 Core/1.116.541.400 QQBrowser/19.4.6579.400',
    'Referer': 'https://vip.titan007.com/count/goalCount.aspx?t=2&sid=2702239&cid=1&l=0',
    'Cookie': 'Hm_lvt_a88664a99dbcb9c7c07dc420114041b3=1757745742; Hm_lpvt_a88664a99dbcb9c7c07dc420114041b3=1757745742; HMACCOUNT=A927B412270D4E60'
}
# Send the request. A timeout keeps a stalled connection from hanging the
# script forever; raise_for_status() surfaces HTTP errors (403/500/...)
# instead of silently parsing an error page as data.
response = requests.get(url, headers=headers, timeout=15)
response.raise_for_status()
# Let requests detect the real charset from the body — the site may serve
# GBK rather than UTF-8, and the default header-based guess can be wrong.
response.encoding = response.apparent_encoding

# Parse the HTML. NOTE: etree.HTML() returns None for empty or
# unparseable input, so guard before calling .xpath() to avoid an
# AttributeError and fall through to the "no table found" branch instead.
html_tree = etree.HTML(response.text)
table_elements = html_tree.xpath('//table') if html_tree is not None else []

# Write only the first table to a local file.
with open('table_content.html', 'w', encoding='utf-8-sig') as f:
    if table_elements:
        table_html = etree.tostring(table_elements[0], encoding='unicode', pretty_print=True)
        # Strip carriage returns — both the serialized entity form (&#13;)
        # and any raw \r characters — so the output is clean LF-only HTML.
        cleaned_html = table_html.replace('&#13;', '').replace('\r', '')
        f.write(cleaned_html)
    else:
        f.write("未找到任何table元素")