import time

import requests
import re
import json

# Request headers imitating a desktop Microsoft Edge browser so the search
# endpoint treats the request as an ordinary HTML form submission.
headers = dict([
    ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"),
    ("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6"),
    ("Cache-Control", "max-age=0"),
    ("Connection", "keep-alive"),
    ("Content-Type", "application/x-www-form-urlencoded"),
    ("Origin", "https://sou.chinanews.com.cn"),
    ("Referer", "https://sou.chinanews.com.cn/search.do"),
    ("Sec-Fetch-Dest", "document"),
    ("Sec-Fetch-Mode", "navigate"),
    ("Sec-Fetch-Site", "same-origin"),
    ("Sec-Fetch-User", "?1"),
    ("Upgrade-Insecure-Requests", "1"),
    ("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36 Edg/138.0.0.0"),
    ("sec-ch-ua", '"Not)A;Brand";v="8", "Chromium";v="138", "Microsoft Edge";v="138"'),
    ("sec-ch-ua-mobile", "?0"),
    ("sec-ch-ua-platform", '"Windows"'),
])
def main() -> None:
    """Crawl chinanews.com.cn search results for the keyword "新能源".

    Pages through up to 1000 result pages, extracts the JS ``docArr``
    array embedded in each response page, strips ``<em>`` highlight tags,
    and prints title / publish time / content snippet / URL per article.
    Stops as soon as a page no longer contains ``docArr``.
    """
    url = "https://sou.chinanews.com.cn/search/news"
    # Compile the patterns once instead of on every page (hot-loop hoist).
    # re.DOTALL lets '.' match newlines inside the JS array literal.
    doc_arr_pattern = re.compile(r'var docArr = (\[.*?\]);', re.DOTALL)
    em_tag_pattern = re.compile(r'</?em>')

    count = 0
    # A Session reuses the TCP/TLS connection across the ~1000 requests
    # instead of performing a fresh handshake per page.
    session = requests.Session()
    session.headers.update(headers)

    for page in range(1, 1001):
        data = {
            "q": "新能源",
            "searchField": "all",
            "sortType": "time",
            "dateType": "all",
            "startDate": "",
            "endDate": "",
            "channel": "all",
            "editor": "",
            "shouQiFlag": "show",
            "pageNum": str(page),
        }
        # NOTE(security): verify=False disables TLS certificate checking
        # (kept from the original — presumably the site's cert chain is
        # broken; confirm before removing). timeout prevents an
        # unanswered request from hanging the crawl forever.
        response = session.post(url, data=data, verify=False, timeout=30)

        match = doc_arr_pattern.search(response.text)
        if not match:
            print("未找到docArr数据")
            break

        # group(1) is the JS array literal; drop the <em> highlight tags
        # so the string is valid JSON, then parse it (json.loads also
        # decodes \uXXXX escapes, e.g. \u65B0\u80FD\u6E90 -> 新能源).
        doc_arr = json.loads(em_tag_pattern.sub('', match.group(1)))
        print(doc_arr)

        for item in doc_arr:
            count += 1
            print(f"第{count}条数据：")
            print(f"标题：{item['title']}")
            print(f"发布时间：{item['pubtime']}")
            # Truncate to the first 100 characters, as the original
            # comment intended but never implemented.
            print(f"内容：{em_tag_pattern.sub('', item['content_without_tag'])[:100]}")
            print(f"链接：{item['url']}\n")

        # Be polite to the server between pages.
        time.sleep(2)


if __name__ == "__main__":
    main()
