import asyncio
import aiohttp
from bs4 import BeautifulSoup


async def fetch_page(session, url, form_data=None):
    """Fetch a page's HTML text.

    Issues a POST with ``form_data`` when it is provided (ASP.NET postback),
    otherwise a plain GET.

    Args:
        session: an open ``aiohttp.ClientSession``.
        url: target URL.
        form_data: optional dict of form fields to POST.

    Returns:
        The decoded response body as a string.
    """
    request = (
        session.post(url, data=form_data) if form_data else session.get(url)
    )
    async with request as response:
        return await response.text()


async def get_forms_async(soup):
    """Extract the hidden ASP.NET postback fields from a parsed page.

    Builds the form payload needed to request the "next page" postback
    (``__EVENTTARGET`` is fixed to the pager link ``lkbt_next``).

    Args:
        soup: a ``BeautifulSoup`` document of the current page.

    Returns:
        dict of form fields, with "" for any hidden input that is missing,
        or ``None`` if extraction fails entirely.
    """
    def _hidden_value(name):
        # Single lookup per field (the original called soup.find twice
        # for every input); missing inputs degrade to an empty string.
        tag = soup.find("input", attrs={"name": name})
        return tag["value"] if tag else ""

    try:
        return {
            "__EVENTTARGET": "lkbt_next",
            "__VIEWSTATE": _hidden_value("__VIEWSTATE"),
            "__VIEWSTATEGENERATOR": _hidden_value("__VIEWSTATEGENERATOR"),
            "__EVENTVALIDATION": _hidden_value("__EVENTVALIDATION"),
        }
    except Exception as e:
        # Best-effort boundary: report and signal failure to the caller,
        # which stops pagination on None.
        print(f"提取表单数据出错: {e}")
        return None


async def crawl_pages(max_page=100):
    """Crawl the paginated article list asynchronously, printing titles.

    Fetches the first page with GET, then walks the ASP.NET "next page"
    postbacks until the hidden form fields or the results table disappear,
    or ``max_page`` is reached.

    Args:
        max_page: exclusive upper bound on the page number
            (default 100, matching the original hard-coded limit).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    }

    def _print_titles(soup):
        # Print every <h2> inside the GridView1 results table.
        # Returns False when the table is absent (end of results).
        title_table = soup.find("table", attrs={"id": "GridView1"})
        if not title_table:
            return False
        for h2 in title_table.find_all("h2"):
            print(h2.text)
        return True

    async with aiohttp.ClientSession(headers=headers) as session:
        url = "https://www.cnzj5u.com/list_block.aspx?stid=187&rptid=8"

        # First page: plain GET. A missing table here does not stop the
        # crawl (matches the original behavior).
        html = await fetch_page(session, url)
        soup = BeautifulSoup(html, "html.parser")
        _print_titles(soup)

        # Subsequent pages: POST-backs driven by the hidden form fields
        # scraped from the previous response.
        for _page in range(2, max_page):
            form_data = await get_forms_async(soup)
            if not form_data:
                break

            html = await fetch_page(session, url, form_data)
            soup = BeautifulSoup(html, "html.parser")

            if not _print_titles(soup):
                break


# Entry point: run the crawler only when this file is executed directly,
# so importing the module does not trigger network I/O.
if __name__ == "__main__":
    asyncio.run(crawl_pages())
