import requests, asyncio, aiohttp
from util.headers import get_header
from lxml import etree


def get_auther():
    """Scrape the famous-quotes index and return the author link panel.

    Returns a dict with two parallel lists:
      - "auther": anchor texts (author/category names) inside div#type2
      - "herf":   the corresponding href attribute values
    Key spellings are kept as-is because main() reads the "auther" key.

    Returns empty lists (instead of the original's implicit None) when the
    #type2 panel is missing, so callers can safely chain .get("auther").
    """
    url = "https://www.gushiwen.cn/mingjus/"
    res = requests.get(url, headers=get_header())
    tree = etree.HTML(res.text)
    items = tree.xpath("//div[@id='type2']")
    # The page has at most one #type2 div; the original looped and returned
    # on the first iteration, which is equivalent to taking items[0].
    if not items:
        return {"auther": [], "herf": []}
    panel = items[0]
    return {
        "auther": panel.xpath(".//a/text()"),
        "herf": panel.xpath(".//a/@href"),
    }


async def fetch(session, url):
    """GET *url* through *session* and return the response body as text."""
    async with session.get(url) as response:
        body = await response.text()
        return body


async def main(page):
    """Concurrently fetch page *page* of the quote list for every author
    returned by get_auther(), then parse and print each quote with its source.

    Each printed record is a dict: {"cotent": [...], "source": [...]}
    (key spelling preserved from the original output format).
    """
    async with aiohttp.ClientSession() as session:
        pages = await asyncio.gather(
            *[
                fetch(
                    session,
                    f"https://www.gushiwen.cn/mingjus/default.aspx?astr={i}&page={page}",
                )
                for i in get_auther().get("auther")
            ]
        )
        for html in pages:
            tree = etree.HTML(html)
            items = tree.xpath('//div[@class="left"]//div[@class="sons"]/div[@class="cont"]')
            for item in items:
                cotent = item.xpath("./a[1]/text()")
                source = item.xpath("./a[2]/text()")
                # BUGFIX: the original print(d1) sat outside this guard,
                # raising NameError if the first item lacked content/source
                # and re-printing stale data on later misses.
                if cotent and source:
                    print({"cotent": cotent, "source": source})


if __name__ == "__main__":
    # Guard the crawl behind the main-module check so importing this module
    # does not kick off network requests as a side effect.
    for page in range(1, 11):
        print(f"第{page}页开始爬取")
        asyncio.run(main(page))
        print("爬取完毕")
