import asyncio
import os
import time
from random import random

import aiofiles
import httpx
from parsel import Selector

class Law:
    """A single law document scraped from the site.

    Attributes are class-level defaults that each instance overwrites as the
    crawler fills them in.
    """
    title: str = ""  # document title
    content: str = ""  # full law text
    type: str = ""  # dash-joined category path, e.g. "宪法-修正案"
    url: str = ""  # page path relative to BASE_HOST

    def __str__(self):
        lines = [
            "",
            f"            title={self.title}, ",
            f"            content={self.content}, ",
            f"            type={self.type}, ",
            f"            url={self.url}",
            "        ",
        ]
        return "\n".join(lines)

# Root of the site being crawled; all law URLs in the menu are relative to it.
BASE_HOST = "https://lawrefbook.github.io"

# Browser-like headers for the initial index-page request
# (note `sec-fetch-site: none` — a direct navigation with no referer).
HEADERS = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'no-cache',
    'pragma': 'no-cache',
    'priority': 'u=0, i',
    'sec-ch-ua': '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
}

# Headers for the per-law detail pages: same as HEADERS plus a referer and
# `sec-fetch-site: same-origin`, mimicking a click from the index page.
HEADERS1 = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'no-cache',
    'pragma': 'no-cache',
    'priority': 'u=0, i',
    'referer': 'https://lawrefbook.github.io/',
    'sec-ch-ua': '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
}

async def parse_law_type_tags(law_type: str | None, law_menu_tags: list[Selector], law_menus: list[Law]):
    """Recursively walk the sidebar menu tree, appending leaf entries to law_menus.

    A menu <li> that contains a <label> is a category: its label text extends
    the dash-joined type path and its nested <ul>/<li> children are visited
    recursively.  A <li> without a <label> is a leaf law document and becomes
    a Law object (law_type stays None for top-level leaves).
    """
    for node in law_menu_tags:
        if not node.xpath("./label").extract_first():
            # Leaf node: a direct link to one law document.
            entry = Law()
            entry.type = law_type
            entry.title = node.xpath("./a/text()").extract_first().strip()
            entry.url = node.xpath("./a/@href").extract_first()
            law_menus.append(entry)
            continue

        # Category node: extend the type path and descend into the nested list.
        segment = node.xpath("./label/a/text()").extract_first().strip()
        next_type = f"{law_type}-{segment}" if law_type else segment
        await parse_law_type_tags(next_type, node.xpath("./ul/li"), law_menus)


async def get_law_menus() -> list[Law]:
    """Fetch the site index page and return the flattened list of law menu entries.

    Returns:
        A list of Law objects with type, title and url filled in (content is
        fetched later by fetch_law_content).

    Raises:
        Exception: if the index page does not respond with HTTP 200.
    """
    async with httpx.AsyncClient() as client:
        laws: list[Law] = []
        response = await client.get(BASE_HOST, headers=HEADERS, timeout=60)
        if response.status_code != 200:
            # BUG FIX: the original passed two positional args to Exception,
            # producing a tuple repr instead of one readable message.
            raise Exception(f"send request got error status code, reason: {response.text}")
        selector = Selector(text=response.text)
        # The sidebar navigation's top-level <ul>; IndexError here means the
        # page layout changed and the selector needs updating.
        css_selector = "main > aside.book-menu > div > nav > ul"
        menu_top_ul = selector.css(css_selector)[0]
        law_menu_tags = menu_top_ul.xpath("./li")
        await parse_law_type_tags(None, law_menu_tags, laws)
        return laws

async def fetch_law_content(law: Law):
    """Download one law's detail page and store the text in law.content.

    Retries up to 3 times with a 2-second pause between attempts.

    Args:
        law: menu entry whose url is relative to BASE_HOST; its content
            attribute is filled in on success.

    Raises:
        Exception: when all retries are exhausted.
    """
    async with httpx.AsyncClient() as client:
        retries = 3
        for attempt in range(retries):
            try:
                response = await client.get(BASE_HOST + law.url, headers=HEADERS1, timeout=120)
                response.raise_for_status()  # raises HTTPStatusError on non-2xx
                selector = Selector(text=response.text)
                article_tag = selector.css("main > div > article")[0]

                law_content = []
                article_children = article_tag.xpath("./*")
                for child in article_children:
                    row = child.xpath(".//text()").extract_first()
                    if row is None:
                        # BUG FIX: elements with no text node returned None,
                        # which crashed on .endswith below.
                        continue
                    if row.endswith("\n"):
                        law_content.append("")
                    law_content.append(row)

                # BUG FIX: the original wrote `Law.content = ...`, mutating the
                # CLASS attribute shared by every Law instance instead of this
                # instance's attribute.
                law.content = "\n".join(law_content).lstrip("\n")
                return
            except httpx.RequestError as e:
                print(f"请求失败，尝试 {attempt + 1} 次: {e}")
            except httpx.HTTPStatusError as e:
                print(f"HTTP状态错误，尝试 {attempt + 1} 次: {e}")
            except Exception as e:
                print(f"其他错误，尝试 {attempt + 1} 次: {e}")

            if attempt < retries - 1:
                await asyncio.sleep(2)  # back off before retrying

        # All retries failed.
        raise Exception("请求失败，重试次数用完")


async def save_law(law: Law):
    """Write law.content to laws/<type segments>/<title>.txt asynchronously.

    The dash-joined law.type becomes a nested directory path; directories are
    created on demand.
    """
    # BUG FIX: law.type is None for top-level menu leaves (parse_law_type_tags
    # passes law_type=None), so guard before splitting.
    directories = law.type.split('-') if law.type else []
    path = os.path.join('laws', *directories)

    os.makedirs(path, exist_ok=True)
    # BUG FIX: path separators in a title would otherwise escape the target
    # directory (or make the open() fail on a missing subdirectory).
    safe_title = law.title.replace('/', '_').replace('\\', '_')
    filename = safe_title + ".txt"
    filepath = os.path.join(path, filename)
    # Async file write so the event loop is not blocked by disk I/O.
    async with aiofiles.open(filepath, 'w', encoding='utf-8') as file:
        await file.write(law.content)


async def run_crawler(start_index: int):
    """Crawl and save every law in the menu, resuming from the 1-based start_index.

    Entries before start_index are skipped, which allows restarting an
    interrupted crawl without re-downloading earlier laws.
    """
    laws = await get_law_menus()
    total_laws = len(laws)
    overall_started = time.time()

    for position, law in enumerate(laws, start=1):
        if position < start_index:
            continue  # already crawled on a previous run

        print(f"开始爬取第{position}条：{law.title}，还剩{total_laws - position}条")

        one_started = time.time()
        await fetch_law_content(law)
        await save_law(law)
        print(f"爬取完成：耗时{time.time() - one_started}秒")

        # Random sub-second pause between requests to be polite to the server.
        await asyncio.sleep(random())

    print(f"任务爬取完成.......总耗时{time.time() - overall_started}秒")

if __name__ == '__main__':
    # BUG FIX: `import asyncio` used to live only inside this guard, so the
    # module-level uses of asyncio (fetch_law_content, run_crawler) raised
    # NameError whenever the file was imported rather than run as a script.
    # The import now lives at the top of the file with the other imports.
    asyncio.run(run_crawler(2976))