import json
import os

import requests


class HoudaskCrawler:
    """Crawls the Houdask law API, saving each law as JSON and Markdown files.

    Workflow: fetch the category tree, parse it into
    ``{category: {law_title: law_id}}``, then fetch every article and write it
    under ``docs/json/<category>/`` and ``docs/md/<category>/``.
    """

    BASE_URL = "http://www.houdask.com/api/other/laws/lawsContent/anon/"
    # requests has NO default timeout; without this a stalled server would
    # hang the crawler forever.
    REQUEST_TIMEOUT = 30

    def __init__(self):
        # Name of the current top-level (type "1") category during parsing.
        self.tree_item = ""
        # Parsed tree: {category_name: {law_title: law_id}}.
        self.tree = {}
        # Raw tree payload from the API ([] when the fetch failed).
        self.res = self.fetch_tree()

    @staticmethod
    def fetch_tree():
        """Fetch the law category tree from the API.

        Returns:
            list: The ``data`` payload of the JSON response, or ``[]`` on any
            network error, HTTP error status, or malformed JSON body.
        """
        try:
            response = requests.post(
                f"{HoudaskCrawler.BASE_URL}getTree",
                timeout=HoudaskCrawler.REQUEST_TIMEOUT,
            )
            response.raise_for_status()
            return response.json().get("data", [])
        except (requests.RequestException, ValueError) as e:
            # ValueError covers json.JSONDecodeError from a non-JSON body.
            print(f"Error fetching tree data: {e}")
            return []

    @staticmethod
    def fetch_article(law_id: str):
        """Fetch a single article by law ID from the API.

        Args:
            law_id: The API identifier of the law to fetch.

        Returns:
            dict: The decoded JSON response, or ``{}`` on any network error,
            HTTP error status, or malformed JSON body.
        """
        try:
            response = requests.post(
                url=f"{HoudaskCrawler.BASE_URL}getArticle",
                headers={
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
                },
                # The API expects a form field "data" holding a JSON string.
                data={"data": json.dumps({"id": law_id})},
                timeout=HoudaskCrawler.REQUEST_TIMEOUT,
            )
            response.raise_for_status()
            return response.json()
        except (requests.RequestException, ValueError) as e:
            # ValueError covers json.JSONDecodeError from a non-JSON body.
            print(f"Error fetching article {law_id}: {e}")
            return {}

    @staticmethod
    def convert_json_to_markdown(json_data):
        """Convert a fetched article payload to markdown text.

        Args:
            json_data: Decoded article response; its ``data`` list items may
                carry a ``lawWebContent`` string.

        Returns:
            str: All non-empty ``lawWebContent`` values, each followed by a
            blank line. Empty string when there is no content.
        """
        # join() avoids the quadratic cost of repeated string concatenation.
        return "".join(
            f"{item['lawWebContent']}\n\n"
            for item in json_data.get("data", [])
            if item.get("lawWebContent")
        )

    @staticmethod
    def ensure_directory_exists(folder: str):
        """Create *folder* (and parents) if it does not already exist."""
        os.makedirs(folder, exist_ok=True)

    def parse_tree(self, data):
        """Parse the law tree recursively into ``self.tree``.

        Node types (per the API payload): "1" starts a new category, "3" is
        skipped entirely, anything else is a leaf mapped to its law id under
        the current category.

        Args:
            data: List of tree nodes, each optionally carrying ``id``,
                ``content``, ``type`` and ``children`` keys.
        """
        for item in data:
            item_type = item.get("type")
            if item_type != "3":
                if "id" in item and "content" in item:
                    if item_type == "1":
                        self.tree_item = item["content"]
                        self.tree[self.tree_item] = {}
                    else:
                        # setdefault guards against a leaf arriving before any
                        # type "1" node has initialized its category bucket.
                        self.tree.setdefault(self.tree_item, {})[
                            item["content"]
                        ] = item["id"]
                # Recursively parse child nodes
                if "children" in item:
                    self.parse_tree(item["children"])

    def process_tree(self):
        """Fetch every parsed law and save it as JSON and Markdown files."""
        for item, sub_tree in self.tree.items():
            # Create both output folders once per category, not per article.
            self.ensure_directory_exists(f"docs/json/{item}")
            self.ensure_directory_exists(f"docs/md/{item}")

            for sub_item, law_id in sub_tree.items():
                article_data = self.fetch_article(law_id)
                if article_data:
                    # Save JSON
                    json_path = f"docs/json/{item}/{sub_item}.json"
                    with open(json_path, "w", encoding="utf-8") as json_file:
                        json.dump(article_data, json_file, ensure_ascii=False)

                    # Convert to Markdown and save
                    markdown_content = self.convert_json_to_markdown(article_data)
                    markdown_path = f"docs/md/{item}/{sub_item}.md"
                    with open(markdown_path, "w", encoding="utf-8") as md_file:
                        md_file.write(markdown_content)

    def run(self):
        """Main entry point: parse the fetched tree and process every law."""
        if not self.res:
            print("No data to process.")
            return

        self.parse_tree(self.res)
        self.process_tree()
