import os
import time
import requests
import html2text
import hashlib
from urllib.parse import urljoin, urlparse
from playwright.sync_api import sync_playwright
import re

# Config
DOCS_ROOT = "/Users/tunan/maoxiandao_doc/docs"

class TreeTraverser:
    """Scrapes the MapleStory Worlds creator docs.

    Walks the site's tree-view navigation with Playwright (depth-first,
    re-resolving each node by its index path because the DOM re-renders
    after navigation), records every node to ``document_links.txt``, and
    converts each page's content to Markdown under ``DOCS_ROOT``,
    downloading the images it references.
    """

    def __init__(self):
        self.start_url = "https://maplestoryworlds-creators.nexon.com/en/docs/?postId=472"
        # The link index file lives one directory above this script.
        self.output_file = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            "document_links.txt")
        self.total_nodes = 0
        self.max_retries = 3

        # HTML -> Markdown converter used by process_content().
        self.converter = html2text.HTML2Text()
        self.converter.ignore_links = False
        self.converter.ignore_images = False
        self.converter.body_width = 0  # disable line wrapping
        # Shared session so image downloads reuse HTTP connections.
        self.session = requests.Session()

    def scrape(self):
        """Launch the browser, open the docs root, and run the full traversal."""
        with sync_playwright() as p:
            # headless=True for stability; desktop viewport + UA so the site
            # serves the desktop layout the selectors below expect.
            browser = p.chromium.launch(headless=True)
            context = browser.new_context(
                viewport={'width': 1920, 'height': 1080},
                user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
            )
            self.page = context.new_page()

            print(f"🌐 Visiting {self.start_url}")
            try:
                self.page.goto(self.start_url, wait_until="domcontentloaded", timeout=60000)
            except Exception:
                # Slow page: the tree may still render, so keep going.
                print("Navigation timed out, proceeding...")

            # Dismiss the cookie banner if present (it can overlay the tree).
            try:
                time.sleep(2)
                banner_btn = self.page.query_selector('#accept-recommended-btn-handler')
                if banner_btn and banner_btn.is_visible():
                    banner_btn.click()
                    print("Clicked 'Allow All' cookies")
                    time.sleep(1)
            except Exception:
                pass  # best-effort: a missing banner is fine

            print("⏳ Waiting for tree view container...")
            self.page.wait_for_selector('#App > main > div.contents_wrap > div.tree_view_container', timeout=60000)

            # Start a fresh link index for this run (content files are
            # skipped individually if they already exist).
            with open(self.output_file, 'w', encoding='utf-8') as f:
                f.write("# Document Links\n")
                f.write("# Format: Node Name <tab> URL <tab> Local Path\n")

            print("🚀 Starting Path-Based DFS traversal...")

            root_uls = self.page.query_selector_all('#App > main > div.contents_wrap > div.tree_view_container > ul')
            root_count = len(root_uls)
            print(f"Found {root_count} root ULs")

            for i in range(root_count):
                root_lis = root_uls[i].query_selector_all(':scope > li')
                li_count = len(root_lis)
                print(f"Root UL {i+1} has {li_count} items")

                for j in range(li_count):
                    # Path: [root_ul_index, root_li_index]
                    self.traverse_recursive([i, j], [])

            print(f"✅ Traversal complete. Found {self.total_nodes} nodes.")
            browser.close()

    def _wait_for_url_change(self, page, previous_url, timeout):
        """Poll page.url for up to `timeout` seconds; return the new URL or None."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if page.url != previous_url:
                return page.url
            time.sleep(0.1)
        return None

    def click_node_and_get_url(self, page, text_div, text, depth):
        """Click a tree node and return the URL it navigates to.

        Escalates through three click strategies and, if none changes the
        URL, returns the current URL (same-page node or failed click).
        """
        previous_url = page.url

        # Strategy 1: JS click on the text div (most reliable in testing).
        try:
            page.evaluate("el => el.click()", text_div)
        except Exception:
            pass
        url = self._wait_for_url_change(page, previous_url, 2.0)
        if url:
            return url

        # Strategy 2: JS click on the enclosing .label (fallback).
        print(f"{'  ' * depth}⚠ URL didn't change for {text}, trying label click...")
        try:
            page.evaluate("el => el.closest('.label').click()", text_div)
        except Exception:
            pass
        url = self._wait_for_url_change(page, previous_url, 2.0)
        if url:
            return url

        # Strategy 3: forced Playwright click on the text div (last resort).
        print(f"{'  ' * depth}⚠ URL still didn't change, trying standard click...")
        try:
            text_div.scroll_into_view_if_needed()
            text_div.click(timeout=2000, force=True)
        except Exception:
            pass
        url = self._wait_for_url_change(page, previous_url, 3.0)
        if url:
            return url

        print(f"{'  ' * depth}⚠ URL still didn't change for {text}. Assuming same URL or failed.")
        return page.url

    def _markdown_target(self, local_path_str):
        """Return (target_dir, md_file_path) for a node's hierarchy path."""
        target_dir = os.path.join(DOCS_ROOT, local_path_str)
        md_filename = os.path.basename(local_path_str) + ".md"
        if not md_filename or md_filename == ".md":
            md_filename = "index.md"
        return target_dir, os.path.join(target_dir, md_filename)

    def traverse_recursive(self, path, parent_titles):
        """Visit the node addressed by `path`, scrape it, then recurse.

        path[0] is the root-UL index; path[1:] are nested LI indices.
        parent_titles is the list of sanitized ancestor names used to build
        the node's local directory path.
        """
        depth = len(path) - 1

        # 1. Re-resolve the node in the DOM (handles stale element handles).
        li_handle = self.resolve_node(path)
        if not li_handle:
            print(f"{'  ' * depth}❌ Failed to resolve node at path {path}")
            return

        # 2. Extract the node's label text.
        label = li_handle.query_selector('.label')
        if not label:
            return

        text_div = label.query_selector('div[style*="max-width"]')
        if not text_div:
            return

        text = text_div.inner_text().strip()

        # 3. Expand the node first so its children exist in the DOM later.
        expand_btn = label.query_selector('.isHavingChildren')
        has_children = False
        if expand_btn:
            classes = expand_btn.get_attribute('class') or ""
            if 'isHavingChildrenAndOpen' not in classes:
                print(f"{'  ' * depth}▶ Expanding: {text}")
                self.robust_expand(expand_btn, label)
            has_children = True

        # 4. Navigate to the node's page and record the link.
        current_url = self.click_node_and_get_url(self.page, text_div, text, depth)

        safe_text = self.sanitize_filename(text)
        current_hierarchy = parent_titles + [safe_text]
        local_path_str = os.path.join(*current_hierarchy)

        self.save_link(text, current_url, local_path_str)
        self.total_nodes += 1

        # 5. Scrape the page content now, unless a non-trivial markdown file
        #    already exists (makes re-runs cheap).
        _, md_file_path = self._markdown_target(local_path_str)
        if not (os.path.exists(md_file_path) and os.path.getsize(md_file_path) > 100):
            print(f"{'  ' * depth}📥 Scraping content for {text}...")
            try:
                self.page.wait_for_selector('.viewer_contents', timeout=5000)
                content_handle = self.page.query_selector('.viewer_contents')
                if content_handle:
                    html = content_handle.inner_html()
                    self.process_content(html, current_url, local_path_str)
                else:
                    print(f"{'  ' * depth}❌ No content found for {text}")
            except Exception:
                print(f"{'  ' * depth}❌ Timeout waiting for content for {text}")
        else:
            print(f"{'  ' * depth}⏩ Skipping existing content for {text}")

        # 6. Recurse into children (re-resolve: navigation re-rendered the tree).
        if has_children:
            li_handle = self.resolve_node(path)
            if not li_handle:
                print(f"{'  ' * depth}⚠ Lost node after navigation: {text}")
                return

            child_ul = li_handle.query_selector(':scope > ul')
            if child_ul:
                child_count = len(child_ul.query_selector_all(':scope > li'))
                for k in range(child_count):
                    self.traverse_recursive(path + [k], current_hierarchy)
            else:
                print(f"{'  ' * depth}❓ Child UL hidden/missing for {text}")

    def sanitize_filename(self, name):
        """Strip reserved filesystem characters and collapse whitespace.

        Deliberately permissive otherwise: non-ASCII characters (e.g. the
        Roman-numeral glyphs Ⅰ/Ⅱ used in doc titles) are kept as-is.
        """
        # Remove characters reserved on common filesystems: \ / * ? : " < > |
        name = re.sub(r'[\\/*?:"<>|]', "", name)
        # Collapse internal runs of whitespace and trim the ends.
        return " ".join(name.split())

    def resolve_node(self, path):
        """Re-locate the LI element addressed by `path`.

        Expands intermediate ancestors along the way so their child ULs are
        present. Returns None if any index is out of range or a required
        child UL is missing.
        """
        container = self.page.query_selector('#App > main > div.contents_wrap > div.tree_view_container')
        if not container:
            return None

        root_uls = container.query_selector_all(':scope > ul')
        if len(root_uls) <= path[0]:
            return None

        current_element = root_uls[path[0]]  # a UL
        li_indices = path[1:]

        for i, idx in enumerate(li_indices):
            lis = current_element.query_selector_all(':scope > li')
            if len(lis) <= idx:
                return None

            li = lis[idx]

            # Target node reached: return it.
            if i == len(li_indices) - 1:
                return li

            # Intermediate ancestor: make sure it is expanded, then descend.
            label = li.query_selector('.label')
            if label:  # guard: an LI without a .label cannot be expanded
                expand_btn = label.query_selector('.isHavingChildren')
                if expand_btn:
                    classes = expand_btn.get_attribute('class') or ""
                    if 'isHavingChildrenAndOpen' not in classes:
                        self.robust_expand(expand_btn, label)

            current_element = li.query_selector(':scope > ul')
            if not current_element:
                return None

        # A path with no LI indices never identifies a node.
        return None

    def robust_expand(self, expand_btn, label):
        """Expand a tree node, escalating through click strategies until the
        button carries the ...AndOpen class (or all strategies are exhausted)."""
        # 1) Forced direct click, falling back to a JS click.
        try:
            expand_btn.scroll_into_view_if_needed()
            expand_btn.click(timeout=2000, force=True)
        except Exception:
            self.page.evaluate("el => el.click()", expand_btn)

        time.sleep(0.5)

        # 2) Still closed: JS-click the button's parent element.
        classes = expand_btn.get_attribute('class') or ""
        if 'isHavingChildrenAndOpen' not in classes:
            try:
                self.page.evaluate("el => el.parentElement.click()", expand_btn)
            except Exception:
                pass
            time.sleep(0.5)

        # 3) Still closed: click the node's text div.
        classes = expand_btn.get_attribute('class') or ""
        if 'isHavingChildrenAndOpen' not in classes:
            text_div = label.query_selector('div[style*="max-width"]')
            if text_div:
                try:
                    text_div.click(force=True)
                except Exception:
                    self.page.evaluate("el => el.click()", text_div)
                time.sleep(1.0)

    def save_link(self, text, url, path):
        """Append one 'name<TAB>url<TAB>local path' line to the link index."""
        with open(self.output_file, 'a', encoding='utf-8') as f:
            f.write(f"{text}\t{url}\t{path}\n")

    def download_image(self, img_url, base_url, save_dir):
        """Download an image into save_dir and return its markdown-relative path.

        The local file name is the MD5 of the absolute URL (stable across
        re-runs, so previously downloaded files are reused). On any failure
        the original URL is returned so the markdown link stays usable.
        """
        try:
            full_url = urljoin(base_url, img_url)
            img_hash = hashlib.md5(full_url.encode('utf-8')).hexdigest()
            ext = os.path.splitext(urlparse(full_url).path)[1]
            if not ext or len(ext) > 5:
                ext = ".png"  # missing/implausible extension: assume PNG

            filename = f"{img_hash}{ext}"
            local_path = os.path.join(save_dir, filename)

            if not os.path.exists(local_path):
                try:
                    response = self.session.get(full_url, timeout=10)
                    if response.status_code == 200:
                        with open(local_path, 'wb') as f:
                            f.write(response.content)
                    else:
                        print(f"    Failed to download image (status {response.status_code}): {full_url}")
                        return img_url
                except Exception as e:
                    print(f"    Failed to download image (exception): {e}")
                    return img_url

            # BUG FIX: previously returned the literal "images/(unknown)",
            # which broke every image link in the generated markdown.
            return f"images/{filename}"

        except Exception as e:
            print(f"    Error processing image {img_url}: {e}")
            return img_url

    def process_content(self, html_content, page_url, local_path_str):
        """Convert a page's HTML to Markdown, localize its images, and save it."""
        target_dir, md_file_path = self._markdown_target(local_path_str)
        os.makedirs(target_dir, exist_ok=True)

        img_dir = os.path.join(target_dir, "images")
        os.makedirs(img_dir, exist_ok=True)

        # html2text converts <br> to a newline, which breaks single-line
        # Markdown table rows; flatten them to spaces before conversion.
        html_content = re.sub(r'<br\s*/?>', ' ', html_content)

        markdown = self.converter.handle(html_content)

        # Rewrite every image reference to point at a locally downloaded copy.
        def replace_img(match):
            alt, url = match.group(1), match.group(2)
            return f"![{alt}]({self.download_image(url, page_url, img_dir)})"

        markdown = re.sub(r'!\[(.*?)\]\((.*?)\)', replace_img, markdown)

        with open(md_file_path, 'w', encoding='utf-8') as f:
            f.write(markdown)

        print(f"  Saved to {md_file_path}")

if __name__ == "__main__":
    # Script entry point: crawl the entire docs tree and scrape every page.
    TreeTraverser().scrape()
