import os
import time
import requests
import html2text
import hashlib
from urllib.parse import urljoin, urlparse
from playwright.sync_api import sync_playwright
import re
import sys

# Config
# Use paths relative to the script directory
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Tab-separated links file (name, url, relative output path), resolved
# relative to the project root (parent of SCRIPT_DIR) by ContentScraper.
DEFAULT_LINKS_FILE = "document_links.txt"
# Root directory under which scraped Markdown trees are written.
DEFAULT_DOCS_ROOT = "doc"
# When True, pages whose .md output already exists (and is non-trivial in
# size) are skipped instead of re-scraped.
SKIP_EXISTING = False

class ContentScraper:
    """Scrape documentation pages and save them as Markdown with local images.

    Reads a tab-separated links file where each line is
    ``name<TAB>url<TAB>relative/output/path``, renders each URL with
    Playwright (headless Chromium), converts the page body to Markdown via
    html2text, downloads referenced images next to the page, and writes the
    result under ``docs_root/<relative path>/<basename>.md``.

    HTML tables are preserved verbatim (to keep rowspan/colspan merging),
    and any Markdown tables produced by html2text are converted back into
    HTML tables with automatic rowspan merging of identical adjacent cells.
    """

    def __init__(self, links_file=DEFAULT_LINKS_FILE, docs_root=DEFAULT_DOCS_ROOT):
        # Resolve links_file path relative to the project root (the parent
        # of the script directory) unless an absolute path was supplied.
        if not os.path.isabs(links_file):
            self.links_file = os.path.join(os.path.dirname(SCRIPT_DIR), links_file)
        else:
            self.links_file = links_file

        # Resolve docs_root the same way.
        if not os.path.isabs(docs_root):
            self.docs_root = os.path.join(os.path.dirname(SCRIPT_DIR), docs_root)
        else:
            self.docs_root = docs_root
        self.converter = html2text.HTML2Text()
        self.converter.ignore_links = False
        self.converter.ignore_images = False
        self.converter.body_width = 0  # No wrapping
        # One HTTP session for all image downloads (connection reuse).
        self.session = requests.Session()

    def download_image(self, img_url, base_url, save_dir):
        """Download one image into ``save_dir`` and return its relative path.

        The local filename is the MD5 of the absolute URL plus the original
        extension (falling back to ``.png`` when missing or implausible), so
        the same image is only fetched/stored once. On any failure the
        original ``img_url`` is returned unchanged so the Markdown still
        points somewhere.
        """
        try:
            full_url = urljoin(base_url, img_url)

            # Content-addressed filename: hash of the absolute URL.
            img_hash = hashlib.md5(full_url.encode('utf-8')).hexdigest()

            # Keep the URL's extension only when it looks like a real one.
            path = urlparse(full_url).path
            ext = os.path.splitext(path)[1]
            if not ext or len(ext) > 5:
                ext = ".png"

            filename = f"{img_hash}{ext}"
            local_path = os.path.join(save_dir, filename)

            if not os.path.exists(local_path):
                print(f"    Downloading image: {full_url}")
                try:
                    response = self.session.get(full_url, timeout=10)
                    if response.status_code == 200:
                        with open(local_path, 'wb') as f:
                            f.write(response.content)
                    else:
                        print(f"    Failed to download image (status {response.status_code}): {full_url}")
                        return img_url
                except Exception as e:
                    print(f"    Failed to download image (exception): {e}")
                    return img_url

            # BUGFIX: previously returned the literal "images/(unknown)",
            # which broke every image link. Return the actual local path.
            return f"images/{filename}"

        except Exception as e:
            print(f"    Error processing image {img_url}: {e}")
            return img_url

    def process_content(self, html_content, page_url, local_path_str):
        """Convert a page's HTML to Markdown and save it with its images.

        ``local_path_str`` is a relative path such as
        ``"Basic Creation Guide/Getting Started"``; the output layout is::

            docs_root/<local_path_str>/<basename>.md
            docs_root/<local_path_str>/images/<hash>.<ext>

        Original HTML tables are shielded from html2text via placeholders
        and restored afterwards so that cell merging (rowspan) survives.
        """
        # Create the node's directory (one directory per document).
        target_dir = os.path.join(self.docs_root, local_path_str)
        os.makedirs(target_dir, exist_ok=True)

        # Markdown filename is the path's basename, e.g. "Getting Started.md";
        # fall back to index.md for empty/degenerate paths.
        md_filename = os.path.basename(local_path_str) + ".md"
        if not md_filename or md_filename == ".md":
            md_filename = "index.md"

        md_file_path = os.path.join(target_dir, md_filename)

        # Image directory: docs_root/<local_path_str>/images/
        img_dir = os.path.join(target_dir, "images")
        os.makedirs(img_dir, exist_ok=True)

        # Pre-process HTML: replace <br> tags with spaces so line breaks
        # inside table cells don't break Markdown table rows.
        html_content = re.sub(r'<br\s*/?>', ' ', html_content)

        def process_table_images(table_html, base_url, img_dir):
            """Rewrite <img src=...> inside a raw HTML table to local paths."""
            # Matches src="..." or src='...' inside an <img> tag.
            img_pattern = re.compile(r'<img[^>]*src=(?:"(.*?)"|\'(.*?)\')[^>]*>', re.DOTALL)

            def replace_table_img(match):
                full_img_tag = match.group(0)
                # Whichever quote style matched carries the URL.
                img_url = match.group(1) if match.group(1) else match.group(2)
                local_img_path = self.download_image(img_url, base_url, img_dir)
                return full_img_tag.replace(img_url, local_img_path)

            return img_pattern.sub(replace_table_img, table_html)

        # Extract original HTML tables so html2text can't flatten their
        # rowspan/colspan structure; they are restored after conversion.
        original_tables = []
        table_pattern = re.compile(r'(<table[^>]*>.*?</table>)', re.DOTALL)

        def table_replacer(match):
            table_content = match.group(1)
            # Localize the table's images before stashing it.
            processed_table = process_table_images(table_content, page_url, img_dir)
            original_tables.append(processed_table)
            return f'[[TABLE_PLACEHOLDER_{len(original_tables)-1}]]'

        html_with_placeholders = table_pattern.sub(table_replacer, html_content)

        # Convert to Markdown using html2text.
        markdown = self.converter.handle(html_with_placeholders)

        # Rewrite Markdown image links ![alt](url) to local downloads.
        def replace_img(match):
            alt = match.group(1)
            url = match.group(2)
            new_path = self.download_image(url, page_url, img_dir)
            return f"![{alt}]({new_path})"

        markdown = re.sub(r'!\[(.*?)\]\((.*?)\)', replace_img, markdown)

        # Ensure adjacent images are separated by a blank line so renderers
        # don't glue them onto one row.
        markdown = re.sub(r'(!\[.*?\]\(.*?\))\s*(!\[.*?\]\(.*?\))', r'\1\n\n\2', markdown)

        def convert_mydesk_context_menu_table(markdown_table):
            """
            Convert a Markdown table to an HTML table, merging consecutive
            identical cells in a column into one rowspan. Also handles the
            MyDesk Context Menu format where sub-rows have fewer columns
            than the header (they inherit the parent row's first column).
            Returns the input unchanged when it isn't a parseable table.
            """
            lines = markdown_table.strip().split('\n')
            if len(lines) < 3:  # Need at least header, separator, and one data row
                return markdown_table

            # Parse pipe-delimited rows into lists of stripped cells.
            table_data = []
            for line in lines:
                line = line.strip()
                if not line:
                    continue
                if line.startswith('|'):
                    line = line[1:]
                if line.endswith('|'):
                    line = line[:-1]
                cells = [cell.strip() for cell in line.split('|')]
                table_data.append(cells)

            # Locate the separator row (all cells are dashes). A table that
            # starts with the separator has no header row.
            separator_index = 0
            while separator_index < len(table_data):
                if all(re.match(r'^\s*-+\s*$', cell) for cell in table_data[separator_index]):
                    break
                separator_index += 1
            if separator_index >= len(table_data):
                return markdown_table

            if separator_index == 0:
                # No header present; use generic column names.
                header = ['Column1', 'Column2', 'Column3']  # Default headers
                data_rows = table_data[separator_index + 1:]
            else:
                header = table_data[0]
                data_rows = table_data[separator_index + 1:]

            # Drop any stray separator rows that slipped into the data.
            data_rows = [
                row for row in data_rows
                if not all(re.match(r'^\s*-+\s*$', cell) for cell in row)
            ]

            if not data_rows:
                return markdown_table

            num_columns = len(header)

            # MyDesk special case: a short row following a full-width row is
            # a sub-item — pad it and inherit the parent's first column.
            for i in range(len(data_rows)):
                if len(data_rows[i]) < num_columns:
                    if i > 0 and len(data_rows[i-1]) == num_columns:
                        parent_value = data_rows[i-1][0]
                        while len(data_rows[i]) < num_columns:
                            data_rows[i].append('')
                        data_rows[i][0] = parent_value

            # Build the merge map: merged_cells[i][j] = (value, rowspan),
            # where (None, 0) marks a cell swallowed by the rowspan above.
            merged_cells = []
            for i in range(len(data_rows)):
                row = []
                # Pad this row so every column index is valid.
                while len(data_rows[i]) < num_columns:
                    data_rows[i].append('')

                for j in range(num_columns):
                    if i == 0:
                        # First row: nothing above to merge with.
                        row.append((data_rows[i][j], 1))
                    else:
                        while len(data_rows[i-1]) < num_columns:
                            data_rows[i-1].append('')

                        # BUGFIX: walk up past (None, 0) markers to the origin
                        # cell of the current merge run. Comparing only against
                        # the row directly above broke runs of 3+ identical
                        # cells into rowspan-2 + rowspan-1 fragments.
                        k = i - 1
                        while merged_cells[k][j][0] is None:
                            k -= 1
                        if data_rows[i][j] == merged_cells[k][j][0]:
                            # Extend the run's rowspan and mark this cell merged.
                            merged_cells[k][j] = (merged_cells[k][j][0], merged_cells[k][j][1] + 1)
                            row.append((None, 0))  # Mark as merged
                        else:
                            row.append((data_rows[i][j], 1))
                merged_cells.append(row)

            # Emit the HTML table.
            html = '<table>\n  <thead>\n    <tr>\n'
            for cell in header:
                html += f'      <th>{cell}</th>\n'
            html += '    </tr>\n  </thead>\n  <tbody>\n'

            for i, row in enumerate(merged_cells):
                html += '    <tr>\n'
                for j, (cell_value, rowspan) in enumerate(row):
                    if cell_value is None:  # Already merged
                        continue
                    if rowspan > 1:
                        html += f'      <td rowspan="{rowspan}">{cell_value}</td>\n'
                    else:
                        html += f'      <td>{cell_value}</td>\n'
                html += '    </tr>\n'

            html += '  </tbody>\n</table>'
            return html

        # Scan the Markdown line by line, collecting runs of table lines and
        # replacing each run with its merged HTML rendering.
        lines = markdown.split('\n')
        in_table = False
        table_lines = []
        new_markdown = []

        for line in lines:
            stripped = line.strip()

            # Heuristic table-line detection: pipe rows, separator rows, and
            # (while inside a table) blank continuation lines.
            is_table_line = False
            if stripped.startswith('|'):
                is_table_line = True
            elif '---' in stripped:
                is_table_line = True
            elif in_table and ('|' in stripped or stripped == ''):
                is_table_line = True

            if not in_table and is_table_line:
                in_table = True
                table_lines = [line]
            elif in_table:
                if is_table_line:
                    table_lines.append(line)
                else:
                    # End of table: convert the collected run.
                    in_table = False
                    markdown_table = '\n'.join(table_lines)
                    html_table = convert_mydesk_context_menu_table(markdown_table)
                    new_markdown.append(html_table)
                    if stripped:
                        new_markdown.append(line)
            else:
                new_markdown.append(line)

        # Handle a table that runs to the end of the document.
        if in_table:
            markdown_table = '\n'.join(table_lines)
            html_table = convert_mydesk_context_menu_table(markdown_table)
            new_markdown.append(html_table)

        markdown = '\n'.join(new_markdown)

        # Restore the original HTML tables to keep their cell merging.
        # NOTE(review): assumes html2text emits the [[TABLE_PLACEHOLDER_n]]
        # markers verbatim (e.g. without escaping the underscores) — verify
        # with the converter's escaping settings.
        for i, table in enumerate(original_tables):
            placeholder = f'[[TABLE_PLACEHOLDER_{i}]]'
            markdown = markdown.replace(placeholder, table)

        # Save the final Markdown.
        with open(md_file_path, 'w', encoding='utf-8') as f:
            f.write(markdown)

        print(f"  Saved to {md_file_path}")

    def run(self):
        """Scrape every link in the links file, one page at a time.

        Launches one headless Chromium instance, navigates to each URL,
        extracts the content container's inner HTML, and hands it to
        process_content(). Per-page failures are logged and skipped.
        """
        # Read links: each non-comment line is name<TAB>url<TAB>path.
        links = []
        if not os.path.exists(self.links_file):
            print(f"Links file not found: {self.links_file}")
            return

        with open(self.links_file, 'r', encoding='utf-8') as f:
            for line in f:
                if line.startswith("#"):
                    continue
                parts = line.strip().split('\t')
                if len(parts) >= 3:
                    links.append({
                        'name': parts[0],
                        'url': parts[1],
                        'path': parts[2]
                    })

        print(f"Found {len(links)} links to scrape from {self.links_file}.")

        with sync_playwright() as p:
            browser = p.chromium.launch(headless=True)
            # Desktop-sized context to avoid mobile layout/overlays.
            context = browser.new_context(
                viewport={'width': 1920, 'height': 1080},
                user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
            )
            page = context.new_page()

            for i, link in enumerate(links):
                local_path = link['path']

                # Recompute the target .md path (mirrors process_content)
                # so existing output can be skipped without scraping.
                target_dir = os.path.join(self.docs_root, local_path)
                md_filename = os.path.basename(local_path) + ".md"
                if not md_filename or md_filename == ".md":
                    md_filename = "index.md"
                md_file_path = os.path.join(target_dir, md_filename)

                if SKIP_EXISTING and os.path.exists(md_file_path):
                    # Size > 10 bytes filters out empty/placeholder files.
                    if os.path.getsize(md_file_path) > 10:
                        print(f"[{i+1}/{len(links)}] Skipping existing: {link['name']}")
                        continue

                print(f"[{i+1}/{len(links)}] Scraping: {link['name']} ({link['url']})")

                try:
                    page.goto(link['url'], wait_until="domcontentloaded", timeout=60000)

                    # Give client-side rendering time to hydrate.
                    time.sleep(3)

                    # Dismiss the cookie banner if present (usually only the
                    # first page of a session, but cheap to re-check).
                    try:
                        banner_btn = page.query_selector('#accept-recommended-btn-handler')
                        if banner_btn and banner_btn.is_visible():
                            banner_btn.click()
                            time.sleep(1)
                    except Exception:
                        pass

                    # Locate the content container. Preferred structure:
                    # #App > main > ... > div.text_content, with
                    # .viewer_contents as a fallback.
                    content_handle = None
                    try:
                        page.wait_for_selector('.text_content, .viewer_contents', timeout=30000)

                        content_handle = page.query_selector('.text_content')
                        if not content_handle:
                            content_handle = page.query_selector('.viewer_contents')

                    except Exception:
                        print(f"    Warning: Content selectors not found for {link['url']}")
                        print(f"    Page Title: {page.title()}")
                        # Fall through to the broader selector below.

                    if not content_handle:
                        # Last resort: the outer render container.
                        try:
                            content_handle = page.query_selector('div.renderContent')
                        except Exception:
                            pass

                    if content_handle:
                        # In-page cleanup: replace <br> with spaces inside
                        # tables only, so cell text stays on one line without
                        # disturbing line breaks elsewhere.
                        page.evaluate("""
                            const tables = document.querySelectorAll('table');
                            tables.forEach(table => {
                                table.querySelectorAll('br').forEach(br => br.replaceWith(' '));
                            });
                        """)

                        html = content_handle.inner_html()

                        self.process_content(html, link['url'], local_path)
                    else:
                        print("    Error: No content found.")

                except Exception as e:
                    print(f"    Error scraping {link['url']}: {e}")

            browser.close()

if __name__ == "__main__":
    # CLI: scraper.py [links_file] [docs_root] — both optional, positional.
    args = sys.argv[1:]
    links_file = args[0] if len(args) >= 1 else DEFAULT_LINKS_FILE
    docs_root = args[1] if len(args) >= 2 else DEFAULT_DOCS_ROOT

    ContentScraper(links_file=links_file, docs_root=docs_root).run()
