import os
import time
import requests
import html2text
import hashlib
from urllib.parse import urljoin, urlparse
from playwright.sync_api import sync_playwright
import re
import sys

# Config
# Use paths relative to the script directory
# Directory containing this script; the project root is assumed to be its parent.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Tab-separated links file (name<TAB>url<TAB>relative_path), resolved against the project root.
DEFAULT_LINKS_FILE = "document_links_zh.txt"
# Output root for the scraped Markdown docs, resolved against the project root.
DEFAULT_DOCS_ROOT = "doc_zh"
# When True, pages whose .md file already exists (and is larger than 10 bytes) are skipped.
SKIP_EXISTING = True

class ContentScraper:
    """Scrape documentation pages listed in a links file into local Markdown.

    Reads (name, url, relative_path) triples from a tab-separated links file,
    renders each page with Playwright, converts the content HTML to Markdown
    via html2text, downloads referenced images next to each page, and
    preserves HTML tables so cell-merge (colspan) information survives the
    Markdown conversion.
    """

    def __init__(self, links_file=DEFAULT_LINKS_FILE, docs_root=DEFAULT_DOCS_ROOT):
        """Resolve input/output paths and configure the HTML->Markdown converter.

        Relative paths are interpreted against the project root, which is
        assumed to be the parent directory of the script directory.
        """
        # Resolve links_file path relative to the project root.
        if not os.path.isabs(links_file):
            self.links_file = os.path.join(os.path.dirname(SCRIPT_DIR), links_file)
        else:
            self.links_file = links_file

        # Resolve docs_root path relative to the project root.
        if not os.path.isabs(docs_root):
            self.docs_root = os.path.join(os.path.dirname(SCRIPT_DIR), docs_root)
        else:
            self.docs_root = docs_root

        self.converter = html2text.HTML2Text()
        self.converter.ignore_links = False
        self.converter.ignore_images = False
        self.converter.body_width = 0  # No wrapping
        self.session = requests.Session()

    def _md_target(self, local_path_str):
        """Return (target_dir, md_file_path) for a page's relative doc path.

        e.g. "Basic Creation Guide/Getting Started" maps to
        DOCS_ROOT/Basic Creation Guide/Getting Started/Getting Started.md.
        Falls back to "index.md" when the path has no usable basename.
        """
        target_dir = os.path.join(self.docs_root, local_path_str)
        md_filename = os.path.basename(local_path_str) + ".md"
        if md_filename == ".md":
            md_filename = "index.md"
        return target_dir, os.path.join(target_dir, md_filename)

    def download_image(self, img_url, base_url, save_dir):
        """Download an image into save_dir and return its Markdown-relative path.

        Returns "images/<md5-of-url><ext>" on success (or if the file already
        exists locally); on any failure the original img_url is returned so
        the Markdown keeps a working remote reference.
        """
        try:
            full_url = urljoin(base_url, img_url)

            # Hash the resolved URL so the same image is stored exactly once.
            img_hash = hashlib.md5(full_url.encode('utf-8')).hexdigest()

            # Keep the original extension when it looks sane; default to .png.
            path = urlparse(full_url).path
            ext = os.path.splitext(path)[1]
            if not ext or len(ext) > 5:
                ext = ".png"

            filename = f"{img_hash}{ext}"
            local_path = os.path.join(save_dir, filename)

            if not os.path.exists(local_path):
                # Download
                print(f"    Downloading image: {full_url}")
                try:
                    response = self.session.get(full_url, timeout=10)
                    if response.status_code == 200:
                        with open(local_path, 'wb') as f:
                            f.write(response.content)
                    else:
                        print(f"    Failed to download image (status {response.status_code}): {full_url}")
                        return img_url
                except Exception as e:
                    print(f"    Failed to download image (exception): {e}")
                    return img_url

            # BUGFIX: previously returned the placeholder "images/(unknown)"
            # instead of the path of the file that was actually saved.
            return f"images/{filename}"

        except Exception as e:
            print(f"    Error processing image {img_url}: {e}")
            return img_url

    def process_content(self, html_content, page_url, local_path_str):
        """Convert one page's HTML to Markdown, localize its images, and save it.

        Args:
            html_content: inner HTML of the page's content container.
            page_url: URL of the page; base for resolving relative image URLs.
            local_path_str: path under docs_root, e.g. "Basic Creation Guide/Getting Started".

        Returns:
            The path of the written .md file, or None on failure.
        """
        print(f"    Processing content for: {local_path_str}")
        try:
            # Layout: DOCS_ROOT/<local_path>/<basename>.md plus an images/ dir.
            target_dir, md_file_path = self._md_target(local_path_str)
            os.makedirs(target_dir, exist_ok=True)

            # Image directory: DOCS_ROOT/<local_path>/images/
            img_dir = os.path.join(target_dir, "images")
            os.makedirs(img_dir, exist_ok=True)

            # Pre-process HTML: replace <br> tags with spaces to prevent
            # table cells from being broken across lines.
            html_content = re.sub(r'<br\s*/?>', ' ', html_content)

            # Download images referenced inside an HTML table and rewrite
            # their src attributes to local "images/..." paths.
            def process_table_images(table_html, base_url, img_dir):
                img_pattern = re.compile(r'<img[^>]*src=(?:"(.*?)"|\'(.*?)\')[^>]*>', re.DOTALL)

                def replace_table_img(match):
                    full_img_tag = match.group(0)
                    # src may be quoted with " (group 1) or ' (group 2).
                    img_url = match.group(1) if match.group(1) else match.group(2)
                    local_img_path = self.download_image(img_url, base_url, img_dir)
                    updated_img_tag = full_img_tag.replace(img_url, local_img_path)
                    return updated_img_tag

                return img_pattern.sub(replace_table_img, table_html)

            # Extract the original HTML tables so colspan/rowspan information
            # is not flattened by html2text; each is swapped for a placeholder.
            original_tables = []
            table_pattern = re.compile(r'(<table[^>]*>.*?</table>)', re.DOTALL)

            def table_replacer(match):
                table_content = match.group(1)
                # Localize the table's images before storing it.
                processed_table = process_table_images(table_content, page_url, img_dir)
                original_tables.append(processed_table)
                placeholder = f'[[TABLE_PLACEHOLDER_{len(original_tables)-1}]]'
                return placeholder

            html_with_placeholders = table_pattern.sub(table_replacer, html_content)

            # Convert to Markdown using html2text.
            markdown = self.converter.handle(html_with_placeholders)

            # Download every image referenced in the Markdown and point the
            # link at the local copy.
            def replace_img(match):
                alt = match.group(1)
                url = match.group(2)
                new_path = self.download_image(url, page_url, img_dir)
                return f"![{alt}]({new_path})"

            # Regex to find images: ![alt](url)
            markdown = re.sub(r'!\[(.*?)\]\((.*?)\)', replace_img, markdown)

            # Ensure blank lines around images so they render as blocks:
            # image followed by image ...
            markdown = re.sub(r'(!\[.*?\]\(.*?\))\s*(!\[.*?\]\(.*?\))', r'\1\n\n\2', markdown)
            # ... text followed by image ...
            markdown = re.sub(r'([^\n])\s*(!\[.*?\]\(.*?\))', r'\1\n\n\2', markdown)
            # ... and image followed by text.
            markdown = re.sub(r'(!\[.*?\]\(.*?\))\s*([^\n])', r'\1\n\n\2', markdown)

            # Automatically detect and fix tables with cell merging
            def convert_mydesk_context_menu_table(markdown_table):
                """
                Convert Markdown table to HTML table with automatic cell merging
                for consecutive cells with the same content, especially handling
                the MyDesk Context Menu table format where sub-rows may have fewer columns.

                Non-Markdown input (e.g. a preserved HTML table, or anything with
                no separator row / fewer than 3 lines) is returned unchanged.
                """
                lines = markdown_table.strip().split('\n')
                if len(lines) < 3:  # Need at least header, separator, and one data row
                    return markdown_table

                # Parse table rows: strip optional leading/trailing pipes and
                # split each line into cells.
                table_data = []
                for line in lines:
                    line = line.strip()
                    if not line: continue
                    if line.startswith('|'):
                        line = line[1:]
                    if line.endswith('|'):
                        line = line[:-1]
                    cells = [cell.strip() for cell in line.split('|')]
                    table_data.append(cells)

                # Locate the separator row (cells containing only dashes);
                # a table may start with the separator (headerless).
                separator_index = 0
                while separator_index < len(table_data):
                    if all(re.match(r'^\s*-+\s*$', cell) for cell in table_data[separator_index]):
                        break
                    separator_index += 1
                if separator_index >= len(table_data):
                    # No separator row: not a Markdown table; pass through.
                    return markdown_table

                if separator_index == 0:
                    # Headerless table: use default column names.
                    header = ['Column1', 'Column2', 'Column3']
                    data_rows = table_data[1:]
                else:
                    header = table_data[0]
                    data_rows = table_data[separator_index+1:]

                if not data_rows:
                    return markdown_table

                # Widest data row determines the HTML column count.
                max_columns = max(len(row) for row in data_rows)

                # Pad the header if data rows are wider than it.
                if len(header) < max_columns:
                    header += [''] * (max_columns - len(header))

                # Detect merged cells.
                # 4-tuples (row, col, value, span) mark horizontal merges;
                # 3-tuples mark sub-row continuations (recorded but not yet
                # rendered below).
                merged_cells = []
                current_row = 0
                while current_row < len(data_rows):
                    row = data_rows[current_row]
                    # Skip empty rows.
                    if not any(cell.strip() for cell in row):
                        current_row += 1
                        continue

                    # Rows with fewer columns likely continue the previous row
                    # (a common pattern in MyDesk context menu tables).
                    if len(row) < max_columns:
                        if current_row > 0:
                            prev_row = data_rows[current_row - 1]
                            parent_cells = prev_row[:len(row)]
                            if all(cell in parent_cells for cell in row):
                                merged_cells.append((current_row - 1, parent_cells, row))
                                current_row += 1
                                continue

                    # Normal row: detect runs of identical consecutive cells
                    # (horizontal merges).
                    current_col = 0
                    while current_col < len(row):
                        cell_value = row[current_col]
                        if not cell_value:  # Skip empty cells
                            current_col += 1
                            continue

                        span = 1
                        while current_col + span < len(row) and row[current_col + span] == cell_value:
                            span += 1

                        if span > 1:
                            merged_cells.append((current_row, current_col, cell_value, span))
                            current_col += span
                        else:
                            current_col += 1

                    current_row += 1

                # Generate the HTML table.
                html = '<table border="1" class="docutils">\n'

                # Header row.
                html += '    <thead>\n'
                html += '        <tr>\n'
                for col in header:
                    html += f'            <th>{col}</th>\n'
                html += '        </tr>\n'
                html += '    </thead>\n'

                # Body rows, applying the detected colspans.
                html += '    <tbody>\n'

                # Cells covered by a colspan are skipped.
                skip_cells = set()

                for row_idx, row in enumerate(data_rows):
                    html += '        <tr>\n'

                    # Pad short rows to the full column count.
                    while len(row) < max_columns:
                        row.append('')

                    for col_idx, cell in enumerate(row):
                        if (row_idx, col_idx) in skip_cells:
                            continue

                        colspan = 1

                        # Horizontal merge starting at this cell?
                        for merge in merged_cells:
                            if len(merge) == 4 and merge[0] == row_idx and merge[1] == col_idx:
                                colspan = merge[3]
                                for i in range(1, colspan):
                                    skip_cells.add((row_idx, col_idx + i))
                                break

                        # Vertical merges (rowspan) are not generated yet; only
                        # horizontal merges are rendered.
                        if colspan > 1:
                            html += f'            <td colspan="{colspan}">{cell}</td>\n'
                        else:
                            html += f'            <td>{cell}</td>\n'

                    html += '        </tr>\n'

                html += '    </tbody>\n'
                html += '</table>'

                return html

            # Convert any pipe-style Markdown tables html2text emitted.
            # BUGFIX: the previous implementation never captured the header
            # row, silently dropped separator lines, and never reset its
            # buffer between tables. This version scans with lookahead:
            # a table is a row containing '|' followed by a separator line
            # (or a bare separator line for headerless tables), plus the
            # '|'-containing rows after it.
            def convert_remaining_markdown_tables(markdown_content):
                separator_re = re.compile(r'^\s*\|?\s*-+\s*(?:\|\s*-+\s*)*\|?\s*$')

                def is_separator(s):
                    return bool(s) and separator_re.match(s) is not None

                lines = markdown_content.split('\n')
                new_markdown = []
                i = 0
                while i < len(lines):
                    stripped = lines[i].strip()
                    table_lines = None
                    if is_separator(stripped):
                        # Headerless table starting directly at a separator row.
                        table_lines = [lines[i]]
                        i += 1
                    elif ('|' in stripped and i + 1 < len(lines)
                          and is_separator(lines[i + 1].strip())):
                        # Header row followed by its separator row.
                        table_lines = [lines[i], lines[i + 1]]
                        i += 2
                    if table_lines is not None:
                        # Collect the data rows that follow.
                        while i < len(lines) and '|' in lines[i]:
                            table_lines.append(lines[i])
                            i += 1
                        new_markdown.append(
                            convert_mydesk_context_menu_table('\n'.join(table_lines)))
                    else:
                        new_markdown.append(lines[i])
                        i += 1

                return '\n'.join(new_markdown)

            # Restore the preserved HTML tables. convert_mydesk_context_menu_table
            # leaves non-Markdown input untouched, so HTML tables pass through.
            for i, table in enumerate(original_tables):
                placeholder = f'[[TABLE_PLACEHOLDER_{i}]]'
                if placeholder in markdown:
                    try:
                        html_table = convert_mydesk_context_menu_table(table)
                        markdown = markdown.replace(placeholder, html_table)
                    except Exception as e:
                        print(f"    Error processing table {i} for: {local_path_str}")
                        print(f"    Error: {e}")
                        import traceback
                        traceback.print_exc()
                        # Keep placeholder as fallback
                        continue

            # Convert any remaining markdown tables
            markdown = convert_remaining_markdown_tables(markdown)

            # Write the Markdown to file.
            with open(md_file_path, 'w', encoding='utf-8') as f:
                f.write(markdown)

            print(f"    Saved to {md_file_path}")
            return md_file_path
        except Exception as e:
            print(f"    Error processing content for: {local_path_str}")
            print(f"    Error: {e}")
            import traceback
            traceback.print_exc()
            return None

    def run(self):
        """Scrape every link in the links file, skipping already-saved pages."""
        # Each non-comment line of the links file: name<TAB>url<TAB>relative_path.
        links = []
        if not os.path.exists(self.links_file):
            print("Links file not found!")
            return

        with open(self.links_file, 'r', encoding='utf-8') as f:
            for line in f:
                if line.startswith("#"): continue
                parts = line.strip().split('\t')
                if len(parts) >= 3:
                    links.append({
                        'name': parts[0],
                        'url': parts[1],
                        'path': parts[2]
                    })

        print(f"Found {len(links)} links to scrape.")

        with sync_playwright() as p:
            browser = p.chromium.launch(headless=True)
            # Use desktop context to avoid mobile layout/overlays
            context = browser.new_context(
                viewport={'width': 1920, 'height': 1080},
                user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
            )
            page = context.new_page()

            for i, link in enumerate(links):
                local_path = link['path']

                # Skip pages already scraped to a non-trivial (>10 byte) file.
                _, md_file_path = self._md_target(local_path)

                if SKIP_EXISTING and os.path.exists(md_file_path):
                    if os.path.getsize(md_file_path) > 10:
                        print(f"[{i+1}/{len(links)}] Skipping existing: {link['name']}")
                        continue

                print(f"[{i+1}/{len(links)}] Scraping: {link['name']} ({link['url']})")

                try:
                    page.goto(link['url'], wait_until="domcontentloaded", timeout=60000)

                    # Wait for client-side hydration to finish rendering.
                    time.sleep(3)

                    # Dismiss the cookie banner if present (best-effort).
                    try:
                        banner_btn = page.query_selector('#accept-recommended-btn-handler')
                        if banner_btn and banner_btn.is_visible():
                            banner_btn.click()
                            time.sleep(1)
                    except Exception:
                        pass

                    # Locate the content container; '.text_content' is
                    # preferred, '.viewer_contents' is the fallback.
                    content_handle = None
                    try:
                        page.wait_for_selector('.text_content, .viewer_contents', timeout=30000)

                        content_handle = page.query_selector('.text_content')
                        if not content_handle:
                            content_handle = page.query_selector('.viewer_contents')

                    except Exception:
                        print(f"    Warning: Content selectors not found for {link['url']}")
                        print(f"    Page Title: {page.title()}")

                    if content_handle:
                        # Normalize tables in the DOM (safer than a global
                        # regex, as it only affects tables): replace <br> with
                        # spaces and unwrap <p> into inline <span>s.
                        page.evaluate("""
                            const tables = document.querySelectorAll('table');
                            tables.forEach(table => {
                                // Replace <br> with space
                                table.querySelectorAll('br').forEach(br => br.replaceWith(' '));
                                
                                // Replace <p> with <span> + space to avoid block-level breaks
                                table.querySelectorAll('p').forEach(p => {
                                    const span = document.createElement('span');
                                    span.innerHTML = p.innerHTML + ' ';
                                    p.replaceWith(span);
                                });
                            });
                        """)

                        html = content_handle.inner_html()

                        self.process_content(html, link['url'], local_path)
                    else:
                        print("    Error: No content found.")

                except Exception as e:
                    print(f"    Error scraping {link['url']}: {e}")

            browser.close()

if __name__ == "__main__":
    # Run the scraper with the default links file and output root.
    ContentScraper().run()