import asyncio
from playwright.async_api import async_playwright
import time
import os
import argparse
import json
import random
import re
from datetime import datetime
import traceback

async def _verification_succeeded(page, initial_url):
    """
    Check whether a slider-verification attempt appears to have passed.

    Success is inferred from any of: the URL changing since the drag began,
    the slider widgets disappearing, or an explicit success message/icon
    appearing. All probes are evaluated (the list is built fully, no
    short-circuit), matching the original inline checks.

    Args:
        page: Playwright page object
        initial_url: URL captured before the drag attempt

    Returns:
        True if any success indicator is present, False otherwise.
    """
    indicators = [
        # URL changed
        page.url != initial_url,
        # Verification elements no longer visible
        not await page.is_visible("text=向右滑动完成验证", timeout=1000),
        not await page.is_visible(".slideverify-btn", timeout=1000),
        not await page.is_visible(".verify-move-block", timeout=1000),
        # Success messages or elements
        await page.is_visible("text=验证成功", timeout=1000),
        await page.is_visible(".success-icon", timeout=1000)
    ]
    return any(indicators)


async def automate_slider_verification(page, doi_folder, take_screenshot):
    """
    Attempt to automate slider-based ("drag the puzzle piece") verification.

    Up to three drag strategies are tried in order:
      1. Drag the handle to the gap position detected from CSS/DOM inspection.
      2. Retry with a 10% longer drag.
      3. Scan across several plausible positions, settling at ~70% of the
         container width.

    Args:
        page: Playwright page object
        doi_folder: Folder to save screenshots
        take_screenshot: Async function used to capture screenshots

    Returns:
        True if automation successful, False otherwise
    """
    print("  - Attempting to automate slider verification...")

    # Take a screenshot before automation attempt
    await take_screenshot(os.path.join(doi_folder, "verification_automation_attempt.png"))

    try:
        # First, get more detailed information about the verification puzzle.
        # The selectors cover several CNKI-style slider widget variants.
        puzzle_info = await page.evaluate('''() => {
            // Get all the relevant elements
            const puzzle = {
                container: document.querySelector('.verify-img-panel, .slideverify-panel, .verify-bar-area'),
                gap: document.querySelector('.verify-gap, .verify-img-gap, .slideverify-gap'),
                slider: document.querySelector('.verify-move-block, .slideverify-btn, .slide-btn'),
                subBlock: document.querySelector('.verify-sub-block')
            };
            
            if (!puzzle.container) return { error: "Container not found" };
            
            // Get dimensions and positions
            const containerRect = puzzle.container.getBoundingClientRect();
            let gapInfo = null;
            let sliderInfo = null;
            let subBlockInfo = null;
            
            if (puzzle.gap) {
                const gapRect = puzzle.gap.getBoundingClientRect();
                gapInfo = {
                    x: gapRect.x,
                    y: gapRect.y,
                    width: gapRect.width,
                    height: gapRect.height,
                    left: puzzle.gap.style.left,
                    top: puzzle.gap.style.top
                };
            }
            
            if (puzzle.slider) {
                const sliderRect = puzzle.slider.getBoundingClientRect();
                sliderInfo = {
                    x: sliderRect.x,
                    y: sliderRect.y,
                    width: sliderRect.width,
                    height: sliderRect.height
                };
            }
            
            if (puzzle.subBlock) {
                const subBlockRect = puzzle.subBlock.getBoundingClientRect();
                subBlockInfo = {
                    x: subBlockRect.x,
                    y: subBlockRect.y,
                    width: subBlockRect.width,
                    height: subBlockRect.height,
                    backgroundPosition: puzzle.subBlock.style.backgroundPosition || window.getComputedStyle(puzzle.subBlock).backgroundPosition
                };
            }
            
            // Extract additional information from the styles if available
            const styles = {
                inlineLeft: puzzle.gap ? puzzle.gap.style.left : null,
                inlineTop: puzzle.gap ? puzzle.gap.style.top : null,
                computedLeft: puzzle.gap ? window.getComputedStyle(puzzle.gap).left : null,
                computedTop: puzzle.gap ? window.getComputedStyle(puzzle.gap).top : null
            };
            
            // Look for hints in the CSS
            const extractCSSValue = (css) => {
                if (!css) return null;
                const match = css.match(/(-?[\d.]+)px/);
                return match ? parseFloat(match[1]) : null;
            };
            
            const leftValue = extractCSSValue(styles.inlineLeft || styles.computedLeft);
            const topValue = extractCSSValue(styles.inlineTop || styles.computedTop);
            
            // For background position extraction
            const extractBackgroundPosition = (positionString) => {
                if (!positionString) return null;
                const matches = positionString.match(/(-?[\d.]+)px\s+(-?[\d.]+)px/);
                if (matches) {
                    return {
                        x: parseFloat(matches[1]),
                        y: parseFloat(matches[2])
                    };
                }
                return null;
            };
            
            const bgPosition = extractBackgroundPosition(
                subBlockInfo ? subBlockInfo.backgroundPosition : null
            );
            
            return {
                containerDimensions: {
                    x: containerRect.x,
                    y: containerRect.y,
                    width: containerRect.width,
                    height: containerRect.height
                },
                gapInfo,
                sliderInfo,
                subBlockInfo,
                cssValues: {
                    left: leftValue,
                    top: topValue,
                    backgroundPosition: bgPosition
                }
            };
        }''')

        print(f"  - Puzzle information: {puzzle_info}")

        # Determine gap position from our detailed information
        gap_position = None

        if 'cssValues' in puzzle_info and puzzle_info['cssValues']['left'] is not None:
            # Get from CSS left value directly
            gap_position = puzzle_info['containerDimensions']['x'] + puzzle_info['cssValues']['left']
            print(f"  - Using gap position from CSS: {gap_position}px")
        elif 'gapInfo' in puzzle_info and puzzle_info['gapInfo'] and 'x' in puzzle_info['gapInfo']:
            # Get directly from element position
            gap_position = puzzle_info['gapInfo']['x']
            print(f"  - Using gap position from element: {gap_position}px")
        elif 'subBlockInfo' in puzzle_info and puzzle_info['subBlockInfo'] and 'backgroundPosition' in puzzle_info['cssValues']:
            # For puzzles where the gap is represented by background position
            bg_pos = puzzle_info['cssValues']['backgroundPosition']
            if bg_pos and 'x' in bg_pos:
                # The background-position x value represents the negative of where the gap is
                gap_position = puzzle_info['containerDimensions']['x'] - bg_pos['x']
                print(f"  - Using gap position from background position: {gap_position}px")

        # Fallback method: check directly for the gap element
        if gap_position is None:
            gap_element = await page.query_selector('.verify-gap, .verify-img-gap, .slideverify-gap')
            if gap_element:
                gap_box = await gap_element.bounding_box()
                if gap_box:
                    gap_position = gap_box['x'] + (gap_box['width'] / 2)  # Center of gap
                    print(f"  - Using gap position from element bounding box: {gap_position}px")

        # Another fallback: try to extract from style
        if gap_position is None:
            # Try to extract gap position from style attribute
            gap_styles = await page.evaluate('''() => {
                const gapElements = document.querySelectorAll('.verify-gap, .verify-img-gap, .slideverify-gap');
                if (gapElements.length > 0) {
                    return Array.from(gapElements).map(el => el.style.cssText || el.getAttribute('style'));
                } else {
                    // Try to search in the image panel
                    const panels = document.querySelectorAll('.verify-img-panel, .slideverify-panel');
                    if (panels.length > 0) {
                        const children = Array.from(panels[0].children);
                        return children.filter(el => el.style && el.style.left).map(el => el.style.cssText || el.getAttribute('style'));
                    }
                }
                return [];
            }''')

            # Extract the 'left' value from the style string if it exists
            if gap_styles and len(gap_styles) > 0:
                for style in gap_styles:
                    if style and 'left' in style:
                        left_match = re.search(r'left:\s*([\d.]+)px', style)
                        if left_match:
                            container_element = await page.query_selector('.verify-img-panel, .slideverify-panel')
                            container_box = await container_element.bounding_box() if container_element else None
                            container_x = container_box['x'] if container_box else 0

                            left_offset = float(left_match.group(1))
                            gap_position = container_x + left_offset + 25  # Add half the width of typical gap (50px)
                            print(f"  - Using gap position from style attribute: {gap_position}px")
                            break

        # Check for common slider verification elements (the handle we'll drag)
        slider_selectors = [
            ".slideverify-btn", # Common slider button
            ".slide-btn",       # Another common slider button
            ".verify-move-block",
            ".slider",
            ".sliderContainer .sliderMask",
            ".verify-slider",
            ".verify-left-bar .verify-move-block",  # More specific for CNKI
            "text=向右滑动完成验证" # "Slide right to complete verification" text
        ]

        slider_element = None
        slider_handle = None

        # Find the slider element
        for selector in slider_selectors:
            if await page.is_visible(selector, timeout=1000):
                slider_element = selector
                slider_handle = await page.query_selector(selector)
                print(f"  - Found slider element: {selector}")
                break

        if not slider_element or not slider_handle:
            print("  - No slider element found, cannot automate")
            return False

        # Get the bounding box of the slider using the ElementHandle
        slider_box = await slider_handle.bounding_box()
        if not slider_box:
            print("  - Could not get slider position")
            return False

        # Pre-bind the container handle so the third-attempt fallback below can
        # safely reference it even when the gap-position branch is taken.
        # (Previously this name was only bound inside the `else` branch, which
        # could raise NameError in the third attempt.)
        container_element = None
        container_handle = None

        # Calculate drag distance based on gap position if available
        if gap_position is not None:
            # Calculate how far to drag to center the slider on the gap
            # Adjust for the slider's width to align it with the gap
            if 'gapInfo' in puzzle_info and puzzle_info['gapInfo'] and 'width' in puzzle_info['gapInfo']:
                gap_width = puzzle_info['gapInfo']['width']
            else:
                gap_width = 50  # Default assumption for gap width

            # Calculate center positions
            slider_center_x = slider_box['x'] + (slider_box['width'] / 2)

            # If we're using the CSS left value, we need to adjust it to get the center of the gap
            if 'cssValues' in puzzle_info and puzzle_info['cssValues']['left'] is not None:
                # The CSS left value is relative to the container, so we need to:
                # 1. Find the container's left edge
                container_x = puzzle_info['containerDimensions']['x']
                # 2. Add the CSS left value to get the gap's left edge
                # 3. Add half the gap width to target the center
                gap_center_x = container_x + puzzle_info['cssValues']['left'] + (gap_width / 2)
            else:
                # Otherwise just use the detected position plus half the gap width
                gap_center_x = gap_position + (gap_width / 2)

            # The distance to move is the difference between centers
            drag_distance = gap_center_x - slider_center_x

            print(f"  - Targeting gap position at {gap_center_x}px")
            print(f"  - Slider center at {slider_center_x}px")
            print(f"  - Calculated distance to gap: {drag_distance}px")
        else:
            print("  - Could not determine gap position, using proportional estimation")
            # Get the dimensions of the slider container
            container_selectors = [
                ".slideverify-track",
                ".slide-track",
                ".verify-body",
                ".slideway",
                ".slider-container",
                ".sliderContainer",
                ".verify-bar-area"
            ]

            for selector in container_selectors:
                if await page.is_visible(selector, timeout=1000):
                    container_element = selector
                    container_handle = await page.query_selector(selector)
                    print(f"  - Found container element: {selector}")
                    break

            if not container_element or not container_handle:
                print("  - Could not find slider container, using default distance")
                # If we can't find the container, use a default distance (60% of page width).
                # NOTE: Page.viewport_size is a property in the Python async API,
                # not a method — calling it raised TypeError before this fix.
                drag_distance = int(page.viewport_size['width'] * 0.6) - slider_box['x']
            else:
                container_box = await container_handle.bounding_box()
                if not container_box:
                    print("  - Could not get container position, using default distance")
                    drag_distance = int(page.viewport_size['width'] * 0.6) - slider_box['x']
                else:
                    # Use a percentage-based estimation
                    drag_distance = container_box['width'] * 0.65
                    print(f"  - Container width: {container_box['width']}")
                    print(f"  - Using percentage-based drag distance: {drag_distance}px")

        # Starting position (center of the slider)
        start_x = slider_box['x'] + slider_box['width'] / 2
        start_y = slider_box['y'] + slider_box['height'] / 2

        # Move to the slider, press down, move, and release
        await page.mouse.move(start_x, start_y)
        await page.mouse.down()

        # Use a more human-like movement: not too fast, with slight randomness
        steps = 20
        for i in range(1, steps + 1):
            # Add slight randomness to the movement with decreasing randomness as we get closer
            randomness = max(3 * (1 - i/steps), 0.5)
            current_step = (drag_distance * i / steps) + (random.randint(-int(randomness), int(randomness)) if i < steps else 0)
            await page.mouse.move(start_x + current_step, start_y + random.randint(-1, 1))
            # Random small delay to simulate human movement
            await asyncio.sleep(random.uniform(0.01, 0.05))

        # Ensure we reach exactly the target position in the final move
        await page.mouse.move(start_x + drag_distance, start_y)
        await page.mouse.up()

        # Take a screenshot after attempt
        await take_screenshot(os.path.join(doi_folder, "verification_automation_completed.png"))

        # Wait a moment to see if verification was successful
        await asyncio.sleep(3)

        # Check if verification was successful
        # (URL change or verification element disappearance would indicate success)
        initial_url = page.url
        await asyncio.sleep(3)

        if await _verification_succeeded(page, initial_url):
            print("  - Slider verification automation successful!")
            await take_screenshot(os.path.join(doi_folder, "verification_automation_success.png"))
            return True
        else:
            print("  - Slider verification automation attempt did not succeed")
            print("  - Trying one more time with adjusted position...")

            # Try again with slightly longer drag if first attempt failed
            await page.mouse.move(start_x, start_y)
            await page.mouse.down()

            # Adjust drag distance slightly
            adjusted_drag = drag_distance * 1.1  # Try 10% longer

            # More steps for smoother motion
            steps = 25
            for i in range(1, steps + 1):
                current_step = (adjusted_drag * i / steps) + (random.randint(-1, 1) if i < steps else 0)
                await page.mouse.move(start_x + current_step, start_y + random.randint(-1, 1))
                await asyncio.sleep(random.uniform(0.01, 0.05))

            await page.mouse.move(start_x + adjusted_drag, start_y)
            await page.mouse.up()

            await asyncio.sleep(3)

            # Check again for success
            if await _verification_succeeded(page, initial_url):
                print("  - Second attempt succeeded!")
                await take_screenshot(os.path.join(doi_folder, "verification_automation_success_second_try.png"))
                return True

            # If still not successful, try a third time with a different strategy
            print("  - Trying third attempt with precise gap targeting...")

            # Let's try one more approach - scan across multiple possible positions
            await page.mouse.move(start_x, start_y)
            await page.mouse.down()

            # Try a scanning approach - move across several possible positions
            potential_positions = [0.5, 0.6, 0.7, 0.8, 0.9]

            if 'containerDimensions' in puzzle_info and 'width' in puzzle_info['containerDimensions']:
                container_width = puzzle_info['containerDimensions']['width']
            else:
                container_box = await container_handle.bounding_box() if container_handle else None
                container_width = container_box['width'] if container_box else page.viewport_size['width'] * 0.8

            for position in potential_positions:
                scan_distance = container_width * position - slider_box['width']
                await page.mouse.move(start_x + scan_distance, start_y)
                await asyncio.sleep(0.5)  # Pause briefly at each position

            # Final position - try 70% of container width
            final_distance = container_width * 0.7 - slider_box['width']
            await page.mouse.move(start_x + final_distance, start_y)
            await page.mouse.up()

            await asyncio.sleep(3)

            # Check again for success
            if await _verification_succeeded(page, initial_url):
                print("  - Third attempt succeeded!")
                await take_screenshot(os.path.join(doi_folder, "verification_automation_success_third_try.png"))
                return True

            return False

    except Exception as e:
        print(f"  - Error during slider verification automation: {str(e)}")
        traceback.print_exc()
        return False

async def automate_verification(page, doi_folder, take_screenshot):
    """
    Detect which kind of verification challenge the page shows and dispatch
    to the matching automation routine.

    Args:
        page: Playwright page object
        doi_folder: Folder to save screenshots
        take_screenshot: Function to take screenshots

    Returns:
        True if automation successful, False otherwise
    """
    print("  - Attempting to automate verification...")

    # Selector probes for each verification flavour, checked in order; the
    # first visible probe decides the type.
    probes_by_type = {
        "slider": [
            "text=向右滑动完成验证", # "Slide right to complete verification" text
            ".slideverify-btn",
            ".slide-btn",
            ".verify-move-block"
        ],
        "captcha": [
            "img[alt='验证码']",
            ".verification-code",
            "#captchaImg",
            ".JCAPTCHA_img"
        ],
        "click": [
            ".click-verification",
            "text=点击完成验证"  # "Click to complete verification" text
        ]
    }

    detected = None
    for kind, probes in probes_by_type.items():
        if detected is not None:
            break
        for probe in probes:
            if await page.is_visible(probe, timeout=1000):
                detected = kind
                print(f"  - Detected {kind} verification")
                break

    if detected is None:
        print("  - Could not determine verification type")
        return False

    # Only slider puzzles are automated; the other types bail out.
    if detected == "slider":
        return await automate_slider_verification(page, doi_folder, take_screenshot)
    if detected == "captcha":
        print("  - CAPTCHA verification requires human intervention")
        return False
    if detected == "click":
        print("  - Click verification not yet automated")
        return False

    return False

async def wait_for_human_verification(page, doi_folder, take_screenshot, timeout=300):
    """
    Wait for human to complete verification, trying automation first.

    Args:
        page: Playwright page object
        doi_folder: Folder to save screenshots
        take_screenshot: Function to take screenshots
        timeout: Maximum time to wait in seconds

    Returns:
        True if verification seems completed, False if timed out
    """
    print("  - Human verification detected! Attempting automated verification first...")

    # Take a screenshot of the verification page
    await take_screenshot(os.path.join(doi_folder, "verification_required.png"))

    # First try to automate the verification
    if await automate_verification(page, doi_folder, take_screenshot):
        print("  - Automated verification successful!")
        return True

    # If automation fails, fall back to waiting for human intervention
    print("  - Automated verification failed. Please complete the verification manually.")
    print(f"  - Waiting up to {timeout} seconds for verification to complete...")

    # Record start time
    start_time = time.time()
    verification_completed = False

    # Check every 2 seconds if the page has changed (verification completed)
    initial_url = page.url

    # Schedule progress screenshots explicitly: one immediately, then one every
    # 10 seconds. (The previous modulo-based check was true for entire 5-second
    # windows, which produced bursts of near-duplicate screenshots per window.)
    next_screenshot_at = start_time

    while time.time() - start_time < timeout:
        now = time.time()
        if now >= next_screenshot_at:
            await take_screenshot(os.path.join(doi_folder, f"verification_waiting_{int(now - start_time)}.png"))
            next_screenshot_at = now + 10

        # Check if URL has changed, which might indicate successful verification
        if page.url != initial_url:
            print("  - URL changed, verification might be completed.")
            verification_completed = True
            break

        # Check if we can find elements that would indicate we're past verification
        try:
            # Elements that would typically be present on CNKI article pages
            selectors_to_check = [
                ".btn-dlpdf",  # Download PDF button
                ".abstract-text",  # Abstract section
                ".wxmain",  # Main content area
                "#ChDivSummary",  # Summary div
                ".brief-information"  # Article info
            ]

            for selector in selectors_to_check:
                if await page.is_visible(selector, timeout=100):
                    print(f"  - Found post-verification element: {selector}")
                    verification_completed = True
                    break

            if verification_completed:
                break
        except Exception:
            # Best-effort probing: selector errors during page transitions are
            # expected and should not abort the wait loop.
            pass

        # Wait for 2 seconds before checking again
        await asyncio.sleep(2)

    # Final verification status
    if verification_completed:
        print("  - Verification appears to be completed. Continuing...")
        await take_screenshot(os.path.join(doi_folder, "verification_completed.png"))
        return True
    else:
        print("  - Verification timeout. Please run the script again or increase timeout.")
        return False

async def process_doi(page, doi, results_dir, index, total, take_screenshot, auto_verify=False):
    """Resolve a single DOI via chinadoi.cn and archive all results.

    Navigates to the chinadoi.cn portal, submits the DOI, then follows the
    redirect to one of three destinations:

    * Wanfang Data (``d.wanfangdata.com.cn``) — extracts article metadata
      and attempts to fetch the document behind the download button,
      detecting login walls and embedded-PDF pages along the way.
    * CHNDOI resolution handler (``chndoi.org``) — clicks the domestic
      (境内) link and handles any human-verification challenge.
    * Anything else — scans the page for verification prompts and waits
      for a human if one is found.

    Screenshots, page HTML, a JSON record and a plain-text summary are
    written under ``results_dir`` (per-DOI subfolder plus top-level copies).

    Args:
        page: Playwright page object, already attached to a browser context.
        doi: DOI string to resolve (e.g. ``"10.xxxx/yyyy"``).
        results_dir: Root directory for output artifacts.
        index: 1-based position of this DOI in the batch (progress display).
        total: Total number of DOIs in the batch (progress display).
        take_screenshot: Async callable ``(path) -> None`` that captures the
            current page using the run's screenshot settings.
        auto_verify: Accepted for interface compatibility with the caller;
            currently unused here — verification is always delegated to
            ``wait_for_human_verification``.

    Returns:
        True if processing completed and results were saved; False on any
        unhandled error (details, including the traceback, are appended to
        ``errors.log`` in ``results_dir``).
    """
    try:
        print(f"Processing DOI {index}/{total}: {doi}")
        
        # Safe filename without illegal characters
        safe_doi = doi.replace('/', '_').replace('\\', '_').replace(':', '_')
        
        # Create a subfolder for this DOI's screenshots
        doi_folder = os.path.join(results_dir, safe_doi)
        os.makedirs(doi_folder, exist_ok=True)
        
        # First make sure we're on the right page
        if not page.url.startswith('http://www.chinadoi.cn/portal/index.htm'):
            await page.goto('http://www.chinadoi.cn/portal/index.htm')
            await page.wait_for_load_state('networkidle')
        
        # Take screenshot of initial state
        await take_screenshot(os.path.join(doi_folder, "1_initial.png"))
        
        # Look for the specific DOI input field with id="doiInput" and class="input3"
        input_selector = '#doiInput'
        await page.wait_for_selector(input_selector, state='visible', timeout=10000)
        
        # First clear the input by clicking on it (this will trigger the onfocus event)
        await page.click(input_selector)
        
        # Take screenshot after clicking the input
        await take_screenshot(os.path.join(doi_folder, "2_input_clicked.png"))
        
        # Type the DOI
        await page.fill(input_selector, doi)
        
        # Take screenshot after typing the DOI
        await take_screenshot(os.path.join(doi_folder, "3_doi_entered.png"))
        
        # Click the submit button with class="btn_doi"
        button_selector = '.btn_doi'
        await page.wait_for_selector(button_selector, state='visible', timeout=10000)
        
        # Click and wait for navigation using a more modern approach
        async with page.expect_navigation() as navigation_info:
            await page.click(button_selector)
        
        # Wait for results to load
        await page.wait_for_load_state('networkidle')
        await asyncio.sleep(3)  # Extra wait to ensure results have loaded
        
        # Take screenshot of final results
        await take_screenshot(os.path.join(doi_folder, "4_results.png"))
        
        # Check which site the portal redirected us to
        current_url = page.url
        result_data = {
            'doi': doi,
            'timestamp': datetime.now().isoformat(),
            'url': current_url
        }
        
        if current_url.startswith('https://d.wanfangdata.com.cn/'):
            print(f"  - Navigated to Wanfang Data: {current_url}")
            result_data['redirected_to'] = 'wanfangdata'
            
            # First, extract article metadata that doesn't require login
            try:
                # Extract common metadata from the page
                metadata = await page.evaluate('''() => {
                    // Helper function to get text content safely
                    const getText = (selector) => {
                        const el = document.querySelector(selector);
                        return el ? el.textContent.trim() : null;
                    };
                    
                    // Extract all the metadata we can find
                    return {
                        title: getText('h1') || getText('.article-title') || getText('.title'),
                        authors: Array.from(document.querySelectorAll('.author, .authors a')).map(a => a.textContent.trim()),
                        abstract: getText('.abstract, .summary, .article-summary') || getText('.abstract-text'),
                        keywords: Array.from(document.querySelectorAll('.keywords a, .keyword a')).map(a => a.textContent.trim()),
                        journal: getText('.journal-name') || getText('.publisher-name'),
                        year: getText('.year') || getText('.publish-date'),
                        volume: getText('.volume'),
                        issue: getText('.issue'),
                        doi: getText('*[data-doi]') || document.querySelector('*[data-doi]')?.getAttribute('data-doi'),
                        publishDate: getText('.publish-date, .online-date'),
                        citations: getText('.cited-count'),
                        downloads: getText('.download-count')
                    };
                }''')
                
                if metadata:
                    result_data['metadata'] = metadata
                    # .get() guards against a missing key if the JS payload changes
                    print(f"  - Extracted article metadata: {metadata.get('title')}")
            except Exception as meta_err:
                print(f"  - Error extracting metadata: {str(meta_err)}")
            
            # Try to find and handle the download button
            try:
                # Wait for the download button to be visible
                download_selector = 'a.download.buttonItem'
                await page.wait_for_selector(download_selector, state='visible', timeout=5000)
                
                # Take screenshot before clicking download
                await take_screenshot(os.path.join(doi_folder, "5_wanfang_before_download.png"))
                
                # Extract the download URL (selector is a constant, so the
                # f-string interpolation into JS is safe here)
                download_url = await page.evaluate(f'''() => {{
                    const downloadLink = document.querySelector('{download_selector}');
                    return downloadLink ? downloadLink.href : null;
                }}''')
                
                if download_url:
                    print(f"  - Found download URL: {download_url}")
                    result_data['download_url'] = download_url
                    
                    # Create download directory
                    download_path = os.path.join(doi_folder, "downloaded_document")
                    os.makedirs(download_path, exist_ok=True)
                    
                    # Since we can't use Playwright's download handling directly here,
                    # let's try opening the download in a new page and saving the content
                    page2 = await page.context.new_page()
                    
                    # Navigate to the download URL
                    try:
                        await page2.goto(download_url, timeout=30000)
                        await page2.wait_for_load_state('networkidle')
                        
                        # Take a screenshot of the download page
                        await page2.screenshot(path=os.path.join(doi_folder, "6_download_page.png"))
                        
                        # Check if we were redirected to a login page
                        current_url2 = page2.url
                        page_content = await page2.content()
                        
                        # Check for common login page indicators (URL keywords plus
                        # Chinese UI strings: register / login / member / forgot password)
                        login_indicators = [
                            "login" in current_url2.lower(),
                            "register" in current_url2.lower(),
                            "auth" in current_url2.lower(),
                            "注册" in page_content,
                            "登录" in page_content,
                            "会员" in page_content,
                            "忘记密码" in page_content
                        ]
                        
                        if any(login_indicators):
                            print("  - Login required for downloading PDF")
                            result_data['login_required'] = True
                            result_data['login_url'] = current_url2
                            
                            # Save the login page HTML for reference
                            html_content = await page2.content()
                            html_path = os.path.join(download_path, "login_page.html")
                            # Text mode with explicit encoding, consistent with the
                            # other writes in this function
                            with open(html_path, 'w', encoding='utf-8') as f:
                                f.write(html_content)
                            result_data['login_page_path'] = html_path
                            
                            # Check if there are any login options or alternatives
                            login_options = await page2.evaluate('''() => {
                                const links = Array.from(document.querySelectorAll('a[href]'));
                                return links
                                    .filter(link => 
                                        link.href.toLowerCase().includes('login') || 
                                        link.href.toLowerCase().includes('register') ||
                                        link.href.toLowerCase().includes('auth') ||
                                        link.textContent.includes('登录') ||
                                        link.textContent.includes('注册')
                                    )
                                    .map(link => ({
                                        href: link.href,
                                        text: link.textContent.trim()
                                    }));
                            }''')
                            
                            if login_options and len(login_options) > 0:
                                result_data['login_options'] = login_options
                                print(f"  - Found {len(login_options)} login options")
                                
                            result_data['download_success'] = False
                        else:
                            # Get the content type
                            content_type = await page2.evaluate('''() => {
                                return document.contentType || 'text/html';
                            }''')
                            
                            # Save the content based on its type
                            if "pdf" in content_type.lower():
                                # It's a PDF - download it using browser's PDF functionality
                                # NOTE(review): page.pdf() renders the page to PDF and only
                                # works in headless Chromium; it may not capture the served
                                # PDF bytes verbatim — confirm against Playwright docs.
                                pdf_data = await page2.pdf()
                                pdf_path = os.path.join(download_path, "document.pdf")
                                with open(pdf_path, 'wb') as f:
                                    f.write(pdf_data)
                                result_data['download_path'] = pdf_path
                                result_data['download_success'] = True
                                print(f"  - Downloaded PDF file to {pdf_path}")
                            else:
                                # It's likely HTML - save it and check if it contains PDF content or links
                                html_content = await page2.content()
                                html_path = os.path.join(download_path, "download_page.html")
                                with open(html_path, 'w', encoding='utf-8') as f:
                                    f.write(html_content)
                                
                                # Check for embedded PDF content
                                embedded_pdf = await page2.evaluate('''() => {
                                    return !!document.querySelector('embed[type="application/pdf"], object[type="application/pdf"], iframe[src*=".pdf"]');
                                }''')
                                
                                if embedded_pdf:
                                    print("  - Found embedded PDF content")
                                    result_data['embedded_pdf_found'] = True
                                    # Try to extract the embedded PDF URL
                                    pdf_src = await page2.evaluate('''() => {
                                        const embed = document.querySelector('embed[type="application/pdf"]');
                                        const obj = document.querySelector('object[type="application/pdf"]');
                                        const iframe = document.querySelector('iframe[src*=".pdf"]');
                                        return (embed && embed.src) || (obj && obj.data) || (iframe && iframe.src) || null;
                                    }''')
                                    
                                    if pdf_src:
                                        result_data['embedded_pdf_url'] = pdf_src
                                        print(f"  - Embedded PDF URL: {pdf_src}")
                                    
                                    result_data['download_path'] = html_path
                                    result_data['download_success'] = True
                                else:
                                    # No PDF found, log HTML response
                                    print("  - No PDF content found in the response")
                                    result_data['download_path'] = html_path
                                    result_data['download_success'] = False
                    except Exception as nav_err:
                        print(f"  - Error navigating to download URL: {str(nav_err)}")
                        result_data['download_error'] = str(nav_err)
                        result_data['download_success'] = False
                    
                    # Close the second page
                    await page2.close()
                else:
                    print("  - Could not find download URL")
                    result_data['download_url'] = None
                    result_data['download_success'] = False
                
                # Take screenshot after download attempt
                await take_screenshot(os.path.join(doi_folder, "7_after_download_attempt.png"))
            except Exception as e:
                print(f"  - Download error: {str(e)}")
                result_data['download_error'] = str(e)
                result_data['download_success'] = False
        elif current_url.startswith('https://www.chndoi.org/Resolution/Handler'):
            print(f"  - Navigated to CHNDOI Handler: {current_url}")
            result_data['redirected_to'] = 'chndoi'
            
            # Try to find and click the domestic link (境内)
            try:
                # NOTE(review): ':contains' is jQuery syntax, not a valid Playwright
                # selector, so this primary wait always times out (5s) and the code
                # falls through to the backup selectors below. Left as-is to avoid
                # changing which link gets clicked first on a live page.
                domestic_selector = "li a:has(+ text:contains('境内'))"
                
                # This is a backup selector if the above doesn't work
                backup_selectors = [
                    "a:has-text('https://link.cnki.net')",  # Link URL typically starts with this for domestic links
                    "li:has-text('境内') a",                 # <li> element containing '境内' text and finding the <a> inside it
                    "a:near(:text('境内'))"                  # Link near the text '境内'
                ]
                
                # Wait for the domestic link to be visible
                try:
                    await page.wait_for_selector(domestic_selector, state='visible', timeout=5000)
                    selected_selector = domestic_selector
                except Exception:
                    # Try backup selectors if the main one fails
                    selected_selector = None
                    for selector in backup_selectors:
                        if await page.is_visible(selector):
                            selected_selector = selector
                            break
                
                if not selected_selector:
                    raise Exception("Could not find the domestic link")
                
                # Take screenshot before clicking the link
                await take_screenshot(os.path.join(doi_folder, "5_chndoi_before_click.png"))
                
                # Get the href attribute to log it
                link_href = await page.get_attribute(selected_selector, 'href')
                domestic_url = link_href if link_href else "Unknown"
                print(f"  - Found domestic link: {domestic_url}")
                result_data['domestic_url'] = domestic_url
                
                # Click the domestic link
                async with page.expect_navigation() as navigation_info:
                    await page.click(selected_selector)
                
                # Wait for the page to load
                await page.wait_for_load_state('networkidle')
                await asyncio.sleep(2)
                
                # Take screenshot after clicking the link
                await take_screenshot(os.path.join(doi_folder, "6_domestic_link_result.png"))
                
                # Check for human verification screen (CAPTCHA images, slider
                # puzzles, or the "abnormal access behavior" warning text)
                verification_selectors = [
                    "img[alt='验证码']",  # Verification code image
                    ".verification-code",  # Verification code container
                    "#captchaImg",  # CAPTCHA image
                    "text=系统检测到您的访问行为异常",  # "System detected abnormal access behavior" text
                    "text=请帮助我们完成 验证",  # "Please help us complete verification" text
                    ".JCAPTCHA_img",  # CAPTCHA image
                    "text=向右滑动完成验证",  # "Slide right to complete verification" text
                    ".slideverify-img",  # Slide verification image
                    ".verify-img-panel"  # Verification image panel
                ]
                
                is_verification_required = False
                for selector in verification_selectors:
                    if await page.is_visible(selector, timeout=1000):
                        is_verification_required = True
                        break
                
                # Additional check for verification by looking at page title or content
                if not is_verification_required:
                    page_content = await page.content()
                    if "验证码" in page_content or "验证" in page_content or "captcha" in page_content.lower():
                        is_verification_required = True
                
                if is_verification_required:
                    print("  - Human verification detected on the page")
                    result_data['human_verification_required'] = True
                    
                    # Wait for human to complete verification
                    verification_timeout = 300  # 5 minutes
                    verification_completed = await wait_for_human_verification(
                        page, doi_folder, take_screenshot, verification_timeout
                    )
                    
                    result_data['verification_completed'] = verification_completed
                    
                    if verification_completed:
                        # Take a final screenshot after verification
                        await take_screenshot(os.path.join(doi_folder, "7_after_verification.png"))
                        await page.wait_for_load_state('networkidle')
                        
                        # Update the URL in the result data
                        result_data['final_url'] = page.url
                    else:
                        print("  - Verification not completed within the timeout period")
                else:
                    # Update the URL in the result data if no verification required
                    result_data['final_url'] = page.url
                    print(f"  - Clicked domestic link, navigated to: {page.url}")
                    result_data['domestic_clicked'] = True
            except Exception as e:
                print(f"  - Could not click domestic link: {str(e)}")
                result_data['domestic_clicked'] = False
                result_data['domestic_error'] = str(e)
        else:
            print(f"  - Navigated to: {current_url}")
            result_data['redirected_to'] = 'other'
            
            # Check for human verification on other sites too
            verification_texts = [
                "验证码", "请完成验证", "人机验证", "验证", "captcha", 
                "verification", "security check", "系统检测到您的访问行为异常"
            ]
            page_content = await page.content()
            page_text = await page.inner_text('body')
            
            is_verification_required = False
            for text in verification_texts:
                if text in page_content or text in page_text:
                    is_verification_required = True
                    break
            
            if is_verification_required:
                print("  - Human verification detected on the page")
                result_data['human_verification_required'] = True
                
                # Wait for human to complete verification
                verification_timeout = 300  # 5 minutes
                verification_completed = await wait_for_human_verification(
                    page, doi_folder, take_screenshot, verification_timeout
                )
                
                result_data['verification_completed'] = verification_completed
                
                if verification_completed:
                    # Take a final screenshot after verification
                    await take_screenshot(os.path.join(doi_folder, "7_after_verification.png"))
                    await page.wait_for_load_state('networkidle')
                    
                    # Update the URL in the result data
                    result_data['final_url'] = page.url
                else:
                    print("  - Verification not completed within the timeout period")
        
        # Also save a copy of the results in the main folder for easier browsing
        await take_screenshot(os.path.join(results_dir, f"{safe_doi}_result.png"))
        
        # Save the entire page HTML for analysis and debugging
        html_content = await page.content()
        html_path = os.path.join(results_dir, f"{safe_doi}.html")
        with open(html_path, 'w', encoding='utf-8') as f:
            f.write(html_content)
        
        # Try different selectors for the content, from most to least specific
        content_selectors = [
            '.detail-content-wrap',
            '.resultContent',
            '.ant-card-body',
            '.ant-result-content',
            'main',
            '#root',
            'body'
        ]
        
        content_found = False
        for selector in content_selectors:
            if await page.is_visible(selector):
                try:
                    result_data['content'] = await page.inner_text(selector)
                    result_data['content_selector'] = selector
                    content_found = True
                    break
                except Exception:
                    continue
        
        if not content_found:
            result_data['content'] = "No content found with known selectors"
        
        # Save the result as JSON
        json_path = os.path.join(results_dir, f"{safe_doi}.json")
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(result_data, f, ensure_ascii=False, indent=2)
        
        # Also save as text for easier viewing
        text_path = os.path.join(results_dir, f"{safe_doi}.txt")
        with open(text_path, 'w', encoding='utf-8') as f:
            f.write(f"DOI: {doi}\n")
            f.write(f"URL: {current_url}\n\n")
            f.write(result_data.get('content', 'No content'))
        
        print(f"  - Saved results for DOI: {doi}")
        return True
        
    except Exception as e:
        print(f"Error processing DOI {doi}: {str(e)}")
        # Log the error, including the full traceback, to a file
        # (uses the module-level `traceback` import, previously unused)
        error_log_path = os.path.join(results_dir, "errors.log")
        with open(error_log_path, 'a', encoding='utf-8') as f:
            f.write(f"{datetime.now().isoformat()} - DOI: {doi} - Error: {str(e)}\n")
            f.write(traceback.format_exc() + "\n")
        return False

async def main():
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Search DOIs on chinadoi.cn')
    parser.add_argument('--limit', type=int, default=0, help='Limit number of DOIs to process (0 = all)')
    parser.add_argument('--start', type=int, default=0, help='Start index (0-based)')
    parser.add_argument('--headless', action='store_true', help='Run browser in headless mode')
    parser.add_argument('--slow_mo', type=int, default=100, help='Slow down operations by the specified ms')
    parser.add_argument('--debug', action='store_true', help='Enable debug mode (more verbose output)')
    parser.add_argument('--full_page', action='store_true', help='Take full-page screenshots instead of just viewport')
    parser.add_argument('--width', type=int, default=1280, help='Width of the browser viewport')
    parser.add_argument('--height', type=int, default=800, help='Height of the browser viewport')
    parser.add_argument('--verification_timeout', type=int, default=300, help='Maximum time to wait for human verification in seconds')
    parser.add_argument('--wait_between', type=int, default=3, help='Wait time in seconds between DOI requests')
    parser.add_argument('--auto_verify', action='store_true', help='Try to automate verification challenges')
    parser.add_argument('--stealth', action='store_true', help='Use enhanced stealth mode to avoid detection')
    parser.add_argument('--user_data_dir', type=str, help='Path to Chrome user data directory to reuse existing profile')
    args = parser.parse_args()
    
    # Read DOIs from file
    with open('doi_values.txt', 'r') as f:
        all_dois = [line.strip() for line in f if line.strip().startswith('10.')]
    
    # Apply limit and start index
    if args.limit > 0:
        doi_list = all_dois[args.start:args.start + args.limit]
    else:
        doi_list = all_dois[args.start:]
    
    total_dois = len(doi_list)
    print(f"Found {len(all_dois)} DOIs starting with '10.'")
    print(f"Processing {total_dois} DOIs starting from index {args.start}")
    
    # Create results directory if it doesn't exist
    results_dir = 'doi_results'
    os.makedirs(results_dir, exist_ok=True)
    
    # Log file for status updates
    log_file = os.path.join(results_dir, "process_log.txt")
    with open(log_file, 'a', encoding='utf-8') as f:
        f.write(f"\n\n--- New run started at {datetime.now().isoformat()} ---\n")
        f.write(f"Processing {total_dois} DOIs starting from index {args.start}\n")
        f.write(f"Screenshot settings: full_page={args.full_page}, viewport={args.width}x{args.height}\n")
        f.write(f"Verification timeout: {args.verification_timeout} seconds\n")
    
    # Initialize playwright
    async with async_playwright() as p:
        # Set up browser launch options
        browser_launch_options = {
            'headless': args.headless,
            'slow_mo': args.slow_mo,
        }
        
        # Add user data directory if specified for profile reuse
        if args.user_data_dir:
            browser_launch_options['user_data_dir'] = args.user_data_dir
            print(f"Using Chrome profile from: {args.user_data_dir}")
        
        # Launch browser with the specified options
        browser = await p.chromium.launch(**browser_launch_options)
        
        # Default user agent that closely mimics a real Chrome browser on macOS
        real_chrome_ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'
        
        # Base context options
        context_options = {
            'viewport': {'width': args.width, 'height': args.height},
            'user_agent': real_chrome_ua,
            'locale': 'zh-CN',
            'timezone_id': 'Asia/Shanghai',
            'color_scheme': 'no-preference',
        }
        
        # Enhanced stealth options to avoid detection
        if args.stealth:
            print("Using enhanced stealth mode to avoid detection")
            # Add more realistic browser properties
            context_options.update({
                'ignore_https_errors': True,  # Ignore HTTPS errors
                'has_touch': True,  # Simulate touch capability
                'device_scale_factor': 2.0,  # Higher resolution display
            })
            
            if 'permissions' in dir(p.chromium):
                context_options['permissions'] = ['geolocation']
                
            if 'geolocation' in context_options:
                context_options['geolocation'] = {'longitude': 116.3, 'latitude': 39.9}  # Beijing coordinates
        
        # Create context with options
        context = await browser.new_context(**context_options)
        
        # Define our route handler function to add headers
        async def route_handler(route):
            # Get the original headers
            headers = route.request.headers
            
            # Add or update headers
            if args.stealth:
                headers.update({
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                    'Cache-Control': 'max-age=0',
                    'Connection': 'keep-alive',
                    'Sec-Ch-Ua': '"Chromium";v="122", "Google Chrome";v="122", "Not(A:Brand";v="24"',
                    'Sec-Ch-Ua-Mobile': '?0',
                    'Sec-Ch-Ua-Platform': '"macOS"',
                    'Sec-Fetch-Dest': 'document',
                    'Sec-Fetch-Mode': 'navigate',
                    'Sec-Fetch-Site': 'none',
                    'Sec-Fetch-User': '?1',
                    'Upgrade-Insecure-Requests': '1',
                })
            else:
                headers.update({
                    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                    'Cache-Control': 'max-age=0',
                    'Connection': 'keep-alive',
                    'Sec-Fetch-Dest': 'document',
                    'Sec-Fetch-Mode': 'navigate',
                    'Sec-Fetch-Site': 'none',
                    'Sec-Fetch-User': '?1',
                    'Upgrade-Insecure-Requests': '1',
                    'sec-ch-ua': '"Google Chrome";v="122", "Chromium";v="122", "Not(A:Brand";v="24"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"macOS"'
                })
            
            # Continue with modified headers
            await route.continue_(headers=headers)
            
        # Add the route handler
        await context.route('**/*', route_handler)
            
        # Enable debug logs if requested
        if args.debug:
            context.set_default_timeout(60000)  # Longer timeout for debugging
            page = await context.new_page()
            page.on("console", lambda msg: print(f"BROWSER LOG: {msg.text}"))
        else:
            page = await context.new_page()
        
        # Inject a stealth script that runs in every new document before the
        # site's own scripts. It (1) hides navigator.webdriver, (2) neuters
        # AudioContext oscillators to blunt audio fingerprinting, and
        # (3) fakes a Chrome-like navigator.plugins list.
        await page.add_init_script("""
        // Overwrite the navigator.webdriver property to hide automation
        Object.defineProperty(navigator, 'webdriver', {
            get: () => false,
        });
        
        // Add a fake web audio API to evade fingerprinting
        const audioContext = window.AudioContext || window.webkitAudioContext;
        if (audioContext) {
            const origCreateOscillator = audioContext.prototype.createOscillator;
            audioContext.prototype.createOscillator = function() {
                const oscillator = origCreateOscillator.apply(this, arguments);
                oscillator.start = function() { /* Empty */ };
                return oscillator;
            };
        }
        
        // Add fake plugins
        Object.defineProperty(navigator, 'plugins', {
            get: () => {
                return [
                    {
                        0: {
                            type: 'application/pdf',
                            suffixes: 'pdf',
                            description: 'Portable Document Format'
                        },
                        name: 'Chrome PDF Plugin',
                        description: 'Portable Document Format',
                        filename: 'internal-pdf-viewer',
                    },
                    {
                        0: {
                            type: 'application/pdf',
                            suffixes: 'pdf',
                            description: 'Portable Document Format'
                        },
                        name: 'Chrome PDF Viewer',
                        description: 'Portable Document Format',
                        filename: 'mhjfbmdgcfjbbpaeojofohoefgiehjai',
                    },
                    {
                        0: {
                            type: 'application/x-nacl',
                            suffixes: '',
                            description: 'Native Client Executable'
                        },
                        name: 'Native Client',
                        description: '',
                        filename: 'internal-nacl-plugin',
                    }
                ];
            }
        });
        """)
        
        # Function to take a screenshot with the correct settings
        async def take_screenshot(path):
            """Save a screenshot of the current page to *path*, honoring --full-page."""
            capture_full = args.full_page
            await page.screenshot(path=path, full_page=capture_full)
        
        # Open the China DOI portal and wait for the network to go idle
        # before interacting with the page.
        await page.goto('http://www.chinadoi.cn/portal/index.htm')
        await page.wait_for_load_state('networkidle')
        
        # For debugging, take a screenshot of the initial page
        await take_screenshot(os.path.join(results_dir, "initial_page.png"))
        
        # Per-run tallies. verification_encountered counts consecutive DOIs
        # that landed on a verification URL (it is reset to 0 whenever one
        # does not, see the loop below).
        successful: int = 0
        failed: int = 0
        verification_encountered: int = 0
        
        for i, doi in enumerate(doi_list):
            # Log the start of processing for this DOI
            with open(log_file, 'a', encoding='utf-8') as f:
                f.write(f"{datetime.now().isoformat()} - Starting DOI {i+1}/{total_dois}: {doi}\n")
                
            success = await process_doi(page, doi, results_dir, i + 1, total_dois, take_screenshot, args.auto_verify)
            if success:
                successful += 1
                with open(log_file, 'a', encoding='utf-8') as f:
                    f.write(f"{datetime.now().isoformat()} - Successfully processed DOI: {doi}\n")
            else:
                failed += 1
                with open(log_file, 'a', encoding='utf-8') as f:
                    f.write(f"{datetime.now().isoformat()} - Failed to process DOI: {doi}\n")
            
            # Check if the current URL contains verification-related terms
            current_url = page.url
            if "verify" in current_url.lower() or "captcha" in current_url.lower() or "validate" in current_url.lower():
                verification_encountered += 1
                with open(log_file, 'a', encoding='utf-8') as f:
                    f.write(f"{datetime.now().isoformat()} - Verification detected in URL: {current_url}\n")
                
                # If verification is encountered multiple times in a row, increase wait time
                if verification_encountered > 2:
                    wait_time = args.wait_between * 3  # Triple the wait time
                    print(f"Multiple verifications encountered. Increasing wait time to {wait_time} seconds.")
                    with open(log_file, 'a', encoding='utf-8') as f:
                        f.write(f"{datetime.now().isoformat()} - Increased wait time to {wait_time} seconds due to multiple verifications\n")
                else:
                    wait_time = args.wait_between
            else:
                verification_encountered = 0  # Reset counter if no verification
                wait_time = args.wait_between
            
            # Add a pause between requests to be respectful to the server and avoid triggering CAPTCHA
            print(f"Waiting {wait_time} seconds before next DOI...")
            await asyncio.sleep(wait_time)
            
            # Try various methods to get back to a clean state for the next search
            try:
                # Try to clear the input field using its specific ID
                if await page.is_visible('#doiInput'):
                    # First focus on the input to trigger the onfocus event
                    await page.click('#doiInput')
                    # Then clear it with an empty string
                    await page.fill('#doiInput', '')
                else:
                    # If we can't find the input field, reload the page
                    await page.goto('http://www.chinadoi.cn/portal/index.htm')
                    await page.wait_for_load_state('networkidle')
            except Exception:
                # If anything fails, just reload the page
                await page.goto('http://www.chinadoi.cn/portal/index.htm')
                await page.wait_for_load_state('networkidle')
        
        # Record the final outcome in the run log (note: these two lines do not
        # use the timestamped "ISO - message" format of the per-DOI entries).
        with open(log_file, 'a', encoding='utf-8') as f:
            f.write(f"Completed at {datetime.now().isoformat()}\n")
            f.write(f"Successful: {successful}, Failed: {failed}\n")
        
        # Release the browser and all its pages/contexts.
        await browser.close()
        
        # Console summary for the operator.
        print(f"\nCompleted processing {total_dois} DOIs")
        print(f"Successful: {successful}, Failed: {failed}")
        print(f"Results saved to {os.path.abspath(results_dir)}")

if __name__ == "__main__":
    # Script entry point: drive the async workflow to completion.
    asyncio.run(main())
