import json
import os
from mcp.server.fastmcp import FastMCP
from crawl4ai import (
    AsyncWebCrawler,
    CacheMode,
    CrawlerRunConfig,
)
from .constant import SCRAPE_BASE, CSS_SELECTOR, LIST_ITEM_CSS_SELECTOR
from .util import get_browser_config
from .extraction_strategy import get_ExtractionStrategy
from .js_code import get_js_code
from .auth import get_stored_cookies, get_login_cookies
from .io import save_to_csv, transform, transformToEntity
from config import Settings

# from time import sleep
from random import uniform
from app.mcps.tigres import get_client, upload_file, create_presigned_url

# Run the crawler's browser without a visible UI (passed to get_browser_config).
headless = True


async def extract_links(
    crawler: AsyncWebCrawler,
    list_url: str,
    css_selector: str,
    session_id: str,
) -> tuple[list[str], bool]:
    """
    Extract links to individual Song Lyric pages from a listing page.

    Args:
        crawler (AsyncWebCrawler): The web crawler instance.
        list_url (str): The URL of the listing page to extract poetry links from.
        css_selector (str): The CSS selector to target the content.
        session_id (str): Unique session ID for the crawl.

    Returns:
        tuple[list[str], bool]:
            - The hrefs of the internal links found on the page.
            - A flag indicating whether the crawl succeeded.
    """
    result = await crawler.arun(
        url=list_url,
        config=CrawlerRunConfig(
            css_selector=css_selector,  # Target specific content on the page
            session_id=session_id,  # Unique session ID for the crawl
            remove_overlay_elements=True,
            exclude_external_links=True,
            exclude_social_media_links=True,
        ),
    )
    if not result.success:
        print(f"Error extract links: {result.error_message}")
        return [], False

    print(result.markdown)
    internal_links = result.links.get("internal", [])
    # Report counts once, not on every iteration of the loop below.
    print("Internal links count:", len(internal_links))
    print("External links count:", len(result.links.get("external", [])))
    for link in internal_links:
        print(f"Internal Link: {link['href']} - {link['text']}")
    return [link["href"] for link in internal_links], True


async def extract_poetry(
    crawler: AsyncWebCrawler,
    url: str,
    css_selector: str,
    session_id: str,
    delay_before_return_html: float,
) -> tuple[dict, bool]:
    """
    Extract a single Song Lyric from its detail page.

    Args:
        crawler (AsyncWebCrawler): The web crawler instance.
        url (str): The crawl URL of the Song Lyric.
        css_selector (str): The CSS selector to target the content.
        session_id (str): The session identifier.
        delay_before_return_html (float): Seconds to wait before capturing HTML.

    Returns:
        tuple[dict, bool]:
            - The first Song Lyric object extracted from the page.
            - A flag indicating whether extraction succeeded.
    """
    extraction = get_ExtractionStrategy()
    js_code = get_js_code()

    result = await crawler.arun(
        url=url,
        config=CrawlerRunConfig(
            magic=True,
            simulate_user=True,
            override_navigator=True,
            cache_mode=CacheMode.BYPASS,  # Do not use cached data
            css_selector=css_selector,  # Target specific content on the page
            delay_before_return_html=delay_before_return_html,
            js_code=js_code,  # Click/expand scripts run before extraction
            extraction_strategy=extraction,  # Strategy for data extraction
            session_id=session_id,  # Unique session ID for the crawl
            remove_overlay_elements=True,
            exclude_external_links=True,
            exclude_social_media_links=True,
        ),
    )
    if not result.success:
        print(f"Error extract links: {result.error_message}")
        return {}, False

    # Guard: extracted_content may be None/empty when the strategy yields
    # nothing; json.loads(None) would raise TypeError.
    if not result.extracted_content:
        print(f"No extracted_data found on url {url}.")
        return {}, False

    extracted_data = json.loads(result.extracted_content)
    if not extracted_data:
        print(f"No extracted_data found on url {url}.")
        return {}, False

    poetries = []
    for item in extracted_data:
        if item.get("error") is False:
            item.pop("error", None)  # Remove the redundant 'error' flag
        poetries.append(transform(item))

    print(f"Extracted {len(poetries)} Poetry from url {url}.")
    # NOTE(review): only the first record is returned — presumably each detail
    # page yields a single poem; confirm against the extraction strategy.
    return poetries[0], True


async def crawl_poetry_with_cookie(cookies, detail_segment: str, settings: Settings):
    """
    Crawl one Poetry detail page with pre-fetched cookies and upload the CSV to S3.
    """
    print("-------cookies--------")
    print(cookies)
    print("----------------------")

    browser_config = get_browser_config(cookies, headless)
    session_id = "poetry_crawl_session"
    detail_url = f"{SCRAPE_BASE}{detail_segment}"

    async with AsyncWebCrawler(config=browser_config) as crawler:
        extracted, success = await extract_poetry(
            crawler, detail_url, CSS_SELECTOR, session_id, uniform(2, 4)
        )
        if not success:
            return
        print(f"success processed {detail_segment}")

        # Persist the single record locally, then push it to object storage.
        tmp_file_url = f"poetry-{os.path.basename(detail_segment)}.csv"
        save_to_csv([extracted], tmp_file_url)
        s3_client = get_client(settings)
        upload_file(s3_client, tmp_file_url, settings.bucket_name)


async def crawl_poetry_by_basic_auth(
    cookie_file_url: str,
    user: str,
    pwd: str,
    detail_segment: str,
    settings: Settings,
):
    """
    Authenticate (reusing stored cookies when available) and crawl one Poetry page.
    """
    # Prefer cookies persisted from a previous login; fall back to a fresh login.
    cookies = get_stored_cookies(cookie_file_url)

    if not cookies:
        print("-------no cookies, login to fetch--------")
        try:
            cookies = await get_login_cookies(
                f"{SCRAPE_BASE}/user/login.aspx",
                f"{SCRAPE_BASE}/user/collect.aspx",
                user,
                pwd,
                cookie_file_url,
                False,
            )
            print(f"Successfully authenticated. Retrieved {len(cookies)} cookies.")
        except Exception as exc:
            # Abort the crawl entirely when we cannot authenticate.
            print(f"Authentication failed: {str(exc)}")
            return

    await crawl_poetry_with_cookie(cookies, detail_segment, settings)


async def crawl_poetries_with_cookie(
    cookies, list_segment: str, settings: Settings, object_name: str
):
    """
    Crawl every Poetry detail page linked from a listing page and upload a CSV.

    Args:
        cookies: Authentication cookies used to seed the browser session.
        list_segment (str): URL path of the listing page, appended to SCRAPE_BASE.
        settings (Settings): Application settings (S3 client config, bucket name).
        object_name (str): Object name for the uploaded CSV in the bucket.
    """
    print("-------cookies--------")
    print(cookies)
    print("----------------------")
    browser_config = get_browser_config(cookies, headless)

    session_id = "poetry_crawl_session"
    all_poetries = []

    list_url = f"{SCRAPE_BASE}{list_segment}"
    async with AsyncWebCrawler(config=browser_config) as crawler:
        all_links, links_ok = await extract_links(
            crawler, list_url, LIST_ITEM_CSS_SELECTOR, session_id
        )
        # On failure extract_links returns an empty list, so the loop is a no-op.

        # Rotate the session id every 3 successfully processed pages; initialize
        # before the loop so it is always bound.
        batch_session_id = session_id
        processed = 0
        for url in all_links:
            if processed % 3 == 0:
                batch_session_id = f"{session_id}-{processed}"
            extracted, success = await extract_poetry(
                crawler, url, CSS_SELECTOR, batch_session_id, uniform(2, 4)
            )
            if success:
                processed += 1
                all_poetries.append(extracted)
                print(f"processing {processed}/{len(all_links)}")

    # Save the collected poetries to a CSV file and upload it to S3.
    if all_poetries:
        tmp_file_url = f"complete_poetries-{object_name}.csv"
        save_to_csv(all_poetries, tmp_file_url)
        s3_client = get_client(settings)
        upload_file(s3_client, tmp_file_url, settings.bucket_name, object_name)
    else:
        print("No poetries were found during the crawl.")


async def crawl_poetries_by_basic_auth(
    cookie_file_url: str,
    user: str,
    pwd: str,
    list_segment: str,
    settings: Settings,
    object_name: str,
):
    """
    Authenticate (reusing stored cookies when available) and crawl a Poetry listing.
    """
    # Reuse previously stored cookies when present; otherwise log in fresh.
    cookies = get_stored_cookies(cookie_file_url)

    if not cookies:
        print("-------no cookies, login to fetch--------")
        try:
            cookies = await get_login_cookies(
                f"{SCRAPE_BASE}/user/login.aspx",
                f"{SCRAPE_BASE}/user/collect.aspx",
                user,
                pwd,
                cookie_file_url,
                False,
            )
            print(f"Successfully authenticated. Retrieved {len(cookies)} cookies.")
        except Exception as exc:
            # Without credentials there is nothing to crawl — bail out.
            print(f"Authentication failed: {str(exc)}")
            return

    await crawl_poetries_with_cookie(cookies, list_segment, settings, object_name)


# Module-level FastMCP server instance named "scraper".
mcp = FastMCP("scraper")