import json
import os
import time
from typing import Optional

from inscode_agent.utils.str import unescape_html
from langchain_core.runnables import RunnableConfig
from langgraph.types import StreamWriter
from pydantic import BaseModel

from ...config.agents import AgentActions
from .base import BaseNodes, tool_node
from ...utils.search import load_web_content, web_search_serper, web_content_blog
from ...models.graph import GraphState, AgentStepLog
from loguru import logger


class LoadDocSchema(BaseModel):
    """Arguments for URL-loading tool nodes (`load_doc_from_web`, `load_blog_content`)."""
    # Comma-separated list of URLs, e.g. "https://a.com, https://b.com".
    urls: str


class SearchWebSchema(BaseModel):
    """Arguments for the `search_web_content` tool node."""
    # Free-text search query / question to send to the search engine.
    search_question: str


class WebNodes(BaseNodes):
    """Tool nodes that fetch external web content for the agent.

    Provides three tools: generic URL loading (with retries), web search via
    Serper, and CSDN blog-post loading. Each node reads its arguments from
    ``state.goto.params``, streams an :class:`AgentStepLog` tool-action entry
    through ``writer``, and finishes via ``node_success`` / ``node_failure``.

    NOTE: the method docstrings double as the tool descriptions shown to the
    agent framework — do not edit them casually.
    """

    # How many times load_doc_from_web retries a fetch before giving up.
    _LOAD_RETRIES = 3
    # Seconds to wait between retry attempts.
    _RETRY_DELAY_SECONDS = 2

    @staticmethod
    def _parse_url_list(raw: str) -> list:
        """Split a comma-separated URL string into stripped, non-empty URLs."""
        return [url.strip() for url in raw.split(",") if url.strip()]

    @staticmethod
    def _extract_csdn_article_id(url: str) -> Optional[str]:
        """Return the numeric CSDN article id from *url*, or ``None`` when the
        URL has no all-digit id after its last ``details/`` segment."""
        # Keep everything after the last 'details/' segment, then drop any
        # query string (e.g. '?spm=1000.2115.3001.10524').
        article_id = url.split('details/')[-1].split('?')[0]
        return article_id if article_id.isdigit() else None

    @tool_node(args_schema=LoadDocSchema)
    def load_doc_from_web(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """
        Description: Loads document content from given web URLs. This tool should be used when you need to fetch and extract content from web pages.
        Parameters:
        - urls: (required) A comma-separated list of URLs to load documents from. Each URL should be a valid web address.
        Usage:
        <load_doc_from_web>
        <urls>https://example1.com, https://example2.com, https://example3.com</urls>
        </load_doc_from_web>
        """
        urls = self._parse_url_list(state.goto.params.get("urls", ""))
        if not urls:
            return self.node_failure(message="No valid URLs provided")

        try:
            contents = None
            for attempt in range(self._LOAD_RETRIES):
                contents = load_web_content(urls)
                if contents:
                    break
                logger.warning(f"Attempt {attempt + 1} failed to load content from URLs: {urls}")
                # Fix: only sleep between attempts — the original also slept
                # after the final failed attempt, wasting 2 seconds.
                if attempt < self._LOAD_RETRIES - 1:
                    time.sleep(self._RETRY_DELAY_SECONDS)

            if not contents:
                logger.warning(f"Failed to load content from URLs after 3 attempts: {urls}")
                return self.node_failure(message="Failed to load content from URLs after 3 attempts")

            logger.info(f"Successfully loaded web content from URLs: {urls}")
            # Stream which URLs were actually loaded so the UI can display them.
            writer(AgentStepLog.build_tool_action(
                action=AgentActions.SEARCH_WEB.value,
                output=json.dumps([{"url": url} for url in contents.keys()], ensure_ascii=False)
            ))

            message_content = "Web content below:\n\n" + "\n\n".join(
                [f"URL: {url}\nContent: {content}" for url, content in contents.items()])
            return self.node_success(message_content)
        except Exception as e:
            logger.error(f"Error loading web content: {str(e)}")
            return self.node_failure(message=f"Error loading web content: {str(e)}")

    @tool_node(args_schema=SearchWebSchema)
    def search_web_content(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """
        Description: Search web pages using search engine. Use for obtaining latest information or finding references, Searches web content based on a search question and returns relevant chunks and article information.
        Parameters:
        - search_question: (required) The search query or question to find relevant web content
        Usage:
        <search_web_content>
        <search_question>How to implement authentication in React?</search_question>
        </search_web_content>

        Note: This tool should only be used when you need to search for information that you don't already know. If you can answer the question based on your existing knowledge, there's no need to use this tool.
        """
        search_question = state.goto.params.get("search_question", "")
        if not search_question:
            return self.node_failure(message="No search question provided")

        try:
            logger.info(f"Searching web content for: {search_question}")
            # WEB_SEARCH_TOP controls how many results to fetch (default 3).
            # Fix: getenv defaults should be strings; int() normalizes either way.
            results = web_search_serper(query=search_question, num=int(os.getenv("WEB_SEARCH_TOP", "3")))
            if not results:
                logger.warning(f"No search results found for query: {search_question}")
                return self.node_failure(message="No search results found")

            logger.info(
                f"Searching web results for query: {search_question}, urls: {[result.get('url', '') for result in results]}")
            # Fix: use .get like the message builder below — direct indexing
            # raised KeyError and aborted the whole call on one malformed result.
            writer(AgentStepLog.build_tool_action(
                action=AgentActions.SEARCH_WEB.value,
                output=json.dumps(
                    [{"title": result.get("title", ""), "url": result.get("url", "")} for result in results],
                    ensure_ascii=False)
            ))

            # Fix: separate snippet and content with a newline — the original
            # concatenated them with no separator at all.
            message_content = "Search results:\n\n" + "\n\n".join([
                f"url: {result.get('url', '')}\n"
                f"title: {result.get('title', '')}\n"
                f"{result.get('snippet', '')}\n"
                f"{result.get('content', '')}"
                for result in results
            ])
            return self.node_success(message_content)
        except Exception as e:
            logger.error(f"Error searching web content: {str(e)}")
            return self.node_failure(message=f"Error searching web content: {str(e)}")

    @tool_node(args_schema=LoadDocSchema)
    def load_blog_content(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """
        Description: Load blog content from CSDN blog URLs. This tool extracts and returns the full content of CSDN blog posts.
        Parameters:
        - urls: (required) A comma-separated list of CSDN blog URLs to load content from. Each URL should be in format like 'https://blog.csdn.net/xxx/article/details/123456789' or with parameters like 'https://blog.csdn.net/xxx/article/details/123456789?spm=1000.2115.3001.10524'
        Usage:
        <load_blog_content>
        <urls>https://blog.csdn.net/weixin_57038822/article/details/123661469, https://blog.csdn.net/weixin_57038822/article/details/123661470?spm=1000.2115.3001.10524</urls>
        </load_blog_content>
        """
        urls = self._parse_url_list(state.goto.params.get("urls", ""))
        if not urls:
            return self.node_failure(message="No blog URLs provided")

        try:
            article_ids = []
            url_map = {}  # article id -> original URL, for log output and message formatting
            for url in urls:
                article_id = self._extract_csdn_article_id(url)
                if article_id is None:
                    # Any single invalid URL aborts the whole call (original behavior).
                    logger.warning(f"Invalid CSDN blog URL format: {url}")
                    return self.node_failure(message=f"Invalid CSDN blog URL format: {url}")
                article_ids.append(article_id)
                url_map[article_id] = url

            # web_content_blog returns a mapping of article id -> {"title", "content"}.
            articles = web_content_blog(article_ids=",".join(article_ids))

            if not articles:
                logger.warning(f"Failed to load blog content for article IDs: {article_ids}")
                return self.node_failure(message="Failed to load blog content")

            logger.info(f"Successfully loaded blog content from URLs: {urls}")
            writer(AgentStepLog.build_tool_action(
                action=AgentActions.SEARCH_WEB.value,
                output=json.dumps(
                    [{"url": url_map[article_id], "title": articles[article_id]["title"]} for article_id in
                     articles], ensure_ascii=False)
            ))

            # unescape_html restores entity-encoded characters in the stored body.
            message_content = "Blog contents:\n\n" + "\n\n".join([
                f"URL: {url_map[article_id]}\n"
                f"Title: {articles[article_id]['title']}\n"
                f"Content: {unescape_html(articles[article_id]['content'])}"
                for article_id in articles
            ])
            return self.node_success(message_content)
        except Exception as e:
            logger.error(f"Error loading blog content: {str(e)}")
            return self.node_failure(message=f"Error loading blog content: {str(e)}")
