import json
import os
import time
from typing import Optional, List, Dict

from inscode_agent.utils.str import unescape_html
from langchain_core.runnables import RunnableConfig
from langgraph.types import StreamWriter
from pydantic import BaseModel

from ...config.agents import AgentActions
from .base import BaseNodes, tool_node
from ...utils.search import load_web_content, web_search_serper, web_content_blog
from ...models.graph import GraphState, AgentStepLog
from loguru import logger


class LoadDocSchema(BaseModel):
    """Arguments for the `load_doc_from_web` tool node."""

    # Comma-separated list of URLs to fetch (CSDN blog article links supported).
    urls: str


class SearchWebSchema(BaseModel):
    """Arguments for the `search_web_content` tool node."""

    # Search keywords or a natural-language question to send to the search engine.
    search_question: str


class WebNodes(BaseNodes):
    """Graph nodes that bring external web content into the agent loop.

    Provides two tool nodes — direct page loading (`load_doc_from_web`) and
    search-engine lookup (`search_web_content`) — plus a shared classmethod
    helper that fetches and normalizes page content.
    """

    # Attempts made when loading generic (non-CSDN) pages, and the pause
    # between failed attempts, in seconds.
    _LOAD_ATTEMPTS = 3
    _RETRY_DELAY_SECONDS = 2

    @tool_node(args_schema=LoadDocSchema)
    def load_doc_from_web(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """
        加载网页文档内容。从指定URL获取并提取网页内容，支持CSDN博客等站点。
        参数:
        - urls: (必填) URL列表，逗号分隔，支持CSDN博客链接
        用法:
        <load_doc_from_web>
        <urls>https://example.com, https://blog.csdn.net/user/article/details/123456</urls>
        </load_doc_from_web>
        """
        # NOTE: the docstring above doubles as the runtime tool description
        # consumed by @tool_node, so it is kept verbatim (not translated).
        urls = [url.strip() for url in state.goto.params.get("urls", "").split(",") if url.strip()]
        if not urls:
            return self.node_failure(message="No valid URLs provided")

        try:
            contents = self.load_content_from_urls(urls)
            if not contents:
                logger.warning(f"Failed to load content from URLs after 3 attempts: {urls}")
                return self.node_failure(message="Failed to load content from URLs after 3 attempts")

            logger.info(f"Successfully loaded web content from URLs: {urls}")

            # Stream a compact url/title summary to the client.
            # NOTE(review): this reuses AgentActions.SEARCH_WEB — confirm there
            # is no dedicated "load document" action that should be used here.
            writer(AgentStepLog.build_tool_action(
                action=AgentActions.SEARCH_WEB.value,
                output=json.dumps(
                    [{"url": data['url'], "title": data['title']} for data in contents.values()], ensure_ascii=False)
            ))

            # Full page contents go back to the model as one message.
            message_content = "Web content below:\n\n" + "\n\n".join([
                f"URL: {data['url']}\n"
                f"Title: {data['title']}\n"
                f"Content: {data['content']}"
                for data in contents.values()
            ])
            return self.node_success(message_content)
        except Exception as e:
            logger.error(f"Error loading web content: {str(e)}")
            return self.node_failure(message=f"Error loading web content: {str(e)}")

    @tool_node(args_schema=SearchWebSchema)
    def search_web_content(self, state: GraphState, writer: StreamWriter, *, config: Optional[RunnableConfig] = None):
        """
        搜索引擎查询。获取最新信息或查找参考资料，返回相关网页摘要和链接。
        参数:
        - search_question: (必填) 搜索关键词或问题
        用法:
        <search_web_content>
        <search_question>React身份验证实现方法</search_question>
        </search_web_content>
        注意：仅在需要搜索未知信息时使用，已知内容请直接回答。
        """
        # NOTE: the docstring above doubles as the runtime tool description
        # consumed by @tool_node, so it is kept verbatim (not translated).
        search_question = state.goto.params.get("search_question", "")
        if not search_question:
            return self.node_failure(message="No search question provided")

        try:
            logger.info(f"Searching web content for: {search_question}")
            # Keep the env default a string so int() always receives one type.
            results = web_search_serper(query=search_question, num=int(os.getenv("WEB_SEARCH_TOP", "3")))
            if not results:
                logger.warning(f"No search results found for query: {search_question}")
                return self.node_failure(message="No search results found")

            logger.info(
                f"Searching web results for query: {search_question}, urls: {[result.get('url', '') for result in results]}")
            writer(AgentStepLog.build_tool_action(
                action=AgentActions.SEARCH_WEB.value,
                output=json.dumps([{"title": result["title"], "url": result["url"]} for result in results],
                                  ensure_ascii=False)
            ))

            # FIX: separate snippet and content with a newline — previously the
            # two fields were concatenated with no separator.
            message_content = "Search results:\n\n" + "\n\n".join([
                f"url: {result.get('url', '')}\n"
                f"title: {result.get('title', '')}\n"
                f"{result.get('snippet', '')}\n"
                f"{result.get('content', '')}"
                for result in results
            ])
            return self.node_success(message_content)
        except Exception as e:
            logger.error(f"Error searching web content: {str(e)}")
            return self.node_failure(message=f"Error searching web content: {str(e)}")

    @classmethod
    def load_content_from_urls(cls, urls: List[str]) -> Dict[str, Dict[str, str]]:
        """
        Load content from URLs and return structured data.

        Args:
            urls: List of URLs to load content from

        Returns:
            Dict mapping URLs to content data with keys: 'content', 'title', 'url'
            Title field may be empty for some sources.
        """
        if not urls:
            return {}

        # BUG FIX: os.getenv returns a *string* when the variable is set; the
        # previous list default meant a set env var was iterated character by
        # character (matching almost any URL). Treat the variable as a
        # comma-separated domain list, and compute it once, not per URL.
        csdn_domains = [d.strip() for d in os.getenv("CSDN_BLOG_DOMAIN", "blog.csdn.net").split(",") if d.strip()]

        # Route CSDN article URLs to the blog API; everything else to the
        # generic page loader.
        blog_urls: List[str] = []
        other_urls: List[str] = []
        for url in urls:
            if any(domain in url for domain in csdn_domains) and 'article/details/' in url:
                blog_urls.append(url)
            else:
                other_urls.append(url)

        result: Dict[str, Dict[str, str]] = {}
        result.update(cls._load_blog_articles(blog_urls))
        result.update(cls._load_generic_pages(other_urls))
        return result

    @classmethod
    def _load_blog_articles(cls, blog_urls: List[str]) -> Dict[str, Dict[str, str]]:
        """Fetch CSDN blog articles via the blog API; best-effort, never raises."""
        if not blog_urls:
            return {}

        result: Dict[str, Dict[str, str]] = {}
        try:
            article_ids: List[str] = []
            url_map: Dict[str, str] = {}
            for url in blog_urls:
                # Article id is the path segment after 'details/', minus any query.
                article_id = url.split('details/')[-1].split('?')[0]
                if article_id.isdigit():
                    article_ids.append(article_id)
                    url_map[article_id] = url
                else:
                    logger.warning(f"Invalid CSDN blog URL format: {url}")

            if article_ids:
                articles = web_content_blog(article_ids=",".join(article_ids))
                if articles:
                    for article_id, article_data in articles.items():
                        original_url = url_map[article_id]
                        result[original_url] = {
                            # API returns HTML-escaped text; unescape before use.
                            'content': unescape_html(article_data['content']),
                            'title': article_data['title'],
                            'url': original_url
                        }
        except Exception as e:
            logger.error(f"Error loading blog content: {str(e)}")
        return result

    @classmethod
    def _load_generic_pages(cls, other_urls: List[str]) -> Dict[str, Dict[str, str]]:
        """Load arbitrary pages with retries; titles are unavailable from this path."""
        if not other_urls:
            return {}

        result: Dict[str, Dict[str, str]] = {}
        try:
            contents = None
            for attempt in range(cls._LOAD_ATTEMPTS):
                contents = load_web_content(other_urls)
                if contents:
                    break
                logger.warning(f"Attempt {attempt + 1} failed to load content from URLs: {other_urls}")
                time.sleep(cls._RETRY_DELAY_SECONDS)

            if contents:
                for url, content in contents.items():
                    result[url] = {
                        'content': content,
                        'title': '',
                        'url': url
                    }
        except Exception as e:
            logger.error(f"Error loading web content: {str(e)}")
        return result