import requests
import logging
import time
import random
import re
from bs4 import BeautifulSoup
from typing import Any, Dict, List, Optional, Type
from tenacity import retry, stop_after_attempt, wait_exponential
from langchain_core.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field, model_validator
from search_collection.search import (
    BaiduSearchEngine,
    BingSearchEngine,
    DuckDuckGoSearchEngine,
    GoogleSearchEngine,
    WebSearchEngine,
)
from search_collection.search.base import SearchItem
# logging.basicConfig(level=logging.INFO)


class SearchResult(BaseModel):
    """One entry returned by a search engine, optionally with page content."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Fields mirror what the engines report; raw_content is only filled in
    # later, when full-page fetching is requested.
    position: int = Field(description="Position in search results")
    url: str = Field(description="URL of the search result")
    title: str = Field(default="", description="Title of the search result")
    description: str = Field(
        default="", description="Description or snippet of the search result"
    )
    source: str = Field(description="The search engine that provided this result")
    raw_content: Optional[str] = Field(
        default=None, description="Raw content from the search result page if available"
    )

    def __str__(self) -> str:
        """Render the result as ``title (url)``."""
        return "{} ({})".format(self.title, self.url)

class ToolResult(BaseModel):
    """Represents the result of a tool execution.

    All fields are optional; an instance is truthy when at least one field
    holds a truthy value. Instances can be merged with ``+``.
    """

    # Use ConfigDict for consistency with the other models in this module
    # (the deprecated pydantic-v1 ``class Config`` style was used before).
    model_config = ConfigDict(arbitrary_types_allowed=True)

    output: Any = Field(default=None)
    error: Optional[str] = Field(default=None)
    base64_image: Optional[str] = Field(default=None)
    system: Optional[str] = Field(default=None)

    def __bool__(self) -> bool:
        # pydantic v2: ``model_fields`` replaces the deprecated ``__fields__``.
        return any(getattr(self, name) for name in type(self).model_fields)

    def __add__(self, other: "ToolResult") -> "ToolResult":
        """Merge two results field-by-field.

        Concatenatable fields are joined; ``base64_image`` cannot be
        concatenated, so combining two images raises ValueError.
        """

        def combine_fields(
            field: Optional[str], other_field: Optional[str], concatenate: bool = True
        ):
            if field and other_field:
                if concatenate:
                    return field + other_field
                raise ValueError("Cannot combine tool results")
            return field or other_field

        return ToolResult(
            output=combine_fields(self.output, other.output),
            error=combine_fields(self.error, other.error),
            base64_image=combine_fields(self.base64_image, other.base64_image, False),
            system=combine_fields(self.system, other.system),
        )

    def __str__(self) -> str:
        # ``output`` may be any type (or None); ``__str__`` must return str,
        # so coerce explicitly instead of returning ``self.output`` directly.
        return f"Error: {self.error}" if self.error else str(self.output)

    def replace(self, **kwargs) -> "ToolResult":
        """Return a new ToolResult with the given fields replaced."""
        # pydantic v2: ``model_dump`` replaces the deprecated ``.dict()``.
        return type(self)(**{**self.model_dump(), **kwargs})

class LangSearchInput(BaseModel):
    """Input schema for the integrated search tool (used as ``args_schema``)."""

    # Free-text query forwarded to the underlying search engines.
    query: str = Field(description="search query to look up")

class SearchMetadata(BaseModel):
    """Metadata about the search operation.

    NOTE(review): currently unused — the ``metadata`` field on
    SearchResponse that would carry this is commented out.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    total_results: int = Field(description="Total number of results found")
    language: str = Field(description="Language code used for the search")
    country: str = Field(description="Country code used for the search")

class SearchResponse(ToolResult):
    """Structured response from the web search tool, inheriting ToolResult."""

    results: List[SearchResult] = Field(
        default_factory=list, description="List of search results"
    )

    @model_validator(mode="after")
    def populate_output(self) -> "SearchResponse":
        """Populate ``output`` with a numbered digest of fetched page content.

        Only results carrying ``raw_content`` contribute a line; results
        without content are skipped but keep their original position number,
        so the numbering may have gaps. Each preview is flattened to a
        single line and capped at 2000 characters, with "..." appended when
        truncated. When ``error`` is set, ``output`` is left untouched.
        """
        if self.error:
            return self

        result_list: List[str] = []
        for i, result in enumerate(self.results, 1):
            if result.raw_content:
                # Flatten newlines so each result occupies one output line.
                content_preview = result.raw_content[:2000].replace("\n", " ").strip()
                if len(result.raw_content) > 2000:
                    content_preview += "..."
                result_list.append(f"{i}. {content_preview}")

        self.output = "\n".join(result_list)
        return self


class Proxy(BaseModel):
    """Scrapes a free-proxy listing site and selects one proxy at random.

    NOTE(review): the ``get_proxy_list`` validator performs network I/O at
    model-construction time — constructing a Proxy() is not free.
    """

    # Pages publishing free HTTP proxies; one is picked at random per refresh.
    proxy_urls: List[str] = Field(
        default=[
            "http://www.ip3366.net/free/",
            "https://www.iphaiwai.com/free/",
            "http://www.ip3366.net/",
            "http://www.89ip.cn/"
        ]
    )
    # Browser-like headers so the listing sites serve the normal HTML page.
    base_headers: dict = Field(
        default={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
        }
    )
    # Parsed proxies, each a requests-style {"http": ..., "https": ...} dict.
    proxy_list: Optional[List[dict]] = Field(default=None)

    @model_validator(mode="after")
    def get_proxy_list(self) -> "Proxy":
        """Fetch one random listing page and parse ip:port pairs into proxy_list."""
        if self.proxy_list is None:
            self.proxy_list = []

        proxy_url = random.choice(self.proxy_urls)
        html = self.get_page(proxy_url)
        if not html:
            return self

        if "ip3366" in proxy_url:
            trs = re.findall('<tr>(.*?)</tr>', html, re.S)
            for row in trs[1:]:  # first <tr> is the table header
                re_ip_address = re.findall(r'<td>(\d+\.\d+\.\d+\.\d+)</td>', row)
                re_port = re.findall(r'<td>(\d+)</td>', row)
                for address, port in zip(re_ip_address, re_port):
                    self._append_proxy(address, port)
        elif "iphaiwai" in proxy_url:
            re_ip_address = re.findall(r"<td class=\"kdl-table-cell\">(\d+\.\d+\.\d+\.\d+)</td>", html)
            re_port = re.findall(r"<td class=\"kdl-table-cell\">(\d+)</td>", html)
            for address, port in zip(re_ip_address, re_port):
                self._append_proxy(address, port)
        elif "89ip" in proxy_url:
            trs = re.findall('<tr>(.*?)</tr>', html, re.S)
            for row in trs[1:]:  # first <tr> is the table header
                matches = re.findall(r'<td>\s*(\S+)\s*</td>', row)
                # Guard: malformed rows with fewer than two cells previously
                # raised IndexError and aborted the whole parse.
                if len(matches) >= 2:
                    self._append_proxy(matches[0], matches[1])
        return self

    def _append_proxy(self, address: str, port: str) -> None:
        """Build a requests-style proxy mapping for ``address:port`` and store it."""
        address_port = (address + ":" + port).replace(' ', '')
        self.proxy_list.append({"http": "http://" + address_port,
                                "https": "http://" + address_port})

    def get_page(self, url: str, options: Optional[dict] = None) -> Optional[str]:
        """GET ``url`` with base headers (plus ``options``); return body text or None."""
        if options is None:
            options = {}
        headers = dict(self.base_headers, **options)
        try:
            # A timeout prevents hanging on dead listing sites. Catch
            # requests.RequestException: the builtin ConnectionError used
            # before does not cover requests' own exception hierarchy
            # (requests.ConnectionError, Timeout, ...).
            response = requests.get(url, headers=headers, timeout=10)
        except requests.RequestException:
            return None
        if response.status_code == 200:
            return response.text
        return None

    def get_random_ip(self) -> Optional[dict]:
        """Return a random proxy mapping, or None when none were scraped.

        Previously raised IndexError on an empty list, which aborted the
        caller's whole fetch even though the proxy is currently unused.
        """
        if not self.proxy_list:
            return None
        return random.choice(self.proxy_list)


class WebContentFetcher:
    """Utility class for fetching and extracting text content from web pages."""

    # Lazily-created, class-wide Proxy. Instantiating Proxy() directly in the
    # class body (as before) ran its network-fetching validator at import time.
    _proxy_instance: Optional[Proxy] = None

    @property
    def proxy(self) -> Proxy:
        """Shared Proxy instance, created on first access."""
        if WebContentFetcher._proxy_instance is None:
            WebContentFetcher._proxy_instance = Proxy()
        return WebContentFetcher._proxy_instance

    def _get(self, url: str, headers: dict, timeout: int):
        """Issue one GET request; return the response, or None on any failure."""
        try:
            # Proxy selection is kept for parity with the original flow, but
            # the proxy is currently NOT passed to requests.get (the proxied
            # call was commented out upstream).
            self.proxy.get_random_ip()
            return requests.get(url, headers=headers, timeout=timeout)
        except Exception as e:
            logging.error(
                f"Failed to fetch content from {url} for the reason of: {e}"
            )
            return None

    def fetch_content(self, url: str, timeout: int = 10) -> Optional[str]:
        """
        Fetch and extract the main content from a webpage.

        Args:
            url: The URL to fetch content from
            timeout: Request timeout in seconds

        Returns:
            Extracted text content (capped at 10,000 characters) or None if
            fetching fails
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
        }

        try:
            response = self._get(url, headers, timeout)
            if response is None:
                return None

            if response.status_code != 200:
                # If we ended up on a different (redirected) URL, retry once
                # against that final URL before giving up.
                if response.url != url:
                    url = response.url
                    response = self._get(url, headers, timeout)
                    if response is None:
                        return None
                if response.status_code != 200:
                    logging.warning(
                        f"Failed to fetch content from {url}: HTTP {response.status_code}"
                    )
                    return None

            # Parse HTML and drop non-content elements.
            soup = BeautifulSoup(response.text, "html.parser")
            for element in soup(["script", "style", "header", "footer", "nav"]):
                element.extract()

            # Collapse all whitespace to single spaces and cap the size.
            text = soup.get_text(separator="\n", strip=True)
            text = " ".join(text.split())
            return text[:10000] if text else None

        except Exception as e:
            logging.warning(f"Error fetching content from {url}: {e}")
            return None


class IntegratedSearchToolsResults(BaseTool):
    """Search tool integrating Baidu, Bing, Google and DuckDuckGo.

    Engines are tried in preference order (default engine first, then the
    configured fallbacks, then any remaining engines); the first engine that
    returns results wins. If every engine fails, the whole pass is retried
    a few times with a delay before giving up.
    """

    name: str = "Integrated_search_engine_results"
    description: str = (
        "A search engine optimized for comprehensive, accurate, and trusted results. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    # JSON-schema style parameter description. "query" is declared here as
    # well: it was listed in "required" but missing from "properties".
    parameters: dict = {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "(required) The search query to submit to the search engine.",
            },
            "num_results": {
                "type": "integer",
                "description": "(optional) The number of search results to return. Default is 5.",
                "default": 5,
            },
            "lang": {
                "type": "string",
                "description": "(optional) Language code for search results (default: cn).",
                "default": "cn",
            },
            "country": {
                "type": "string",
                "description": "(optional) Country code for search results (default: cn).",
                "default": "cn",
            },
            "fetch_content": {
                "type": "boolean",
                "description": "(optional) Whether to fetch full content from result pages. Default is false.",
                "default": False,
            },
        },
        "required": ["query"],
    }

    args_schema: Type[BaseModel] = LangSearchInput
    """The tool response format."""

    # Available engines, keyed by lowercase name.
    _search_engine: dict[str, WebSearchEngine] = {
        "baidu": BaiduSearchEngine(),
        "bing": BingSearchEngine(),
        "google": GoogleSearchEngine(),
        "duckduckgo": DuckDuckGoSearchEngine()
    }
    content_fetcher: WebContentFetcher = WebContentFetcher()

    default_engine: str = "Baidu"
    fallback_engines: List[str] = ["Bing", "Google", "DuckDuckGo"]

    def _run(
        self,
        query: str,
        num_results: int = 5,
        lang: Optional[str] = None,
        country: Optional[str] = None,
        fetch_content: bool = True,
    ) -> SearchResponse:
        """
        Execute a Web search and return detailed search results.

        Args:
            query: The search query to submit to the search engine
            num_results: The number of search results to return (default: 5)
            lang: Language code for search results (default: "cn")
            country: Country code for search results (default: "cn")
            fetch_content: Whether to fetch content from result pages
                (default: True)

        Returns:
            A structured response containing search results
        """
        # Delay (seconds) between full retry passes, and how many extra
        # passes to attempt after the first.
        retry_delay = 30
        max_retries = 3

        if lang is None:
            lang = "cn"
        if country is None:
            country = "cn"

        search_params = {"lang": lang, "country": country}

        # Try searching with retries when all engines fail.
        for retry_count in range(max_retries + 1):
            results = self._try_all_engines(query, num_results, search_params)

            if results:
                if fetch_content:
                    results = self._fetch_content_for_results(results)

                # Successful structured response; SearchResponse's validator
                # builds the textual `output` from the results.
                return SearchResponse(results=results)

            if retry_count < max_retries:
                logging.warning(
                    f"All search engines failed. Waiting {retry_delay} seconds before retry {retry_count + 1}/{max_retries}..."
                )
                time.sleep(retry_delay)
            else:
                logging.error(
                    f"All search engines failed after {max_retries} retries. Giving up."
                )

        return SearchResponse(
            error="All search engines failed to return results after multiple retries.",
            results=[],
        )

    def _try_all_engines(
        self, query: str, num_results: int, search_params: Dict[str, Any]
    ) -> List[SearchResult]:
        """Try all search engines in the configured order; return the first
        engine's results as structured SearchResult objects, or []."""
        engine_order = self._get_engine_order()
        failed_engines = []

        for engine_name in engine_order:
            engine = self._search_engine[engine_name]
            logging.info(f"🔎 Attempting search with {engine_name.capitalize()}...")
            try:
                search_items = self._perform_search_with_engine(
                    engine, query, num_results, search_params
                )
            except Exception as e:
                # All tenacity attempts exhausted for this engine; move on.
                logging.warning(f"Search with {engine_name.capitalize()} failed: {e}")
                search_items = None

            if not search_items:
                failed_engines.append(engine_name)
                continue

            if failed_engines:
                logging.info(
                    f"Search successful with {engine_name.capitalize()} after trying: {', '.join(failed_engines)}"
                )

            # Transform search items into structured results.
            return [
                SearchResult(
                    position=i + 1,
                    url=item.url,
                    title=item.title
                    or f"Result {i+1}",  # Ensure we always have a title
                    description=item.description or "",
                    source=engine_name,
                )
                for i, item in enumerate(search_items)
            ]

        if failed_engines:
            logging.error(f"All search engines failed: {', '.join(failed_engines)}")
        return []

    def _fetch_content_for_results(
        self, results: List[SearchResult]
    ) -> List[SearchResult]:
        """Fetch and attach web page content to each search result."""
        if not results:
            return []

        # Sequential fetch, one result at a time.
        fetched_results = [
            self._fetch_single_result_content(result) for result in results
        ]

        # Defensive normalization: guarantee every element is a SearchResult.
        return [
            (
                result
                if isinstance(result, SearchResult)
                else SearchResult(**result.model_dump())
            )
            for result in fetched_results
        ]

    def _fetch_single_result_content(self, result: SearchResult) -> SearchResult:
        """Fetch page content for one result, storing it in ``raw_content``."""
        if result.url:
            content = self.content_fetcher.fetch_content(result.url)
            if content:
                result.raw_content = content
        return result

    def _get_engine_order(self) -> List[str]:
        """Determine the order in which to try search engines:
        preferred engine first, then fallbacks, then any remaining."""
        preferred = (self.default_engine.lower() if self.default_engine else "google")
        fallbacks = (
            [engine.lower() for engine in self.fallback_engines]
            if self.fallback_engines else []
        )

        engine_order = [preferred] if preferred in self._search_engine else []
        engine_order.extend(
            [
                fb
                for fb in fallbacks
                if fb in self._search_engine and fb not in engine_order
            ]
        )
        engine_order.extend([e for e in self._search_engine if e not in engine_order])

        return engine_order

    @retry(
        stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10)
    )
    def _perform_search_with_engine(
        self,
        engine: WebSearchEngine,
        query: str,
        num_results: int,
        search_params: Dict[str, Any],
    ) -> List[SearchItem]:
        """Execute search with the given engine and parameters.

        Exceptions propagate so the @retry decorator can re-attempt (the
        previous bare ``except: pass`` swallowed everything, which made the
        retry policy dead code). Results are materialized into a list so the
        caller's emptiness check works even for lazy iterables.
        """
        return list(
            engine.perform_search(
                query,
                num_results=num_results,
                lang=search_params.get("lang"),
                country=search_params.get("country"),
            )
        )
