import os
import json
from concurrent.futures import ThreadPoolExecutor

import requests
from typing import List, Dict, Optional, Union, Sequence
from urllib.parse import urlparse

from bs4 import BeautifulSoup
from loguru import logger
from playwright.sync_api import sync_playwright


def load_web_content(urls: Union[str, Sequence[str], None] = None, timeout: int = 120) -> Dict[str, str]:
    """
    Load web content through proxy service.

    Stack Overflow URLs are fetched via the Stack Exchange API
    (load_stackoverflow_content); all other URLs go through the
    WEB_LOAD_API proxy service, falling back to the local
    Playwright-based loader when that variable is unset.

    Args:
        urls: Single URL string or sequence of URLs to fetch
        timeout: Request timeout in seconds

    Returns:
        Dict mapping URLs to their page content as strings; URLs that
        fail validation or fail to load are omitted.
    """
    if not urls:
        return {}

    urls = [urls] if isinstance(urls, str) else urls
    urls = [url.strip() for url in urls if is_valid_web_url(url.strip())]
    if not urls:
        return {}

    # Split Stack Overflow URLs from the rest: they are served through a
    # dedicated API instead of the generic page loader.
    stackoverflow_urls = []
    other_urls = []

    for url in urls:
        if 'stackoverflow.com' in url.lower():
            stackoverflow_urls.append(url)
        else:
            other_urls.append(url)

    result: Dict[str, str] = {}

    # Handle Stack Overflow URLs
    for url in stackoverflow_urls:
        content = load_stackoverflow_content(url)
        if content:
            result[url] = content

    # Handle all remaining URLs
    if other_urls:
        web_load_api = os.getenv("WEB_LOAD_API")
        if not web_load_api:
            logger.warning("WEB_LOAD_API environment variable not set, using local web content loader")
            other_content = load_web_content_local(other_urls, timeout)
        else:
            try:
                headers = {'Content-Type': 'application/json'}
                payload = {"urls": other_urls}
                res = requests.post(url=web_load_api, headers=headers, json=payload, timeout=timeout)
                res.raise_for_status()
                # Each response entry may expose its text under either
                # 'page_content' or 'content'.
                other_content = {
                    page_url: page.get('page_content') or page.get('content', '')
                    for page_url, page in res.json().items()
                }
            except requests.exceptions.RequestException as e:
                logger.warning(f"Failed to fetch content: {str(e)}")
                other_content = {}

        result.update(other_content)

    return result


def load_web_content_local(url: Union[str, Sequence[str], None] = None, timeout: int = 30) -> Dict[str, str]:
    """
    Fetch pages locally with headless Chromium and return their visible text.

    Args:
        url: Single URL string or sequence of URLs to fetch.
        timeout: Per-page navigation timeout in seconds.

    Returns:
        Dict mapping URLs to extracted plain text; URLs that fail to
        load are omitted.
    """
    if not url:
        return {}
    url_to_docs: Dict[str, str] = {}
    try:
        if isinstance(url, str):
            url = [url]

        def fetch_document(u: str) -> Optional[str]:
            """Render one page and return its cleaned text, or None on failure."""
            try:
                with sync_playwright() as p:
                    browser = p.chromium.launch(headless=True)
                    page = browser.new_page()
                    # Playwright timeouts are in milliseconds; the original
                    # code silently ignored the `timeout` parameter.
                    page.goto(u, timeout=timeout * 1000)
                    page.wait_for_selector("body", state="attached")
                    html = page.content()
                    browser.close()

                    soup = BeautifulSoup(html, "html.parser")
                    # Strip non-content tags before extracting text.
                    for tag in soup(["script", "style", "meta", "link", "noscript", "head", "title"]):
                        tag.decompose()
                    text = soup.get_text(separator="\n", strip=True)
                    return "\n".join(line.strip() for line in text.splitlines() if line.strip())
            except Exception as e:
                logger.error(f"load doc from web error: {e}")
                return None

        # Fetch pages concurrently; each worker launches its own browser.
        with ThreadPoolExecutor(max_workers=5) as executor:
            results = list(executor.map(fetch_document, url))
            for u, doc in zip(url, results):
                if doc:
                    url_to_docs[u] = doc
    except Exception as e:
        logger.error(f"Failed to load documents from the web.{e}")
    return url_to_docs


def is_valid_web_url(url: str) -> bool:
    """
    Validate if a URL is a valid web URL.

    A URL is considered valid when it has both a scheme and a network
    location, and its host is not a bare dotted-numeric IP address.

    Args:
        url: URL string to validate

    Returns:
        bool indicating if URL is valid
    """
    if not url:
        return False

    try:
        parts = urlparse(url)
    except Exception:
        return False

    # Both a scheme (http/https/...) and a host are required.
    if not parts.scheme or not parts.netloc:
        return False

    # Drop any port suffix, then reject hosts consisting solely of
    # digits and dots (i.e. dotted-numeric IP addresses).
    host = parts.netloc.split(':')[0]
    return any(not (ch.isdigit() or ch == '.') for ch in host)


def web_search_serper(query: str, num: int = 6, filter_files: bool = True) -> Optional[List[Dict[str, str]]]:
    """
    Search web content using Serper API.

    Args:
        query: Search query string
        num: Number of results to return
        filter_files: Whether to filter file URLs and return only num results

    Returns:
        List of search results containing title, url, snippet and — when
        the page could be loaded — full content; None on error or empty query.
    """
    if not query:
        return None
    serper_key = os.getenv("WEB_SEARCH_SERPER_KEY")
    if not serper_key:
        logger.error("WEB_SEARCH_SERPER_KEY environment variable not set")
        return None

    url = os.getenv("WEB_SEARCH_SERPER_URL", "https://google.serper.dev/search")
    # Over-fetch when filtering so that dropping file links still leaves
    # enough entries to return `num` results.
    payload = json.dumps({
        "q": query,
        "gl": "cn",
        "num": num * 2 if filter_files else num
    })
    headers = {
        'X-API-KEY': serper_key,
        'Content-Type': 'application/json'
    }

    try:
        # Explicit timeout so a stalled search API cannot hang the caller.
        response = requests.post(url, headers=headers, data=payload, timeout=30)
        response.raise_for_status()
        data = response.json()
        organic_results = data.get("organic", [])

        # Filter out direct links to downloadable documents (pdf, doc, ...).
        if filter_files:
            file_exts = ('.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx')
            filtered_results = []
            for item in organic_results:
                # Skip malformed entries without a link instead of raising
                # KeyError (which would discard the whole result set).
                link = item.get("link", "")
                if link and not link.lower().endswith(file_exts):
                    filtered_results.append(item)
                if len(filtered_results) >= num:
                    break
            organic_results = filtered_results[:num]

        urls = [item["link"] for item in organic_results]

        web_contents = load_web_content(urls)

        results = []
        for item in organic_results:
            result = {
                "title": item["title"],
                "url": item["link"],
                "snippet": item.get("snippet", "")
            }

            # Attach full page text when the loader managed to fetch it.
            if web_contents and item["link"] in web_contents:
                result["content"] = web_contents[item["link"]]

            results.append(result)
        return results

    except Exception as e:
        logger.error(f"Search error: {e}")
        return None


def web_content_blog(article_ids: Union[str, List[str]]) -> Optional[Dict[str, Dict[str, str]]]:
    """
    Fetch blog content by article IDs.

    Args:
        article_ids: Single article ID string (optionally comma-separated)
            or list of article IDs

    Returns:
        Dict mapping article IDs to {"title": ..., "content": ...},
        or None when nothing could be fetched.
    """
    if not article_ids:
        return None

    # Accept a comma-separated string as well as a list.
    if isinstance(article_ids, str):
        article_ids = [aid.strip() for aid in article_ids.split(",") if aid.strip()]

    blog_url = os.getenv("BLOG_DETAIL_URL")
    blog_retry_url = os.getenv("BLOG_DETAIL_RETRY_URL")
    if not blog_url and not blog_retry_url:
        logger.error("BLOG_DETAIL_URL environment variable not set")
        return None

    articles: Dict[str, Dict[str, str]] = {}
    headers = {'Content-Type': 'application/json'}

    # Hoisted out of clean_html_content so the import runs once, not per call.
    import re

    def clean_html_content(content: str) -> str:
        """Clean HTML content by replacing <p> tags with newlines"""
        if not content:
            return content

        # Replace <p> tags with newlines
        content = re.sub(r'<p>', '\n', content)
        content = re.sub(r'</p>', '', content)
        # Clean up multiple consecutive newlines
        content = re.sub(r'\n\s*\n', '\n\n', content)
        # Strip leading/trailing whitespace
        return content.strip()

    def fetch_blog_content(article_id: str, blog_api_urls: List[Optional[str]]):
        """Try each API endpoint in order; return the first response whose
        payload contains data, else the last response received (or None)."""
        response = None
        try:
            for api_url in blog_api_urls:
                if not api_url:
                    continue
                url = api_url.format(article_id)
                # Explicit timeout so a stalled endpoint cannot hang the loop.
                response = requests.get(url, headers=headers, timeout=30)
                if response.status_code == 200:
                    data = response.json()
                    if data and data.get('data'):
                        return response
        except Exception:
            logger.error('fetch blog content error')
        return response

    try:
        # Request each article individually
        for article_id in article_ids:
            try:
                response = fetch_blog_content(article_id=article_id, blog_api_urls=[blog_url, blog_retry_url])
                if response is None:
                    # Every endpoint failed for this ID. Skip it — calling
                    # raise_for_status() on None raises AttributeError, which
                    # requests.RequestException does not catch, and would
                    # abort the whole batch.
                    logger.error(f"Failed to fetch blog article: no response, Article ID: {article_id}")
                    continue
                response.raise_for_status()

                data = response.json()
                item = (data.get('data', {}) or {}).get(article_id)

                if not isinstance(item, dict):
                    logger.error(f"Invalid blog article format for ID: {article_id}")
                    continue

                raw_content = item.get('content', '')
                cleaned_content = clean_html_content(raw_content)

                articles[article_id] = {
                    "title": item.get('title', ''),
                    "content": cleaned_content
                }
            except requests.RequestException as e:
                logger.error(f"Failed to fetch blog article: {e}, Article ID: {article_id}")
                continue

        return articles if articles else None

    except Exception as e:
        logger.error(f"Failed to fetch blog articles: {e}")
        return None

def load_stackoverflow_content(question_url: str) -> Optional[str]:
    """
    Fetch a Stack Overflow question (and its answers) via the Stack
    Exchange API and format it as plain text.

    Args:
        question_url: Question page URL, e.g.
            https://stackoverflow.com/questions/12345/some-title

    Returns:
        Formatted question content string, or None on any failure.
    """
    # Extract the numeric question ID from the URL path
    # (/questions/<id>/<slug>).
    try:
        question_id = urlparse(question_url).path.split('/')[2]
        if not question_id.isdigit():
            raise ValueError("Invalid Stack Overflow URL")
    except Exception as e:
        logger.error(f"URL解析错误: {e}")
        return None

    # Stack Exchange API endpoint for this question.
    api_url = f"https://api.stackexchange.com/2.3/questions/{question_id}"

    # Request parameters; the "withbody" filter returns full bodies.
    params = {
        "site": "stackoverflow",
        "filter": "withbody",
        "order": "desc",
        "sort": "votes"
    }

    # Browser-like headers; the API serves gzip-compressed JSON.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Accept": "application/json",
        "Accept-Encoding": "gzip, deflate"
    }

    try:
        # Explicit timeout so a stalled API call cannot hang the caller.
        response = requests.get(api_url, params=params, headers=headers, timeout=30)
        response.raise_for_status()

        data = response.json()

        if not data.get("items"):
            logger.warning("未找到问题数据")
            return None

        question_data = data["items"][0]

        # Assemble a readable plain-text summary. Use .get with defaults so
        # a missing field degrades gracefully instead of raising KeyError
        # (which neither except clause below would catch).
        content_parts = []
        content_parts.append(f"Title: {question_data.get('title', '')}")
        content_parts.append(f"Score: {question_data.get('score', 0)}")
        content_parts.append(f"View Count: {question_data.get('view_count', 0)}")
        content_parts.append(f"Tags: {', '.join(question_data.get('tags', []))}")
        content_parts.append(f"\nQuestion Body:\n{question_data.get('body', '')}")

        # Append answers when the filter included them.
        if question_data.get("answers"):
            content_parts.append("\nAnswers:")
            for i, answer in enumerate(question_data["answers"], 1):
                accepted = " (Accepted)" if answer.get("is_accepted") else ""
                content_parts.append(f"\nAnswer {i} (Score: {answer.get('score', 0)}){accepted}:")
                content_parts.append(answer.get("body", ""))

        return "\n".join(content_parts)

    except requests.exceptions.RequestException as e:
        logger.error(f"请求失败: {e}")
        return None
    except json.JSONDecodeError as e:
        logger.error(f"JSON解析错误: {e}")
        return None
