import os
import json
import time
from typing import Dict, List, Optional, Any
from pathlib import Path
from bs4 import BeautifulSoup
from langchain_core.tools import tool
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import traceback
from config import SILLICON_API_KEY, MODEL_NAME, SILLICON_API_URL, SOCIAL_PLATFORMS, PHONE_PATTERNS, CONTACT_KEYWORDS
from utils.logger import logger
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import create_react_agent
import re

def find_contact_links(soup: BeautifulSoup, base_url: str = "") -> List[str]:
    """Locate "About us" / "Contact us" style links (English and Arabic supported).

    Args:
        soup: Parsed page to scan for anchor tags.
        base_url: Optional absolute URL used to resolve root-relative hrefs.

    Returns:
        De-duplicated list of contact-page URLs.
    """
    from urllib.parse import urljoin, urlparse

    found = set()

    for anchor in soup.find_all('a', href=True):
        target = anchor['href'].strip()
        label = anchor.get_text().strip().lower()

        # Keep the link as soon as any contact keyword appears in its text.
        if not any(keyword in label for keyword in CONTACT_KEYWORDS):
            continue

        if target.startswith('/'):
            # Root-relative link: needs a base URL to become absolute.
            if base_url:
                base_parts = urlparse(base_url)
                found.add(f"{base_parts.scheme}://{base_parts.netloc}{target}")
        elif target.startswith('http'):
            found.add(target)

    return list(found)


def _extract_contact_info_core(html: str) -> Dict[str, Any]:
    """
    Core contact-information extractor shared by the other helpers.

    Pulls emails (from mailto: links and page text), phone numbers
    (via the UAE-oriented PHONE_PATTERNS regexes) and social-media
    links (matched against SOCIAL_PLATFORMS domains) out of raw HTML.

    Args:
        html: Raw HTML content; may be empty or None.

    Returns:
        Dict with keys "emails" (list), "phones" (list) and
        "social_media" (platform -> URL dict).
    """
    if not html:
        return {"emails": [], "phones": [], "social_media": {}}

    soup = BeautifulSoup(html, 'html.parser')

    # --- Emails ---------------------------------------------------------
    emails = set()
    # From mailto: links (strip any "?subject=..." query part).
    for link in soup.find_all('a', href=True):
        href = link['href']
        if href.startswith('mailto:'):
            email = href.replace('mailto:', '').split('?')[0]
            if '@' in email:
                emails.add(email.strip())

    # From visible text. FIX: the original character class "[A-Z|a-z]"
    # wrongly allowed a literal '|' inside the TLD part of the address.
    email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
    html_content = str(soup)
    text_content = soup.get_text()
    emails.update(re.findall(email_pattern, text_content))

    # --- Phones (searched in the full markup, not just visible text) ----
    phones = set()
    for pattern in PHONE_PATTERNS:
        found_phones = re.findall(pattern, html_content)
        phones.update(phone.strip() for phone in found_phones)

    # --- Social media links ---------------------------------------------
    # Later links overwrite earlier ones for the same platform.
    social_media = {}
    for link in soup.find_all('a', href=True):
        href = link['href']
        for platform, domains in SOCIAL_PLATFORMS.items():
            if any(domain in href.lower() for domain in domains):
                social_media[platform] = href

    return {
        "emails": list(emails),
        "phones": list(phones),
        "social_media": social_media
    }


def extract_contact_info_simple(html: str) -> Dict[str, Any]:
    """
    Extract contact information without any follow-up page lookup.

    Args:
        html: HTML content.

    Returns:
        Contact-info dict; all-empty fields when extraction fails.
    """
    try:
        return _extract_contact_info_core(html)
    except Exception as e:
        # Log and degrade gracefully instead of propagating the error.
        logger.error(f"[❌] 简单提取联系信息失败: {e} - {traceback.format_exc()}")
        return {"emails": [], "phones": [], "social_media": {}}


def extract_basic_contact_info(html: str, base_url: str = "") -> Dict[str, Any]:
    """
    🔍 Pre-extraction pass: contact info, social links and key page links.

    Args:
        html: Raw HTML content.
        base_url: Base URL used to resolve relative links.

    Returns:
        Pre-extracted info dict with:
        - emails: list of email addresses
        - phones: list of phone numbers
        - social_media: platform -> URL dict
        - page_links: company-related page links (for later Agent use)
    """
    try:
        # Input validation: pages shorter than 1500 chars or containing the
        # privacy-error placeholder text return an empty result immediately.
        if not html or len(html) < 1500 or "隐私设置错误" in html:
            logger.info(f"[📋] 输入验证失败: 长度={len(html) if html else 0}, 含隐私错误={'隐私设置错误' in html if html else False}")
            return {"emails": [], "phones": [], "social_media": {}, "page_links": []}
        
        # Run the shared core extractor on the landing page itself.
        result = _extract_contact_info_core(html)
        logger.info(f"[📋] 预提取完成: {len(result['emails'])}个邮箱, {len(result['phones'])}个电话, {len(result['social_media'])}个社交媒体")
        
        # 🔗 Collect company-related links for the Agent's later use.
        page_links = extract_company_related_links(html, base_url)
        result["page_links"] = page_links
        logger.info(f"[🔗] 提取到 {len(page_links)} 个公司相关链接")
        
        # If nothing was found on the landing page, look for About/Contact pages.
        if not result["emails"] and not result["phones"]:
            logger.info(f"[🔍] 基本联系信息不完整或为空，尝试查找联系页面链接...")
            soup = BeautifulSoup(html, 'html.parser')
            contact_links = find_contact_links(soup, base_url)
            
            if contact_links:
                logger.info(f"[🔗] 找到 {len(contact_links)} 个联系相关链接")
                
                # Restrict follow-up crawling to the base URL's domain.
                from urllib.parse import urlparse
                allowed_domain = None
                if base_url:
                    parsed_base = urlparse(base_url)
                    allowed_domain = parsed_base.netloc.lower()
                    if allowed_domain.startswith('www.'):
                        allowed_domain = allowed_domain[4:]
                
                for link in contact_links[:3]:  # try at most 3 links
                    logger.info(f"[🕷️] 正在爬取联系页面: {link}")
                    # Call the helper directly rather than the tool wrapper.
                    contact_html = _crawl_html_with_selenium_direct(link, allowed_domain=allowed_domain)
                    
                    if contact_html:
                        contact_info = _extract_contact_info_core(contact_html)
                        
                        # Merge results; dict.fromkeys de-duplicates while
                        # preserving order (the original extend() could
                        # accumulate duplicate entries here).
                        if contact_info["emails"]:
                            result["emails"] = list(dict.fromkeys(result["emails"] + contact_info["emails"]))
                        if contact_info["phones"]:
                            result["phones"] = list(dict.fromkeys(result["phones"] + contact_info["phones"]))
                        if contact_info["social_media"]:
                            result["social_media"].update(contact_info["social_media"])
                        
                        # Stop as soon as anything useful was found.
                        if result["emails"] or result["phones"]:
                            logger.info(f"[✅] 在联系页面找到信息，停止进一步搜索")
                            break
            else:
                logger.info(f"[❌] 未找到联系相关链接")
        
        return result
        
    except Exception as e:
        logger.error(f"[❌] 预提取联系信息失败: {e} - {traceback.format_exc()}")
        return {"emails": [], "phones": [], "social_media": {}, "page_links": []}


def clean_html(html: str) -> str:
    """
    HTML preprocessing: strip tags and content useless for extraction.

    Removes <style> and <header> elements, non-essential <meta> tags
    (keeping description/keywords/author and og:* properties), inline
    style attributes, HTML comments and redundant whitespace.

    Args:
        html: Raw HTML content.

    Returns:
        Cleaned HTML string; the original string is returned on failure.
    """
    try:
        if not html:
            return ""
        
        soup = BeautifulSoup(html, 'html.parser')
        
        # Remove all <style> tags and their contents.
        for style in soup.find_all('style'):
            style.decompose()
        for header in soup.find_all('header'):
            header.decompose()
        # Remove non-essential <meta> tags; keep description, keywords,
        # author and OpenGraph (og:*) entries.
        for meta in soup.find_all('meta'):
            if meta is None or not hasattr(meta, 'get'):
                continue
            try:
                name = meta.get('name', '').lower()
                property_attr = meta.get('property', '').lower()
                if name not in ['description', 'keywords', 'author'] and not property_attr.startswith('og:'):
                    meta.decompose()
            except (AttributeError, TypeError):
                # Malformed meta element: remove it outright.
                try:
                    meta.decompose()
                except Exception:
                    # FIX: narrowed from a bare "except:" so SystemExit /
                    # KeyboardInterrupt are no longer swallowed.
                    pass
        
        # Strip all inline style attributes.
        for tag in soup.find_all(attrs={'style': True}):
            del tag['style']
            
        # Serialize the cleaned tree.
        cleaned_html = str(soup)
        
        # Remove HTML comments.
        cleaned_html = re.sub(r'<!--.*?-->', '', cleaned_html, flags=re.DOTALL)
        
        # Collapse blank lines and runs of whitespace.
        cleaned_html = re.sub(r'\n\s*\n', '\n', cleaned_html)
        cleaned_html = re.sub(r'\s+', ' ', cleaned_html)

        logger.info(f"HTML预处理完成，原始长度: {len(html)}, 清理后长度: {len(cleaned_html)}")
        return cleaned_html
        
    except Exception as e:
        logger.error(f"HTML预处理失败: {e} - {traceback.format_exc()}")
        return html  # on failure, fall back to the original HTML

# extract_text_content 函数功能已整合到其他函数中

def get_original_html(index: int, url: str = None, html_cache_dir: str = "data/html_cache") -> Optional[str]:
    """
    Read a locally cached HTML file.

    Args:
        index: Index of the cached file (file name is "<index>.html").
        url: Associated URL, used only for log context.
        html_cache_dir: Directory holding the cached HTML files.

    Returns:
        File contents as a string, or None when the file is missing
        or unreadable.
    """
    try:
        cache_path = Path(html_cache_dir) / f"{index}.html"
        # Optional URL suffix appended to every log line for traceability.
        suffix = f" (URL: {url})" if url else ""

        if not cache_path.exists():
            logger.warning(f"HTML文件不存在: {cache_path}{suffix}")
            return None

        content = cache_path.read_text(encoding='utf-8')

        logger.info(f"成功读取HTML文件: {cache_path}{suffix}")
        return content

    except Exception as e:
        logger.error(f"读取HTML文件失败: {e}")
        return None


def _crawl_html_with_selenium_direct(url: str, headless: bool = True, allowed_domain: Optional[str] = None) -> str:
    """
    Crawl a URL with Selenium and return its visible text (internal helper).

    Despite the name, the return value is the page's plain TEXT (script and
    style tags stripped), not raw HTML — this keeps the payload small for
    downstream LLM calls.

    Args:
        url: URL to crawl.
        headless: Whether to run Chrome in headless mode.
        allowed_domain: If given, only pages on this domain may be crawled;
            other domains get a "[🚫 DOMAIN_RESTRICTION]" error string back.

    Returns:
        Extracted page text, a domain-restriction error message, or ""
        on any crawling failure.
    """
    # Debug: record every invocation with its parameters.
    logger.info(f"[🕷️] 调用_crawl_html_with_selenium_direct，URL: {url}, 无头模式: {headless}")
    
    # Domain validation: refuse to crawl outside the allowed domain.
    if allowed_domain:
        from urllib.parse import urlparse
        
        def normalize_domain(domain_or_url: str) -> str:
            """Normalize a domain, accepting both full URLs and bare domains."""
            # Prepend a scheme when the input is not a full URL so that
            # urlparse puts the host into netloc instead of path.
            if not domain_or_url.startswith(('http://', 'https://')):
                 domain_or_url = 'http://' + domain_or_url
            
            parsed = urlparse(domain_or_url)
            domain = parsed.netloc.lower()
            
            # Strip a leading "www." so www/non-www compare equal.
            if domain.startswith('www.'):
                domain = domain[4:]
                
            return domain
        
        current_domain = normalize_domain(url)
        allowed_domain_clean = normalize_domain(allowed_domain)
            
        if current_domain != allowed_domain_clean:
            error_msg = f"[🚫 DOMAIN_RESTRICTION] 禁止爬取不同域名的网页！当前尝试爬取: {current_domain}，仅允许爬取: {allowed_domain_clean}。请使用当前网页域名下的链接。"
            logger.warning(error_msg)
            # The error text is RETURNED (not raised) so the calling
            # agent/tool sees a human-readable explanation.
            return error_msg
    try:
        # Configure Chrome options for headless server-side crawling.
        chrome_options = Options()
        if headless:
            chrome_options.add_argument('--headless')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--window-size=1920,1080')
        chrome_options.add_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36')
        
        # Create the WebDriver instance (requires chromedriver on PATH).
        driver = webdriver.Chrome(options=chrome_options)
        
        try:
            # Navigate to the target URL.
            driver.get(url)
            
            # Wait (up to 10s) for the <body> element to be present.
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            
            # Give dynamically loaded content some extra time to render.
            time.sleep(2)
            
            # Grab the fully rendered page source.
            html_content = driver.page_source
            
            # Reduce to plain text with BeautifulSoup to avoid token overflow.
            soup = BeautifulSoup(html_content, 'html.parser')
            # Drop script and style tags before text extraction.
            for script in soup(["script", "style"]):
                script.decompose()
            # Extract the visible text only.
            text_content = soup.get_text()
            # Collapse whitespace: one trimmed, non-empty line per text chunk.
            text_content = '\n'.join(line.strip() for line in text_content.splitlines() if line.strip())
            
            logger.info(f"[✅] 成功爬取HTML内容，原始长度: {len(html_content)} 字符，文本长度: {len(text_content)} 字符")
            return text_content
        
        finally:
            # Always release the browser process, even on failure.
            driver.quit()
    
    except Exception as e:
        logger.error(f"[❌] Selenium爬取失败: {e} - {traceback.format_exc()}")
        return ""


# extract_key_info 函数已删除，因为在代码库中未被使用
@tool
def crawl_html_with_selenium(url: str, headless: bool = True, allowed_domain: str = None) -> str:
    """
    使用Selenium爬取指定URL的HTML内容
    
    Args:
        url: 要爬取的URL
        headless: 是否使用无头模式
        allowed_domain: 允许爬取的域名，如果提供则只能爬取该域名下的页面
        
    Returns:
        HTML内容字符串
    """
    # NOTE: the docstring above doubles as the LangChain tool description
    # shown to the model, so it is intentionally kept verbatim.
    logger.info(f"[AGENT_TOOL_DEBUG] 调用crawl_html_with_selenium，URL: {url}, 无头模式: {headless}")
    
    # Delegate the actual crawling to the internal helper.
    text = _crawl_html_with_selenium_direct(url, headless, allowed_domain)
    
    # An empty result or a domain-restriction marker both count as failure.
    failed = (not text) or text.startswith("[🚫 DOMAIN_RESTRICTION]")
    if failed:
        logger.error(f"[AGENT_TOOL_DEBUG] crawl_html_with_selenium执行失败或被域名限制")
    else:
        logger.info(f"[AGENT_TOOL_DEBUG] crawl_html_with_selenium执行成功，提取文本长度: {len(text)}")
    
    return text


def call_ai_api(html_content: str, url: str = "", pre_extracted: Dict[str, Any] = None, max_retries: int = 2) -> Dict[str, Any]:
    """
    Analyze HTML with a prebuilt LangGraph ReAct agent and extract contact info.

    Args:
        html_content: HTML content to analyze.
        url: Page URL (interpolated into the system prompt when it contains {url}).
        pre_extracted: Pre-extracted contact info merged into the prompt.
        max_retries: Maximum number of agent invocation attempts.

    Returns:
        Standardized dict with keys "email", "phone", "social_media",
        "business_info" and "remarks"; all-empty fields when analysis fails.
    """
    try:
        from config import SYS_PROMPT

        # Initialize the chat model. The configured endpoint string ends in
        # "/chat/completions", which the client appends itself, so strip it.
        model = ChatOpenAI(
            api_key=SILLICON_API_KEY,
            base_url=SILLICON_API_URL.replace('/chat/completions', ''),
            model=MODEL_NAME,
            temperature=0.1,
            max_tokens=10000
        )
        
        # Expose only the crawl_html_with_selenium tool to the agent.
        tools = [crawl_html_with_selenium]
        
        # Build the base prompt (format {url} only when the template uses it).
        base_prompt = SYS_PROMPT.format(url=url) if "{url}" in SYS_PROMPT else SYS_PROMPT
        
        # Build the pre-extracted-information section of the prompt.
        pre_extracted_info = ""
        if pre_extracted:
            emails_count = len(pre_extracted.get('emails', []))
            phones_count = len(pre_extracted.get('phones', []))
            social_count = len(pre_extracted.get('social_media', {}))
            
            page_links_count = len(pre_extracted.get('page_links', []))
            page_links_info = ', '.join(pre_extracted.get('page_links', [])) if pre_extracted.get('page_links') else '未发现'
            
            # Extract plain text from the HTML for the prompt.
            soup = BeautifulSoup(html_content, 'html.parser')
            # Drop script and style tags before text extraction.
            for script in soup(["script", "style"]):
                script.decompose()
            # Visible text only.
            text_content = soup.get_text()
            # Collapse whitespace: one trimmed, non-empty line per chunk.
            text_content = '\n'.join(line.strip() for line in text_content.splitlines() if line.strip())
            
            pre_extracted_info = f"""
**当前URL**: {url}
**当前URL的text内容**: {text_content}
**已预提取的联系信息：**
- 邮箱 ({emails_count}个): {', '.join(pre_extracted.get('emails', [])) if pre_extracted.get('emails') else '未发现'}
- 电话 ({phones_count}个): {', '.join(pre_extracted.get('phones', [])) if pre_extracted.get('phones') else '未发现'}
- 社交媒体 ({social_count}个): {', '.join([f"{k}: {v}" for k, v in pre_extracted.get('social_media', {}).items()]) if pre_extracted.get('social_media') else '未发现'}
- 页面关键链接 ({page_links_count}个): {page_links_info}
            """
        
        # Assemble the final system prompt.
        system_prompt = f"""{base_prompt}
        
{pre_extracted_info}
        
请开始分析当前HTML内容并提取联系信息。HTML内容已通过全局变量加载。
        """
        
        # Create the prebuilt ReAct agent.
        agent = create_react_agent(
            model=model,
            tools=tools,
            prompt=system_prompt
        )
        
        for attempt in range(max_retries):
            try:
                logger.info(f"第{attempt + 1}次调用LangGraph Agent分析URL: {url}")
                
                # HTML content travels in the prompt; no global state needed.
                
                # Build the user message for this attempt.
                user_message = f"""请分析当前网页并提取联系信息。
                
**工作要求：**
1. 首先直接分析已加载的HTML内容，提取联系信息和公司信息
2. 整合预提取的信息，补充缺失的内容
3. 只有在信息严重不足且发现重要补充链接时才考虑使用crawl_html_with_selenium工具
4. 找到基本信息后立即以JSON格式返回结果
                
开始分析！"""
                
                # Invoke the agent with a bounded recursion limit.
                logger.info(f"开始调用Agent，递归限制设置为8")
                try:
                    result = agent.invoke(
                        {"messages": [HumanMessage(content=user_message)]},
                        config={"recursion_limit": 8}
                    )
                    
                    # Debug: dump all messages and tool calls from the run.
                    logger.info(f"=== AI Agent Debug信息 ===")
                    logger.info(f"总消息数量: {len(result['messages'])}")
                    
                    tool_calls_count = 0
                    for i, msg in enumerate(result["messages"]):
                        if hasattr(msg, 'tool_calls') and msg.tool_calls:
                            for tool_call in msg.tool_calls:
                                tool_calls_count += 1
                                logger.info(f"工具调用 {tool_calls_count}: {tool_call['name']} - 参数: {str(tool_call.get('args', {}))[:200]}")
                        elif hasattr(msg, 'content') and msg.content:
                            content_preview = msg.content[:200] + "..." if len(msg.content) > 200 else msg.content
                            logger.info(f"消息 {i+1}: {type(msg).__name__} - {content_preview}")
                    
                    logger.info(f"总工具调用次数: {tool_calls_count}")
                    logger.info(f"=== Debug信息结束 ===")
                    
                except Exception as agent_error:
                    logger.error(f"Agent调用过程中发生错误: {str(agent_error)}")
                    logger.error(f"错误类型: {type(agent_error).__name__}")
                    raise agent_error
                
                # Take the final AI response (last message in the run).
                final_message = result["messages"][-1]
                ai_response = final_message.content.strip()
                
                logger.info(f"Agent最终响应长度: {len(ai_response)}")
                logger.info(f"Agent最终响应内容: {ai_response}...")
                
                # Try to parse the response as JSON.
                try:
                    # Strip any markdown code-fence wrapping from the response.
                    content = ai_response
                    
                    if content.startswith('```json'):
                        content = content[7:]
                    if content.startswith('```'):
                        content = content[3:]
                    if content.endswith('```'):
                        content = content[:-3]
                    content = content.strip()
                    
                    # Parse the JSON payload (rebinds `result` on purpose).
                    result = json.loads(content)
                    
                    # Normalize field names and value formats.
                    standardized_result = {
                        "email": [],
                        "phone": [],
                        "social_media": {},
                        "business_info": {},
                        "remarks": ""
                    }
                    
                    # Emails: accept comma-separated string or list.
                    if "email" in result:
                        if isinstance(result["email"], str) and result["email"]:
                            emails = [email.strip() for email in result["email"].split(',') if email.strip()]
                            standardized_result["email"] = emails
                        elif isinstance(result["email"], list):
                            emails = [email for email in result["email"] if email]
                            standardized_result["email"] = emails
                    
                    # Phones: accept comma-separated string or list.
                    if "phone" in result:
                        if isinstance(result["phone"], str) and result["phone"]:
                            phones = [phone.strip() for phone in result["phone"].split(',') if phone.strip()]
                            standardized_result["phone"] = phones
                        elif isinstance(result["phone"], list):
                            phones = [phone for phone in result["phone"] if phone]
                            standardized_result["phone"] = phones
                    
                    # Social media: keep only non-empty entries.
                    if "social_media" in result and isinstance(result["social_media"], dict):
                        social_media = {k: v for k, v in result["social_media"].items() if v}
                        standardized_result["social_media"] = social_media
                    
                    # Business info: keep only non-empty entries.
                    if "business_info" in result and isinstance(result["business_info"], dict):
                        business_info = {k: v for k, v in result["business_info"].items() if v}
                        standardized_result["business_info"] = business_info
                    
                    # Remarks: pass through when present and non-empty.
                    if "remarks" in result and result["remarks"]:
                        standardized_result["remarks"] = result["remarks"]
                    
                    logger.info(f"Agent成功提取联系信息: {len(standardized_result['email'])}个邮箱, {len(standardized_result['phone'])}个电话")
                    return standardized_result
                    
                except json.JSONDecodeError:
                    # Fallback: extract the outermost {...} span by plain
                    # string search instead of a regular expression.
                    start_idx = ai_response.find('{')
                    end_idx = ai_response.rfind('}')
                    if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
                        json_str = ai_response[start_idx:end_idx+1]
                        try:
                            result = json.loads(json_str)
                            # Normalize the result (simplified version).
                            standardized_result = {
                                "email": result.get("email", []),
                                "phone": result.get("phone", []),
                                "social_media": result.get("social_media", {}),
                                "business_info": result.get("business_info", {}),
                                "remarks": result.get("remarks", "")
                            }
                            logger.info(f"Agent通过正则提取成功解析JSON")
                            return standardized_result
                        except json.JSONDecodeError:
                            # Still unparseable: fall through and retry.
                            pass
                
            except Exception as e:
                logger.error(f"AI API调用失败 (尝试{attempt + 1}/{max_retries}): {str(e)}")
                if attempt == max_retries - 1:
                    logger.error(f"所有API调用尝试都失败，返回空结果")
                    break
                
                # Exponential backoff before the next attempt.
                time.sleep(2 ** attempt)
        
        # All attempts failed: return an empty, well-shaped result.
        return {
            "email": [],
            "phone": [],
            "social_media": {},
            "business_info": {},
            "remarks": "AI分析失败"
        }
    
    except Exception as e:
        logger.error(f"AI API调用失败: {e}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        
        return {
            "email": [],
            "phone": [],
            "social_media": {},
            "business_info": {},
            "remarks": f"AI分析失败: {str(e)}"
        }

def extract_company_related_links(html_content: str, base_url: str = "") -> List[str]:
    """
    Intelligently extract links related to company information.

    Strategy:
    1. Semantic keyword matching (Arabic / English)
    2. URL path pattern recognition
    3. Link text analysis
    4. Same-domain links collected preferentially
    5. Priority ordering with a result cap

    Args:
        html_content: HTML content.
        base_url: Base URL used to resolve relative paths.

    Returns:
        Company-related links (at most 20 high-quality links,
        same-domain links included).
    """
    if not html_content:
        return []
    
    try:
        from urllib.parse import urlparse

        soup = BeautifulSoup(html_content, 'html.parser')
        
        # 🎯 High-priority keywords (core company information)
        high_priority_keywords = {
            'en': ['about', 'company', 'services', 'products', 'business', 'corporate', 'profile', 'overview',
                   'about-us', 'about_us', 'our-company', 'our_company', 'our-services', 'our_services',
                   'our-products', 'our_products', 'company-profile', 'company_profile'],
            'ar': ['حول', 'الشركة', 'خدمات', 'منتجات', 'أعمال', 'نبذة', 'عنا', 'من-نحن', 'خدماتنا', 'منتجاتنا'],
        }
        
        # 🔍 Medium-priority keywords (supplementary information)
        medium_priority_keywords = {
            'en': ['team', 'history', 'mission', 'vision', 'values', 'careers', 'news', 'contact',
                   'our-team', 'our_team', 'our-history', 'our_history', 'our-mission', 'our_mission',
                   'contact-us', 'contact_us', 'get-in-touch', 'achievements', 'awards'],
            'ar': ['فريق', 'تاريخ', 'مهمة', 'رؤية', 'قيم', 'وظائف', 'أخبار', 'اتصال', 'فريقنا', 'تاريخنا', 'اتصل-بنا'],
        }
        
        # ❌ Exclusion keywords (irrelevant links)
        exclude_keywords = {
            'en': ['login', 'register', 'cart', 'checkout', 'download', 'privacy', 'terms', 'cookie'],
            'ar': ['تسجيل', 'دخول', 'سلة', 'تحميل', 'خصوصية', 'شروط'],
        }
        
        def strip_www(domain: str) -> str:
            """Drop a leading www. so www/non-www hosts compare equal."""
            return domain[4:] if domain.startswith('www.') else domain
        
        # 🌐 Base domain for the same-domain bonus.
        base_domain = strip_www(urlparse(base_url).netloc.lower()) if base_url else None
        
        # 📋 Scan every anchor and score it.
        scored = []
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href'].strip()
            text = a_tag.get_text(strip=True).lower()
            
            if not href or href.startswith('#') or href.startswith('javascript:') or href.startswith('mailto:'):
                continue
            
            # 🔗 Resolve relative paths against base_url.
            if href.startswith('/'):
                full_url = (base_url.rstrip('/') + href) if base_url else href
            elif not href.startswith('http'):
                full_url = (base_url.rstrip('/') + '/' + href.lstrip('./')) if base_url else href
            else:
                full_url = href
            
            href_lower = href.lower()
            
            # 🚫 Skip clearly irrelevant links.
            if any(kw in text or kw in href_lower
                   for kws in exclude_keywords.values() for kw in kws):
                continue
            
            # 🎯 Priority score accumulation.
            score = 0
            
            # 🌐 Same-domain bonus.
            if base_domain and full_url.startswith('http'):
                if strip_www(urlparse(full_url).netloc.lower()) == base_domain:
                    score += 3  # base bonus for same-domain links
            
            # High-priority match: +10 per language group that hits.
            for kws in high_priority_keywords.values():
                if any(kw.lower() in text or kw.lower() in href_lower for kw in kws):
                    score += 10
            
            # Medium-priority match: +5 per language group that hits.
            for kws in medium_priority_keywords.values():
                if any(kw.lower() in text or kw.lower() in href_lower for kw in kws):
                    score += 5
            
            # URL path pattern bonus (applied once).
            if any(p in href_lower for p in ('/about', '/company', '/services', '/products', '/business')):
                score += 8
            
            # Keep links with any relevance signal (incl. same-domain ones).
            if score > 0:
                scored.append((score, full_url))
        
        # 🏆 Sort by score (stable), then de-duplicate, cap at 20.
        scored.sort(key=lambda item: item[0], reverse=True)
        seen_urls = set()
        result = []
        for _, link_url in scored:
            if link_url not in seen_urls:
                seen_urls.add(link_url)
                result.append(link_url)
                if len(result) == 20:  # return at most 20
                    break
        
        return result
        
    except Exception as e:
        logger.error(f"[❌] 提取公司相关链接失败: {e}")
        return []


def find_related_links(html_content: str, base_url: str = "") -> List[str]:
    """
    Agent smart link selection: rank and pick related links by priority.

    Selection strategy (per the project documentation):
    - High priority: direct contact pages (Contact Us, About Us, Team, ...)
    - Medium priority: company/service pages likely to hold contact info
    - Low priority: auxiliary pages such as news and blogs
    - Smart filtering: drop clearly irrelevant links (external links,
      downloads, social widgets, legal pages, ...)

    FIX over the original: URLs are now de-duplicated ACROSS priority
    groups too — previously two anchors with the same URL could land in
    different groups and the URL would appear twice in the result.

    Args:
        html_content: HTML content.
        base_url: Base URL (required to resolve relative links; relative
            links are dropped when it is empty).

    Returns:
        Up to 10 related links, ordered by descending priority.
    """
    try:
        from urllib.parse import urlparse

        soup = BeautifulSoup(html_content, 'html.parser')
        
        # Keyword sets per priority tier.
        high_priority_keywords = {
            'contact': ['contact', 'contact-us', 'contactus', 'get-in-touch', 'reach-us'],
            'about': ['about', 'about-us', 'aboutus', 'who-we-are', 'our-story'],
            'team': ['team', 'staff', 'people', 'leadership', 'management', 'directors'],
            'location': ['location', 'office', 'address', 'find-us', 'visit-us']
        }
        
        medium_priority_keywords = {
            'services': ['services', 'solutions', 'products', 'offerings'],
            'company': ['company', 'organization', 'business', 'corporate'],
            'careers': ['careers', 'jobs', 'employment', 'join-us', 'work-with-us'],
            'support': ['support', 'help', 'customer-service', 'assistance']
        }
        
        low_priority_keywords = {
            'news': ['news', 'blog', 'articles', 'press', 'media'],
            'resources': ['resources', 'downloads', 'documents', 'guides']
        }
        
        # Keywords that disqualify a link entirely.
        exclude_keywords = [
            'login', 'register', 'signup', 'cart', 'checkout', 'payment',
            'privacy', 'terms', 'cookie', 'legal', 'disclaimer',
            'facebook', 'twitter', 'linkedin', 'instagram', 'youtube',
            'pdf', 'doc', 'docx', 'xls', 'xlsx', 'zip', 'rar'
        ]
        
        # Buckets of (url, category, text) tuples per priority tier.
        high_priority_links = []
        medium_priority_links = []
        low_priority_links = []
        
        for link in soup.find_all('a', href=True):
            href = link['href'].strip()
            link_text = link.get_text().strip().lower()
            href_lower = href.lower()
            
            # Skip empty, anchor-only and javascript links.
            if not href or href.startswith('#') or href.startswith('javascript:'):
                continue
            
            # Skip external links (different domain than base_url).
            if href.startswith('http') and base_url:
                if urlparse(base_url).netloc != urlparse(href).netloc:
                    continue
            
            # Skip links matching any exclusion keyword.
            if any(exclude_kw in link_text or exclude_kw in href_lower
                   for exclude_kw in exclude_keywords):
                continue
            
            # Build the absolute URL; relative links need a base_url.
            full_url = href
            if not href.startswith('http'):
                if base_url:
                    if href.startswith('/'):
                        full_url = base_url.rstrip('/') + href
                    else:
                        full_url = base_url.rstrip('/') + '/' + href
                else:
                    continue
            
            # Classify into exactly the first matching priority bucket.
            link_priority = None
            
            for category, keywords in high_priority_keywords.items():
                if any(kw in link_text or kw in href_lower for kw in keywords):
                    high_priority_links.append((full_url, category, link_text))
                    link_priority = 'high'
                    break
            
            if not link_priority:
                for category, keywords in medium_priority_keywords.items():
                    if any(kw in link_text or kw in href_lower for kw in keywords):
                        medium_priority_links.append((full_url, category, link_text))
                        link_priority = 'medium'
                        break
            
            if not link_priority:
                for category, keywords in low_priority_keywords.items():
                    if any(kw in link_text or kw in href_lower for kw in keywords):
                        low_priority_links.append((full_url, category, link_text))
                        break
        
        # Shared seen-set so a URL appearing in several buckets is kept
        # only once, at its highest priority.
        seen_urls = set()
        
        def deduplicate_links(links):
            """Keep first occurrence of each URL, honoring earlier buckets."""
            unique_links = []
            for url, _category, _text in links:
                if url not in seen_urls:
                    seen_urls.add(url)
                    unique_links.append(url)
            return unique_links
        
        # Dedup each tier once (highest priority first) and merge.
        high_unique = deduplicate_links(high_priority_links)
        medium_unique = deduplicate_links(medium_priority_links)
        low_unique = deduplicate_links(low_priority_links)
        result_links = high_unique + medium_unique + low_unique
        
        # Cap the number of returned links to avoid noise.
        max_links = 10
        final_links = result_links[:max_links]
        
        logger.info(f"Agent智能链接选择完成: 高优先级{len(high_unique)}个, "
                   f"中优先级{len(medium_unique)}个, "
                   f"低优先级{len(low_unique)}个, 最终选择{len(final_links)}个")
        
        return final_links
    
    except Exception as e:
        logger.error(f"Agent智能链接选择失败: {e}")
        return []