"""联网搜索服务 - 搜索引擎API + 网站爬虫"""
import logging
import re
import time
from typing import List, Dict, Optional, Any
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import aiohttp
import asyncio

from app.config import settings

logger = logging.getLogger(__name__)


class WebSearchService:
    """Web search service: search-engine API queries plus targeted site crawling.

    Locates pages containing Chinese bar-exam (法考) past questions by
    combining Google API search (optional dependency) with shallow crawling
    of known exam-prep sites.
    """

    def __init__(self):
        # Shared session so connection pooling and the UA header apply to all requests.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })
        # Upper bound on results per API search; comes from project settings.
        self.max_results = settings.MAX_SEARCH_RESULTS

    def search_via_api(self, query: str, year_range: Optional[tuple] = None) -> List[Dict[str, Any]]:
        """Search via a search-engine API (googlesearch-python, if installed).

        Args:
            query: Search keywords.
            year_range: Optional (start_year, end_year) tuple, e.g. (2019, 2024).

        Returns:
            List of result dicts with 'url', 'title', 'snippet', 'source' keys.
            Empty when the optional dependency is missing or the search fails.
        """
        results: List[Dict[str, Any]] = []

        # Fold the year range into the query to bias results toward exam papers.
        search_query = query
        if year_range:
            search_query += f" {year_range[0]}年-{year_range[1]}年 法考真题"

        # googlesearch-python is an optional dependency: degrade gracefully.
        # (FIX: the original wrapped this in a second, unreachable try/except.)
        try:
            from googlesearch import search
        except ImportError:
            logger.warning("googlesearch-python 未安装，跳过 API 搜索")
            return results

        try:
            logger.info(f"使用 Google 搜索: {search_query}")
            for url in search(search_query, num_results=min(20, self.max_results)):
                results.append({
                    'url': url,
                    'title': '',
                    'snippet': '',
                    'source': 'google_api'
                })
                if len(results) >= self.max_results:
                    break
        except Exception as e:
            logger.error(f"API 搜索失败: {str(e)}")

        return results

    def search_via_crawler(self, base_urls: List[str], keywords: List[str]) -> List[Dict[str, Any]]:
        """Crawl a list of sites for links whose text/title matches the keywords.

        Args:
            base_urls: Base URLs of the sites to crawl.
            keywords: Keywords to match against link text and title attributes.

        Returns:
            Combined result list from all sites; per-site failures are logged
            and skipped rather than aborting the whole run.
        """
        results: List[Dict[str, Any]] = []

        for base_url in base_urls:
            try:
                logger.info(f"爬取网站: {base_url}")
                results.extend(self._crawl_website(base_url, keywords))
                # Be polite: pause between sites to avoid hammering servers.
                time.sleep(1)
            except Exception as e:
                logger.error(f"爬取网站 {base_url} 失败: {str(e)}")

        return results

    def _crawl_website(self, base_url: str, keywords: List[str]) -> List[Dict[str, Any]]:
        """Crawl a single site's landing page for keyword-matching links.

        Args:
            base_url: Site base URL (relative hrefs are resolved against it).
            keywords: Keywords to match in link text or title attributes.

        Returns:
            List of result dicts, deduplicated by resolved URL.
        """
        results: List[Dict[str, Any]] = []
        # FIX: the text- and title-matchers can return the same <a> twice;
        # dedupe by the resolved URL.
        seen_urls: set = set()

        try:
            response = self.session.get(base_url, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.content, 'html.parser')

            for keyword in keywords:
                # Compile once per keyword (the original compiled it twice).
                pattern = re.compile(keyword, re.I)
                # Match the keyword in either the link text or its title attribute.
                links = soup.find_all('a', href=True, string=pattern)
                links.extend(soup.find_all('a', href=True, title=pattern))

                for link in links[:10]:  # cap results per keyword
                    href = link.get('href', '')
                    if not href:
                        continue
                    full_url = urljoin(base_url, href)
                    if full_url in seen_urls:
                        continue
                    seen_urls.add(full_url)
                    results.append({
                        'url': full_url,
                        'title': link.get_text(strip=True) or link.get('title', ''),
                        'snippet': '',
                        'source': urlparse(base_url).netloc
                    })
        except Exception as e:
            logger.error(f"爬取网站失败: {str(e)}")

        return results

    def fetch_page_content(self, url: str) -> Optional[Dict[str, Any]]:
        """Fetch a page and extract its title and visible text.

        Args:
            url: Page URL.

        Returns:
            Dict with 'url', 'title', 'content' (visible text) and 'html'
            (raw page HTML), or None on any failure.
        """
        try:
            response = self.session.get(url, timeout=10)
            response.raise_for_status()

            # FIX: keep the *raw* HTML for the 'html' key, matching the async
            # variant; previously str(soup) was returned after decompose(),
            # i.e. with scripts/styles silently removed.
            raw_html = response.text
            soup = BeautifulSoup(response.content, 'html.parser')

            # Extract the title before mutating the tree.
            title = soup.find('title')
            title_text = title.get_text(strip=True) if title else ''

            # Strip script/style so they don't pollute the extracted text.
            for element in soup(["script", "style"]):
                element.decompose()

            content = soup.get_text(separator='\n', strip=True)

            return {
                'url': url,
                'title': title_text,
                'content': content,
                'html': raw_html
            }
        except Exception as e:
            logger.error(f"获取网页内容失败 {url}: {str(e)}")
            return None

    async def fetch_page_content_async(self, url: str) -> Optional[Dict[str, Any]]:
        """Asynchronously fetch a page and extract its title and visible text.

        Args:
            url: Page URL.

        Returns:
            Dict with 'url', 'title', 'content', 'html', or None on failure
            (network error or non-200 status).
        """
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
                    if response.status != 200:
                        # FIX: the non-200 path previously fell through and
                        # returned None implicitly; make it explicit.
                        return None

                    html = await response.text()
                    soup = BeautifulSoup(html, 'html.parser')

                    # Extract the title before mutating the tree.
                    title = soup.find('title')
                    title_text = title.get_text(strip=True) if title else ''

                    # Strip script/style so they don't pollute the extracted text.
                    for element in soup(["script", "style"]):
                        element.decompose()

                    content = soup.get_text(separator='\n', strip=True)

                    return {
                        'url': url,
                        'title': title_text,
                        'content': content,
                        'html': html
                    }
        except Exception as e:
            logger.error(f"异步获取网页内容失败 {url}: {str(e)}")
            return None

    def search_exam_questions(
        self,
        years: Optional[List[int]] = None,
        subjects: Optional[List[str]] = None
    ) -> List[Dict[str, Any]]:
        """Search for bar-exam past questions via API search plus site crawling.

        Args:
            years: Exam years, e.g. [2019, 2020, 2021, 2022, 2023, 2024].
                Defaults to the most recent 5 years.
            subjects: Subject names, e.g. ['民法', '刑法', '民诉'].
                Defaults to all seven standard subjects.

        Returns:
            Deduplicated (by URL, first occurrence wins) list of result dicts.
        """
        if years is None:
            # Default: the most recent 5 years, inclusive of the current one.
            current_year = datetime.now().year
            years = list(range(current_year - 4, current_year + 1))

        if subjects is None:
            subjects = ['民法', '刑法', '民诉', '刑诉', '行政', '商经', '三国']

        all_results: List[Dict[str, Any]] = []

        # Build one query per (year, subject) pair, in two phrasings.
        keywords: List[str] = []
        for year in years:
            for subject in subjects:
                keywords.append(f"{year}年 {subject} 法考真题")
                keywords.append(f"{year}年 国家统一法律职业资格考试 {subject}")

        # 1. Search via search-engine API (keyword count capped to limit load).
        logger.info("开始通过搜索引擎API搜索...")
        year_range = (min(years), max(years))  # hoisted: loop-invariant
        for keyword in keywords[:10]:
            all_results.extend(self.search_via_api(keyword, year_range=year_range))
            time.sleep(0.5)  # rate-limit between queries

        # 2. Crawl known exam-prep sites.
        logger.info("开始爬取法考网站...")
        exam_sites = [
            'https://www.chinalawedu.com',  # chinalawedu (example)
            'https://www.66law.cn',  # 66law (example)
            # More exam-related sites can be added here.
        ]

        all_results.extend(self.search_via_crawler(exam_sites, ['法考真题', '历年真题']))

        # Dedupe by URL, preserving first-seen order.
        seen_urls: set = set()
        unique_results: List[Dict[str, Any]] = []
        for result in all_results:
            if result['url'] not in seen_urls:
                seen_urls.add(result['url'])
                unique_results.append(result)

        logger.info(f"搜索完成，共找到 {len(unique_results)} 个唯一结果")
        return unique_results

