"""
SerpApi Google Scholar 数据获取器
专注于获取引用文章URL列表，支持年份过滤
"""

import requests
import time
import logging
from typing import List, Dict, Optional, Tuple
from urllib.parse import urljoin, urlparse
import json

from .config import (
    get_api_key, 
    SERPAPI_BASE_URL, 
    REQUEST_TIMEOUT, 
    MAX_RETRIES, 
    RETRY_DELAY,
    GOOGLE_SCHOLAR_ENGINE,
    DEFAULT_LANGUAGE,
    DEFAULT_NUM_RESULTS,
    TARGET_YEAR
)

# Module-level logging setup.
# NOTE(review): calling basicConfig() at import time configures the root
# logger as a side effect of importing this module — confirm this is
# intended rather than leaving configuration to the application.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class SerpApiError(Exception):
    """Base class for all SerpApi-related errors."""
    pass


class SerpApiAuthError(SerpApiError):
    """API authentication error (missing, invalid, or expired key)."""
    pass


class SerpApiRateLimitError(SerpApiError):
    """API rate-limit error (HTTP 429 persisting past all retries)."""
    pass


class SerpApiFetcher:
    """
    SerpApi Google Scholar data fetcher.

    Focused on retrieving lists of citing-article URLs, with optional
    filtering by publication year.
    """

    def __init__(self, api_key: Optional[str] = None):
        """
        Initialize the fetcher.

        Args:
            api_key: SerpApi API key; when not provided, it is read from
                the configuration via get_api_key().

        Raises:
            SerpApiAuthError: If no API key is available.
        """
        self.api_key = api_key or get_api_key()
        self.base_url = SERPAPI_BASE_URL
        # One shared session for all requests (connection reuse).
        # trust_env=False ignores proxy settings from environment
        # variables so requests go direct.
        self.session = requests.Session()
        self.session.trust_env = False

        # Validate the API key before any request is attempted.
        if not self.api_key:
            raise SerpApiAuthError("API密钥未配置")

        logger.info("SerpApiFetcher 初始化完成")

    def _make_request(self, params: Dict) -> Dict:
        """
        Send an API request with retries.

        Args:
            params: Query parameters. The API key is added to a copy,
                so the caller's dict is never mutated and the key never
                appears in the logged parameters.

        Returns:
            Parsed JSON response data.

        Raises:
            SerpApiAuthError: On HTTP 401 (invalid or expired key).
            SerpApiRateLimitError: On HTTP 429 after all retries.
            SerpApiError: On any other unrecoverable failure.
        """
        # Copy before injecting the key: avoids mutating the caller's
        # dict and keeps the secret out of the log line below.
        request_params = dict(params, api_key=self.api_key)

        for attempt in range(MAX_RETRIES):
            try:
                # Log the key-free params only.
                logger.info(f"发送API请求 (尝试 {attempt + 1}/{MAX_RETRIES}): {params}")

                response = self.session.get(
                    self.base_url,
                    params=request_params,
                    timeout=REQUEST_TIMEOUT,
                    proxies={}  # explicitly disable proxies
                )

                if response.status_code == 200:
                    data = response.json()

                    # SerpApi reports application-level failures in an
                    # 'error' field even with HTTP 200.
                    if 'error' in data:
                        error_msg = data['error']
                        logger.error(f"API返回错误: {error_msg}")
                        raise SerpApiError(f"API错误: {error_msg}")

                    logger.info("API请求成功")
                    return data

                elif response.status_code == 401:
                    # Auth failures will not succeed on retry.
                    raise SerpApiAuthError("API密钥无效或已过期")

                elif response.status_code == 429:
                    logger.warning("遇到速率限制，等待重试...")
                    if attempt < MAX_RETRIES - 1:
                        # Linear backoff: wait longer on each attempt.
                        time.sleep(RETRY_DELAY * (attempt + 1))
                        continue
                    raise SerpApiRateLimitError("API速率限制")

                else:
                    logger.error(f"HTTP错误: {response.status_code} - {response.text}")
                    if attempt < MAX_RETRIES - 1:
                        time.sleep(RETRY_DELAY)
                        continue
                    raise SerpApiError(f"HTTP错误: {response.status_code}")

            except requests.exceptions.RequestException as e:
                # Transport-level failures (DNS, timeout, connection
                # reset) are retried like server errors.
                logger.error(f"网络请求错误: {e}")
                if attempt < MAX_RETRIES - 1:
                    time.sleep(RETRY_DELAY)
                    continue
                raise SerpApiError(f"网络错误: {e}") from e

        raise SerpApiError("请求失败，已达到最大重试次数")

    def search_article(self, query: str, num_results: int = DEFAULT_NUM_RESULTS) -> List[Dict]:
        """
        Search for scholarly articles.

        Args:
            query: Search query string.
            num_results: Number of results to request.

        Returns:
            List of article dicts with title, link, snippet,
            publication_info, cited_by and result_id fields.

        Raises:
            SerpApiError: If the underlying request fails.
        """
        params = {
            'engine': GOOGLE_SCHOLAR_ENGINE,
            'q': query,
            'hl': DEFAULT_LANGUAGE,
            'num': num_results
        }

        try:
            data = self._make_request(params)

            # Normalize each organic result into a flat article dict.
            articles = [
                {
                    'title': result.get('title', ''),
                    'link': result.get('link', ''),
                    'snippet': result.get('snippet', ''),
                    'publication_info': result.get('publication_info', {}),
                    'cited_by': result.get('inline_links', {}).get('cited_by', {}),
                    'result_id': result.get('result_id', '')
                }
                for result in data.get('organic_results', [])
            ]

            logger.info(f"搜索到 {len(articles)} 篇文章")
            return articles

        except Exception as e:
            logger.error(f"搜索文章失败: {e}")
            raise

    def get_citing_articles(self, cites_id: str, year: int = TARGET_YEAR,
                          max_results: int = 100) -> List[str]:
        """
        Fetch URLs of articles that cite a given article.

        Args:
            cites_id: The cited article's "cites" identifier.
            year: Publication-year filter (passed as as_ylo/as_yhi).
            max_results: Upper bound on returned URLs.

        Returns:
            List of citing-article URLs (at most max_results).
        """
        logger.info(f"获取文章 {cites_id} 在 {year} 年的引用文章")

        citing_urls: List[str] = []
        start = 0
        page_size = 20  # fixed page size for Scholar pagination

        while len(citing_urls) < max_results:
            params = {
                'engine': GOOGLE_SCHOLAR_ENGINE,
                'cites': cites_id,
                'hl': DEFAULT_LANGUAGE,
                'start': start,
                'num': page_size,
                'as_ylo': year,  # year range start
                'as_yhi': year   # year range end
            }

            try:
                data = self._make_request(params)

                batch_urls = self._extract_citing_urls(data, year)

                if not batch_urls:
                    logger.info("没有更多引用文章")
                    break

                citing_urls.extend(batch_urls)
                logger.info(f"当前已获取 {len(citing_urls)} 个引用文章URL (第 {start//page_size + 1} 页)")

                # Stop when the API reports no further page.
                if 'serpapi_pagination' not in data or 'next' not in data['serpapi_pagination']:
                    logger.info("已获取所有可用的引用文章")
                    break

                # A short page also means we reached the last page.
                if len(batch_urls) < page_size:
                    logger.info("已到达最后一页")
                    break

                start += page_size  # fixed stride

                # Small delay between pages to avoid rate limiting.
                time.sleep(1)

            except Exception as e:
                # Best-effort: keep whatever was collected so far.
                logger.error(f"获取引用文章失败: {e}")
                break

        logger.info(f"总共获取到 {len(citing_urls)} 个 {year} 年的引用文章URL")
        return citing_urls[:max_results]

    def _extract_citing_urls(self, data: Dict, target_year: int) -> List[str]:
        """
        Extract citing-article URLs from an API response.

        Every result with a non-empty link is kept: the API already
        filters by year via as_ylo/as_yhi, so the summary check below
        only drives debug logging.

        Args:
            data: API response data.
            target_year: Target publication year (for logging only).

        Returns:
            List of URLs.
        """
        urls: List[str] = []

        for result in data.get('organic_results', []):
            link = result.get('link', '')
            if not link:
                continue

            # Log a debug line when the year is visible in the
            # publication summary; the URL is appended either way.
            summary = result.get('publication_info', {}).get('summary', '')
            if str(target_year) in summary:
                logger.debug(f"找到 {target_year} 年文章: {result.get('title', '')[:50]}...")

            urls.append(link)

        return urls

    def get_article_cites_id(self, title: str) -> Optional[str]:
        """
        Look up an article's cites_id by its title.

        Args:
            title: Article title to search for.

        Returns:
            The article's cites_id, or None when no match is found or
            the lookup fails.
        """
        logger.info(f"搜索文章获取cites_id: {title[:50]}...")

        try:
            articles = self.search_article(title, num_results=5)

            for article in articles:
                # Accept only results whose title is similar enough.
                if self._is_title_match(title, article.get('title', '')):
                    cited_by = article.get('cited_by', {})
                    if 'cites_id' in cited_by:
                        cites_id = cited_by['cites_id']
                        logger.info(f"找到匹配文章的cites_id: {cites_id}")
                        return cites_id

            logger.warning("未找到匹配的文章")
            return None

        except Exception as e:
            logger.error(f"获取cites_id失败: {e}")
            return None

    def _is_title_match(self, target_title: str, result_title: str,
                       threshold: float = 0.8) -> bool:
        """
        Check whether two titles match.

        Uses Jaccard similarity over lowercased word sets.

        Args:
            target_title: Title being searched for.
            result_title: Title from a search result.
            threshold: Minimum similarity to count as a match.

        Returns:
            True when similarity >= threshold.
        """
        target_words = set(target_title.lower().split())
        result_words = set(result_title.lower().split())

        if not target_words or not result_words:
            return False

        intersection = target_words.intersection(result_words)
        union = target_words.union(result_words)

        similarity = len(intersection) / len(union)
        return similarity >= threshold

    def test_connection(self) -> Tuple[bool, str]:
        """
        Test connectivity to the SerpApi endpoint.

        Returns:
            Tuple of (success flag, human-readable message).
        """
        try:
            logger.info("测试SerpApi连接...")

            # Minimal one-result probe query.
            params = {
                'engine': GOOGLE_SCHOLAR_ENGINE,
                'q': 'test',
                'num': 1
            }

            data = self._make_request(params)

            if 'organic_results' in data:
                return True, "SerpApi连接测试成功"
            else:
                return False, "API响应格式异常"

        except SerpApiAuthError as e:
            return False, f"认证失败: {e}"
        except SerpApiError as e:
            return False, f"API错误: {e}"
        except Exception as e:
            return False, f"连接测试失败: {e}"