"""
PubMed文献爬取模块
使用NCBI E-utilities API抓取开放获取的医学文献
"""
import requests
import time
import xml.etree.ElementTree as ET
from typing import List, Dict, Optional
from datetime import datetime
from tqdm import tqdm
import logging

from config import PUBMED_API_BASE, PUBMED_BATCH_SIZE, PUBMED_DATE_RANGE

# Module-level logging setup.
# NOTE(review): calling basicConfig() at import time configures the *root*
# logger as a side effect of importing this module — fine for a script,
# but confirm this is intended if the module is imported as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class PubMedCrawler:
    """Crawler for open-access PubMed literature via the NCBI E-utilities API."""

    def __init__(self):
        self.base_url = PUBMED_API_BASE
        # Reuse one HTTP connection pool across the many small API calls.
        self.session = requests.Session()

    def search_papers(
        self,
        query: str = "",
        max_results: int = 1000,
        retmax: int = PUBMED_BATCH_SIZE
    ) -> List[str]:
        """
        Search PubMed (esearch) and collect matching article IDs.

        Args:
            query: Search term; empty falls back to an open-access /
                free-full-text filter.
            max_results: Upper bound on the number of IDs returned.
            retmax: Page size for each esearch request.

        Returns:
            A list of PubMed IDs, at most ``max_results`` long.
        """
        pmids: List[str] = []
        retstart = 0

        while len(pmids) < max_results:
            # Never request more IDs than are still needed to reach
            # max_results — the original always fetched a full page.
            page_size = min(retmax, max_results - len(pmids))
            params = {
                "db": "pubmed",
                "term": query or "open access[journal] AND free full text[filter]",
                "retmode": "xml",
                "retmax": page_size,
                "retstart": retstart,
                "datetype": "pdat",
                "mindate": PUBMED_DATE_RANGE[0],
                "maxdate": PUBMED_DATE_RANGE[1]
            }

            try:
                response = self.session.get(
                    f"{self.base_url}/esearch.fcgi",
                    params=params,
                    timeout=30
                )
                response.raise_for_status()

                root = ET.fromstring(response.content)
                ids = [id_elem.text for id_elem in root.findall(".//Id")]

                if not ids:
                    break  # result set exhausted

                pmids.extend(ids)
                retstart += len(ids)

                logger.info("已获取 %d 条ID", len(pmids))
                # NCBI allows at most ~3 requests/second without an API key.
                time.sleep(0.35)

            except (requests.RequestException, ET.ParseError) as e:
                logger.error("搜索论文失败: %s", e)
                break

        return pmids[:max_results]

    def fetch_paper_details(self, pmids: List[str]) -> List[Dict]:
        """
        Fetch article metadata (efetch) for a list of PubMed IDs in batches.

        Args:
            pmids: PubMed IDs to look up.

        Returns:
            Parsed article dicts; records that fail to parse are skipped.
        """
        papers: List[Dict] = []

        for i in tqdm(range(0, len(pmids), PUBMED_BATCH_SIZE), desc="获取论文详情"):
            batch = pmids[i:i + PUBMED_BATCH_SIZE]
            params = {
                "db": "pubmed",
                "id": ",".join(batch),
                "retmode": "xml",
                "rettype": "abstract"
            }

            try:
                response = self.session.get(
                    f"{self.base_url}/efetch.fcgi",
                    params=params,
                    timeout=30
                )
                response.raise_for_status()

                root = ET.fromstring(response.content)

                for article in root.findall(".//PubmedArticle"):
                    paper = self._parse_article(article)
                    if paper:
                        papers.append(paper)

                # Stay under NCBI's unauthenticated rate limit.
                time.sleep(0.35)

            except (requests.RequestException, ET.ParseError) as e:
                logger.error("获取论文详情失败: %s", e)

        return papers

    @staticmethod
    def _flatten_text(elem: Optional[ET.Element]) -> str:
        """Return all text inside *elem*, including text nested in inline
        markup (<i>, <sub>, <sup>, ...) that ``.text`` alone would drop."""
        if elem is None:
            return ""
        return "".join(elem.itertext()).strip()

    def _parse_article(self, article: ET.Element) -> Optional[Dict]:
        """Parse one <PubmedArticle> element into a flat dict, or None on error."""
        try:
            pmid_elem = article.find(".//PMID")
            pmid = pmid_elem.text if pmid_elem is not None else ""

            # itertext() keeps text nested in inline markup; plain .text
            # returned None/partial titles for marked-up ArticleTitle elements.
            title = self._flatten_text(article.find(".//ArticleTitle")) or "No Title"

            # Structured abstracts carry several AbstractText sections;
            # join the non-empty ones with a space, as before.
            abstract = " ".join(
                part
                for part in (self._flatten_text(e)
                             for e in article.findall(".//AbstractText"))
                if part
            )

            # Authors: only keep entries with both a fore- and last name.
            authors = []
            for author in article.findall(".//Author"):
                last = author.find("LastName")
                first = author.find("ForeName")
                if last is not None and first is not None:
                    authors.append(f"{first.text} {last.text}")

            journal_elem = article.find(".//Journal/Title")
            journal = journal_elem.text if journal_elem is not None else "Unknown Journal"

            # Publication date: "YYYY" or "YYYY-Month". Guard .text so an
            # empty <Year/> can no longer yield the string "None".
            pub_date = ""
            pub_date_elem = article.find(".//PubDate")
            if pub_date_elem is not None:
                year = pub_date_elem.find("Year")
                month = pub_date_elem.find("Month")
                if year is not None and year.text:
                    pub_date = year.text
                    if month is not None and month.text:
                        pub_date += f"-{month.text}"

            keywords = [k.text for k in article.findall(".//Keyword") if k.text]

            return {
                "pmid": pmid,
                "title": title,
                "abstract": abstract,
                "authors": authors,
                "journal": journal,
                "publication_date": pub_date,
                "keywords": keywords,
                "full_text_url": f"https://pubmed.ncbi.nlm.nih.gov/{pmid}/"
            }

        except Exception as e:
            # Defensive: one malformed record must not abort the whole batch.
            logger.error("解析文章失败: %s", e)
            return None

    def crawl_recent_papers(
        self,
        query: str = "",
        max_results: int = 1000
    ) -> List[Dict]:
        """
        Search for recent open-access papers and fetch their details.

        Args:
            query: Search query (empty uses the default open-access filter).
            max_results: Maximum number of papers to fetch.

        Returns:
            List of parsed article dicts.
        """
        logger.info("开始搜索论文ID...")
        pmids = self.search_papers(query=query, max_results=max_results)

        logger.info("找到 %d 篇论文，开始获取详情...", len(pmids))
        papers = self.fetch_paper_details(pmids)

        logger.info("成功获取 %d 篇论文详情", len(papers))
        return papers


if __name__ == "__main__":
    # Smoke-test the crawler against a small live query.
    demo_crawler = PubMedCrawler()
    results = demo_crawler.crawl_recent_papers(query="COVID-19 treatment", max_results=10)

    for item in results:
        print(f"\nTitle: {item['title']}")
        print(f"Abstract: {item['abstract'][:200]}...")
