# Multi-source literature search integration
import json
import os
# arxiv
import arxiv
# Google Scholar
from scholarly import scholarly
# scopus
from elsapy.elsclient import ElsClient
from elsapy.elssearch import ElsSearch
from elsapy.elsdoc import FullDoc, AbsDoc
# Semantic Scholar API
from semanticscholar import SemanticScholar
from paperscraper.pubmed import get_and_dump_pubmed_papers


# arxiv
def arxiv_search(query, max_results):
    """Search arXiv and return the most recently submitted matches.

    Args:
        query (str): Search expression (arXiv advanced query syntax is
            supported).
        max_results (int): Maximum number of papers to return.

    Returns:
        list: One dict per paper with keys ``title``, ``abstract``,
            ``authors``, ``date`` (last-updated, YYYY-MM-DD), ``DOI``
            and ``url`` (the arXiv entry id).
    """
    search = arxiv.Search(
        query=query,
        max_results=max_results,
        sort_by=arxiv.SortCriterion.SubmittedDate,
    )
    return [
        {
            "title": paper.title,
            "abstract": paper.summary,
            "authors": [author.name for author in paper.authors],
            "date": paper.updated.strftime("%Y-%m-%d"),
            "DOI": paper.doi,
            "url": paper.entry_id,
        }
        for paper in search.results()
    ]

# Google Scholar
def google_scholar_search(query, max_results:int=2,year_low=None,year_high=None,sort_by: str = "relevance"):
    """Search Google Scholar via the *scholarly* package.

    Args:
        query (str): Search phrase.
        max_results (int): Maximum number of results to return.
        year_low (int, optional): Earliest publication year, or None for
            no lower bound.
        year_high (int, optional): Latest publication year, or None for
            no upper bound.
        sort_by (str): Sort order, either "relevance" or "date".
            Defaults to "relevance".

    Returns:
        list: Dicts with keys ``title``, ``author``, ``pub_year``,
            ``abstract`` and ``pub_url``; an empty list when the search
            fails for any reason.
    """
    try:
        search_query = scholarly.search_pubs(
            query, sort_by=sort_by, year_low=year_low, year_high=year_high
        )
        results = []
        for _ in range(max_results):
            try:
                result = next(search_query)
            except StopIteration:
                # Fewer hits available than requested.
                break
            # 'bib' can be missing/None on malformed records — fall back
            # to an empty dict so the .get() lookups below stay safe.
            bib = result.get('bib') or {}
            results.append({
                "title": bib.get("title", ""),
                "author": bib.get("author", []),
                "pub_year": bib.get("pub_year", ""),
                "abstract": bib.get("abstract", ""),
                "pub_url": result.get("pub_url", ""),
            })
    except Exception:
        # scholarly raises assorted exceptions (network failures, CAPTCHA
        # blocks); treat any of them as "no results" rather than crashing,
        # but do not use a bare except that would also swallow
        # KeyboardInterrupt/SystemExit.
        return []
    return results


# Semantic Scholar search
def Semantic_Scholar(text, limit=100, fields=None):
    """Retrieve paper metadata from Semantic Scholar.

    Args:
        text (str): Search keywords.
        limit (int): Maximum number of results to return.
        fields (list, optional): Fields to request from the API.
            Defaults to title, abstract, authors, year, journal and url.

    Returns:
        list: One dict per paper with keys ``title``, ``abstract``,
            ``authors``, ``date`` (year as str), ``journal`` and ``url``;
            an empty list when the search fails.
    """
    # Build the default per call — a mutable default argument would be
    # shared across invocations.
    if fields is None:
        fields = ["title", "abstract", "authors", "year", "journal", "url"]
    try:
        sch = SemanticScholar()
        results = sch.search_paper(query=text, limit=limit, fields=fields)
        all_results = []

        for i in range(min(limit, results.total)):
            try:
                paper = results[i]
                # Authors may lack a .name attribute — collect defensively.
                authors = []
                if paper.authors:
                    for author in paper.authors:
                        if hasattr(author, 'name'):
                            authors.append(author.name)

                # The journal field may come back as a dict or an object.
                journal_name = ""
                if paper.journal:
                    if isinstance(paper.journal, dict):
                        journal_name = paper.journal.get("name", "")
                    elif hasattr(paper.journal, 'name'):
                        journal_name = paper.journal.name

                all_results.append({
                    "title": getattr(paper, "title", ""),
                    "abstract": getattr(paper, "abstract", ""),
                    "authors": authors,
                    "date": str(getattr(paper, "year", "")),
                    "journal": journal_name,
                    "url": getattr(paper, "url", "")
                })
            except IndexError:
                # results.total can overstate what is actually fetchable.
                break
            except Exception as e:
                print(f"处理第{i+1}条记录时出错: {str(e)}")
                continue

        return all_results
    except Exception as e:
        print(f"检索过程出错: {str(e)}")
        return []

# PubMed search
def get_pubmed_papers(keywords, max_results=100, start_date="None", end_date="None"):
    """Search PubMed via paperscraper.

    Args:
        keywords (list): Search terms (paperscraper accepts a list of
            synonym lists, ANDed across lists and ORed within a list).
        max_results (int): Maximum number of papers to return.
        start_date (str): Earliest publication date, or the literal string
            "None" for no lower bound (paperscraper convention).
        end_date (str): Latest publication date, or "None" for no bound.

    Returns:
        list: Dicts with keys ``title``, ``abstract``, ``journal``,
            ``date``, ``authors`` (JSON-encoded list) and ``url``
            (a doi.org link, or "" if no DOI); empty list on failure.
    """
    import uuid

    # paperscraper can only dump results to disk, so write to a unique
    # temp file and always clean it up afterwards.
    save_path = f"./data/pubmed_{uuid.uuid4()}.jsonl"
    try:
        # The dump fails if the target directory does not exist.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        get_and_dump_pubmed_papers(
            keywords=keywords,
            max_results=max_results,
            start_date=start_date,
            end_date=end_date,
            output_filepath=save_path,
        )
        with open(save_path, "r") as f:
            raw = [json.loads(line) for line in f]
        return [{
            "title": item.get("title", "").split('\n')[0],
            "abstract": item.get("abstract", ""),
            "journal": str(item.get("journal", "")),
            "date": str(item.get("date", "")),
            # Keep only the first 3 authors when the full list is long (>5).
            "authors": json.dumps(item.get("authors", [])[:3] if len(item.get("authors", [])) > 5 else item.get("authors", [])),
            "url": "https://doi.org/" + item.get("doi").split('\n')[0] if item.get("doi") else ""
        } for item in raw]
    except Exception as e:
        print(f"检索过程出错: {str(e)}")
        return []
    finally:
        # Remove the temp file even when reading/parsing fails part-way,
        # so failed searches do not accumulate files in ./data.
        if os.path.exists(save_path):
            os.remove(save_path)




def get_IF(doi):
    """Look up the journal impact factor for *doi*.

    Not yet implemented — currently a stub that returns None.

    Args:
        doi (str): DOI of the paper whose journal IF should be fetched.
    """
    pass

if __name__ == '__main__':
    # Manual smoke tests — uncomment one of the lines below to exercise a
    # single search backend. Nothing runs by default.
    pass
    # print(arxiv_search("covid", 10))
    # print(google_scholar_search("sepsis",max_results=10))
    # print(Semantic_Scholar("covid", limit=100))
    # covid19 = ['COVID-19', 'SARS-CoV-2']
    # ai = ['Artificial intelligence', 'Deep learning', 'Machine learning']
    # mi = ['Medical imaging']
    # query = [covid19, ai, mi]
    # print(get_pubmed_papers(keywords=query,max_results=200))