import asyncio

import requests
import aiohttp
import os
import json
BASE_URL = "http://180.184.65.98:38880/atomgit/"

# 1. Search paper chunks by a free-text query.
def search_papers(query, top_k=30, timeout=30):
    """Search paper chunks matching a free-text query.

    Args:
        query: Free-text search string.
        top_k: Maximum number of chunks to return.
        timeout: Seconds to wait for the HTTP response.

    Returns:
        Parsed JSON payload returned by the search service.
    """
    url = f"{BASE_URL}search_papers"
    params = {"query": query, "top_k": top_k}
    # requests has no default timeout; without one a stalled server hangs forever.
    response = requests.get(url, params=params, timeout=timeout)
    return response.json()

# 2. Fetch chunks of a paper by its paper ID.
def query_by_paper_id(paper_id, top_k=100, timeout=30):
    """Return up to `top_k` chunks of the paper identified by `paper_id`.

    Args:
        paper_id: Identifier of the paper to fetch.
        top_k: Maximum number of chunks to return.
        timeout: Seconds to wait for the HTTP response.

    Returns:
        Parsed JSON payload returned by the service.
    """
    url = f"{BASE_URL}query_by_paper_id"
    params = {"paper_id": paper_id, "top_k": top_k}
    # Bound the wait: requests.get without a timeout can block indefinitely.
    response = requests.get(url, params=params, timeout=timeout)
    return response.json()

async def async_query_by_paper_id(paper_id, top_k=100):
    """Asynchronously fetch up to `top_k` chunks of the paper `paper_id`."""
    endpoint = f"{BASE_URL}query_by_paper_id"
    query_params = {"paper_id": paper_id, "top_k": top_k}
    # One short-lived session per request keeps the helper self-contained.
    async with aiohttp.ClientSession() as session:
        async with session.get(endpoint, params=query_params) as resp:
            return await resp.json()
# 3. Fetch paper chunks by exact paper title.
def query_by_title(title, top_k=100, timeout=30):
    """Return up to `top_k` chunks of the paper whose title is `title`.

    Args:
        title: Paper title to look up.
        top_k: Maximum number of chunks to return.
        timeout: Seconds to wait for the HTTP response.

    Returns:
        Parsed JSON payload returned by the service.
    """
    url = f"{BASE_URL}query_by_title"
    params = {"title": title, "top_k": top_k}
    # Bound the wait: requests.get without a timeout can block indefinitely.
    response = requests.get(url, params=params, timeout=timeout)
    return response.json()

# 4. Fetch metadata about the paper database.
def get_metadata(timeout=30):
    """Return the paper database's metadata as parsed JSON.

    Args:
        timeout: Seconds to wait for the HTTP response.
    """
    url = f"{BASE_URL}metadata"
    # Bound the wait: requests.get without a timeout can block indefinitely.
    response = requests.get(url, timeout=timeout)
    return response.json()

# 5. Fetch chunks of papers whose title contains the given text.
def query_by_title_contain(title, top_k=1000, timeout=30):
    """Return up to `top_k` chunks whose paper title contains `title`.

    Args:
        title: Substring to match against paper titles.
        top_k: Maximum number of chunks to return.
        timeout: Seconds to wait for the HTTP response.

    Returns:
        Parsed JSON payload returned by the service.
    """
    url = f"{BASE_URL}query_by_title_contain"
    params = {"title": title, "top_k": top_k}
    # Bound the wait: requests.get without a timeout can block indefinitely.
    response = requests.get(url, params=params, timeout=timeout)
    return response.json()

# 6. Fetch chunks whose content contains the given text.
def query_by_chunk_contain(chunk, top_k=1000, timeout=30):
    """Return up to `top_k` chunks whose text contains `chunk`.

    Args:
        chunk: Substring to match against chunk contents.
        top_k: Maximum number of chunks to return.
        timeout: Seconds to wait for the HTTP response.

    Returns:
        Parsed JSON payload returned by the service.
    """
    url = f"{BASE_URL}query_by_chunk_contain"
    params = {"chunk": chunk, "top_k": top_k}
    # Bound the wait: requests.get without a timeout can block indefinitely.
    response = requests.get(url, params=params, timeout=timeout)
    return response.json()

async def get_paper_database_name(paper_id_list, top_k=100):
    """Fetch papers in parallel and extract title / source-database info.

    `paper_id_list` has the form ["paper_id1", "paper_id2", ...]. Returns a
    list of dicts with keys "paper_title" and "paper_database".
    """
    fetches = (async_query_by_paper_id(pid, top_k=top_k) for pid in paper_id_list)
    all_chunks = await asyncio.gather(*fetches)

    infos = []
    for chunks in all_chunks:
        if chunks:
            head = chunks[0]
            infos.append({
                "paper_title": head.get('paper_title', ''),
                "paper_database": head.get('original_filename', ''),
            })
        elif infos:
            # Empty response: repeat the previous valid entry as a filler
            # (skipped entirely when there is no previous entry yet).
            infos.append(infos[-1])

    return infos


async def fetch_references_and_process(ref_ids, chunk_list: list = None):
    """Fetch referenced papers in parallel and format their selected chunks.

    Args:
        ref_ids: Paper IDs of the references to fetch.
        chunk_list: chunk_ids to keep; defaults to 0-99 when None.

    Returns:
        Dict mapping each ref_id to a newline-joined string of its
        formatted chunks.
    """
    # Default to the first 100 chunk ids.
    if chunk_list is None:
        chunk_list = list(range(100))
    # Build a set once: O(1) membership test per chunk instead of scanning
    # the list for every chunk of every paper.
    wanted_ids = set(chunk_list)

    # Fetch the content of all referenced papers in parallel.
    tasks = [async_query_by_paper_id(ref_id) for ref_id in ref_ids]
    results = await asyncio.gather(*tasks)

    processed_data = {}
    for ref_id, chunks in zip(ref_ids, results):
        # Keep only the requested chunk ids, formatted as pseudo-JSON entries.
        text_list = [
            f"{{\nchunk_id: {c.get('chunk_id', '')},\nchunk_text: \"{c.get('chunk_text', '')}\"\n}},"
            for c in chunks
            if c.get('chunk_id') in wanted_ids
        ]
        # Join all chunk texts for this reference.
        processed_data[ref_id] = "\n".join(text_list)

    return processed_data


async def get_all_paper_full_text(self, data: list[dict] = None, chunk_list: list = None):
    """Collect the full text of every referenced paper across all groups.

    Args:
        self: Object providing `fetch_references_and_process(refs, chunk_list)`.
        data: Groups of references, e.g. [{"refs": ["ref_id1", ...]}, ...].
            Defaults to an empty list (was a mutable default `[]`; a None
            sentinel avoids sharing one list across calls).
        chunk_list: Forwarded chunk-id filter; see fetch_references_and_process.

    Returns:
        Dict mapping every referenced paper ID to its full text.
    """
    if data is None:
        data = []

    # Accumulate paper_id -> full text across all reference groups.
    all_text_data = {}
    for item in data:
        refs = item["refs"]
        processed_results = await self.fetch_references_and_process(refs, chunk_list)
        # Later groups overwrite earlier entries for duplicate IDs.
        all_text_data.update(processed_results)

    return all_text_data


def set_llm_api_token(model_list, api_token):
    """Assign `api_token` to each listed model present in the global CONFIG."""
    from model_config.config import CONFIG

    # A falsy token means "leave the configuration untouched".
    if not api_token:
        return
    models = CONFIG["MODELS"]
    for name in model_list:
        if name in models:
            models[name]["token"] = api_token


if __name__ == "__main__":
    # Very common words used as broad probes to sweep a wide set of papers.
    query_list = [
        "the", "a", "an", "and", "or", "of", "in", "on", "at", "to",
        "study", "research", "analysis", "method", "results",
        "we", "they", "this", "these", "those",
    ]

    # Seed the de-duplication sets from previously collected results.
    with open('paper_id_list.json', 'r') as f:
        unique_paper_id = set(json.load(f))
    with open('paper_title_list.json', 'r') as f:
        unique_paper_title = set(json.load(f))

    for index, query in enumerate(query_list, start=1):
        # Progress indicator (1-based query number).
        print(index)
        similarity_result = search_papers(query, top_k=1000)
        contain_result = (
            query_by_title_contain(query, top_k=1000)
            + query_by_chunk_contain(query, top_k=1000)
        )
        # Similarity hits wrap their fields under an "entity" key.
        for item in similarity_result:
            unique_paper_id.add(item["entity"]["paper_id"])
            unique_paper_title.add(item["entity"]["paper_title"])
        # Contain-style hits expose the fields at the top level.
        for hit in contain_result:
            unique_paper_id.add(hit["paper_id"])
            unique_paper_title.add(hit["paper_title"])

    paper_id_list = list(unique_paper_id)
    print(len(paper_id_list))
    with open("paper_id_list.json", "w") as f:
        json.dump(paper_id_list, f, indent=4, ensure_ascii=False)
    with open("paper_title_list.json", "w") as f:
        json.dump(list(unique_paper_title), f, indent=4, ensure_ascii=False)
    