import requests

# Base URL for the API
BASE_URL = "http://180.184.65.98:38880/atomgit/"


def get_metadata(timeout=30):
    """
    Fetch metadata about the paper database.

    :param timeout: request timeout in seconds (optional, default 30)
    :return: metadata parsed from the JSON response
    :raises requests.HTTPError: if the server returns an error status
    :raises requests.Timeout: if the server does not respond in time
    """
    url = BASE_URL + "metadata"
    # Always pass a timeout: without one, requests can block forever
    # on an unresponsive server.
    response = requests.get(url, timeout=timeout)
    # Fail loudly on HTTP errors instead of trying to JSON-decode an
    # error page.
    response.raise_for_status()
    return response.json()


def search_papers(query, top_k=30, timeout=30):
    """
    Search paper chunks by a free-text query.

    :param query: query text (required)
    :param top_k: maximum number of results to return (optional, default 30)
    :param timeout: request timeout in seconds (optional, default 30)
    :return: JSON array of matching paper-chunk records
    :raises requests.HTTPError: if the server returns an error status
    :raises requests.Timeout: if the server does not respond in time
    """
    url = BASE_URL + "search_papers"
    params = {'query': query, 'top_k': top_k}
    # Timeout guards against an unresponsive server hanging the caller.
    response = requests.get(url, params=params, timeout=timeout)
    response.raise_for_status()
    return response.json()


def query_by_paper_id(paper_id, top_k=5, timeout=30):
    """
    Fetch paper chunks by paper ID.

    :param paper_id: paper ID (required)
    :param top_k: maximum number of results to return (optional, default 5)
    :param timeout: request timeout in seconds (optional, default 30)
    :return: JSON array of matching paper-chunk records
    :raises requests.HTTPError: if the server returns an error status
    :raises requests.Timeout: if the server does not respond in time
    """
    url = BASE_URL + "query_by_paper_id"
    params = {'paper_id': paper_id, 'top_k': top_k}
    # Timeout guards against an unresponsive server hanging the caller.
    response = requests.get(url, params=params, timeout=timeout)
    response.raise_for_status()
    return response.json()


def query_by_title(title, top_k=100, timeout=30):
    """
    Fetch paper chunks by exact paper title.

    :param title: paper title (required)
    :param top_k: maximum number of results to return (optional, default 100)
    :param timeout: request timeout in seconds (optional, default 30)
    :return: JSON array of matching paper-chunk records
    :raises requests.HTTPError: if the server returns an error status
    :raises requests.Timeout: if the server does not respond in time
    """
    url = BASE_URL + "query_by_title"
    params = {'title': title, 'top_k': top_k}
    # Timeout guards against an unresponsive server hanging the caller.
    response = requests.get(url, params=params, timeout=timeout)
    response.raise_for_status()
    return response.json()


def query_by_title_contain(title, top_k=1000, timeout=30):
    """
    Fetch paper chunks whose title contains the given text.

    :param title: substring to match within paper titles (required)
    :param top_k: maximum number of results to return (optional, default 1000)
    :param timeout: request timeout in seconds (optional, default 30)
    :return: JSON array of matching paper-chunk records
    :raises requests.HTTPError: if the server returns an error status
    :raises requests.Timeout: if the server does not respond in time
    """
    url = BASE_URL + "query_by_title_contain"
    params = {'title': title, 'top_k': top_k}
    # Timeout guards against an unresponsive server hanging the caller.
    response = requests.get(url, params=params, timeout=timeout)
    response.raise_for_status()
    return response.json()


def query_by_chunk_contain(chunk, top_k=1000, timeout=30):
    """
    Fetch paper chunks whose content contains the given text.

    :param chunk: substring to match within chunk content (required)
    :param top_k: maximum number of results to return (optional, default 1000)
    :param timeout: request timeout in seconds (optional, default 30)
    :return: JSON array of matching paper-chunk records
    :raises requests.HTTPError: if the server returns an error status
    :raises requests.Timeout: if the server does not respond in time
    """
    url = BASE_URL + "query_by_chunk_contain"
    params = {'chunk': chunk, 'top_k': top_k}
    # Timeout guards against an unresponsive server hanging the caller.
    response = requests.get(url, params=params, timeout=timeout)
    response.raise_for_status()
    return response.json()

if __name__ == '__main__':
    # Demo: semantic search for a paper, then inspect the structure of the
    # chunks returned for each hit's paper_id.
    #
    # NOTE(review): the original query string was 'ontrollable Text
    # Generation with Reinforced Unlearning' — missing the leading 'C' of
    # the full title quoted in the (now removed) commented-out experiments.
    # Fixed to the complete title; confirm this was a typo, not an
    # intentional partial-match probe.
    #
    # Dead commented-out experiment code (LLM-driven draft summarization)
    # was removed here; recover it from version control if needed.
    for hit in search_papers('Controllable Text Generation with Reinforced Unlearning'):
        # Each hit carries an 'entity' record with the source paper_id;
        # print the keys of the first chunk retrieved for that paper.
        print(query_by_paper_id(hit['entity']['paper_id'])[0].keys())