import requests
import json
import sys
import argparse
from datetime import datetime
import time

# 定义API的基础URL
BASE_URL = "http://180.184.65.98:38880/atomgit"

def search_papers(query, top_k=1000):
    """Full-text search over paper chunks; returns call_api's wrapped result."""
    endpoint = f"{BASE_URL}/search_papers"
    payload = {'query': query, 'top_k': top_k}
    # method/query kwargs are recorded in the result metadata for filenames.
    return call_api(endpoint, payload, method="search", query=query)

def query_paper_by_id(paper_id, top_k=3):
    """Fetch paper chunks for a specific paper ID; returns call_api's wrapped result."""
    endpoint = f"{BASE_URL}/query_by_paper_id"
    payload = {'paper_id': paper_id, 'top_k': top_k}
    # method/paper_id kwargs are recorded in the result metadata for filenames.
    return call_api(endpoint, payload, method="id", paper_id=paper_id)

def query_paper_by_title(title, top_k=100):
    """Fetch paper chunks by exact paper title; returns call_api's wrapped result."""
    endpoint = f"{BASE_URL}/query_by_title"
    payload = {'title': title, 'top_k': top_k}
    # method/title kwargs are recorded in the result metadata for filenames.
    return call_api(endpoint, payload, method="title", title=title)

def get_metadata():
    """Fetch metadata describing the paper database; returns call_api's wrapped result."""
    # No query parameters; the method kwarg tags the result for filename building.
    return call_api(f"{BASE_URL}/metadata", {}, method="metadata")

def query_by_title_contain(title, top_k=1000):
    """Fetch chunks of papers whose titles contain the given text; wrapped by call_api."""
    endpoint = f"{BASE_URL}/query_by_title_contain"
    payload = {'title': title, 'top_k': top_k}
    # method/title kwargs are recorded in the result metadata for filenames.
    return call_api(endpoint, payload, method="title_contain", title=title)

def query_by_chunk_contain(chunk, top_k=1000):
    """Fetch paper chunks whose body text contains the given text; wrapped by call_api."""
    endpoint = f"{BASE_URL}/query_by_chunk_contain"
    payload = {'chunk': chunk, 'top_k': top_k}
    # method/chunk kwargs are recorded in the result metadata for filenames.
    return call_api(endpoint, payload, method="chunk_contain", chunk=chunk)

# def call_api(url, params, method=None, **kwargs):
#     """通用的API调用函数，并返回带有元数据的结果"""
#     response = requests.get(url, params=params)
    
#     if response.status_code == 200:
#         # 添加元数据到响应结果
#         result = {
#             'metadata': {
#                 'method': method,
#                 'timestamp': datetime.now().isoformat(),
#                 'parameters': kwargs,
#                 'api_url': url,
#                 'request_params': params
#             },
#             'data': response.json()
#         }
#         return result
#     else:
#         print(f"Error: Received HTTP status code {response.status_code}")
#         return None


def call_api(url, params, retries=3, backoff_factor=2, **kwargs):
    """
    Issue a GET request to an API endpoint and wrap the JSON response with
    metadata. Failed attempts are retried with exponential backoff.

    Parameters:
    - url: full endpoint URL.
    - params: query-string parameters for the GET request.
    - retries: maximum number of attempts (default 3).
    - backoff_factor: base of the exponential wait between attempts (default 2).
    - kwargs: caller-supplied context (e.g. method name, query text) that is
      recorded under metadata['parameters'] and later used by save_to_file to
      build descriptive output filenames.

    Returns a dict with 'metadata' and 'data' keys; 'data' is None when every
    attempt fails.
    """
    last_error = None

    for attempt in range(1, retries + 1):
        try:
            response = requests.get(url, params=params, timeout=100)

            if response.status_code == 200:
                return {
                    'metadata': {
                        'timestamp': datetime.now().isoformat(),
                        'parameters': kwargs,
                        'api_url': url,
                        'request_params': params,
                        'retries': attempt - 1  # number of retries actually needed
                    },
                    'data': response.json()
                }

            last_error = f"HTTP {response.status_code}"
            print(f"Attempt {attempt}: Received HTTP status code {response.status_code}")

        # RequestException covers connection errors and timeouts; ValueError
        # covers a 200 response whose body is not valid JSON. The original
        # caught (RequestException, Exception), i.e. effectively everything,
        # which also hid programming errors — keep the net narrow.
        except (requests.exceptions.RequestException, ValueError) as e:
            last_error = str(e)
            print(f"Attempt {attempt} failed with exception: {e}")

        # Wait before retrying, unless this was the final attempt.
        if attempt < retries:
            wait_time = backoff_factor ** (attempt - 1)
            print(f"Retrying in {wait_time} seconds...")
            time.sleep(wait_time)

    # All attempts failed: return a result with error details and no data.
    return {
        'metadata': {
            'timestamp': datetime.now().isoformat(),
            'parameters': kwargs,
            'api_url': url,
            'request_params': params,
            'retries': retries,
            'error': f"All {retries} attempts failed",
            'last_error': last_error  # detail of the final failure, if any
        },
        'data': None
    }

def save_to_file(data, filename_prefix="api_test"):
    """Save an API result dict to a JSON file with a descriptive name.

    The filename is built from the query method and its parameters found in
    data['metadata'], e.g. "api_test_search_<query>_<timestamp>.json", with
    filesystem-unfriendly characters (spaces, ':', '.') replaced in the stem.

    Fixes two defects of the original: the success message printed a literal
    "(unknown)" instead of the filename, and the final '.'->'_' replacement ran
    over the whole name, mangling the ".json" extension into "_json".

    Parameters:
    - data: result dict as produced by call_api (must contain 'metadata').
    - filename_prefix: leading component of the output filename.

    Returns the filename written, or None when data has no usable metadata.
    """
    if not data or 'metadata' not in data:
        print("No valid data to save.")
        return None

    metadata = data['metadata']
    parameters = metadata.get('parameters', {})
    method = parameters.get('method')

    # For each method, the parameter that best describes the request.
    descriptive_keys = {
        'search': 'query',
        'id': 'paper_id',
        'title': 'title',
        'title_contain': 'title',
        'chunk_contain': 'chunk',
    }

    parts = [filename_prefix]
    if method:
        parts.append(method)
        key = descriptive_keys.get(method)
        if key:
            parts.append(str(parameters.get(key, f'unknown_{key}')))
    parts.append(metadata['timestamp'])

    # Sanitize only the stem so the ".json" extension survives intact.
    stem = '_'.join(parts).replace(' ', '_').replace(':', '-').replace('.', '_')
    filename = f"{stem}.json"

    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    print(f"Results saved to {filename}")
    return filename

def main():
    """Parse CLI arguments, exercise the matching API call, and save the result."""
    parser = argparse.ArgumentParser(description="Test the provided API functions.")
    parser.add_argument('--query', type=str, help='The query text for searching papers')
    parser.add_argument('--paper_id', type=str, help='The ID of the paper to query')
    parser.add_argument('--title', type=str, help='The title of the paper to query (exact match)')
    parser.add_argument('--title_contain', type=str, help='Search for papers with titles containing this string')
    parser.add_argument('--chunk', type=str, help='Search for papers with chunks containing this string')
    parser.add_argument('--metadata', action='store_true', help='Get metadata about the paper database')
    parser.add_argument('--top_k', type=int, default=None, help='The number of top results to return')
    args = parser.parse_args()

    # (selected?, announcement, zero-arg runner) in the original priority
    # order; each runner falls back to its own default top_k when --top_k is
    # absent or zero.
    candidates = [
        (args.query, "Testing search_papers...",
         lambda: search_papers(args.query, args.top_k or 30)),
        (args.paper_id, "Testing query_paper_by_id...",
         lambda: query_paper_by_id(args.paper_id, args.top_k or 5)),
        (args.title, "Testing query_paper_by_title...",
         lambda: query_paper_by_title(args.title, args.top_k or 100)),
        (args.metadata, "Testing get_metadata...",
         lambda: get_metadata()),
        (args.title_contain, "Testing query_by_title_contain...",
         lambda: query_by_title_contain(args.title_contain, args.top_k or 1000)),
        (args.chunk, "Testing query_by_chunk_contain...",
         lambda: query_by_chunk_contain(args.chunk, args.top_k or 1000)),
    ]

    for selected, announcement, run in candidates:
        if selected:
            print(announcement)
            result = run()
            break
    else:
        print("No valid arguments provided. Please specify one of --query, --paper_id, --title, --metadata, --title_contain, or --chunk.")
        sys.exit(1)

    # Persist the response (including failure results, which carry metadata).
    if result is not None:
        save_to_file(result)
    # import ipdb;ipdb.set_trace()

# Run the CLI harness only when executed as a script, not on import.
if __name__ == "__main__":
    main()

# 要求不少于50，暂定为100篇

# 搜索包含特定关键词的论文：python chatglm_api.py --query "Large Language Models for Time Series" --top_k 100
# 根据论文ID查询论文：python chatglm_api.py --paper_id "65c19a4a939a5f40825db46b" --top_k 30
# 65c19a4a939a5f40825db46b
# 660b66cc13fb2c6cf6dd3548

# 搜索标题中包含特定字符串的论文：python chatglm_api.py --title_contain "Large Language Models"
# 搜索论文内容中包含特定文本的片段：python chatglm_api.py --chunk "Time Series"


# 根据论文标题精确匹配查询论文（有参考文献）：python chatglm_api.py --title "Timer: Generative Pre-trained Transformers Are Large Time Series Models"



# 获取论文数据库的元数据：python chatglm_api.py --metadata