import asyncio
import json
import os
import uuid
import time
import threading
from typing import Optional
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm

import aiohttp
import requests
from zhipuai import ZhipuAI

from logger import main_logger

class LiteratureSearchAPIAsync:
    """Asynchronous client for the literature-search HTTP service.

    Must be used as an async context manager so the underlying
    ``aiohttp.ClientSession`` is opened and closed correctly::

        async with LiteratureSearchAPIAsync(base_url) as api:
            papers = await api.search_by_query("RAG")
    """

    def __init__(self, base_url: str, save_json: bool = False):
        # Short random id to correlate log lines produced by this client.
        self.uuid = str(uuid.uuid4())[:8]
        self.base_url = base_url
        self._session: Optional[aiohttp.ClientSession] = None
        # When True, every successful JSON response is also dumped to api_results/.
        self.save_json = save_json
        # Logical endpoint name -> URL path on the search service.
        self._endpoints = {
            "metadata": "/metadata",
            "search_by_query": "/search_papers",
            "search_by_paper_id": "/query_by_paper_id",
            "search_by_title": "/query_by_title",
            "search_by_title_contain": "/query_by_title_contain",
            "search_by_chunk": "/query_by_chunk_contain",
            "search_by_title_metadata": "/query_paper_metadata_that_title_contain",
            "search_similar_titles": "/titles_like"
        }
    
    async def __aenter__(self):
        self._session = aiohttp.ClientSession()
        return self
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self._session:
            await self._session.close()
    
    @property
    def session(self):
        """The live HTTP session; raises if used outside ``async with``."""
        if self._session is None:
            raise RuntimeError("API client not initialized. Use 'async with' context manager.")
        return self._session
    
    async def log(self, endpoint: str, params: dict, response):
        """Log each request; errors (non-200) are logged at ERROR level."""
        if response.status == 200:
            main_logger.info(f"【LiteratureSearchAPI】[{self.uuid}] - [endpoint]: {endpoint} - [params]: {params}")
        else:
            main_logger.error(f"【LiteratureSearchAPI】[{self.uuid}] - [endpoint]: {endpoint} - [params]: {params}")

    async def _make_request(self, endpoint: str, params: dict = None):
        """Unified request helper: resolve the endpoint, GET, log, and parse.

        Raises:
            ValueError: if ``endpoint`` is not a known logical endpoint name.
        """
        if endpoint not in self._endpoints:
            raise ValueError(f"Unknown endpoint: {endpoint}")
        
        url = f"{self.base_url}{self._endpoints[endpoint]}"
        async with self.session.get(url, params=params) as response:
            await self.log(endpoint, params, response)
            return await self._handle_response(response)

    async def get_metadata(self):
        """Fetch metadata about the paper corpus."""
        return await self._make_request("metadata")

    async def search_by_query(self, query: str, top_k: int = 30):
        """Search papers by a free-text query."""
        return await self._make_request("search_by_query", {"query": query, "top_k": top_k})

    async def search_by_paper_id(self, paper_id: str, top_k: int = 5):
        """Search papers similar to the paper with the given id."""
        return await self._make_request("search_by_paper_id", {"paper_id": paper_id, "top_k": top_k})

    async def search_by_title(self, title: str, top_k: int = 100):
        """Search papers by exact title."""
        return await self._make_request("search_by_title", {"title": title, "top_k": top_k})

    async def search_by_title_contain(self, title: str, top_k: int = 1000):
        """Search papers whose title contains the given text (fuzzy)."""
        return await self._make_request("search_by_title_contain", {"title": title, "top_k": top_k})

    async def search_by_chunk(self, chunk: str, top_k: int = 1000):
        """Search papers by a text chunk."""
        return await self._make_request("search_by_chunk", {"chunk": chunk, "top_k": top_k})

    async def search_by_title_metadata(self, title: str, top_k: int = 100):
        """Fetch metadata of papers whose title contains the given keyword."""
        return await self._make_request("search_by_title_metadata", {"title": title, "top_k": top_k})

    async def search_similar_titles(self, title: str, top_k: int = 100):
        """Find titles similar to the given one."""
        return await self._make_request("search_similar_titles", {"title": title, "top_k": top_k})

    async def _batch_search(self, method, items: list, top_k: int, desc: str):
        """Run ``method(item, top_k)`` concurrently for every item, with a progress bar.

        Results are returned in input order (``asyncio.gather`` preserves order).
        """
        tasks = []
        with tqdm(total=len(items), desc=desc, ncols=100) as pbar:
            for item in items:
                task = asyncio.create_task(method(item, top_k))
                # Advance the bar as each task completes, regardless of order.
                task.add_done_callback(lambda _: pbar.update(1))
                tasks.append(task)
            return await asyncio.gather(*tasks)

    async def batch_search_by_query(self, queries: list[str], top_k: int = 30):
        """Batch version of :meth:`search_by_query`."""
        return await self._batch_search(self.search_by_query, queries, top_k, "Batch searching by queries")

    async def batch_search_by_paper_id(self, paper_ids: list[str], top_k: int = 5):
        """Batch version of :meth:`search_by_paper_id`."""
        return await self._batch_search(self.search_by_paper_id, paper_ids, top_k, "Batch searching by paper IDs")

    async def batch_search_by_title(self, titles: list[str], top_k: int = 100):
        """Batch version of :meth:`search_by_title`."""
        return await self._batch_search(self.search_by_title, titles, top_k, "Batch searching by titles")

    async def batch_search_by_title_contain(self, titles: list[str], top_k: int = 1000):
        """Batch version of :meth:`search_by_title_contain`."""
        return await self._batch_search(self.search_by_title_contain, titles, top_k, "Batch searching by title keywords")

    async def batch_search_by_chunk(self, chunks: list[str], top_k: int = 1000):
        """Batch version of :meth:`search_by_chunk`."""
        return await self._batch_search(self.search_by_chunk, chunks, top_k, "Batch searching by chunks")

    async def batch_search_by_title_metadata(self, titles: list[str], top_k: int = 100):
        """Batch version of :meth:`search_by_title_metadata`."""
        return await self._batch_search(self.search_by_title_metadata, titles, top_k, "Batch searching title metadata")

    async def batch_search_similar_titles(self, titles: list[str], top_k: int = 100):
        """Batch version of :meth:`search_similar_titles`."""
        return await self._batch_search(self.search_similar_titles, titles, top_k, "Batch searching similar titles")

    async def _handle_response(self, response):
        """Return the parsed JSON body on HTTP 200; raise for any other status."""
        if response.status == 200:
            payload = await response.json()
            if self.save_json:
                # Fix: ensure the output directory exists before writing.
                # Previously this raised FileNotFoundError whenever
                # api_results/ had not been created by the caller.
                os.makedirs("api_results", exist_ok=True)
                ts = time.time()
                with open(f"api_results/{ts}-{self.uuid}.json", "w", encoding="utf-8") as f:
                    json.dump(payload, f, ensure_ascii=False, indent=4)
            return payload
        response.raise_for_status()

class LiteratureSearchAPI:
    """Synchronous wrapper around :class:`LiteratureSearchAPIAsync`.

    Each call spins up the async client inside its own ``async with``
    block and drives it to completion on a private event loop.
    """

    def __init__(self, base_url: str, save_json: bool = False):
        self.base_url = base_url
        self._async_api = LiteratureSearchAPIAsync(base_url, save_json)
        # Lazily-created private event loop; see _ensure_loop().
        self._loop = None

    def _ensure_loop(self):
        """Ensure a usable (open) event loop is available.

        Fix: the previous implementation called ``asyncio.get_event_loop()``,
        which is deprecated when no loop is running and can hand back a loop
        that an earlier ``asyncio.run()`` already closed — making every
        subsequent call fail with "Event loop is closed". We now keep our own
        loop and replace it whenever it is missing or closed.
        """
        if self._loop is None or self._loop.is_closed():
            self._loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._loop)

    def _run_async(self, coro):
        """Run ``coro(api)`` to completion inside a fresh client context."""
        self._ensure_loop()

        async def _run():
            async with self._async_api as api:
                return await coro(api)

        return self._loop.run_until_complete(_run())
    
    def get_metadata(self):
        """Fetch metadata about the paper corpus."""
        return self._run_async(lambda api: api.get_metadata())

    def search_by_query(self, query: str, top_k: int = 30):
        """Search papers by a free-text query."""
        return self._run_async(lambda api: api.search_by_query(query, top_k))

    def search_by_paper_id(self, paper_id: str, top_k: int = 5):
        """Search papers similar to the paper with the given id."""
        return self._run_async(lambda api: api.search_by_paper_id(paper_id, top_k))

    def search_by_title(self, title: str, top_k: int = 100):
        """Search papers by exact title."""
        return self._run_async(lambda api: api.search_by_title(title, top_k))

    def search_by_title_contain(self, title: str, top_k: int = 1000):
        """Search papers whose title contains the given text (fuzzy)."""
        return self._run_async(lambda api: api.search_by_title_contain(title, top_k))

    def search_by_chunk(self, chunk: str, top_k: int = 1000):
        """Search papers by a text chunk."""
        return self._run_async(lambda api: api.search_by_chunk(chunk, top_k))

    def search_by_title_metadata(self, title: str, top_k: int = 100):
        """Fetch metadata of papers whose title contains the given keyword."""
        return self._run_async(lambda api: api.search_by_title_metadata(title, top_k))

    def search_similar_titles(self, title: str, top_k: int = 100):
        """Find titles similar to the given one."""
        return self._run_async(lambda api: api.search_similar_titles(title, top_k))

    def batch_search_by_query(self, queries: list[str], top_k: int = 30):
        """Batch version of :meth:`search_by_query`."""
        return self._run_async(lambda api: api.batch_search_by_query(queries, top_k))

    def batch_search_by_paper_id(self, paper_ids: list[str], top_k: int = 5):
        """Batch version of :meth:`search_by_paper_id`."""
        return self._run_async(lambda api: api.batch_search_by_paper_id(paper_ids, top_k))

    def batch_search_by_title(self, titles: list[str], top_k: int = 100):
        """Batch version of :meth:`search_by_title`."""
        return self._run_async(lambda api: api.batch_search_by_title(titles, top_k))

    def batch_search_by_title_contain(self, titles: list[str], top_k: int = 1000):
        """Batch version of :meth:`search_by_title_contain`."""
        return self._run_async(lambda api: api.batch_search_by_title_contain(titles, top_k))

    def batch_search_by_chunk(self, chunks: list[str], top_k: int = 1000):
        """Batch version of :meth:`search_by_chunk`."""
        return self._run_async(lambda api: api.batch_search_by_chunk(chunks, top_k))

    def batch_search_by_title_metadata(self, titles: list[str], top_k: int = 100):
        """Batch version of :meth:`search_by_title_metadata`."""
        return self._run_async(lambda api: api.batch_search_by_title_metadata(titles, top_k))

    def batch_search_similar_titles(self, titles: list[str], top_k: int = 100):
        """Batch version of :meth:`search_similar_titles`."""
        return self._run_async(lambda api: api.batch_search_similar_titles(titles, top_k))

class BaseLLM:
    """Common interface for the LLM backends defined in this module.

    Concrete subclasses must provide both query methods; this base
    merely declares the contract.
    """

    def __init__(self):
        pass

    def query(self, message: str):
        """Send a single user message and return the model's reply text."""
        raise NotImplementedError

    def query_with_system(self, message: str, system: str):
        """Send a user message preceded by a system prompt; return the reply text."""
        raise NotImplementedError


class ZhipuLLM(BaseLLM):
    """Zhipu AI (GLM) chat client with token-usage accounting.

    Synchronous calls are throttled by a threading semaphore so that at
    most ``max_concurrent_requests`` requests are in flight at once.
    """

    def __init__(
            self, 
            api_key: str, 
            max_concurrent_requests: int = 50, 
            model: str = "GLM-4-Flash",
        ):
        # Short random id to correlate log lines produced by this client.
        self.uuid = str(uuid.uuid4())[:8]
        self.client = ZhipuAI(api_key=api_key)
        self.model = model
        # Cumulative token counters across all calls on this instance.
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_tokens = 0
        self.max_concurrent = max_concurrent_requests
        self._semaphore = threading.Semaphore(max_concurrent_requests)

    async def _async_query(self, message: str):
        """Submit one message via the async-completions API and poll for the result.

        Polls every 2 seconds for up to ~80 seconds.

        Raises:
            Exception: if the remote task reports FAILED.
            TimeoutError: if the task does not finish within the polling budget.
        """
        response = self.client.chat.asyncCompletions.create(
            model=self.model,
            messages=[
                {"role": "user", "content": message}
            ]
        )
        task_id = response.id
        task_status = ''
        get_cnt = 0

        while task_status != 'SUCCESS' and task_status != 'FAILED' and get_cnt <= 40:
            result_response = self.client.chat.asyncCompletions.retrieve_completion_result(id=task_id)
            task_status = result_response.task_status
            
            if task_status == 'SUCCESS':
                content = result_response.choices[0].message.content
                self.prompt_tokens += result_response.usage.prompt_tokens
                self.completion_tokens += result_response.usage.completion_tokens
                self.total_tokens += result_response.usage.total_tokens
                main_logger.info(f"【ZhipuLLM】[{self.uuid}] - [message]: {message} - [response]: {content}")
                return content
            elif task_status == 'FAILED':
                raise Exception(f"Task failed: {result_response}")
            
            await asyncio.sleep(2)
            get_cnt += 1
            
        raise TimeoutError("Query timeout after 80 seconds")

    def query(self, message: str):
        """Synchronous single query; updates the cumulative token counters."""
        with self._semaphore:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "user", "content": message}
                ]
            )
            content = response.choices[0].message.content
            self.prompt_tokens += response.usage.prompt_tokens
            self.completion_tokens += response.usage.completion_tokens
            self.total_tokens += response.usage.total_tokens
            main_logger.info(f"【ZhipuLLM】[{self.uuid}] - [message]: {message} - [response]: {content}")
            return content

    def batch_query(self, messages: list[str]):
        """Run many synchronous queries on a thread pool; results keep input order."""
        with ThreadPoolExecutor(max_workers=self.max_concurrent) as executor:
            results = list(tqdm(
                executor.map(self.query, messages),
                total=len(messages),
                desc=f"ZhipuLLM[{self.model}]",
                ncols=100
            ))
            return results

    def query_with_system(self, message: str, system: str):
        """Synchronous query with a system prompt; updates token counters."""
        with self._semaphore:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": message}
                ]
            )
            content = response.choices[0].message.content
            # Fix: this method previously did not accumulate usage, so
            # get_usage_info() under-reported whenever a system prompt was used.
            self.prompt_tokens += response.usage.prompt_tokens
            self.completion_tokens += response.usage.completion_tokens
            self.total_tokens += response.usage.total_tokens
            main_logger.info(f"【ZhipuLLM】[{self.uuid}] - [message]: {message}, [system]: {system} - [response]: {content}")
            return content

    async def async_batch_query(self, messages: list[str]):
        """Run many async queries concurrently, at most ``max_concurrent`` at a time.

        Fix: the previous code wrapped task *creation* in a single
        ``async with asyncio.Semaphore(...)`` block, which acquired one slot
        once and therefore imposed no concurrency limit at all. The semaphore
        is now acquired per request inside each task.
        """
        semaphore = asyncio.Semaphore(self.max_concurrent)

        async def _limited(message: str):
            async with semaphore:
                return await self._async_query(message)

        # gather() preserves the order of the input messages.
        return await asyncio.gather(*(_limited(m) for m in messages))

    def get_usage_info(self):
        """Return the cumulative token usage recorded on this instance."""
        return {
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "total_tokens": self.total_tokens
        }


class LocalLLM(BaseLLM):
    """Client for an OpenAI-compatible chat-completions server on the local network."""

    def __init__(
            self,
            url: str = "http://10.68.84.28:21474/v1/chat/completions",
            model: str = "Qwen2.5-14B-Instruct",
            timeout: Optional[float] = None,
        ):
        """Create a client.

        Generalized: ``url`` and ``model`` were previously hard-coded; the
        defaults preserve the old behavior. ``timeout`` (seconds) is passed to
        ``requests.post``; the default ``None`` keeps the old no-limit behavior.
        """
        # Short random id to correlate log lines produced by this client.
        self.uuid = str(uuid.uuid4())[:8]
        self.url = url
        self.model = model
        self.timeout = timeout
        # Cumulative token counters across all calls on this instance.
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_tokens = 0

    def query(self, message: str):
        """Send one user message; returns the reply text and updates token counters."""
        data = {
            "model": self.model,
            "messages": [
                {"role": "user", "content": message}
            ]
        }
        
        response = requests.post(self.url, json=data, timeout=self.timeout).json()
        content = response["choices"][0]["message"]["content"]
        self.prompt_tokens += response["usage"]["prompt_tokens"]
        self.completion_tokens += response["usage"]["completion_tokens"]
        self.total_tokens += response["usage"]["total_tokens"]
        main_logger.info(f"【LocalLLM({self.model})】[{self.uuid}] - [message]: {message} - [response]: {content}")
        return content
    
    def batch_query(self, messages: list[str]):
        """Query all messages concurrently; results are returned in input order.

        NOTE(review): this spawns one thread per message with no upper bound —
        fine for small batches, but consider a pool for very large inputs.
        """
        results = [None] * len(messages)  # pre-allocated so each thread writes its own slot
        
        def query_and_store(message, index):
            results[index] = self.query(message)
            
        threads = []
        for i, message in enumerate(messages):
            thread = threading.Thread(target=query_and_store, args=(message, i))
            thread.start()
            threads.append(thread)
            
        for thread in threads:
            thread.join()
            
        return results
    
    def query_with_system(self, message: str, system: str):
        """Send a user message with a system prompt; updates token counters."""
        data = {
            "model": self.model,
            "messages": [
                {"role": "system", "content": system},
                {"role": "user", "content": message}
            ]
        }
        response = requests.post(self.url, json=data, timeout=self.timeout).json()
        content = response["choices"][0]["message"]["content"]
        # Fix: this method previously did not accumulate usage, so
        # get_usage_info() under-reported whenever a system prompt was used.
        self.prompt_tokens += response["usage"]["prompt_tokens"]
        self.completion_tokens += response["usage"]["completion_tokens"]
        self.total_tokens += response["usage"]["total_tokens"]
        main_logger.info(f"【LocalLLM({self.model})】[{self.uuid}] - [message]: {message}, [system]: {system} - [response]: {content}")
        return content

    def get_usage_info(self):
        """Return the cumulative token usage recorded on this instance."""
        return {
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "total_tokens": self.total_tokens
        }

async def main(base_url: str):
    """Demo: run several searches concurrently and dump each result as JSON."""
    async with LiteratureSearchAPIAsync(base_url) as api:
        # Sample search inputs.
        query = "RAG"
        paper_id = "6516338d3fda6d7f065e50d0"
        title = "Interactive Class-Agnostic Object Counting"

        # Run the four searches concurrently.
        # Fix: the original called api.query_paper_metadata_that_title_contain()
        # and api.titles_like(), which do not exist on the client (those are the
        # raw endpoint paths); the wrapper methods are search_by_title_metadata()
        # and search_similar_titles().
        results = await asyncio.gather(
            api.search_by_query(query),
            api.search_by_paper_id(paper_id),
            api.search_by_title_metadata(query),
            api.search_similar_titles(query)
        )
        
        (search_by_query_result,
         search_by_paper_id_result,
         title_metadata_result,
         similar_titles_result) = results

        os.makedirs("api_results", exist_ok=True)

        for filename, result in [
            ("search_by_query.json", search_by_query_result),
            ("search_by_paper_id.json", search_by_paper_id_result),
            ("query_paper_metadata_that_title_contain.json", title_metadata_result),
            ("titles_like.json", similar_titles_result)
        ]:
            # Fix: the original wrote every result to the literal path
            # "api_results/(unknown)" instead of interpolating `filename`,
            # so the four dumps clobbered each other in one misnamed file.
            with open(f"api_results/{filename}", "w", encoding="utf-8") as f:
                json.dump(result, f, ensure_ascii=False, indent=4)

        # A standalone (non-gathered) search.
        search_by_title_result = await api.search_by_title(title)
        with open("api_results/search_by_title.json", "w", encoding="utf-8") as f:
            json.dump(search_by_title_result, f, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    import config
    # Async client demo: runs several searches and writes results to api_results/.
    asyncio.run(main(base_url=config.paper_search_api_url))
    # Sync wrapper demo: same service, driven from blocking code.
    api = LiteratureSearchAPI(config.paper_search_api_url)
    result = api.search_by_query("machine learning")
    print(len(result))

    llm = ZhipuLLM(config.zhipu_api_key)
    print(llm.query("hello"))
    llm = LocalLLM()
    print(llm.query("hello"))
