
import hashlib
import json
import re
import sqlite3
from contextlib import closing

from transformers import AutoModelForCausalLM, AutoTokenizer

from util.convert import convert_to_qwen_tools_format
from util.message_to_prompt import v1, v2


class LLMClient:
    """Local chat-completion client backed by a HuggingFace causal LM,
    with a SQLite cache keyed on a hash of the request payload."""

    def __init__(self, model_name_or_path=None, cache_db_path='llm_cache.db'):
        """
        Initialize the LLM client.

        Args:
            model_name_or_path (str): HF model id or local checkpoint path,
                passed straight to ``from_pretrained``.
            cache_db_path (str): Path of the SQLite cache database file.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path,
            torch_dtype="auto",
            device_map="auto",
        )
        self.cache_db_path = cache_db_path
        self.init_cache_db()

    def init_cache_db(self):
        """Create the cache table if it does not exist yet."""
        # closing() guarantees the connection is released even if the
        # DDL statement raises (the original leaked it on error).
        with closing(sqlite3.connect(self.cache_db_path)) as conn:
            conn.execute('''
            CREATE TABLE IF NOT EXISTS llm_cache (
                request_hash TEXT PRIMARY KEY,
                request TEXT,
                response TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            conn.commit()

    def get_cache(self, request_hash):
        """Return the cached response dict for *request_hash*, or None on a miss."""
        with closing(sqlite3.connect(self.cache_db_path)) as conn:
            row = conn.execute(
                'SELECT response FROM llm_cache WHERE request_hash = ?',
                (request_hash,)
            ).fetchone()
        return json.loads(row[0]) if row else None

    def set_cache(self, request_hash, request, response):
        """Insert or overwrite the cache entry for *request_hash*."""
        with closing(sqlite3.connect(self.cache_db_path)) as conn:
            conn.execute(
                'INSERT OR REPLACE INTO llm_cache (request_hash, request, response) VALUES (?, ?, ?)',
                (request_hash, json.dumps(request), json.dumps(response, ensure_ascii=False))
            )
            conn.commit()

    def create_chat_completion(self, messages: list, model=None,
                               tools=None, skip_cache=False, update_cache=True, convert=False, **kwargs) -> dict:
        """
        Run one chat completion, consulting the SQLite cache first.

        Args:
            messages (list): Chat messages as {'role': ..., 'content': ...} dicts.
            model: Model to generate with; defaults to ``self.model``.
            tools (list | None): Tool/function specs made available to the model.
            skip_cache (bool): If True, ignore any cached response.
            update_cache (bool): If True, store the fresh response in the cache.
            convert (bool): If True, convert ``tools`` to the Qwen tools format
                via ``convert_to_qwen_tools_format``.
            **kwargs: Accepted for caller compatibility; currently unused.

        Returns:
            dict: ``{'tool_calls': list[str] | None, 'content': str | None}`` —
            exactly one of the two fields is non-None.
        """
        if tools and convert:
            tools = convert_to_qwen_tools_format({"apis": tools})

        if not model:
            model = self.model

        # Derive a stable cache key from the canonicalized request payload.
        request_data = {
            'messages': messages,
            'model': str(model),
            'tools': tools
        }
        request_hash = hashlib.md5(json.dumps(request_data, sort_keys=True).encode()).hexdigest()

        if not skip_cache:
            cached_response = self.get_cache(request_hash)
            # Explicit None check: only a miss (None) falls through to generation.
            if cached_response is not None:
                return cached_response

        # Render messages (+ tool specs) into a single prompt string.
        text = v2(messages, tools)

        inputs = self.tokenizer(text, return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=4096)
        # BUGFIX: strip the prompt by *token* count before decoding.
        # The old string slice ``decoded[len(text):]`` breaks whenever
        # decode(encode(text)) is not byte-identical to the input text
        # (tokenizer normalization, special-token spacing).
        prompt_len = inputs["input_ids"].shape[1]
        output_text = self.tokenizer.decode(outputs[0][prompt_len:])

        # Collect <tool_call> ... </tool_call> payloads emitted by the model.
        tool_calls = re.findall(r"<tool_call>\n*(.*?)\n*</tool_call>", output_text, flags=re.DOTALL)
        if tool_calls:
            response = {"tool_calls": tool_calls, "content": None}
        else:
            # Plain-text answer: scrub chat-template markers left in the output.
            output_text = output_text.replace("Assistant:", "").replace("<|im_start|>", "").replace("<|im_end|>", "")
            response = {"tool_calls": None, "content": output_text}

        if update_cache:
            self.set_cache(request_hash, request_data, response)

        return response


if __name__ == "__main__":
    # Usage example. The model checkpoint is taken from the
    # MODEL_NAME_OR_PATH environment variable, which python-dotenv can
    # populate from a local .env file.
    import os

    import dotenv

    dotenv.load_dotenv()

    # BUGFIX: the example previously called LLMClient() with no model path,
    # so AutoTokenizer.from_pretrained(None) failed immediately; dotenv was
    # loaded but nothing ever read the environment.
    model_path = os.environ.get("MODEL_NAME_OR_PATH")
    if not model_path:
        raise SystemExit("Set MODEL_NAME_OR_PATH (e.g. in .env) to run this example.")

    llm_client = LLMClient(model_path)
    messages = [
        {"role": "system", "content": "你是一个AI助手"},
        {"role": "user", "content": "北京的天气"}
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get current temperature for a given location.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "City and country e.g. Bogotá, Colombia"
                        }
                    },
                    "required": ["location"],
                    "additionalProperties": False
                },
                "strict": True
            }
        }
    ]
    response = llm_client.create_chat_completion(messages, tools=tools)
    print(response)
