import hashlib
import json
import logging
import os
import sqlite3
import threading
from queue import Empty, Queue
from time import sleep

from openai import OpenAI

from util.convert import convert_to_openai_tools_format, convert_to_qwen_tools_format
from util.message_to_prompt import v2

lock = threading.Lock()


def writer_thread(queue:Queue, output_file, lock, batch_size=512):
    buffer = []

    while True:
        try:
            line = queue.get(timeout=60)
        except Exception as e:
            line = None

        if line is None:
            # Write remaining data in buffer
            if buffer:
                with lock:
                    if not os.path.exists(output_file):
                        # 新建一个
                        with open(output_file, "w", encoding="utf-8") as f:
                            json.dump([], f, ensure_ascii=False)

                    # 读取文件内容
                    with open(output_file, "r", encoding="utf-8") as f:
                        dataset = json.load(f)

                    # 追加新数据
                    dataset.extend(buffer)

                    # 写回文件
                    with open(output_file, "w", encoding="utf-8") as f:
                        json.dump(dataset, f, ensure_ascii=False, indent=4)
            print("writer_thread exit")
            return

        buffer.append(line)
        if len(buffer) >= batch_size:
            with lock:
                if not os.path.exists(output_file):
                    # 新建一个
                    with open(output_file, "w", encoding="utf-8") as f:
                        json.dump([], f, ensure_ascii=False)

                # 读取文件内容
                with open(output_file, "r", encoding="utf-8") as f:
                    dataset = json.load(f)

                # 追加新数据
                dataset.extend(buffer)

                # 写回文件
                with open(output_file, "w", encoding="utf-8") as f:
                    json.dump(dataset, f, ensure_ascii=False, indent=4)
            buffer = []

        queue.task_done()
#


class LLMClient:
    def __init__(self, model=None,api_key=None,base_url=None,cache_db_path='llm_cache.db'):
        """
        初始化LLM客户端
        Args:
            cache_db_path (str): 缓存数据库路径
        """
        self.model = model if model else os.getenv("OPENAI_MODEL")
        self.api_key = api_key if api_key else os.getenv("OPENAI_API_KEY")
        self.base_url = base_url if base_url else os.getenv("OPENAI_BASE_URL")
        self.cache_db_path = cache_db_path
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )
        self.init_cache_db()
        if "TRUE" == os.getenv("SFT_DATASET"):
            if not os.path.exists(os.getenv("SFT_DATASET_PATH")):
                # 新建一个
                with open(os.getenv("SFT_DATASET_PATH"), "w", encoding="utf-8") as f:
                    json.dump([], f, ensure_ascii=False)
            # 启动写线程
            self.queue = Queue()
            self.writer_thread = threading.Thread(target=writer_thread, args=(self.queue, os.getenv("SFT_DATASET_PATH"), lock))
            self.writer_thread.start()

    def init_cache_db(self):
        """初始化缓存数据库"""
        conn = sqlite3.connect(self.cache_db_path)
        cursor = conn.cursor()
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS llm_cache (
            request_hash TEXT PRIMARY KEY,
            request TEXT,
            response TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
        ''')
        conn.commit()
        conn.close()

    def get_cache(self, request_hash):
        """从缓存中获取结果"""
        conn = sqlite3.connect(self.cache_db_path)
        cursor = conn.cursor()
        cursor.execute('SELECT response FROM llm_cache WHERE request_hash = ?', (request_hash,))
        result = cursor.fetchone()
        conn.close()
        return json.loads(result[0]) if result else None

    def set_cache(self, request_hash, request, response):
        """将结果存入缓存"""
        conn = sqlite3.connect(self.cache_db_path)
        cursor = conn.cursor()
        cursor.execute(
            'INSERT OR REPLACE INTO llm_cache (request_hash, request, response) VALUES (?, ?, ?)',
            (request_hash, json.dumps(request), json.dumps(response, ensure_ascii=False))
        )
        conn.commit()
        conn.close()

    def create_chat_completion(self, messages: list, model=None, tools=None,
                               skip_cache=False, update_cache=True, convert=False,
                               **kwargs) -> dict:
        """
        创建聊天完成请求的函数
        Args:
            messages: 消息列表
            model: 模型名称
            temperature: 温度参数
            top_p: top_p参数
            timeout: 超时时间
            tools: 工具列表
            skip_cache: 是否跳过缓存
            update_cache: 是否强制更新缓存
            convert: 是否转换工具格式
            **kwargs: 其他参数
        """
        create_sft_dataset= True if "TRUE"==os.getenv("SFT_DATASET") else False

        if tools and convert:
            _tools = convert_to_openai_tools_format({"apis": tools})
        else:
            _tools = tools

        if not model:
            model = self.model

        # 生成请求的唯一标识
        request_data = {
            'messages': messages,
            'model': model,
            'tools': _tools
        }
        request_data.update(kwargs)
        request_hash = hashlib.md5(json.dumps(request_data, sort_keys=True).encode()).hexdigest()

        # 如果不跳过缓存，尝试从缓存获取结果
        if not skip_cache:
            cached_response = self.get_cache(request_hash)
            if cached_response:
                if "reasoning_content" in cached_response:
                    cached_response["content"] = "<think>" + cached_response["reasoning_content"] + "</think>\n" + \
                                                 cached_response["content"]
                if create_sft_dataset:
                    self.create_sft_dataset(convert, create_sft_dataset, messages, cached_response, tools)
                return cached_response

        completion = self.client.chat.completions.create(
            messages=messages,
            model=model,
            tools=_tools,
            **kwargs
        )

        response = completion.choices[0].message.model_dump()
        # 检查输

        if not response["content"] and not response["tool_calls"]:
            logging.warning("Failed to get response from OpenAI")

        # 将结果存入缓存
        if update_cache:
            self.set_cache(request_hash, request_data, response)

        if "reasoning_content" in response:
            response["content"] = "<think>"+ response["reasoning_content"] + "</think>\n" + response["content"]

        if create_sft_dataset:
            self.create_sft_dataset(convert, create_sft_dataset, messages, response, tools)


        return response

    def create_sft_dataset(self, convert, create_sft_dataset, messages, response, tools):
        # 把问题和回答，保存为微调用数据集
        if create_sft_dataset:
            if tools and convert:
                _tools = convert_to_qwen_tools_format({"apis": tools})
            else:
                _tools = tools
            instruction = v2(messages, _tools)
            if response["tool_calls"]:
                raise Exception("需要实现")
                output = "<tool_call>\n" + response["tool_calls"] + "\n</tool_call>"
            else:
                output = response["content"]

            line = {
                "instruction": instruction,
                "input": "",
                "output": output
            }
            self.queue.put(line)



if __name__ == "__main__":
    # 使用示例
    import dotenv
    dotenv.load_dotenv()
    
    llm_client = LLMClient()
    messages = [
        {"role": "system", "content": "你是一个AI助手"},
        {"role": "user", "content": "北京的天气"}
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get current temperature for a given location.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "City and country e.g. Bogotá, Colombia"
                        }
                    },
                    "required": ["location"],
                    "additionalProperties": False
                },
                "strict": True
            }
        }
    ]
    response = llm_client.create_chat_completion(messages, tools=None,create_sft_dataset=False,
                                                 skip_cache=True, update_cache=False)
    print(response)
