import asyncio
import os
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from functools import partial
from typing import Any, Dict, List, Optional

from flask import current_app
from openai import OpenAI

class ModelType(str, Enum):
    """Identifiers for the DeepSeek models exposed by the API.

    Subclassing ``str`` lets members be passed directly wherever a plain
    model-name string is expected (e.g. the ``model`` request field) and
    compared against string literals.
    """

    DEEPSEEK_CODER = "deepseek-coder"  # code-oriented model
    DEEPSEEK_CHAT = "deepseek-chat"    # general-purpose chat model

class LLMService:
    """Thin async wrapper around the DeepSeek chat-completion API.

    The underlying OpenAI SDK client is synchronous, so every API call is
    dispatched to a thread pool to avoid blocking the event loop. All public
    methods return a result dict instead of raising:
    ``{"success": True, "response": str}`` on success, or
    ``{"success": False, "error": str, "error_type": str}`` on failure.
    """

    def __init__(self):
        # Credentials and endpoint come from the Flask app config, so an
        # application context must be active when instantiating.
        self.client = OpenAI(
            api_key=current_app.config['DEEPSEEK_API_KEY'],
            base_url=current_app.config['DEEPSEEK_API_BASE']
        )
        self._executor = ThreadPoolExecutor()

    async def _run_sync(self, func, *args, **kwargs):
        """Run a synchronous callable in the thread pool and await its result.

        Bug fix: ``loop.run_in_executor`` does not forward keyword arguments,
        so calling it with ``**kwargs`` raised ``TypeError``; the arguments
        are now bound with ``functools.partial`` first. ``get_running_loop``
        replaces the deprecated ``get_event_loop`` inside coroutines.
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            self._executor, partial(func, *args, **kwargs)
        )

    async def _completion(self, messages: list, model_type: str,
                          temperature: float, max_tokens: int,
                          additional_params: dict) -> dict:
        """Shared request/response handling for both public methods.

        Builds the request payload, lets ``additional_params`` override any
        default field, performs the blocking API call off-loop, and converts
        any exception into the error-dict contract described on the class.
        """
        params = {
            "model": model_type,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": False
        }
        if additional_params:
            params.update(additional_params)

        try:
            response = await self._run_sync(
                lambda: self.client.chat.completions.create(**params)
            )
        except Exception as e:
            # Deliberately broad: callers consume the error dict rather than
            # catching exceptions, matching the original contract.
            return {
                "success": False,
                "error": str(e),
                "error_type": type(e).__name__
            }
        return {
            "success": True,
            "response": response.choices[0].message.content
        }

    async def generate_completion(self, prompt: str, model_type: ModelType = None,
                                temperature: float = 0.7, max_tokens: int = 1000,
                                additional_params: dict = None) -> dict:
        """Run a single-prompt completion.

        Args:
            prompt: user prompt, sent as one ``user``-role message.
            model_type: model to use; defaults to ``ModelType.DEEPSEEK_CODER``.
            temperature: sampling temperature.
            max_tokens: response token limit.
            additional_params: extra request fields; may override defaults.

        Returns:
            Result dict per the class contract; never raises.
        """
        if model_type is None:
            model_type = ModelType.DEEPSEEK_CODER
        return await self._completion(
            [{"role": "user", "content": prompt}],
            model_type, temperature, max_tokens, additional_params
        )

    async def chat_completion(self, messages: list, model_type: ModelType = None,
                            temperature: float = 0.7, max_tokens: int = 1000,
                            additional_params: dict = None) -> dict:
        """Run a multi-message chat completion.

        Args:
            messages: full message list in OpenAI chat format
                (``[{"role": ..., "content": ...}, ...]``).
            model_type: model to use; defaults to ``ModelType.DEEPSEEK_CHAT``.
            temperature: sampling temperature.
            max_tokens: response token limit.
            additional_params: extra request fields; may override defaults.

        Returns:
            Result dict per the class contract; never raises.
        """
        if model_type is None:
            model_type = ModelType.DEEPSEEK_CHAT
        return await self._completion(
            messages, model_type, temperature, max_tokens, additional_params
        )