import time
import threading
import json
import requests
from typing import Dict, Any, Optional
from django.core.cache import cache
import hashlib
from .models import APIKey, RateLimit, ConversationSession
from django.conf import settings
import logging

logger = logging.getLogger(__name__)

# Global lock guarding the read-modify-write of RateLimit rows in
# check_rate_limit. NOTE(review): per-process only — it does not
# serialize requests across multiple workers/processes.
rate_lock = threading.Lock()

class LLMService:
    """LLM service wrapper supporting an online API and local models.

    The active backend is chosen by ``settings.LLM_CONFIG['mode']``:
    'online' uses the DeepSeek HTTP API via the OpenAI-compatible client,
    'local' tries Ollama first and falls back to TopKLogSystem. Every
    call method returns a plain string; failures are reported as
    human-readable error strings rather than exceptions (callers detect
    them by substring — see generate_response).
    """

    def __init__(self):
        # LLM_CONFIG may be absent from settings; default to an empty dict
        # so every self.config.get(...) below still works.
        self.config = getattr(settings, 'LLM_CONFIG', {})
        self.mode = self.config.get('mode', 'local')  # 'local' or 'online'

    def call_online_api(self, prompt: str) -> str:
        """Call the online DeepSeek API (OpenAI-compatible endpoint).

        Returns the completion text, or an error string when the openai
        package is missing or the request fails.
        """
        try:
            # Imported lazily so this module still loads without the package.
            from openai import OpenAI

            # NOTE(review): a fresh client is built per call; cheap, but
            # could be cached on self if call volume grows.
            client = OpenAI(
                api_key=self.config.get('online_api_key', ''),
                base_url=self.config.get('online_base_url', 'https://api.deepseek.com')
            )

            response = client.chat.completions.create(
                model=self.config.get('online_model', 'deepseek-chat'),
                messages=[
                    {"role": "system", "content": "你是一个有帮助的助手"},
                    {"role": "user", "content": prompt}
                ],
                stream=False
            )

            return response.choices[0].message.content

        except ImportError:
            logger.error("请安装openai包: pip install openai")
            return "错误：未安装OpenAI客户端"
        except Exception as e:
            logger.error(f"在线API调用失败: {str(e)}")
            return f"API调用错误: {str(e)}"

    def call_local_ollama(self, prompt: str) -> str:
        """Call a local Ollama model via its /api/generate endpoint.

        Returns the generated text, or an error string on HTTP or
        connection failure. Non-200 responses are logged with their body.
        """
        try:
            ollama_url = self.config.get('local_ollama_url', 'http://localhost:11434')
            model = self.config.get('local_model', 'deepseek-r1:7b')

            payload = {
                "model": model,
                "prompt": prompt,
                "stream": False  # request a single JSON response, not a stream
            }

            # Generation can be slow on CPU; allow up to two minutes.
            response = requests.post(
                f"{ollama_url}/api/generate",
                json=payload,
                timeout=120
            )

            if response.status_code == 200:
                # NOTE(review): with older requests versions response.json()
                # raises ValueError on a malformed body, which the except
                # clause below would not catch — confirm requests >= 2.27.
                result = response.json()
                return result.get('response', '无响应内容')
            else:
                logger.error(f"Ollama API错误: {response.status_code} - {response.text}")
                return f"本地模型调用失败: {response.status_code}"

        except requests.exceptions.RequestException as e:
            logger.error(f"Ollama连接失败: {str(e)}")
            return "错误：无法连接到本地Ollama服务，请确保Ollama已启动"

    def call_local_topk(self, prompt: str) -> str:
        """Call the local TopKLogSystem (legacy retrieval path).

        Builds a fresh TopKLogSystem over ./data/log on every call and
        queries it. Returns the response text or an error string.
        """
        try:
            from topklogsystem import TopKLogSystem

            system = TopKLogSystem(
                log_path="./data/log",
                llm=self.config.get('local_model', 'deepseek-r1:7b'),
                embedding_model="bge-large:latest"
            )

            query = prompt
            result = system.query(query)
            # NOTE(review): purpose of this fixed delay is unclear —
            # presumably a throttle; confirm whether it can be removed.
            time.sleep(0.5)

            logger.info(f"TopK系统响应: {result.get('response', '')}")
            return result.get('response', '无响应内容')

        except ImportError:
            logger.error("TopKLogSystem未找到")
            return "错误：TopKLogSystem未安装"
        except Exception as e:
            logger.error(f"TopK系统调用失败: {str(e)}")
            return f"本地系统错误: {str(e)}"

    def generate_response(self, prompt: str) -> str:
        """Main entry point: dispatch the prompt to the configured backend.

        'online' -> call_online_api; 'local' -> Ollama first, then the
        TopK system as fallback; any other mode yields an error string.
        """
        if self.mode == 'online':
            return self.call_online_api(prompt)
        elif self.mode == 'local':
            # Try Ollama first; fall back to the TopK system on failure.
            # NOTE(review): failure detection scans the reply for the
            # marker substrings "错误"/"失败"; a legitimate answer that
            # happens to contain either word also triggers the fallback.
            response = self.call_local_ollama(prompt)
            if "错误" in response or "失败" in response:
                logger.info("Ollama调用失败，尝试TopK系统")
                response = self.call_local_topk(prompt)
            return response
        else:
            return f"未知的LLM模式: {self.mode}"

# Module-level singleton used by the wrapper functions below.
# Instantiated at import time (reads settings.LLM_CONFIG once).
llm_service = LLMService()

def deepseek_r1_api_call(prompt: str) -> str:
    """DeepSeek-R1 entry point — compatibility shim over the shared LLM service."""
    reply = llm_service.generate_response(prompt)
    return reply

def deepseek_chat_api_call(prompt: str) -> str:
    """DeepSeek-Chat entry point — delegates to the shared LLM service."""
    reply = llm_service.generate_response(prompt)
    return reply

def call_llm_with_tools(prompt: str, tools_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Call the LLM, optionally with a tool configuration.

    Args:
        prompt: user prompt forwarded to the shared LLM service.
        tools_config: optional tool settings; currently only logged —
            actual tool dispatch is not implemented yet.

    Returns:
        On success: {"success": True, "response", "usage", "model"}.
        On failure: {"success": False, "error", "response"}.
    """
    try:
        if tools_config:
            # Tool-calling logic can be plugged in here later.
            logger.info(f"使用工具配置: {tools_config}")

        response = llm_service.generate_response(prompt)

        return {
            "success": True,
            "response": response,
            # NOTE: these are character counts, not real token counts.
            "usage": {"prompt_tokens": len(prompt), "completion_tokens": len(response)},
            "model": llm_service.config.get('current_model', 'unknown')
        }
    except Exception as e:
        logger.error(f"工具调用失败: {str(e)}")
        return {
            "success": False,
            "error": str(e),
            "response": f"调用失败: {str(e)}"
        }

# 原有的其他函数保持不变
def create_api_key(user: str) -> str:
    """创建 API Key 并保存到数据库"""
    key = APIKey.generate_key()
    expiry = time.time() + settings.TOKEN_EXPIRY_SECONDS
    
    api_key = APIKey.objects.create(
        key=key,
        user=user,
        expiry_time=expiry
    )
    
    RateLimit.objects.create(
        api_key=api_key,
        reset_time=time.time() + settings.RATE_LIMIT_INTERVAL
    )
    
    return key

def validate_api_key(key_str: str) -> bool:
    """Return True if the key exists and has not expired.

    Side effect: an expired key record is deleted when looked up.
    """
    try:
        candidate = APIKey.objects.get(key=key_str)
    except APIKey.DoesNotExist:
        return False

    if candidate.is_valid():
        return True

    # Expired — purge the stale record so it cannot be reused.
    candidate.delete()
    return False

def check_rate_limit(key_str: str) -> bool:
    """Check whether the API key is within its request-rate window.

    Returns True and counts this request when allowed; False when the
    key has used up settings.RATE_LIMIT_MAX requests within the current
    interval, or when the key does not exist. The window resets
    RATE_LIMIT_INTERVAL seconds after it was opened.

    NOTE(review): rate_lock only serializes callers within this process;
    the read-modify-write on the RateLimit row can still race across
    multiple workers.
    """
    with rate_lock:
        try:
            rate_limit = RateLimit.objects.select_related('api_key').get(api_key__key=key_str)
            
            current_time = time.time()
            if current_time > rate_limit.reset_time:
                # Window expired: open a new one and count this request.
                rate_limit.count = 1
                rate_limit.reset_time = current_time + settings.RATE_LIMIT_INTERVAL
                rate_limit.save()
                return True
            elif rate_limit.count < settings.RATE_LIMIT_MAX:
                # Still under the cap inside the current window.
                rate_limit.count += 1
                rate_limit.save()
                return True
            else:
                # Cap reached for this window — reject.
                return False
        except RateLimit.DoesNotExist:
            # No limiter row yet for this key: lazily create one (counting
            # this request) if the key itself exists.
            try:
                current_time = time.time()
                api_key = APIKey.objects.get(key=key_str)
                RateLimit.objects.create(
                    api_key=api_key,
                    count=1,
                    reset_time=current_time + settings.RATE_LIMIT_INTERVAL
                )
                return True
            except APIKey.DoesNotExist:
                return False

def get_or_create_session(session_id: str, user: APIKey) -> ConversationSession:
    """Fetch the user's conversation session by id, creating an empty one if absent."""
    session, was_created = ConversationSession.objects.get_or_create(
        session_id=session_id,
        user=user,
        defaults={'context': ''},
    )
    logger.info(f"会话 {session_id}（用户：{user.user}）{'创建新会话' if was_created else '加载旧会话'}")
    return session

def get_cached_reply(prompt: str, session_id: str, user: APIKey) -> str | None:
    """Look up a cached reply for (user, session, prompt); None on miss.

    The prompt is hashed with SHA-256 instead of the builtin hash(),
    which is salted per process (PYTHONHASHSEED) and therefore produced
    different keys across workers and restarts, silently defeating a
    shared cache. This also matches generate_cache_key's sha256 scheme.
    """
    prompt_digest = hashlib.sha256(prompt.encode('utf-8')).hexdigest()
    cache_key = f"reply:{user.user}:{session_id}:{prompt_digest}"
    return cache.get(cache_key)

def set_cached_reply(prompt: str, reply: str, session_id: str, user: APIKey, timeout=3600):
    """Store *reply* in the cache for (user, session, prompt) for *timeout* seconds.

    The prompt is hashed with SHA-256 instead of the builtin hash(),
    which is salted per process (PYTHONHASHSEED) and therefore produced
    different keys across workers and restarts, silently defeating a
    shared cache. This also matches generate_cache_key's sha256 scheme.
    """
    prompt_digest = hashlib.sha256(prompt.encode('utf-8')).hexdigest()
    cache_key = f"reply:{user.user}:{session_id}:{prompt_digest}"
    cache.set(cache_key, reply, timeout)

def generate_cache_key(original_key: str) -> str:
    """Derive a stable, cache-safe key: the SHA-256 hex digest of *original_key*."""
    return hashlib.sha256(original_key.encode('utf-8')).hexdigest()