#!/usr/bin/env python3
"""
RAG + 本地模型 完全修复版本
解决所有 Pydantic 字段、api_url 属性和 LangChain 集成问题的最终版本
"""

import logging
import requests
from typing import List, Dict, Any, Optional
from pydantic import Field, computed_field

# LangChain 相关导入
from langchain.chains import RetrievalQA
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class OllamaLLM(LLM):
    """LangChain-compatible LLM wrapper for a local Ollama server.

    Sends chat requests to the Ollama ``/api/chat`` endpoint. All
    configuration is declared as pydantic fields so that the pydantic-based
    LangChain ``LLM`` base class can validate and serialize them correctly.
    On any network or parsing failure the error text is *returned* as the
    completion rather than raised, so a failing local server does not abort
    an entire RetrievalQA chain.
    """

    # Pydantic fields — must be class-level annotated attributes, never set
    # ad hoc in __init__, or the LLM base class rejects them.
    model_name: str = Field(default="deepseek-r1:1.5b", description="model name")
    base_url: str = Field(default="http://localhost:11434", description="base url")
    temperature: float = Field(default=0.7, description="温度")
    max_tokens: int = Field(default=1000, description="最大token数")

    # computed_field makes api_url a real (serializable) model attribute that
    # is always derived from base_url, so the two can never drift apart.
    @computed_field
    @property
    def api_url(self) -> str:
        """Full URL of the Ollama chat endpoint, derived from ``base_url``."""
        return f"{self.base_url}/api/chat"

    def __init__(self, **kwargs: Any) -> None:
        """Initialize via the pydantic base class, then log the configuration."""
        super().__init__(**kwargs)
        logger.info(f"初始化 OllamaLLM，模型: {self.model_name}, API URL: {self.api_url}")

    @property
    def _llm_type(self) -> str:
        """Return the LLM type identifier used by LangChain."""
        return "ollama"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return the parameters identifying this instance (used for caching)."""
        return {
            "model_name": self.model_name,
            "base_url": self.base_url,
            "api_url": self.api_url,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }

    def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> str:
        """LangChain entry point: generate a completion for ``prompt``.

        ``run_manager`` and extra kwargs are accepted for interface
        compatibility but are not used; delegates to :meth:`execute`.
        """
        return self.execute(prompt, stop)

    def execute(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call the Ollama API and return the completion text.

        Errors are deliberately converted to a human-readable string return
        value (best-effort behavior) instead of being re-raised.

        Args:
            prompt: The user prompt to send.
            stop: Optional stop sequences that truncate generation.

        Returns:
            The model's reply, or an error description on failure.
        """
        try:
            return self.excute0(prompt, stop)
        except requests.exceptions.RequestException as e:
            # Connection refused / timeout / DNS — typically Ollama not running.
            error_msg = f"网络请求失败: {str(e)}"
            logger.error(error_msg)
            return error_msg
        except Exception as e:
            # Malformed JSON or unexpected response shape.
            error_msg = f"处理响应时出错: {str(e)}"
            logger.error(error_msg)
            return error_msg

    def excute0(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Build the chat payload, POST it to Ollama, and extract the reply.

        NOTE: the misspelled name ("excute0") is kept for backward
        compatibility with existing callers.

        Raises:
            requests.exceptions.RequestException: on network failure
                (handled by :meth:`execute`).
        """
        data: Dict[str, Any] = {
            "model": self.model_name,
            "messages": [
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            "stream": False,  # single JSON response, not a chunked stream
            "options": {
                "temperature": self.temperature,
                "num_predict": self.max_tokens,
            },
        }
        # BUG FIX: Ollama only honors stop sequences inside the "options"
        # object; a top-level "stop" key in the request body is ignored by
        # /api/chat, so LangChain's stop tokens previously had no effect.
        if stop is not None:
            data["options"]["stop"] = stop
        logger.debug(f"发送请求到 {self.api_url}，模型: {self.model_name}")
        response = requests.post(
            self.api_url,
            json=data,
            headers={"Content-Type": "application/json"},
            timeout=60,
        )
        if response.status_code == 200:
            result = response.json()
            # Non-streaming /api/chat replies carry the text under message.content.
            content = result.get("message", {}).get("content", "")
            logger.debug(f"API 调用成功，响应长度: {len(content)}")
            return content
        else:
            error_msg = f"API 调用失败，状态码: {response.status_code}, 响应: {response.text}"
            logger.error(error_msg)
            return f"API 调用失败: {response.status_code}"