import logging
import ollama
import logging  # 新增：日志模块
import datetime  # 新增：日期时间处理
from openai import OpenAI
import os
import json
from file_tools import read_file, replace_in_file

from config import (  # 新增：导入配置
    OLLAMA_MODEL, OLLAMA_HOST,
    XAI_BASE_URL, XAI_MODEL, XAI_API_KEY_ENV,
    DEFAULT_BACKEND,  
    DEBUG_LOG_ENABLED,
    OLLAMA_DISABLE_PROXY
)

def get_log_file():
    """Return the path of the current hourly log file.

    The file lives under ./logs and is named log<YYYYMMDD_HH>.log, so a new
    file is used every hour.  The directory is created on demand.

    Returns:
        str: path like './logs/log20240101_13.log'.
    """
    now = datetime.datetime.now()
    log_dir = './logs'
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair (two processes could both pass
    # the exists() check and one makedirs() would then raise).
    os.makedirs(log_dir, exist_ok=True)
    return os.path.join(log_dir, f"log{now.strftime('%Y%m%d_%H')}.log")

# Module-level logger shared by all queries (configured once at import time,
# outside query_ai); file handlers are attached per call when debug logging
# is enabled.
_LOGGER_NAME = 'grok_cli'
logger = logging.getLogger(_LOGGER_NAME)
logger.setLevel(logging.DEBUG)


def query_ai(prompt, backend=DEFAULT_BACKEND):
    """Send *prompt* to an AI backend and return the model's final text reply.

    Args:
        prompt: The user prompt.  An '@<file_path>' marker inside the prompt
            causes that file's content to be inlined before sending, so the
            model can analyse it without calling a tool.
        backend: 'ollama' (local) or 'xai' (remote OpenAI-compatible API).
            Defaults to DEFAULT_BACKEND from config.

    Returns:
        The model's reply text, an error string on Ollama failure, or None
        for an unknown backend (matching the original behaviour).

    Raises:
        ValueError: If the xai backend is selected but the API-key
            environment variable named by XAI_API_KEY_ENV is unset.
    """
    has_file = '@' in prompt  # prompt references a file via '@path'

    handler = None
    if DEBUG_LOG_ENABLED:
        # Attach a per-call handler so output lands in the current hourly file.
        handler = logging.FileHandler(get_log_file(), mode='a')  # 'a' = append
        handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
        logger.addHandler(handler)

    try:
        if has_file:
            file_path = prompt.split('@')[1].strip()
            file_content = read_file(file_path)
            prompt = f"{prompt.replace(f'@{file_path}', '')} 请直接分析以下已提供的文件内容（无需调用工具读取文件）: {file_content}"
            if DEBUG_LOG_ENABLED:
                logger.debug(f"Processed prompt: {prompt}")

        tools = _build_tools()

        if backend == 'ollama':
            return _query_ollama(prompt, tools, has_file)
        elif backend == 'xai':
            return _query_xai(prompt, tools)
        # Unknown backend: fall through and return None, as before.
    finally:
        if handler is not None:
            # BUG FIX: the original removed the handler only on the ollama
            # success path, leaking a FileHandler (and duplicating every log
            # line on subsequent calls) on the xai path and on all error paths.
            logger.removeHandler(handler)
            handler.close()


def _build_tools():
    """Return the OpenAI-style tool schema shared by both backends."""
    return [
        {
            "type": "function",
            "function": {
                "name": "read_file",
                "description": "Read the content of a file.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "file_path": {"type": "string", "description": "The path to the file."}
                    },
                    "required": ["file_path"]
                }
            }
        },
        {
            "type": "function",
            "function": {
                "name": "replace_in_file",
                "description": "Replace a string in a file.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "file_path": {"type": "string", "description": "The path to the file."},
                        "old_str": {"type": "string", "description": "The old string to replace."},
                        "new_str": {"type": "string", "description": "The new string to insert."}
                    },
                    "required": ["file_path", "old_str", "new_str"]
                }
            }
        }
    ]


def _execute_tool(func_name, args):
    """Execute one local tool call; return its result or an error string."""
    try:
        if func_name == 'read_file':
            return read_file(args['file_path'])
        elif func_name == 'replace_in_file':
            return replace_in_file(args['file_path'], args['old_str'], args['new_str'])
        return "Unknown tool"
    except Exception as e:
        if DEBUG_LOG_ENABLED:
            logger.debug(f"Tool execution error: {str(e)}")
        return f"Tool error: {str(e)}"


def _query_ollama(prompt, tools, has_file):
    """Run the prompt against the local Ollama backend, handling tool calls."""
    if OLLAMA_DISABLE_PROXY:
        # Bypass any local proxy so the client can reach localhost directly.
        os.environ['no_proxy'] = 'localhost,127.0.0.1'

    client = ollama.Client(host=OLLAMA_HOST)  # host comes from config
    try:
        tools_param = tools if has_file else None  # only offer tools for file prompts
        messages = [{'role': 'user', 'content': prompt}]
        response = client.chat(
            model=OLLAMA_MODEL,
            messages=messages,
            tools=tools_param
        )
        if DEBUG_LOG_ENABLED:
            logger.debug(f"Initial response: {response}")

        # BUG FIX: ollama reports tool calls under response['message']['tool_calls'];
        # the original tested `'tool_calls' in response` at the top level, so the
        # tool branch could never trigger.
        tool_calls = response.get('message', {}).get('tool_calls') if has_file else None
        if tool_calls:
            if DEBUG_LOG_ENABLED:
                logger.debug(f"Tool calls detected: {tool_calls}")
            # BUG FIX: build a proper running conversation (assistant message,
            # then one 'tool' message per result).  The original read
            # response['messages'], a key that does not exist in an ollama
            # chat response and would have raised KeyError.
            messages.append(response['message'])
            for tool_call in tool_calls:
                func_name = tool_call['function']['name']
                raw_args = tool_call['function']['arguments']
                # The ollama client may deliver arguments already parsed as a
                # dict (or as JSON text) — accept both. TODO confirm against
                # the installed ollama version.
                args = json.loads(raw_args) if isinstance(raw_args, str) else raw_args
                if DEBUG_LOG_ENABLED:
                    logger.debug(f"Executing tool: {func_name} {args}")
                result = _execute_tool(func_name, args)
                if DEBUG_LOG_ENABLED:
                    logger.debug(f"Tool result: {result}")
                messages.append({'role': 'tool', 'content': str(result)})
            # One follow-up call with all tool results attached.
            response = client.chat(
                model=OLLAMA_MODEL,
                messages=messages,
                tools=tools
            )
            if DEBUG_LOG_ENABLED:
                logger.debug(f"Updated response after tool: {response}")

        final_content = response.get('message', {}).get('content', "No content in response")
        if DEBUG_LOG_ENABLED:
            logger.debug(f"Final response content: {final_content}")
        return final_content
    except ollama.ResponseError as e:
        return f"Ollama error: {e}. Please check config.py and pull the model first."


def _query_xai(prompt, tools):
    """Run the prompt against the xAI (OpenAI-compatible) backend.

    Raises:
        ValueError: if the configured API-key environment variable is unset.
    """
    api_key = os.getenv(XAI_API_KEY_ENV)
    if not api_key:
        raise ValueError(f"API Key not found in environment variable '{XAI_API_KEY_ENV}'")
    client = OpenAI(api_key=api_key, base_url=XAI_BASE_URL)
    messages = [{'role': 'user', 'content': prompt}]

    response = client.chat.completions.create(
        model=XAI_MODEL,
        messages=messages,
        tools=tools,
        tool_choice="auto"
    )

    tool_calls = response.choices[0].message.tool_calls
    if tool_calls:
        # BUG FIX: append the assistant message exactly once; the original
        # appended it inside the loop, duplicating it for every tool call and
        # producing an invalid conversation when the model issued several.
        messages.append(response.choices[0].message)
        for tool_call in tool_calls:
            func_name = tool_call.function.name
            args = json.loads(tool_call.function.arguments)  # OpenAI sends JSON text
            result = _execute_tool(func_name, args)
            messages.append({'role': 'tool', 'tool_call_id': tool_call.id, 'name': func_name, 'content': str(result)})
        response = client.chat.completions.create(
            model=XAI_MODEL,
            messages=messages,
            tools=tools
        )
    return response.choices[0].message.content