# tools/builtin/tools_func.py

# Standard library
import fnmatch
import hashlib
import json
import os
import shutil
import subprocess
import sys
import urllib.parse
from urllib.parse import quote, urlencode

# Third-party
import requests

# Local
import config
import root_agent.llm
# These would typically be loaded from a config file
API_ENDPOINT = config.ZY_server_api_endpint
API_ENDPOINT_MAMEM = "http://127.0.0.1:8910"

llm_instance = root_agent.llm.LLMInterface()

def hash_fix(input_str, length, userid, openid, **kwargs):
    """
    Generates a fixed-length hash from a string.
    Args:
        input_str (str): The string to hash.
        length (int): The desired length of the hash.
    Returns:
        str: The generated hash string.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    hash_array = [0] * length
    for char in input_str:
        for i in range(length):
            hash_array[i] = (hash_array[i] * 31 + ord(char) + i) % 256
    result = ''
    for val in hash_array:
        result += chars[val % len(chars)]
    return result


def getweb2md(url, userid, openid, **kwargs):
    """Convert a web page to markdown via the html2markdown API endpoint.

    Args:
        url (str): The URL of the web page to convert.

    Returns:
        str: The markdown content ("" on request failure).
    """
    query = urlencode({"url": url, "userid": userid or "", "openid": openid or ""})
    return getweb(f"{API_ENDPOINT}/html2markdown?{query}", userid, openid, **kwargs)

def getweb(url, userid, openid, **kwargs):
    """Fetch a URL with HTTP GET and return the response body as text.

    Args:
        url (str): The URL to fetch.

    Returns:
        str: The response text, or "" if the request fails (error is printed).
    """
    try:
        resp = requests.get(url, timeout=20)
        resp.raise_for_status()
    except requests.exceptions.RequestException as exc:
        print(f"Error in getweb: {exc}")
        return ""
    return resp.text

def postweb(url, headers, data, userid, openid, **kwargs):
    """Perform an HTTP POST and return the response body as text.

    Args:
        url (str): The URL to post to.
        headers (dict): Request headers.
        data: Request body (dict, urlencoded str, or bytes).

    Returns:
        str: The response text, or "" if the request fails (error is printed).
    """
    try:
        resp = requests.post(url, headers=headers, data=data, timeout=60)
        resp.raise_for_status()
    except requests.exceptions.RequestException as exc:
        print(f"Error in postweb: {exc}")
        return ""
    return resp.text

def chat_image(imgurl, prompt, userid, openid, **kwargs):
    """Send an image URL and a prompt to the chat service for analysis.

    Args:
        imgurl (str): URL of the image to analyze.
        prompt (str): Prompt or question about the image.

    Returns:
        str: The service response text ("" on failure).
    """
    query = urlencode({
        "imgurl": imgurl or "",
        "prompt": prompt or "",
        "userid": userid or "",
        "openid": openid or "",
    })
    return getweb(f"{API_ENDPOINT}/chat_image?{query}", userid, openid, **kwargs)

def Screen_shot(userid, openid, **kwargs):
    """Capture a screenshot through the screen_shot API endpoint.

    Returns:
        str: The screenshot data returned by the service ("" on failure).
    """
    query = urlencode({'userid': userid or '', 'openid': openid or ''})
    return getweb(f"{API_ENDPOINT}/screen_shot?{query}", userid, openid, **kwargs)
# #not export
# def run_cmd_in_desktop(cmds: str, userid, openid,  timeout: int = 600, command_type: str = "", no_safe_check: bool = False,**kwargs) -> str:
#     """
#     Runs a command in the desktop environment.
#     Args:
#         cmds (str): The command string.
#         timeout (int): Execution timeout in seconds.
#         command_type (str): Type of command (e.g., "cmd", "powershell").
#         no_safe_check (bool): Whether to bypass safety checks.
#     Returns:
#         str: The command output.
#     """
#     params = {
#         "cmds": cmds,
#         "timeout": timeout,
#         "command_type": command_type,
#         "no_safe_check": str(no_safe_check).lower(),
#         "userid": userid,
#         "openid": openid
#     }
#     url = f"{API_ENDPOINT}/run_cmd_in_desktop?{urllib.parse.urlencode(params)}"
#     return getweb(url, userid, openid, **kwargs)

# def run_cmd(cmd: str, userid: str, openid: str, timeout: int = 600, cmd_type: str = "",**kwargs) -> str:
#     """
#     Runs a command.
#     Args:
#         cmd (str or list): The command to run.
#         timeout (int): Execution timeout.
#         cmd_type (str): The type of command.
#     Returns:
#         str: The command output.
#     """
#     if isinstance(cmd, list):
#         full_cmd = ["cmd", "/C", "chcp", "65001", ">", "nul", "&&"] + cmd
#         cmdstr = "\n".join(full_cmd)
#     else:
#         cmdstr = cmd
#     return run_cmd_in_desktop(cmdstr, userid, openid, timeout, cmd_type, True, **kwargs)

def write_file_unsafe(path: str, content: str, userid, openid, **kwargs) -> bool:
    """Upload file content to the remote write_file_unsafe endpoint (no safety checks).

    Args:
        path (str): Absolute destination path.
        content (str): Text content to write; sent UTF-8 encoded.

    Returns:
        bool: True when the server answers "ok", False otherwise.
    """
    try:
        # Passing 'files' makes requests build a multipart/form-data body.
        response = requests.post(
            f"{API_ENDPOINT}/write_file_unsafe",
            data={'path': path, 'userid': userid, 'openid': openid},
            files={'file': ('file.bin', content.encode('utf-8'), 'application/octet-stream')},
            timeout=60,
        )
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Error in write_file_unsafe: {e}")
        return False
    return response.text == "ok"

def write_project_file(path: str, content: str, userid: str, openid: str, project_dir: str = "", **kwargs) -> bool:
    """Write content to a project file.

    Args:
        path (str): File path; treated as relative to project_dir when it does
            not start with '/' or '\\' and project_dir is non-empty.
        content (str): Content to write.
        project_dir (str): Optional project directory prefix.

    Returns:
        bool: True on success, False otherwise.
    """
    # Simplified path handling: prepend project_dir only for relative paths.
    if not (path.startswith('/') or path.startswith('\\')) and project_dir:
        full_path = f"{project_dir}{path}"
    else:
        full_path = path
    # Bug fix: the original signature lacked **kwargs while the body forwarded
    # `kwargs`, so every call raised NameError. Accepting **kwargs is
    # backward-compatible and matches the sibling tool functions.
    return write_file_unsafe(full_path, content, userid, openid, **kwargs)

def append_file_unsafe(path: str, content: str, userid: str, openid: str, **kwargs) -> bool:
    """Append content to a remote file via the append_file_unsafe endpoint.

    Args:
        path (str): File path.
        content (str): The content to append.

    Returns:
        bool: True when the server answers "ok", False otherwise.
    """
    query = urlencode({
        "path": path,
        "content": content,
        "userid": userid,
        "openid": openid,
    })
    return getweb(f"{API_ENDPOINT}/append_file_unsafe?{query}", userid, openid, **kwargs) == "ok"

def query_vectordb(query: str, doc_id: str, source: str, author: str, topk: int, userid, openid, **kwargs) -> str:
    """Query the vector database for similar documents.

    Args:
        query (str): Query string.
        doc_id (str): Document ID filter.
        source (str): Source filter.
        author (str): Author filter.
        topk (int): Number of results to return.

    Returns:
        str: The query result text ("" on failure).
    """
    query_string = urlencode({
        "query": query,
        "doc_id": doc_id,
        "source": source,
        "author": author,
        "topk": topk,
        "userid": userid,
        "openid": openid,
    })
    return getweb(f"{API_ENDPOINT}/query_vectordb?{query_string}", userid, openid, **kwargs)

def upsert_vectordb_text(task_name, content, userid, openid, **kwargs):
    """Insert or update a text entry in the vector database.

    Args:
        task_name (str): ID/name of the task or document.
        content (str): Text content to upsert.

    Returns:
        str: The operation result text ("" on failure).
    """
    form = urlencode({"id": task_name, "content": content, "userid": userid, "openid": openid})
    return postweb(
        f"{API_ENDPOINT}/upsert_vectordb_text",
        {"Content-Type": "application/x-www-form-urlencoded"},
        form,
        userid,
        openid,
        **kwargs,
    )

def delete_vectordb_item(ids: str, document_id: str, source: str, author: str, deleteall: bool, userid: str, openid: str, **kwargs) -> str:
    """Delete items from the vector database.

    Args:
        ids (str): Comma-separated DB IDs to delete.
        document_id (str): Document ID filter.
        source (str): Data source filter.
        author (str): Author filter.
        deleteall (bool): Whether to delete all matching items.

    Returns:
        str: "ok" on success ("" on failure).
    """
    query = urlencode({
        "ids": ids,
        "doc_id": document_id,
        "source": source,
        "author": author,
        "deleteall": str(deleteall).lower(),
        "userid": userid,
        "openid": openid,
    })
    return getweb(f"{API_ENDPOINT}/delete_vectordb_item?{query}", userid, openid, **kwargs)

def web_search(query, userid, openid, **kwargs):
    """Run a web search through the websearch API endpoint.

    Args:
        query (str): The search query.

    Returns:
        str: The search results text ("" on failure).
    """
    qs = urlencode({"userid": userid or "", "openid": openid or "", "q": query or ""})
    return getweb(f"{API_ENDPOINT}/websearch?{qs}", userid, openid, **kwargs)

# def SendText(content: str, userid: str, openid: str, mission: str = "", **kwargs) -> str:
#     """
#     Sends a message to the user.
#     Args:
#         content (str): Message content.
#         mission (str): Mission (optional).
#     Returns:
#         str: Send result.
#     """
#     print(f"Simulating SendText: UserID={userid}, OpenID={openid}, Content='{content}', Mission='{mission}'")
#     return "ok" # Simulate success

# def SendImage(imageData: bytes, userid: str, openid: str, **kwargs) -> str:
#     """
#     Sends an image to the user.
#     Args:
#         imageData (bytes): Binary image data.
#     Returns:
#         str: Send result.
#     """
#     url = f"{API_ENDPOINT}/sendimg?{urllib.parse.urlencode({'userid': userid or '', 'openid': openid or ''})}"
#     headers = {"Content-Type": "image/jpeg"}  # Assuming JPEG, adjust if your API expects a different type
#     return postweb(url, headers, imageData, userid, openid, **kwargs)

# def SendFile(filePath: str, fileData: bytes, userid: str, openid: str, **kwargs) -> str:
#     """
#     Sends a file to the user.
#     Args:
#         filePath (str): The path to the file.
#         fileData (bytes): Binary file data.
#     Returns:
#         str: Send result.
#     """
#     url = f"{API_ENDPOINT}/sendfile?{urllib.parse.urlencode({'userid': userid or '', 'openid': openid or '', 'fp': filePath or ''})}"
#     headers = {"Content-Type": "application/octet-stream"}
#     return postweb(url, headers, fileData, userid, openid, **kwargs)

def gen_image(prompt: str, input_image: str, output_path: str, userid: str, openid: str, **kwargs) -> str:
    """Generate an image via the gen_image API endpoint.

    Args:
        prompt (str): Image generation prompt.
        input_image (str): Optional input image path.
        output_path (str): Where the generated image should be saved.

    Returns:
        str: File path of the generated image ("" on failure).
    """
    query = urlencode({
        "userid": userid or "",
        "openid": openid or "",
        "input_image": input_image or "",
        "output_path": output_path or "",
        "prompt": prompt or "",
    })
    return getweb(f"{API_ENDPOINT}/gen_image?{query}", userid, openid, **kwargs)

def LLM(prompt: str, content: str, userid: str, openid: str, AI: str = "", **kwargs) -> str:
    """Send a prompt and content to the remote LLM endpoint for processing.

    Args:
        prompt (str): The prompt for the LLM.
        content (str): The content to process.
        AI (str): Optional AI model selector.

    Returns:
        str: The LLM response text ("" on failure).
    """
    form = urlencode({
        "AI": AI,
        "userid": userid,
        "openid": openid,
        "prompt": prompt,
        "content": content,
    })
    return postweb(
        f"{API_ENDPOINT}/llm",
        {"Content-Type": "application/x-www-form-urlencoded"},
        form,
        userid,
        openid,
        **kwargs,
    )

# def load_solution_file(project_id: str, path: str, typee: str, enc_path: str, userid, openid, **kwargs) -> str:
#     """
#     Loads a solution file.
#     Args:
#         project_id (str): The project ID.
#         path (str): The file path.
#         typee (str): The type of file.
#         enc_path (str): The encoded path.
#     Returns:
#         str: The content of the solution file.
#     """
#     params = {
#         "project_id": project_id or "",
#         "path": path or "",
#         "type": typee or "",
#         "enc_path": enc_path or "",
#         "userid": userid or "",
#         "openid": openid or ""
#     }
#     url = f"{API_ENDPOINT}/load_solution_file?{urllib.parse.urlencode(params)}"
#     return getweb(url, userid, openid, **kwargs)

def ma_check_memory_service(userid="", openid="", **kwargs):
    """Check whether the MA-Memory service is reachable.

    Returns:
        str: The service root response text ("" on failure).
    """
    # Bug fix: the original forwarded only **kwargs to getweb(), which takes
    # positional userid/openid, so the call raised TypeError unless callers
    # happened to pass both as keywords. Accepting them here with empty
    # defaults keeps those callers working and makes a bare call valid too.
    return getweb(f"{API_ENDPOINT_MAMEM}/", userid, openid, **kwargs)

def ma_add_memory(memory_data, userid, openid, **kwargs):
    """Add a new memory record to the MA-Memory service.

    Args:
        memory_data (dict): The memory payload to store.

    Returns:
        str: The API response text ("" on failure).
    """
    url = f"{API_ENDPOINT_MAMEM}/add_memory"
    headers = {"Content-Type": "application/json"}
    # Build a copy rather than mutating the caller's dict in place, so the
    # argument is not surprisingly modified as a side effect.
    payload = {**memory_data, "userid": userid, "openid": openid}
    return postweb(url, headers, json.dumps(payload), userid, openid, **kwargs)

def ma_retrieve_memory(query_task, userid, openid, successful_topk=2, failed_topk=1, insight_topk=10, threshold=0.3, **kwargs):
    """Retrieve memories from the MA-Memory service.

    Args:
        query_task (str): The task description to query for.
        successful_topk (int): Number of successful cases to retrieve.
        failed_topk (int): Number of failed cases to retrieve.
        insight_topk (int): Number of insights to retrieve.
        threshold (float): Similarity threshold.

    Returns:
        str: The retrieved memories as a JSON string ("" on failure).
    """
    payload = json.dumps({
        "query_task": query_task, "successful_topk": successful_topk,
        "failed_topk": failed_topk, "insight_topk": insight_topk,
        "threshold": threshold, "userid": userid, "openid": openid
    })
    return postweb(
        f"{API_ENDPOINT_MAMEM}/retrieve_memory",
        {"Content-Type": "application/json"},
        payload,
        userid,
        openid,
        **kwargs,
    )

def ma_project_insights(raw_insights, role, task_traj, userid, openid, **kwargs):
    """Project raw insights into a specific role's perspective.

    Args:
        raw_insights (list): The raw insight entries.
        role (str): Target role for the projection.
        task_traj (list): The current task trajectory.

    Returns:
        str: The projected insights as a JSON string ("" on failure).
    """
    payload = json.dumps({
        "raw_insights": raw_insights, "role": role, "task_traj": task_traj,
        "userid": userid,
        "openid": openid
    })
    return postweb(
        f"{API_ENDPOINT_MAMEM}/project_insights",
        {"Content-Type": "application/json"},
        payload,
        userid,
        openid,
        **kwargs,
    )

# def set_image(task_name: str, image_base64: str, userid: str, openid: str, **kwargs) -> str:
#     """
#     Sets an image.
#     Args:
#         task_name (str): The name of the task.
#         image_base64 (str): The base64 encoded image data.
#     Returns:
#         str: The API response.
#     """
#     url = f"{API_ENDPOINT}/set_image"
#     headers = {"Content-Type": "application/x-www-form-urlencoded"}
#     post_body = urllib.parse.urlencode({
#         "id": task_name,
#         "content": image_base64,
#         "userid": userid,
#         "openid": openid
#     })
#     return postweb(url, headers, post_body, userid, openid, **kwargs)

def read_file_to_text(file: str, userid: str, openid: str, **kwargs) -> str:
    """Read a document into plain text via the read_file_to_text endpoint.

    Args:
        file (str): The file path.

    Returns:
        str: The file content as text ("" on failure).
    """
    query = urlencode({
        "file": file or "",
        "userid": userid or "",
        "openid": openid or "",
    })
    return getweb(f"{API_ENDPOINT}/read_file_to_text?{query}", userid, openid, **kwargs)

def echo(text: str, userid, openid, **kwargs) -> str:
    """Return the given text unchanged (identity tool).

    Args:
        text (str): The text to echo back.

    Returns:
        str: The same text.
    """
    return text

def run_python_code(code: str, userid, openid, **kwargs) -> str:
    """Execute a Python snippet in a fresh interpreter subprocess.

    Args:
        code (str): The Python source code to execute.

    Returns:
        str: Combined stdout/stderr on success, or an error report including
        the return code when the snippet exits non-zero.
    """
    try:
        # check=True routes any non-zero exit into the CalledProcessError path.
        completed = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            check=True,
        )
    except subprocess.CalledProcessError as err:
        return f"Error executing Python code:\nStdout:\n{err.stdout}\nStderr:\n{err.stderr}\nReturn Code: {err.returncode}"
    except Exception as err:
        return f"An unexpected error occurred: {err}"
    return f"Stdout:\n{completed.stdout}\nStderr:\n{completed.stderr}"

def delete_file(path: str, userid, openid, **kwargs) -> str:
    """Remove a single file from disk.

    Args:
        path (str): The path of the file to delete.

    Returns:
        str: A human-readable success or error message.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        return f"Error: File not found at {path}"
    except Exception as e:
        return f"Error deleting file {path}: {e}"
    return f"Successfully deleted {path}"

def delete_directory(path: str, userid, openid, **kwargs) -> str:
    """Recursively remove a directory tree.

    Args:
        path (str): The path of the directory to delete.

    Returns:
        str: A human-readable success or error message.
    """
    try:
        shutil.rmtree(path)
    except FileNotFoundError:
        return f"Error: Directory not found at {path}"
    except OSError as e:
        # Caught before the generic handler to keep the specific wording.
        return f"Error deleting directory {path}: {e}"
    except Exception as e:
        return f"An unexpected error occurred: {e}"
    return f"Successfully deleted directory {path}"

def list_directory(path: str, userid, openid, **kwargs) -> str:
    """List the entries of a directory as a comma-separated string.

    NOTE(review): this definition is shadowed by a later list_directory(path,
    ignore, respect_git_ignore) redefinition further down this module.

    Args:
        path (str): The absolute path of the directory.

    Returns:
        str: Comma-separated entry names, or an error message.
    """
    try:
        return ", ".join(os.listdir(path))
    except FileNotFoundError:
        return f"Error: Directory not found at {path}"
    except Exception as e:
        return f"An unexpected error occurred: {e}"

def generate_content_with_thinking(prompt: str, userid: str, openid: str, thinking_budget: int = -1, include_thoughts: bool = False, **kwargs) -> str:
    """Generate content via the shared LLM instance's thinking mode.

    Args:
        prompt (str): The prompt for the LLM.
        thinking_budget (int): Thinking budget (-1 for dynamic, 0 to disable).
        include_thoughts (bool): Include thought summaries in the response.

    Returns:
        str: JSON-encoded result (content, thoughts, usage metrics).
    """
    return json.dumps(
        llm_instance.generate_content_with_thinking(prompt, thinking_budget, include_thoughts)
    )

def generate_content_with_pdf(pdf_path: str, prompt: str, userid: str, openid: str, **kwargs) -> str:
    """Process a local PDF file with the shared LLM instance.

    Args:
        pdf_path (str): Path to the local PDF file.
        prompt (str): The prompt for the LLM.

    Returns:
        str: JSON-encoded LLM response (content and usage metrics).
    """
    return json.dumps(llm_instance.generate_content_with_pdf(pdf_path, prompt))

def generate_content_with_pdf_url(pdf_url: str, prompt: str, userid: str, openid: str, **kwargs) -> str:
    """Process a remote PDF (by URL) with the shared LLM instance.

    Args:
        pdf_url (str): URL of the remote PDF file.
        prompt (str): The prompt for the LLM.

    Returns:
        str: JSON-encoded LLM response (content and usage metrics).
    """
    return json.dumps(llm_instance.generate_content_with_pdf_url(pdf_url, prompt))

def generate_content_with_multiple_pdfs(pdf_paths: list, prompt: str, userid: str, openid: str, **kwargs) -> str:
    """Process several local PDF files with the shared LLM instance.

    Args:
        pdf_paths (list): Paths to the local PDF files.
        prompt (str): The prompt for the LLM.

    Returns:
        str: JSON-encoded LLM response (content and usage metrics).
    """
    return json.dumps(llm_instance.generate_content_with_multiple_pdfs(pdf_paths, prompt))

def generate_content_with_audio(audio_path: str, prompt: str, userid: str, openid: str, **kwargs) -> str:
    """Process a local audio file with the shared LLM instance.

    Args:
        audio_path (str): Path to the local audio file.
        prompt (str): The prompt for the LLM.

    Returns:
        str: JSON-encoded LLM response (content and usage metrics).
    """
    return json.dumps(llm_instance.generate_content_with_audio(audio_path, prompt))

def count_audio_tokens(audio_path: str, userid: str, openid: str, **kwargs) -> str:
    """Count the tokens in a local audio file via the shared LLM instance.

    Args:
        audio_path (str): Path to the local audio file.

    Returns:
        str: JSON-encoded token count result.
    """
    return json.dumps(llm_instance.count_audio_tokens(audio_path))

import subprocess
import os
import base64
import re
import glob
import shutil
from pathlib import Path

def read_many_files(paths, exclude=None, include=None, recursive=True, useDefaultExcludes=True, respect_git_ignore=True):
    """
    Reads content from multiple files specified by paths or glob patterns.

    Args:
        paths (list): File paths, directory paths, or glob patterns.
        exclude (list): Extra glob patterns to exclude.
        include (list): Extra paths/patterns to include.
        recursive (bool): Whether glob patterns expand recursively ('**').
        useDefaultExcludes (bool): Also apply the built-in exclude patterns.
        respect_git_ignore (bool): Accepted for API compatibility; .gitignore
            parsing is not implemented here.

    Returns:
        dict: {"status": "success", "content": <joined file dump>}.
    """
    all_files_content = []
    default_excludes = [
        "node_modules", ".git", "__pycache__", ".vscode", ".idea",
        "*.log", "*.tmp", "*.temp", "*.bak", "*.swp", "*.pyc", "*.o", "*.so",
        "*.dll", "*.exe", "*.bin", "*.zip", "*.tar.gz", "*.rar", "*.7z",
        "*.jpg", "*.jpeg", "*.png", "*.gif", "*.bmp", "*.tiff", "*.webp", "*.ico",
        "*.mp3", "*.wav", "*.ogg", "*.flac", "*.aac", "*.wma",
        "*.mp4", "*.avi", "*.mkv", "*.mov", "*.wmv", "*.flv",
        "*.pdf" # PDF is handled specially if explicitly included
    ]

    if exclude is None:
        exclude = []
    if include is None:
        include = []

    effective_excludes = set(exclude)
    if useDefaultExcludes:
        effective_excludes.update(default_excludes)

    found_files = set()
    for p in paths + include:
        # Handle glob patterns
        if '*' in p or '?' in p or '[' in p:
            for f in glob.glob(p, recursive=recursive):
                found_files.add(Path(f).resolve())
        else:
            # Handle direct paths
            path_obj = Path(p).resolve()
            if path_obj.is_dir():
                for root, _, files in os.walk(path_obj):
                    for file in files:
                        found_files.add(Path(root) / file)
            elif path_obj.is_file():
                found_files.add(path_obj)

    for file_path in sorted(found_files):
        # Apply exclusions. Bug fix: Path.parts yields plain strings, which
        # have no .match() method -- the original `part.match(ex_pattern)`
        # raised AttributeError for any file not matched by the first pattern.
        # fnmatch gives the intended per-component glob matching.
        excluded = False
        for ex_pattern in effective_excludes:
            if file_path.match(ex_pattern) or any(fnmatch.fnmatch(part, ex_pattern) for part in file_path.parts):
                excluded = True
                break
        if excluded:
            continue

        try:
            # Media/PDF files are embedded base64-encoded instead of as text.
            if file_path.suffix.lower() in ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp', '.svg', '.pdf']:
                with open(file_path, 'rb') as f:
                    encoded_content = base64.b64encode(f.read()).decode('utf-8')
                all_files_content.append(f"--- {file_path} (base64 encoded) ---{encoded_content}")
            else:
                # Attempt to read as text, skip if it's clearly binary
                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                all_files_content.append(f"--- {file_path} ---{content}")
        except UnicodeDecodeError:
            # Skip non-text files that are not explicitly handled as binary
            all_files_content.append(f"--- {file_path} (skipped: binary or undecodable text) ---")
        except Exception as e:
            all_files_content.append(f"--- {file_path} (error reading: {e}) ---")

    return {"status": "success", "content": "\n".join(all_files_content)}

def run_shell_command(command, description=None, directory=None):
    """
    Executes a given shell command and captures its output.

    Args:
        command (str): Shell command line (executed with shell=True).
        description (str): Unused; kept for API compatibility.
        directory (str): Optional working directory for the command.

    Returns:
        dict: Command, Directory, Stdout, Stderr, Error, Exit Code, Signal,
              Background PIDs, and Process Group PGID fields.
    """
    # Capture the CWD before anything that can fail, so the finally block can
    # always restore it (the original only restored it on the success path and
    # could reference an unbound original_dir inside its except handler).
    original_dir = os.getcwd()
    try:
        if directory:
            os.chdir(directory)

        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, encoding='utf-8')
        stdout, stderr = process.communicate()

        return {
            "Command": command,
            "Directory": directory if directory else original_dir,
            "Stdout": stdout if stdout else "(empty)",
            "Stderr": stderr if stderr else "(empty)",
            "Error": "(none)" if process.returncode == 0 else f"Command exited with code {process.returncode}",
            "Exit Code": process.returncode,
            "Signal": "(none)",
            "Background PIDs": "(none)", # More complex to get reliably
            "Process Group PGID": "(none)" # More complex to get reliably
        }
    except Exception as e:
        return {
            "Command": command,
            "Directory": directory if directory else original_dir,
            "Stdout": "(empty)",
            "Stderr": str(e),
            "Error": str(e),
            "Exit Code": -1,
            "Signal": "(none)",
            "Background PIDs": "(none)",
            "Process Group PGID": "(none)"
        }
    finally:
        # Restore the working directory on every exit path.
        if directory:
            os.chdir(original_dir)

def list_directory(path, ignore=None, respect_git_ignore=True):
    """
    Lists files and subdirectories in a path, directories first.

    NOTE(review): this redefinition shadows the earlier
    list_directory(path, userid, openid) defined above in this module.

    Args:
        path (str): Directory to list.
        ignore (list): Glob patterns of entry names to skip.
        respect_git_ignore (bool): Accepted for API compatibility; .gitignore
            rules are not actually parsed (see comment below).

    Returns:
        dict: {"status": "success", "content": ...} or an error dict.
    """
    try:
        entries = []
        for entry in os.listdir(path):
            full_path = os.path.join(path, entry)
            # Bug fix: `glob.fnmatch` is an undocumented internal attribute of
            # the glob module; use the public fnmatch module directly.
            if ignore and any(fnmatch.fnmatch(entry, pattern) for pattern in ignore):
                continue
            # Basic .gitignore respect (only for .gitignore files in the current directory)
            if respect_git_ignore and entry == ".gitignore":
                # For a full implementation, this would parse the .gitignore file
                # and apply its rules to other entries. This is a simplification.
                pass

            if os.path.isdir(full_path):
                entries.append(f"[DIR] {entry}")
            else:
                entries.append(entry)
        entries.sort() # Sort alphabetically
        # Directories first, then files (simple sort, can be improved)
        dirs = [e for e in entries if e.startswith("[DIR]")]
        files = [e for e in entries if not e.startswith("[DIR]")]
        return {"status": "success", "content": f"Directory listing for {path}:\n" + "\n".join(dirs + files)}
    except FileNotFoundError:
        return {"status": "error", "message": f"Directory not found: {path}"}
    except Exception as e:
        return {"status": "error", "message": f"Error listing directory: {e}"}

def read_file(absolute_path, limit=None, offset=None):
    """
    Reads and returns the content of a specified file.

    Media/PDF files are returned base64-encoded with a proper MIME type; text
    files can be windowed with offset/limit (0-based line offset).

    Args:
        absolute_path (str): Path of the file to read.
        limit (int): Maximum number of lines to return (text files only).
        offset (int): 0-based line index to start from (text files only).

    Returns:
        dict: {"status": "success", "content": ...} or an error dict.
    """
    # Bug fix: the original derived the MIME type as f"image/{suffix}", which
    # produced invalid types such as "image/pdf", "image/svg", and "image/jpg".
    mime_types = {
        '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg',
        '.gif': 'image/gif', '.bmp': 'image/bmp', '.webp': 'image/webp',
        '.svg': 'image/svg+xml', '.pdf': 'application/pdf',
    }
    try:
        file_path = Path(absolute_path)
        if not file_path.is_file():
            return {"status": "error", "message": f"File not found: {absolute_path}"}

        suffix = file_path.suffix.lower()
        if suffix in mime_types:
            with open(file_path, 'rb') as f:
                encoded_content = base64.b64encode(f.read()).decode('utf-8')
            return {"status": "success", "content": {"inlineData": {"mimeType": mime_types[suffix], "data": encoded_content}}}
        else:
            with open(file_path, 'r', encoding='utf-8') as f:
                lines = f.readlines()

            total_lines = len(lines)
            start_line = offset if offset is not None else 0
            end_line = start_line + limit if limit is not None else total_lines

            truncated_message = ""
            if start_line > 0 or end_line < total_lines:
                truncated_message = f"[File content truncated: showing lines {start_line+1}-{min(end_line, total_lines)} of {total_lines} total lines...]\n"

            content_lines = lines[start_line:end_line]
            return {"status": "success", "content": truncated_message + "".join(content_lines)}
    except UnicodeDecodeError:
        return {"status": "error", "message": f"Cannot display content of binary file: {absolute_path}"}
    except Exception as e:
        return {"status": "error", "message": f"Error reading file: {e}"}

# def write_file(content, file_path):
#     """
#     Writes content to a specified file.
#     """
#     try:
#         path_obj = Path(file_path)
#         path_obj.parent.mkdir(parents=True, exist_ok=True)
#         with open(path_obj, 'w', encoding='utf-8') as f:
#             f.write(content)
#         return {"status": "success", "message": f"Successfully wrote to file: {file_path}"}
#     except Exception as e:
#         return {"status": "error", "message": f"Error writing file: {e}"}

def glob_files(pattern, case_sensitive=False, path=None, respect_git_ignore=True):
    """
    Finds files matching a glob pattern, newest first.

    Named glob_files (rather than glob) to avoid shadowing the glob module.

    Args:
        pattern (str): Glob pattern, evaluated relative to `path`.
        case_sensitive (bool): Accepted for API compatibility; pathlib's glob
            exposes no direct case-sensitivity switch here.
        path (str): Base directory; defaults to the current working directory.
        respect_git_ignore (bool): Accepted for API compatibility; not applied.

    Returns:
        dict: {"status": "success", "content": ...} or an error dict.
    """
    try:
        base_dir = Path(path) if path else Path.cwd()
        matched = [str(entry.resolve()) for entry in base_dir.glob(pattern) if entry.is_file()]
        # Newest files first.
        matched.sort(key=os.path.getmtime, reverse=True)
        header = (
            f"Found {len(matched)} file(s) matching \"{pattern}\" within {base_dir}, "
            f"sorted by modification time (newest first):\n"
        )
        return {"status": "success", "content": header + "\n".join(matched)}
    except Exception as e:
        return {"status": "error", "message": f"Error globbing files: {e}"}

def search_file_content(pattern, include=None, path=None):
    """
    Searches file contents under a directory for a regular expression.

    Args:
        pattern (str): Regular expression, searched line by line.
        include (str): Optional glob filter applied to file names.
        path (str): Base directory; defaults to the current working directory.

    Returns:
        dict: {"status": "success", "content": ...} or an error dict.
    """
    try:
        search_dir = Path(path) if path else Path.cwd()
        matches = []

        for root, _, files in os.walk(search_dir):
            for file_name in files:
                file_path = Path(root) / file_name
                # Bug fix: use the public fnmatch module rather than the
                # undocumented glob.fnmatch internal attribute.
                if include and not fnmatch.fnmatch(file_name, include):
                    continue

                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        for i, line in enumerate(f):
                            if re.search(pattern, line):
                                matches.append(f"File: {file_path.relative_to(search_dir)}\nL{i+1}: {line.strip()}")
                except UnicodeDecodeError:
                    # Skip binary files
                    pass
                except Exception as e:
                    matches.append(f"Error reading {file_path}: {e}")

        if not matches:
            return {"status": "success", "content": f"No matches found for pattern \"{pattern}\" in {search_dir} (filter: {include if include else 'None'})"}

        return {"status": "success", "content": f"Found {len(matches)} match(es) for pattern \"{pattern}\" in path \"{search_dir}\" (filter: {include if include else 'None'}):\n---\n" + "\n---\n".join(matches) + "\n---"}
    except Exception as e:
        return {"status": "error", "message": f"Error searching file content: {e}"}

def replace(file_path, new_string, old_string, expected_replacements=1):
    """Replace occurrences of old_string with new_string in a file.

    An empty old_string means "create a new file containing new_string";
    this fails if the file already exists.

    Args:
        file_path (str): Path of the file to edit (or create).
        new_string (str): Replacement text (or full content in create mode).
        old_string (str): Text to replace; "" switches to create mode.
        expected_replacements (int): Exact number of occurrences required;
            None disables the check and replaces all occurrences.

    Returns:
        dict: {"status": "success"/"error", "message": ...}.
    """
    try:
        path_obj = Path(file_path)

        # Bug fix: create mode must be handled before the existence check.
        # The original checked is_file() first, so a missing file returned
        # "File not found" and an existing one "File already exists" -- the
        # create branch was unreachable.
        if old_string == "":
            if path_obj.exists():
                return {"status": "error", "message": f"File already exists: {file_path}. Cannot create new file with empty old_string."}
            path_obj.parent.mkdir(parents=True, exist_ok=True)
            with open(path_obj, 'w', encoding='utf-8') as f:
                f.write(new_string)
            return {"status": "success", "message": f"Created new file: {file_path} with provided content."}

        if not path_obj.is_file():
            return {"status": "error", "message": f"File not found: {file_path}"}

        with open(path_obj, 'r', encoding='utf-8') as f:
            content = f.read()

        # Count occurrences
        occurrences = content.count(old_string)

        if expected_replacements is not None and occurrences != expected_replacements:
            return {"status": "error", "message": f"Failed to edit, expected {expected_replacements} occurrences but found {occurrences}."}

        if occurrences == 0:
            return {"status": "error", "message": f"Failed to edit, 0 occurrences found for old_string."}

        new_content = content.replace(old_string, new_string, expected_replacements if expected_replacements is not None else -1)

        with open(path_obj, 'w', encoding='utf-8') as f:
            f.write(new_content)

        return {"status": "success", "message": f"Successfully modified file: {file_path} ({occurrences} replacements)."}
    except Exception as e:
        return {"status": "error", "message": f"Error replacing content in file: {e}"}

# End of built-in tool function definitions.
