#!/usr/bin/env python3
# Copyright 2025 Daytona Platforms Inc.
# SPDX-License-Identifier: Apache-2.0

"""
Script to convert async code to sync code using the unasync library.

This script transforms files from the _async folder to the _sync folder,
converting async/await patterns to synchronous equivalents, while:
  - preserving regions marked by # unasync: preserve start / # unasync: preserve end
  - dropping regions marked by # unasync: delete start / # unasync: delete end
  - stripping all Awaitable[...] and cleaning up Awaitable imports
  - replacing aiofiles.open calls with built-in open and removing aiofiles imports
  - translating await asyncio.to_thread(...) calls into direct method calls
  - removing unused asyncio imports
  - converting `await process_streaming_response(...)` into `asyncio.run(process_streaming_response(...))`
  - wrapping any unwrapped calls to `process_streaming_response(...)` in `asyncio.run(...)`
    (but not when `process_streaming_response` appears inside an import statement)
"""

import logging
import re
import sys
import tempfile
from pathlib import Path

import unasync

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)

# Project paths
# Resolve the repository root (this script lives one directory below it) and
# put it on sys.path so the conversion can be run directly as a script.
project_root = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(project_root))

# Conversion input (async sources) and output (generated sync sources).
SOURCE_DIR = project_root / "src" / "daytona" / "_async"
TARGET_DIR = project_root / "src" / "daytona" / "_sync"

# Regex markers for blocks
# Each marker must stand alone on its own (optionally indented) line; they are
# matched against whole lines with .match() in pre_filter().
MARKERS = {
    "preserve_start": re.compile(r"^\s*#\s*unasync:\s*preserve\s+start"),
    "preserve_end": re.compile(r"^\s*#\s*unasync:\s*preserve\s+end"),
    "delete_start": re.compile(r"^\s*#\s*unasync:\s*delete\s+start"),
    "delete_end": re.compile(r"^\s*#\s*unasync:\s*delete\s+end"),
}

# Simple token maps for unasync
# Literal token -> token substitutions; presumably handed to the unasync run,
# which happens outside this section — confirm against the driver code.
ADDITIONAL_REPLACEMENTS = {
    # Async/await syntax
    "async def": "def",
    "async with": "with",
    "async for": "for",
    "await ": "",
    # Module & class renames
    "daytona_api_client_async": "daytona_api_client",
    "daytona_toolbox_api_client_async": "daytona_toolbox_api_client",
    "AsyncVolumeService": "VolumeService",
    "AsyncFileSystem": "FileSystem",
    "AsyncGit": "Git",
    "AsyncLspServer": "LspServer",
    "AsyncProcess": "Process",
    "AsyncDaytona": "Daytona",
    "AsyncSandbox": "Sandbox",
    "AsyncPaginatedSandboxes": "PaginatedSandboxes",
    # aiofiles replacement
    "aiofiles.open": "open",
    # aioboto3 replacement
    "aioboto3": "boto3",
    "async_pty_handle": "pty_handle",
}

# Complex regex-based tweaks
# PRE_REPLACEMENTS/POST_REPLACEMENTS look like pre- and post-unasync regex
# passes (applied per line via apply_replacements); the consumer is outside
# this section — verify against the driver code.
PRE_REPLACEMENTS = [
    (re.compile(r"\bAsync([A-Z]\w*)\b"), r"\1"),
]
POST_REPLACEMENTS = [
    # Remove any remaining 'await ' (including in docstrings)
    (re.compile(r"\bawait\s+"), ""),
    # Strip Sync prefix
    (re.compile(r"\bSync([A-Z]\w*)\b"), r"\1"),
    # Remove _async suffix from method calls
    (re.compile(r"(\w+)_async\("), r"\1("),
    # httpx client fix
    (re.compile(r"httpx\.SyncClient\b"), "httpx.Client"),
    (re.compile(r"\.aiter_bytes\b"), ".iter_bytes"),
    # Update module imports
    (re.compile(r"from daytona\._async"), "from daytona._sync"),
    # Fix websockets imports from asyncio to sync
    (re.compile(r"from websockets\.asyncio\.client import"), "from websockets.sync.client import"),
    (re.compile(r"import websockets\.asyncio\.client"), "import websockets.sync.client"),
    # Documentation cleanup
    (re.compile(r"\basynchronous methods\b"), "methods"),
    (re.compile(r"\basynchronous\b"), "synchronous"),
    (re.compile(r"\basynchronously\b"), ""),
    # Replace aiofiles.open with built-in open
    (re.compile(r"\baiofiles\.open\b"), "open"),
    # Replace aiofiles.os calls with os calls
    (re.compile(r"\baiofiles\.os\.makedirs\b"), "os.makedirs"),
    (re.compile(r"\baiofiles\.os\.path\.exists\b"), "os.path.exists"),
    (re.compile(r"\baiofiles\.os\.path\.abspath\b"), "os.path.abspath"),
    (re.compile(r"\baiofiles\.os\.path\.isfile\b"), "os.path.isfile"),
    (re.compile(r"\baiofiles\.os\.path\.isdir\b"), "os.path.isdir"),
    (re.compile(r"\baiofiles\.os\.path\.join\b"), "os.path.join"),
    (re.compile(r"\baiofiles\.os\.path\.dirname\b"), "os.path.dirname"),
    (re.compile(r"\baiofiles\.os\.path\.basename\b"), "os.path.basename"),
    (re.compile(r"\baiofiles\.os\.path\.normpath\b"), "os.path.normpath"),
    (re.compile(r"\baiofiles\.os\.path\.relpath\b"), "os.path.relpath"),
    (re.compile(r"\baiofiles\.os\.path\.splitdrive\b"), "os.path.splitdrive"),
    # NOTE(review): os.path.lstrip does not exist — confirm this mapping is ever hit
    (re.compile(r"\baiofiles\.os\.path\.lstrip\b"), "os.path.lstrip"),
    (re.compile(r"\baiofiles\.os\.walk\b"), "os.walk"),
    # Replace _async_os_walk with os.walk
    (re.compile(r"\bself\._async_os_walk\b"), "os.walk"),
    # Remove aiofiles imports (including submodules)
    (re.compile(r"^import aiofiles(?:\.[\w_]+)?\s*$", flags=re.MULTILINE), ""),
    (re.compile(r"^from aiofiles(?:\.[\w_]+)? import .+$", flags=re.MULTILINE), ""),
]

# Auto-generation banner
# Injected right after the license header of each generated file
# (see find_license_end).
AUTO_GEN_WARNING = (
    "# DO NOT EDIT THIS FILE MANUALLY.\n"
    "# This file is auto-generated by the unasync conversion script.\n"
    "# Edit the async source and re-run this script.\n\n"
)


def find_license_end(lines):
    """Locate end of license/header to inject banner.

    Scans from the top and returns the index of the first line that is
    neither blank nor a comment (i.e. where the banner should go).
    """
    end = 0
    for index, raw in enumerate(lines):
        content = raw.strip()
        # First real code line terminates the header region.
        if content and not content.startswith("#"):
            break
        end = index + 1
    return end


def transform_docstrings(text: str) -> str:
    """
    Transform docstrings so that code-block examples are converted from async to sync:
    1. async with AsyncDaytona() as var: -> var = Daytona()
    2. try/finally blocks with only daytona.close() -> remove try/finally, unindent body
    3. try/finally blocks with other content -> remove only daytona.close() line
    4. Properly handle indentation for all cases

    Args:
        text: Full source text (code plus docstrings).

    Returns:
        The text with every ```python fenced block rewritten, stray
        daytona.close() lines dropped, runs of blank lines collapsed,
        and remaining 'await ' keywords removed.
    """

    def process_python_code_block(match):
        """Process individual ```python ... ``` blocks"""
        block = match.group(0)
        lines = block.split("\n")

        # Need at least the opening fence plus one more line to do anything.
        if len(lines) < 2:
            return block

        result_lines = [lines[0]]  # Keep opening fence
        i = 1

        while i < len(lines):
            line = lines[i]

            # Check for closing fence
            if line.strip() == "```":
                result_lines.append(line)
                break

            # Handle async with pattern
            # NOTE(review): the regex requires a literal '()' right after
            # (Async)Daytona, so 'async with AsyncDaytona(cfg) as d:' is
            # NOT matched — confirm that is intended.
            async_with_match = re.match(
                r"^(\s*)async\s+with\s+(?:Async)?Daytona\(\)(?:\([^)]*\))?\s+as\s+(\w+):\s*(#.*)?$", line
            )
            if async_with_match:
                indent, var_name, comment = async_with_match.groups()
                # Transform to variable assignment
                new_line = f"{indent}{var_name} = Daytona()"
                if comment:
                    new_line += f"  {comment}"
                result_lines.append(new_line)

                # Process the block content, reducing indentation by 4 spaces
                i += 1
                async_with_indent = len(indent)

                while i < len(lines):
                    line = lines[i]

                    # Check for closing fence
                    if line.strip() == "```":
                        result_lines.append(line)
                        i = len(lines)  # Exit outer loop too
                        break

                    # Check if we've exited the async with block
                    # (a non-blank line at or below the 'async with' indent)
                    line_indent = len(line) - len(line.lstrip()) if line.strip() else 0
                    if line.strip() and line_indent <= async_with_indent:
                        # We've exited the async with block, process this line normally
                        # Don't increment i here, let the outer loop handle it
                        break

                    # Reduce indentation by 4 spaces for lines inside async with
                    if line.strip():  # Skip empty lines
                        if line.startswith(" " * (async_with_indent + 4)):
                            new_line = line[4:]  # Remove 4 spaces
                        else:
                            new_line = line  # Keep as is if indentation is unexpected
                    else:
                        new_line = line  # Keep empty lines as is

                    result_lines.append(new_line)
                    i += 1
                continue

            # Handle try/finally blocks
            if line.strip() == "try:":
                try_indent = len(line) - len(line.lstrip())

                # Look for finally block
                # (only a 'finally:' at the same indentation as 'try:' counts)
                j = i + 1
                finally_idx = None
                while j < len(lines):
                    if lines[j].strip() == "```":
                        break
                    if lines[j].strip() == "finally:":
                        finally_line_indent = len(lines[j]) - len(lines[j].lstrip())
                        if finally_line_indent == try_indent:
                            finally_idx = j
                            break
                    j += 1

                if finally_idx is not None:
                    # Check what's in the finally block
                    # (collect lines until the fence or a dedent back to try level;
                    # k ends up pointing just past the finally body)
                    finally_content = []
                    k = finally_idx + 1
                    while k < len(lines):
                        if lines[k].strip() == "```":
                            break
                        finally_line_indent = len(lines[k]) - len(lines[k].lstrip()) if lines[k].strip() else 0
                        if lines[k].strip() and finally_line_indent <= try_indent:
                            break
                        finally_content.append(lines[k])
                        k += 1

                    # Check if finally only contains daytona.close()
                    non_empty_finally = [l for l in finally_content if l.strip()]
                    only_has_close = len(non_empty_finally) == 1 and (
                        re.search(r"daytona\w*\.close\(\)", non_empty_finally[0])
                        or re.search(r"await\s+daytona\w*\.close\(\)", non_empty_finally[0])
                    )

                    if only_has_close:
                        # Remove try/finally, unindent try body
                        # Skip the 'try:' line
                        i += 1

                        # Process try body with reduced indentation
                        while i < finally_idx:
                            line = lines[i]
                            if line.strip():  # Skip empty lines
                                if line.startswith(" " * (try_indent + 4)):
                                    new_line = " " * try_indent + line[try_indent + 4 :]
                                else:
                                    new_line = line
                            else:
                                new_line = line
                            result_lines.append(new_line)
                            i += 1

                        # Skip the finally block
                        i = k
                        continue
                    # Keep try/finally but remove daytona.close() lines
                    result_lines.append(line)  # Add 'try:' line
                    i += 1

                    # Add try body
                    while i < finally_idx:
                        result_lines.append(lines[i])
                        i += 1

                    # Add 'finally:' line
                    result_lines.append(lines[i])
                    i += 1

                    # Add finally body, filtering out daytona.close()
                    while i < k:
                        line = lines[i]
                        if not re.search(r"(?:await\s+)?daytona\w*\.close\(\)", line):
                            result_lines.append(line)
                        i += 1
                    continue

            # Regular line processing
            result_lines.append(line)
            i += 1

        return "\n".join(result_lines)

    # Process all python code blocks
    text = re.sub(r"(^[ \t]*```python\n.*?^[ \t]*```)", process_python_code_block, text, flags=re.MULTILINE | re.DOTALL)

    # Remove any stray daytona.close() lines outside code blocks
    text = re.sub(r"^\s*(?:await\s+)?daytona\w*\.close\(\)\s*$", "", text, flags=re.MULTILINE)

    # Clean up multiple blank lines
    text = re.sub(r"\n\s*\n\s*\n", "\n\n", text)

    # Remove await keywords outside code blocks
    # NOTE(review): this substitution runs over the whole text, so it also
    # strips 'await ' inside code blocks — confirm that is intended.
    text = re.sub(r"\bawait\s+", "", text)

    return text


def pre_filter(src: Path, dst: Path) -> dict:
    """
    Copy src to dst, removing delete-blocks and preserving verbatim sections.

    Lines between delete markers are dropped; lines between preserve markers
    are replaced in the output by a unique placeholder comment so unasync
    never touches them.

    Returns a map of placeholder→block for all # unasync: preserve … blocks.
    """
    kept = []
    preserved = {}
    current_block = []
    block_count = 0
    deleting = False
    preserving = False

    for raw in src.read_text(encoding="utf-8").splitlines(keepends=True):
        # Delete markers take precedence, even inside a preserve region.
        if MARKERS["delete_start"].match(raw):
            deleting = True
        elif MARKERS["delete_end"].match(raw):
            deleting = False
        elif deleting:
            pass  # drop everything inside a delete block
        elif MARKERS["preserve_start"].match(raw):
            preserving = True
            current_block = []
        elif MARKERS["preserve_end"].match(raw):
            preserving = False
            placeholder = f"# UNASYNC_SKIP_BLOCK_{block_count}\n"
            preserved[placeholder] = "".join(current_block)
            kept.append(placeholder)
            block_count += 1
        elif preserving:
            current_block.append(raw)
        else:
            kept.append(raw)

    dst.write_text("".join(kept), encoding="utf-8")
    return preserved


def apply_replacements(text: str, replacements: list) -> str:
    """
    Run every (compiled pattern, replacement) pair over each line of *text*.

    Lines containing 'run_async' are passed through untouched — those must
    never be rewritten.
    """
    out = []
    for line in text.splitlines(keepends=True):
        if "run_async" not in line:
            for pattern, replacement in replacements:
                line = pattern.sub(replacement, line)
        out.append(line)
    return "".join(out)


def convert_async_executor_patterns(text: str) -> str:
    """
    Convert async executor patterns to sync equivalents:
    - Remove 'loop = asyncio.get_running_loop()' lines
    - Convert 'loop.run_in_executor(None, func, args)' to 'func(args)'
    - Convert 'loop.run_in_executor(None, func)' to 'func()'
      (zero-argument form — previously left unconverted)
    """
    result_lines = []

    for line in text.splitlines(keepends=True):
        # Drop loop-handle assignments entirely; sync code has no event loop.
        if re.search(r"^\s*loop\s*=\s*asyncio\.get_running_loop\(\)", line):
            continue

        # Convert loop.run_in_executor patterns
        # Pattern: loop.run_in_executor(None, reader.read, 1024 * 64)
        # Result: reader.read(1024 * 64)
        if "loop.run_in_executor(None," in line:
            pattern = r"loop\.run_in_executor\(None,\s*([^,]+),\s*(.+?)\)"
            match = re.search(pattern, line)
            if match:
                func = match.group(1).strip()
                args = match.group(2).strip()
                # Replace the entire match with func(args)
                line = re.sub(pattern, f"{func}({args})", line)
            else:
                # Zero-argument form: loop.run_in_executor(None, func) -> func()
                line = re.sub(r"loop\.run_in_executor\(None,\s*([^,)]+?)\s*\)", r"\1()", line)

        result_lines.append(line)

    return "".join(result_lines)


def replace_all_to_thread_calls(text: str) -> str:
    """
    Translate all occurrences of:
      await asyncio.to_thread(func_expr, arg1, arg2, ...)
    into:
      func_expr(arg1, arg2, ...)

    Only the asyncio.to_thread(...) call itself is rewritten; any leading
    'await ' is stripped by a separate replacement pass. Occurrences without
    balanced parentheses are left untouched.
    """
    marker = "asyncio.to_thread"
    result_parts = []
    i = 0
    while True:
        idx = text.find(marker, i)
        if idx == -1:
            # No more occurrences; append the remaining text
            result_parts.append(text[i:])
            break

        # Append text up to the match
        result_parts.append(text[i:idx])

        # Find the opening parenthesis after "asyncio.to_thread"
        start_paren = text.find("(", idx)
        if start_paren == -1:
            # Malformed usage; copy the marker verbatim and move past it
            i = idx + len(marker)
            result_parts.append(text[idx:i])
            continue

        # Find the matching closing parenthesis
        depth = 0
        end_paren = None
        for j in range(start_paren, len(text)):
            if text[j] == "(":
                depth += 1
            elif text[j] == ")":
                depth -= 1
                if depth == 0:
                    end_paren = j
                    break

        if end_paren is None:
            # No matching closing parenthesis.
            # BUG FIX: the old code reset i back to idx here, so the next
            # find() re-hit the same occurrence and spun forever while
            # appending one duplicated character per iteration. Copy the
            # marker verbatim and advance past it instead.
            i = idx + len(marker)
            result_parts.append(text[idx:i])
            continue

        # Extract the inside of the parentheses
        inside = text[start_paren + 1 : end_paren]

        # Split into function expression and arguments at the first
        # top-level comma (commas nested in parentheses don't count)
        depth = 0
        split_index = None
        for k, ch in enumerate(inside):
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
            elif ch == "," and depth == 0:
                split_index = k
                break

        if split_index is not None:
            func_expr = inside[:split_index].strip()
            rest_args = inside[split_index + 1 :].strip()
        else:
            # No extra arguments: asyncio.to_thread(func) -> func()
            func_expr = inside.strip()
            rest_args = ""

        # Construct replacement text
        result_parts.append(f"{func_expr}({rest_args})" if rest_args else f"{func_expr}()")
        i = end_paren + 1

    return "".join(result_parts)


def replace_await_process_streaming(text: str) -> str:
    """
    Replace occurrences of:
      await process_streaming_response(...)

    with:
      asyncio.run(process_streaming_response(...))

    Occurrences without a following '(' or without a balanced ')' are
    copied through unchanged.
    """
    needle = "await process_streaming_response"
    out = []
    pos = 0

    while True:
        hit = text.find(needle, pos)
        if hit == -1:
            # Copy the tail and finish.
            out.append(text[pos:])
            return "".join(out)

        out.append(text[pos:hit])

        open_paren = text.find("(", hit + len(needle))
        if open_paren == -1:
            # Malformed usage; keep the needle verbatim and continue.
            out.append(needle)
            pos = hit + len(needle)
            continue

        # Scan forward for the parenthesis that balances the call.
        depth = 0
        close_paren = -1
        for k in range(open_paren, len(text)):
            ch = text[k]
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
                if depth == 0:
                    close_paren = k
                    break

        if close_paren == -1:
            # Unbalanced parentheses; keep the needle verbatim and continue.
            out.append(needle)
            pos = hit + len(needle)
            continue

        # Re-emit the call wrapped in asyncio.run(...).
        call_args = text[open_paren + 1 : close_paren]
        out.append(f"asyncio.run(process_streaming_response({call_args}))")
        pos = close_paren + 1


def replace_unwrapped_process_streaming(text: str) -> str:
    """
    Wrap standalone calls to process_streaming_response(...) with asyncio.run(...)
    if they are not already wrapped, and skip wrapping in import statements.

    Args:
        text: Source text to rewrite.

    Returns:
        The rewritten text. Occurrences on import lines, occurrences already
        immediately preceded by 'asyncio.run(', and occurrences without
        balanced parentheses are copied through unchanged.
    """
    result_parts = []
    i = 0
    func_name = "process_streaming_response"
    wrapper_prefix = "asyncio.run("
    length_name = len(func_name)

    while True:
        idx = text.find(func_name, i)
        if idx == -1:
            result_parts.append(text[i:])
            break

        # Append text up to this occurrence
        result_parts.append(text[i:idx])

        # Determine the start of the current line
        # (rfind returns -1 when there is no preceding newline, so +1 yields 0)
        line_start = text.rfind("\n", 0, idx) + 1
        line_prefix = text[line_start:idx].lstrip()

        # If this occurrence is inside an import statement, skip wrapping
        if line_prefix.startswith("import ") or line_prefix.startswith("from "):
            # Find end of this occurrence (skip over the function name and any following "()")
            j = idx + length_name
            # If next non-space character is '(', skip to matching ')'
            while j < len(text) and text[j].isspace():
                j += 1
            if j < len(text) and text[j] == "(":
                paren_count = 0
                k = j
                while k < len(text):
                    if text[k] == "(":
                        paren_count += 1
                    elif text[k] == ")":
                        paren_count -= 1
                        if paren_count == 0:
                            j = k + 1
                            break
                    k += 1
            # Copy the whole occurrence (name plus any call) verbatim.
            result_parts.append(text[idx:j])
            i = j
            continue

        # Find opening parenthesis after the function name
        start_paren = text.find("(", idx + length_name)
        if start_paren == -1:
            # No parenthesis, skip this occurrence
            result_parts.append(text[idx : idx + length_name])
            i = idx + length_name
            continue

        # Find matching closing parenthesis
        paren_count = 0
        j = start_paren
        end_paren = None
        while j < len(text):
            if text[j] == "(":
                paren_count += 1
            elif text[j] == ")":
                paren_count -= 1
                if paren_count == 0:
                    end_paren = j
                    break
            j += 1
        if end_paren is None:
            # Unbalanced parentheses, copy as is and move on
            result_parts.append(text[idx : idx + length_name])
            i = idx + length_name
            continue

        # Check if already wrapped in asyncio.run(
        # (only 'asyncio.run(' ending immediately before this name counts)
        head = text[:idx]
        if re.search(r"asyncio\.run\s*\(\s*$", head):
            # Already within an asyncio.run(, so do not wrap
            result_parts.append(text[idx : end_paren + 1])
            i = end_paren + 1
            continue

        # Otherwise, wrap the call
        original_call = text[idx : end_paren + 1]
        wrapped = f"{wrapper_prefix}{original_call})"
        result_parts.append(wrapped)
        i = end_paren + 1

    return "".join(result_parts)


def replace_asyncio_sleep_calls(text: str) -> str:
    """
    Replace occurrences of:
      await asyncio.sleep(<some-value>)
      asyncio.sleep(<some-value>)
    with:
      time.sleep(<some-value>)

    The ~50-line scanning loop was previously duplicated verbatim for the
    two patterns; it is now a single helper applied twice. Occurrences
    without balanced parentheses are copied through unchanged.
    """

    def _rewrite(src: str, pattern: str) -> str:
        """Rewrite every `pattern(<args>)` call in src as `time.sleep(<args>)`."""
        out = []
        i = 0
        plen = len(pattern)
        while True:
            idx = src.find(pattern, i)
            if idx == -1:
                # No more occurrences; append the remaining text.
                out.append(src[i:])
                return "".join(out)

            out.append(src[i:idx])

            # Find the opening parenthesis after the pattern.
            start_paren = src.find("(", idx + plen)
            if start_paren == -1:
                # Malformed usage; copy the pattern and move on.
                out.append(pattern)
                i = idx + plen
                continue

            # Find the matching closing parenthesis for this call.
            depth = 0
            end_paren = None
            for j in range(start_paren, len(src)):
                if src[j] == "(":
                    depth += 1
                elif src[j] == ")":
                    depth -= 1
                    if depth == 0:
                        end_paren = j
                        break

            if end_paren is None:
                # Unbalanced parentheses; copy the pattern and move on.
                out.append(pattern)
                i = idx + plen
                continue

            # Re-emit the call as time.sleep(<original args>).
            out.append(f"time.sleep({src[start_paren + 1 : end_paren]})")
            i = end_paren + 1

    # Handle the awaited form first so no 'await ' residue is left behind,
    # then the bare form (present after unasync has stripped await).
    text = _rewrite(text, "await asyncio.sleep")
    return _rewrite(text, "asyncio.sleep")


def replace_asyncio_create_task_with_threading(text: str) -> str:
    """
    Replace:
      <var> = asyncio.create_task(<some_method>())
    with:
      <var> = threading.Thread(target=<some_method>)
      <var>.start()
    and replace any line that is just <var> with <var>.join()
    Also, ensure 'import threading' is present if any such replacement is made.
    """
    task_re = re.compile(r"^(\s*)(\w+)\s*=\s*asyncio\.create_task\(\s*([\w\.]+)\s*\(\s*\)\s*\)\s*$")
    thread_vars = set()

    # Pass 1: rewrite create_task assignments into Thread construction + start,
    # remembering the assigned variable names.
    rewritten = []
    for raw in text.splitlines(keepends=True):
        hit = task_re.match(raw)
        if hit is None:
            rewritten.append(raw)
            continue
        pad, name, target = hit.groups()
        rewritten.append(f"{pad}{name} = threading.Thread(target={target})\n{pad}{name}.start()\n")
        thread_vars.add(name)

    # Pass 2: a line consisting solely of a thread variable (the awaited task)
    # becomes a join() on that thread.
    output = []
    for raw in rewritten:
        bare = raw.strip()
        if bare in thread_vars:
            pad = raw[: raw.index(bare)]
            output.append(f"{pad}{bare}.join()\n")
        else:
            output.append(raw)

    # Pass 3: guarantee 'import threading' exists when threads were introduced,
    # inserting it after the leading run of imports/comments/blank lines.
    if thread_vars and not any(re.match(r"^\s*import threading\s*$", raw) for raw in output):
        position = 0
        for offset, raw in enumerate(output):
            head = raw.strip()
            if head.startswith("import") or head.startswith("from") or head.startswith("#") or not head:
                position = offset + 1
            else:
                break
        output.insert(position, "import threading\n")

    return "".join(output)


def _used_names(names_str: str, non_import_text: str) -> list:
    """Return the comma-separated names from an import clause that actually
    appear (as whole words) in the non-import portion of the file."""
    used = []
    for name in (part.strip() for part in names_str.split(",")):
        if name and re.search(rf"\b{re.escape(name)}\b", non_import_text):
            used.append(name)
    return used


def _import_insertion_index(lines: list) -> int:
    """Return the index just past the leading comment/blank header and the
    top-of-file import block (including multi-line parenthesized imports),
    i.e. the best position at which to insert a new import line."""
    insert_idx = 0
    in_multiline_import = False
    paren_count = 0

    for i, line in enumerate(lines):
        stripped = line.strip()

        # Skip license/comments and blank lines at the beginning
        if not stripped or stripped.startswith("#"):
            insert_idx = i + 1
            continue

        # Handle import statements, tracking parentheses for multi-line imports
        if stripped.startswith(("import ", "from ")):
            paren_count += line.count("(") - line.count(")")
            if paren_count > 0:
                in_multiline_import = True
            insert_idx = i + 1
        elif in_multiline_import:
            paren_count += line.count("(") - line.count(")")
            if paren_count <= 0:
                in_multiline_import = False
                paren_count = 0
            insert_idx = i + 1
        else:
            # First real (non-import) code line: insert before it
            break

    return insert_idx


def manage_asyncio_imports(text: str) -> str:
    """
    Manage asyncio, time, and aiofiles imports based on usage:
    - If asyncio is used in the text but not imported, add "import asyncio"
    - If asyncio is not used, remove any existing asyncio imports
    - If time is used in the text but not imported, add "import time"
    - If time is not used, remove any existing time imports
    - If aiofiles is not used, remove any existing aiofiles imports

    "Used" means a dotted reference (``asyncio.``, ``time.``, ``aiofiles.``)
    appears somewhere outside the import lines themselves; ``from X import``
    clauses are additionally pruned to the names that actually occur.
    """
    lines = text.splitlines(keepends=True)

    # Collect all text except the import lines for the three managed modules,
    # so the imports themselves never count as "usage".
    managed_import_prefixes = (
        "import asyncio",
        "from asyncio import",
        "import time",
        "from time import",
        "import aiofiles",
        "from aiofiles import",
    )
    non_import_text = "".join(
        line for line in lines if not line.strip().startswith(managed_import_prefixes)
    )

    asyncio_used = bool(re.search(r"\basyncio\.", non_import_text))
    time_used = bool(re.search(r"\btime\.", non_import_text))
    aiofiles_used = bool(re.search(r"\baiofiles\.", non_import_text))

    has_asyncio_import = False
    has_time_import = False
    new_lines = []

    for line in lines:
        # Plain "import asyncio": keep only if asyncio is used
        if re.match(r"^\s*import asyncio\s*$", line):
            has_asyncio_import = True
            if asyncio_used:
                new_lines.append(line)
            continue

        # Plain "import time": keep only if time is used
        if re.match(r"^\s*import time\s*$", line):
            has_time_import = True
            if time_used:
                new_lines.append(line)
            continue

        # "import aiofiles" (or a submodule): keep only if aiofiles is used
        if re.match(r"^\s*import aiofiles(?:\.[\w_]+)?\s*$", line):
            if aiofiles_used:
                new_lines.append(line)
            continue

        # "from asyncio import X, Y": prune to the names actually used
        m = re.match(r"^\s*from asyncio import (.+)$", line)
        if m:
            if asyncio_used:
                used = _used_names(m.group(1), non_import_text)
                if used:
                    new_lines.append(f"from asyncio import {', '.join(used)}\n")
            continue

        # "from time import X, Y": prune to the names actually used
        m = re.match(r"^\s*from time import (.+)$", line)
        if m:
            if time_used:
                used = _used_names(m.group(1), non_import_text)
                if used:
                    new_lines.append(f"from time import {', '.join(used)}\n")
            continue

        # "from aiofiles import X, Y": prune to the names actually used
        m = re.match(r"^\s*from aiofiles(?:\.[\w_]+)? import (.+)$", line)
        if m:
            if aiofiles_used:
                used = _used_names(m.group(1), non_import_text)
                if used:
                    new_lines.append(f"from aiofiles import {', '.join(used)}\n")
            continue

        new_lines.append(line)

    # Add missing imports for modules that are referenced via "<mod>." —
    # asyncio first, then time, each positioned after the import block.
    if asyncio_used and not has_asyncio_import:
        new_lines.insert(_import_insertion_index(new_lines), "import asyncio\n")
    if time_used and not has_time_import:
        new_lines.insert(_import_insertion_index(new_lines), "import time\n")

    return "".join(new_lines)


def manage_typing_imports(text: str) -> str:
    """
    Manage typing imports based on usage:
    - Remove unused typing imports (Union, Optional, List, Dict, Callable, etc.)
    - Keep only the typing imports that are actually used in the code

    Handles both single-line and parenthesized multi-line
    ``from typing import (...)`` statements, and keeps any ``as`` aliases
    intact while checking usage against the un-aliased name.
    """
    lines = text.splitlines(keepends=True)

    # Build the code text excluding typing-import lines, so the import line
    # itself never counts as a "usage" of the imported name.
    non_import_text = "".join(
        line
        for line in lines
        if not (line.strip().startswith("from typing import") or line.strip().startswith("import typing"))
    )

    new_lines = []

    for i, line in enumerate(lines):
        # Handle "from typing import ..." lines
        m = re.match(r"^\s*from typing import (.+)$", line)
        if m:
            import_content = m.group(1)
            j = i + 1

            # Multi-line import: consume continuation lines up to ")".
            if "(" in import_content and ")" not in import_content:
                while j < len(lines) and ")" not in lines[j]:
                    import_content += lines[j].replace("\n", " ")
                    j += 1
                if j < len(lines):
                    import_content += lines[j].replace("\n", " ")
                    j += 1

                # Blank out the consumed continuation lines so the outer
                # enumerate() skips them on later iterations.
                for skip_idx in range(i + 1, j):
                    if skip_idx < len(lines):
                        lines[skip_idx] = ""

            # Remove parentheses and split into individual imported items
            import_content = import_content.replace("(", "").replace(")", "")
            imported_items = [item.strip() for item in import_content.split(",") if item.strip()]

            # Keep an item only if its un-aliased name appears in the code
            # (e.g. "Dict as MyDict" is checked as "Dict")
            used_imports = []
            for item in imported_items:
                clean_item = item.split(" as ")[0].strip()
                if re.search(rf"\b{re.escape(clean_item)}\b", non_import_text):
                    used_imports.append(item)

            # Re-emit the import only if anything survived; use the
            # multi-line form (trailing commas) for more than three items.
            if used_imports:
                if len(used_imports) <= 3:
                    new_lines.append(f"from typing import {', '.join(used_imports)}\n")
                else:
                    new_lines.append("from typing import (\n")
                    for item in used_imports:
                        new_lines.append(f"    {item},\n")
                    new_lines.append(")\n")
            continue

        # Handle "import typing": keep only if "typing.X" is referenced
        if re.match(r"^\s*import typing\s*$", line):
            if re.search(r"\btyping\.", non_import_text):
                new_lines.append(line)
            continue

        # All other lines pass through unchanged
        new_lines.append(line)

    return "".join(new_lines)


def replace_with_constructor_as_var(text: str) -> str:
    """
    Convert:
        with ObjectStorage(...) as <var>:
            <block>
    to:
        <var> = ObjectStorage(...) (preserving multi-line formatting)
        <block> (at the same indentation as the original 'with')
    Handles multi-line constructor calls, including when 'as <var>:' is on a separate line.
    Uses block unindentation logic similar to transform_docstrings, but preserves parent indentation.
    Only processes ObjectStorage constructor calls.
    """
    lines = text.splitlines(keepends=True)
    out = []
    i = 0
    n = len(lines)
    while i < n:
        line = lines[i]
        match = re.match(r"^(\s*)with (.*)", line)
        if match:
            with_indent = match.group(1)
            with_indent_len = len(with_indent)
            # Accumulate lines until we find 'as <var>:'
            # NOTE(review): a 'with' statement without 'as <var>:' (e.g.
            # 'with lock:') scans to end-of-file here before falling through
            # to the plain-copy path below — correct, but O(n) per such line.
            constructor_lines = [line]
            j = i + 1
            while j < n and not re.search(r"as \w+:\s*$", "".join(constructor_lines)):
                constructor_lines.append(lines[j])
                j += 1
            full_with = "".join(constructor_lines)
            # Flatten newlines so '(.*)' can span the whole multi-line call.
            as_match = re.search(r"with (.*) as (\w+):\s*$", full_with.replace("\n", " "), re.DOTALL)
            if as_match:
                # Check if this is an ObjectStorage constructor call
                constructor_call = as_match.group(1).strip()
                if "ObjectStorage" not in constructor_call:
                    # Not an ObjectStorage call, keep as is (only the first
                    # line is emitted here; the rest is revisited by the loop)
                    out.append(line)
                    i += 1
                    continue

                # Find the line where 'as <var>:' occurs
                for idx, cl in enumerate(constructor_lines):
                    if re.search(r"as \w+:\s*$", cl):
                        as_line_idx = idx
                        break
                else:
                    # No single line ends with 'as <var>:' — treat the whole
                    # accumulated statement as the constructor call.
                    as_line_idx = len(constructor_lines) - 1
                # Everything up to and including as_line_idx is the constructor call
                constructor_call_lines = constructor_lines[: as_line_idx + 1]
                # Remove 'with ' from the first line, and ' as <var>:' from the last
                first = constructor_call_lines[0]
                first = re.sub(r"^(\s*)with ", r"\1", first)
                last = constructor_call_lines[-1]
                last = re.sub(r" as \w+:\s*$", "", last)
                constructor_call_lines[0] = first
                constructor_call_lines[-1] = last
                # Write the assignment, preserving multi-line formatting and parent indentation.
                # The slice strips the 'with' indent from every constructor line;
                # assumes continuation lines are indented at least as deep as the
                # 'with' itself — TODO confirm for hand-formatted call sites.
                out.append(f"{with_indent}{as_match.group(2)} = ")
                for l in constructor_call_lines:
                    out.append(l[with_indent_len:])
                if not out[-1].endswith("\n"):
                    out[-1] += "\n"
                # Now robustly unindent the following block, preserving parent indentation
                k = j
                # Find the first non-blank line to determine block_indent
                block_indent = None
                while k < n:
                    next_line = lines[k]
                    if next_line.strip() == "":
                        out.append(next_line)
                        k += 1
                        continue
                    block_indent = len(next_line) - len(next_line.lstrip())
                    break
                # Unindent all lines at block_indent or deeper, stop at less indented line
                while k < n:
                    block_line = lines[k]
                    if block_line.strip() == "":
                        out.append(block_line)
                        k += 1
                        continue
                    curr_indent = len(block_line) - len(block_line.lstrip())
                    if curr_indent < (block_indent or 0):
                        break
                    # Remove block_indent, add with_indent
                    out.append(with_indent + block_line[(block_indent or 0) :])
                    k += 1
                # Resume scanning after the consumed body block
                i = k
                continue
        out.append(line)
        i += 1
    return "".join(out)


def restore_blocks(path: Path, block_map: dict):
    """
    Swap every "# UNASYNC_SKIP_BLOCK_n" placeholder in the file back
    for the preserved block of original source it stands in for.
    """
    updated = path.read_text(encoding="utf-8")
    for marker, original_block in block_map.items():
        updated = updated.replace(marker, original_block)
    path.write_text(updated, encoding="utf-8")


def post_process(path: Path):
    """
    Run the full post-processing pipeline over one generated sync file:
    1) Call transform_docstrings()
    2) Apply POST_REPLACEMENTS (strip 'await', rename imports, etc.)
    3) Translate await asyncio.to_thread(...) calls
    4) Convert await process_streaming_response(...) calls
    5) Wrap any unwrapped process_streaming_response(...) calls
    6) Convert await asyncio.sleep(...) calls to time.sleep(...)
    7) Strip Awaitable[...] wrappers
    8) Clean up typing imports
    9) Remove any leftover daytona.close() lines
    10) Collapse >2 blank lines into exactly 2
    11) Manage asyncio and time imports (add if needed based on usage, remove if unused)
    12) Inject auto-gen banner if missing

    The file is rewritten in place only when some step actually changed it.
    """
    content = path.read_text(encoding="utf-8")
    before = content

    # 0) 'with <constructor-call> as <var>:' -> '<var> = <constructor-call>' (+ unindent body)
    content = replace_with_constructor_as_var(content)

    # 1) Async -> sync rewrites inside docstring code-block examples
    content = transform_docstrings(content)

    # 2) Simple regex post-replacements (strip 'await', rename imports, etc.)
    content = apply_replacements(content, POST_REPLACEMENTS)

    # 3) "await asyncio.to_thread(...)" -> direct calls
    content = replace_all_to_thread_calls(content)

    # 3.5) Async executor patterns -> sync equivalents
    content = convert_async_executor_patterns(content)

    # 4) "await process_streaming_response(...)" -> "asyncio.run(...)"
    content = replace_await_process_streaming(content)

    # 5) Wrap bare "process_streaming_response(...)" calls with asyncio.run(...)
    #    (lines where it appears inside an import statement are left alone)
    content = replace_unwrapped_process_streaming(content)

    # 6) "await asyncio.sleep(...)" -> "time.sleep(...)"
    content = replace_asyncio_sleep_calls(content)

    # 6.5) 'asyncio.create_task' -> 'threading.Thread' plus join() usages
    content = replace_asyncio_create_task_with_threading(content)

    # 7) Drop Awaitable[...] wrappers from type annotations
    content = re.sub(r"Awaitable\[(.*?)\]", r"\1", content)

    # 8) Remove "Awaitable" from typing imports, dropping the line if nothing remains
    def _drop_awaitable(match):
        names = [n.strip() for n in match.group(1).split(",") if n.strip() != "Awaitable"]
        return f"from typing import {', '.join(names)}" if names else ""

    content = re.sub(
        r"^from typing import (.+)$",
        _drop_awaitable,
        content,
        flags=re.MULTILINE,
    )

    # 9) Drop any leftover "daytona.close()" lines
    content = re.sub(r"^\s*daytona\w*\.close\(\)\s*$", "", content, flags=re.MULTILINE)

    # 10) Squash runs of blank lines
    content = re.sub(r"\n\s*\n\s*\n", "\n\n", content)

    # 11) Add/remove asyncio and time imports based on actual usage
    content = manage_asyncio_imports(content)

    # 11.5) Drop typing imports that are no longer referenced
    content = manage_typing_imports(content)

    # 12) Make sure the auto-generated banner is present near the top of the file
    file_lines = content.splitlines(keepends=True)
    if not any("auto-generated by the unasync conversion script" in ln for ln in file_lines[:10]):
        banner_idx = find_license_end(file_lines)
        file_lines.insert(banner_idx, AUTO_GEN_WARNING)
        content = "".join(file_lines)

    if content != before:
        path.write_text(content, encoding="utf-8")
        logger.info(f"  Post-processed: {path.name}")


def main():
    """Drive the async -> sync conversion pipeline end to end and return 0."""
    TARGET_DIR.mkdir(parents=True, exist_ok=True)
    sources = [p for p in SOURCE_DIR.glob("*.py") if not p.name.startswith("__")]

    with tempfile.TemporaryDirectory() as tmpdir:
        work_dir = Path(tmpdir)
        sync_out = work_dir / "sync"
        sync_out.mkdir()

        # 1) Pre-filter each async file: strip delete-blocks, stash preserve-blocks
        #    behind placeholders so unasync cannot touch them.
        preserved = {}
        for src in sources:
            staged = work_dir / src.name
            preserved[src.name] = pre_filter(src, staged)
            filtered = staged.read_text(encoding="utf-8")
            # 2) Apply PRE_REPLACEMENTS (strip "AsyncX" -> "X", etc.)
            staged.write_text(apply_replacements(filtered, PRE_REPLACEMENTS), encoding="utf-8")

        # 3) Run unasync to produce the raw sync files
        rule = unasync.Rule(
            fromdir=str(work_dir),
            todir=str(sync_out),
            additional_replacements=ADDITIONAL_REPLACEMENTS,
        )
        unasync.unasync_files([str(work_dir / p.name) for p in sources], [rule])

        # 4) Copy generated files into TARGET_DIR, post-process each, and
        #    restore the preserved blocks last so they are never rewritten.
        for generated in sync_out.glob("*.py"):
            target = TARGET_DIR / generated.name
            target.write_text(generated.read_text(encoding="utf-8"), encoding="utf-8")
            post_process(target)
            restore_blocks(target, preserved[generated.name])

    logger.info("Unasync transformation completed successfully!")
    return 0


if __name__ == "__main__":
    # Equivalent to sys.exit(main()): propagate main()'s return code.
    raise SystemExit(main())
