"""
Memory Management Utilities for u5_FreeVRAM
MIT License - Independent implementation
"""

import gc
import sys
import logging
import time

# 標準ライブラリのみ使用（psutilは必須でない）
try:
    import psutil
    HAS_PSUTIL = True
except ImportError:
    HAS_PSUTIL = False

# PyTorch（ComfyUIで使用）
try:
    import torch
    HAS_TORCH = True
except ImportError:
    HAS_TORCH = False

# ComfyUI API
try:
    import comfy.model_management
    HAS_COMFY = True
except ImportError:
    HAS_COMFY = False
    logging.warning("ComfyUI model_management not found. Running in standalone mode.")


class MemoryManager:
    """Self-contained memory-management helpers (VRAM + system RAM).

    All methods are static: the class acts as a namespace for
    ComfyUI-aware cleanup utilities that degrade gracefully when
    psutil, torch or ComfyUI are unavailable (standalone mode).
    """

    @staticmethod
    def get_memory_status():
        """Take a snapshot of current GPU and system memory usage.

        Returns:
            dict: ``free/total/used_vram_gb`` and ``free/total/used_ram_gb``,
                all in GB. Values remain 0.0 when the corresponding
                backend (CUDA for VRAM, psutil for RAM) is unavailable.
        """
        status = {
            "free_vram_gb": 0.0,
            "total_vram_gb": 0.0,
            "used_vram_gb": 0.0,
            "free_ram_gb": 0.0,
            "total_ram_gb": 0.0,
            "used_ram_gb": 0.0,
        }

        # GPU memory info (CUDA devices only).
        if HAS_TORCH and torch.cuda.is_available():
            try:
                # mem_get_info() returns (free, total) in bytes for the
                # current CUDA device.
                free_vram, total_vram = torch.cuda.mem_get_info()
                status["free_vram_gb"] = free_vram / (1024**3)
                status["total_vram_gb"] = total_vram / (1024**3)
                status["used_vram_gb"] = status["total_vram_gb"] - status["free_vram_gb"]
            except Exception as e:
                logging.warning(f"Failed to get CUDA memory info: {e}")

        # System memory info.
        if HAS_PSUTIL:
            try:
                ram = psutil.virtual_memory()
                status["free_ram_gb"] = ram.available / (1024**3)
                status["total_ram_gb"] = ram.total / (1024**3)
                status["used_ram_gb"] = ram.used / (1024**3)
            except Exception as e:
                logging.warning(f"Failed to get system memory info: {e}")
        else:
            # Fallback without psutil: report this process's peak RSS
            # (not system-wide usage, but better than nothing).
            # NOTE: ru_maxrss is expressed in kilobytes on Linux but in
            # BYTES on macOS, so the scale factor is platform-dependent.
            try:
                import resource
                rusage = resource.getrusage(resource.RUSAGE_SELF)
                if sys.platform == "darwin":
                    status["used_ram_gb"] = rusage.ru_maxrss / (1024**3)  # bytes -> GB
                else:
                    status["used_ram_gb"] = rusage.ru_maxrss / (1024**2)  # KB -> GB
            except Exception:
                # The 'resource' module does not exist on Windows;
                # leave the default 0.0 in that case.
                pass

        return status

    @staticmethod
    def cleanup_memory(unload_models=True, clear_cache=True, force_gc=True, min_free_vram_gb=0.0):
        """Run a best-effort memory cleanup pass.

        Each step is attempted independently; individual failures are
        logged and recorded but never abort the remaining steps.

        Args:
            unload_models (bool): Unload all models via the ComfyUI API.
            clear_cache (bool): Clear ComfyUI's soft cache and the CUDA
                allocator cache.
            force_gc (bool): Force a full Python garbage collection (and
                ComfyUI's model GC when available).
            min_free_vram_gb (float): Free-VRAM threshold in GB. If the
                current free VRAM already meets or exceeds this value,
                the whole cleanup is skipped. 0.0 disables the check.

        Returns:
            dict: Result with keys ``success``, ``message``,
                ``freed_vram_gb``, ``freed_ram_gb``, ``initial_status``,
                ``final_status`` and ``actions_performed`` (a list of
                human-readable step descriptions).
        """
        result = {
            "success": True,
            "message": "",
            "freed_vram_gb": 0.0,
            "freed_ram_gb": 0.0,
            "initial_status": {},
            "final_status": {},
            "actions_performed": []
        }

        result["initial_status"] = MemoryManager.get_memory_status()
        current_free_vram = result["initial_status"].get("free_vram_gb", 0.0)
        # Early exit: enough VRAM is already free, so skip all work.
        if min_free_vram_gb and current_free_vram >= min_free_vram_gb:
            result["final_status"] = result["initial_status"]
            result["success"] = True
            result["message"] = (
                "Cleanup skipped (current free VRAM "
                f"{MemoryManager.format_memory_size(current_free_vram)} >= "
                f"threshold {MemoryManager.format_memory_size(min_free_vram_gb)})"
            )
            result["actions_performed"].append("Threshold satisfied: cleanup skipped")
            logging.info("VRAMFree: threshold satisfied - cleanup skipped")
            return result

        try:
            # 1. Unload models (ComfyUI API).
            if unload_models and HAS_COMFY:
                try:
                    comfy.model_management.unload_all_models()
                    result["actions_performed"].append("Models unloaded")
                    logging.info("Models unloaded successfully")
                except Exception as e:  # noqa: BLE001
                    logging.warning(f"Failed to unload models: {e}")
                    result["actions_performed"].append(f"Model unload failed: {e}")

            # 2. Clear ComfyUI's soft cache.
            if clear_cache and HAS_COMFY:
                try:
                    comfy.model_management.soft_empty_cache(True)
                    result["actions_performed"].append("Comfy cache cleared")
                    logging.info("ComfyUI soft cache cleared successfully")
                except Exception as e:  # noqa: BLE001
                    logging.warning(f"Failed to clear ComfyUI cache: {e}")
                    result["actions_performed"].append(f"Comfy cache clear failed: {e}")

            # 3. Clear the PyTorch CUDA allocator cache.
            if clear_cache and HAS_TORCH and torch.cuda.is_available():
                try:
                    torch.cuda.empty_cache()
                    torch.cuda.ipc_collect()
                    result["actions_performed"].append("GPU cache cleared")
                    logging.info("GPU cache cleared successfully")
                except Exception as e:  # noqa: BLE001
                    logging.warning(f"Failed to clear GPU cache: {e}")
                    result["actions_performed"].append(f"GPU cache clear failed: {e}")

            # 4. Python garbage collection (and ComfyUI's model GC).
            if force_gc:
                if HAS_COMFY:
                    try:
                        comfy.model_management.cleanup_models_gc()
                        result["actions_performed"].append("Comfy model GC cleanup")
                    except Exception as gc_err:  # noqa: BLE001
                        logging.warning(f"Failed to run Comfy model GC: {gc_err}")
                        result["actions_performed"].append(f"Comfy model GC failed: {gc_err}")

                try:
                    # Full collection including the oldest generation.
                    collected = gc.collect(2)
                    result["actions_performed"].append(f"GC collected {collected} objects")
                    logging.info(f"Garbage collection completed: {collected} objects collected")
                except Exception as e:  # noqa: BLE001
                    logging.warning(f"Failed to run garbage collection: {e}")
                    result["actions_performed"].append(f"GC failed: {e}")

            # 5. Flag the server's prompt queue so ComfyUI also frees
            # memory on its side. The PromptServer singleton is looked up
            # defensively via sys.modules to avoid a hard import dependency.
            if HAS_COMFY:
                prompt_queue = None
                prompt_server_module = sys.modules.get("server")
                if prompt_server_module is not None:
                    prompt_server_class = getattr(prompt_server_module, "PromptServer", None)
                    prompt_server_instance = getattr(prompt_server_class, "instance", None) if prompt_server_class else None
                    prompt_queue = getattr(prompt_server_instance, "prompt_queue", None) if prompt_server_instance else None
                if prompt_queue is not None:
                    try:
                        prompt_queue.set_flag("unload_models", True)
                        if clear_cache:
                            prompt_queue.set_flag("free_memory", True)
                        result["actions_performed"].append("Prompt queue flagged for cleanup")
                    except Exception as queue_err:  # noqa: BLE001
                        logging.warning(f"Failed to set prompt queue flags: {queue_err}")
                        result["actions_performed"].append(f"Prompt queue flag failed: {queue_err}")

            final_status = MemoryManager.get_memory_status()
            best_status = dict(final_status)
            best_vram = final_status.get("free_vram_gb", 0.0)
            best_ram = final_status.get("free_ram_gb", 0.0)

            # Some of ComfyUI's cleanup happens asynchronously (queue
            # flags, model GC), so poll briefly and keep the best
            # (most-free) snapshot observed within the window.
            async_observed = False
            if HAS_COMFY:
                deadline = time.time() + 3.0
                while time.time() < deadline:
                    time.sleep(0.15)
                    polled = MemoryManager.get_memory_status()
                    polled_vram = polled.get("free_vram_gb", 0.0)
                    polled_ram = polled.get("free_ram_gb", 0.0)
                    # 0.01 GB tolerance filters out measurement jitter.
                    if polled_vram > best_vram + 0.01 or polled_ram > best_ram + 0.01:
                        best_status = polled
                        best_vram = polled_vram
                        best_ram = polled_ram
                        async_observed = True

                if async_observed:
                    delta_info = []
                    vram_delta = best_vram - final_status.get("free_vram_gb", 0.0)
                    if vram_delta > 0.01:
                        delta_info.append(f"VRAM +{MemoryManager.format_memory_size(vram_delta)}")
                    ram_delta = best_ram - final_status.get("free_ram_gb", 0.0)
                    if ram_delta > 0.01:
                        delta_info.append(f"RAM +{MemoryManager.format_memory_size(ram_delta)}")
                    if delta_info:
                        result["actions_performed"].append("Async flush observed: " + ", ".join(delta_info))

            result["final_status"] = best_status

            # Freed amounts are clamped at 0 (usage can also grow).
            result["freed_vram_gb"] = max(0,
                best_vram - result["initial_status"]["free_vram_gb"])
            result["freed_ram_gb"] = max(0,
                best_ram - result["initial_status"]["free_ram_gb"])

            result["success"] = True
            result["message"] = f"Cleanup completed. Actions: {', '.join(result['actions_performed'])}"

        except Exception as e:  # noqa: BLE001
            result["success"] = False
            result["message"] = f"Cleanup failed: {str(e)}"
            logging.error(f"Memory cleanup failed: {e}")

        return result

    @staticmethod
    def format_memory_size(size_gb):
        """Format a memory size in GB as a human-readable string.

        Args:
            size_gb (float): Size in gigabytes.

        Returns:
            str: "0 GB" below 1 MB, "<x> MB" below 1 GB, else "<x> GB".
        """
        if size_gb < 0.001:
            return "0 GB"
        elif size_gb < 1:
            return f"{size_gb * 1024:.1f} MB"
        else:
            return f"{size_gb:.2f} GB"

    @staticmethod
    def get_memory_summary(status=None):
        """Build a short human-readable summary of memory usage.

        Args:
            status (dict | None): Memory status to report. When None, a
                fresh snapshot is taken via get_memory_status().

        Returns:
            str: One line per available pool (VRAM, RAM), or a fallback
                string when neither total is known.
        """
        if status is None:
            status = MemoryManager.get_memory_status()

        lines = []

        # VRAM summary (only when a total is known).
        total_vram = status.get("total_vram_gb", 0.0)
        if total_vram > 0:
            used_vram = status.get("used_vram_gb", 0.0)
            vram_usage = (used_vram / total_vram) * 100
            lines.append(
                f"VRAM: {MemoryManager.format_memory_size(used_vram)}"
                f" / {MemoryManager.format_memory_size(total_vram)}"
                f" ({vram_usage:.1f}%)"
            )

        # RAM summary (only when a total is known).
        total_ram = status.get("total_ram_gb", 0.0)
        if total_ram > 0:
            used_ram = status.get("used_ram_gb", 0.0)
            ram_usage = (used_ram / total_ram) * 100
            lines.append(
                f"RAM: {MemoryManager.format_memory_size(used_ram)}"
                f" / {MemoryManager.format_memory_size(total_ram)}"
                f" ({ram_usage:.1f}%)"
            )

        return "\n".join(lines) if lines else "Memory status unavailable"