# ComfyUI Custom Node: Orpheus-TTS Base Loader
# - Purpose: load the Orpheus TTS base model and tokenizer and return a handle (ORPHEUS_MODEL)
# - Features:
#   * lazy imports, so the node UI can be checked safely on a CPU-only machine (dry_run)
#   * automatic device/dtype selection
#   * optional Hugging Face token support (for private/gated models)

import os
from typing import Dict, Any

class OrpheusLoader:
    """ComfyUI node that loads an Orpheus TTS base model and tokenizer.

    Returns an ORPHEUS_MODEL handle (a plain dict) that downstream nodes can
    use for inference.  Heavy dependencies (torch / transformers) are imported
    lazily inside ``run()`` so the node UI can be inspected on a machine
    without them; set ``dry_run`` to skip loading entirely.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "base_model_id": ("STRING", {
                    "default": "unsloth/orpheus-3b-0.1-pretrained",  # replace to suit your environment
                }),
                "device_pref": (["auto", "cpu", "cuda"],),
                "dtype_pref": (["auto", "float32", "float16", "bfloat16"],),
                # Optional Hugging Face token for private / gated models.
                "hf_token": ("STRING", {"default": "", "multiline": False}),
                # NOTE: ComfyUI's boolean widget type is "BOOLEAN" ("BOOL" is
                # not a recognized input type and the widget would not render).
                "low_cpu_mem_usage": ("BOOLEAN", {"default": True}),
                "dry_run": ("BOOLEAN", {"default": False}),
            }
        }

    RETURN_TYPES = ("ORPHEUS_MODEL",)        # custom type name: ComfyUI passes the Python object through
    RETURN_NAMES = ("model",)
    FUNCTION = "run"
    CATEGORY = "Audio/TTS"

    # ---- internal helpers ----
    def _pick_device(self, pref: str) -> str:
        """Resolve a device preference ("auto"/"cpu"/"cuda") to a concrete device string."""
        try:
            import torch
            if pref == "cpu":
                return "cpu"
            # "cuda" and "auto" both fall back to CPU when CUDA is unavailable.
            return "cuda" if torch.cuda.is_available() else "cpu"
        except Exception:
            # torch not importable -> only CPU is meaningful.
            return "cpu"

    def _pick_dtype(self, pref: str, device: str):
        """Resolve a dtype preference to a torch dtype, or None if torch is unavailable."""
        try:
            import torch
            explicit = {
                "float16": torch.float16,
                "bfloat16": torch.bfloat16,
                "float32": torch.float32,
            }
            if pref in explicit:
                return explicit[pref]
            # auto: half precision on GPU, full precision on CPU.
            return torch.float16 if device == "cuda" else torch.float32
        except Exception:
            return None

    # ---- execution ----
    def run(self,
            base_model_id: str,
            device_pref: str,
            dtype_pref: str,
            hf_token: str,
            low_cpu_mem_usage: bool,
            dry_run: bool):
        """Load the model/tokenizer, or return a lightweight stub when dry_run is set.

        Returns a 1-tuple containing the handle dict (ComfyUI convention).
        """
        # dry_run: confirm the node appears in the ComfyUI graph without
        # importing torch/transformers or downloading any weights.
        if dry_run:
            stub = {
                "_type": "orpheus_stub",
                "base_model_id": base_model_id,
            }
            return (stub,)

        # Heavy modules are imported only at execution time (lazy import).
        import torch
        from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM

        device = self._pick_device(device_pref)
        dtype = self._pick_dtype(dtype_pref, device)

        # HF token handling (private / EULA-gated models).  Treat a blank or
        # whitespace-only token as "no token" rather than passing "".
        use_auth = (hf_token or "").strip() or None

        # Tokenizer / config
        tok = AutoTokenizer.from_pretrained(
            base_model_id,
            use_fast=True,
            token=use_auth,
            trust_remote_code=True,
        )
        cfg = AutoConfig.from_pretrained(
            base_model_id,
            token=use_auth,
            trust_remote_code=True,
        )

        # On CPU we always load in float32 (half precision is unsupported or
        # very slow on most CPUs); on CUDA we honor the resolved dtype.
        load_dtype = dtype if device == "cuda" else torch.float32
        model = AutoModelForCausalLM.from_pretrained(
            base_model_id,
            torch_dtype=load_dtype,
            low_cpu_mem_usage=low_cpu_mem_usage,
            token=use_auth,
            trust_remote_code=True,
        )

        model.eval()
        model.to(device)

        handle: Dict[str, Any] = {
            "_type": "orpheus",
            "base_model_id": base_model_id,
            "tokenizer": tok,
            "config": cfg,
            "model": model,
            "device": device,
            # Report the dtype the weights were actually loaded in, so the
            # handle cannot disagree with the model (previously the requested
            # dtype was reported even when CPU forced float32).
            "dtype": str(load_dtype) if load_dtype is not None else "float32",
            "hf_token_used": bool(use_auth),
        }
        return (handle,)


# Registration tables that ComfyUI reads at import time: internal node id ->
# node class, and internal node id -> human-readable display name.
NODE_CLASS_MAPPINGS = dict(OrpheusLoader=OrpheusLoader)
NODE_DISPLAY_NAME_MAPPINGS = dict(OrpheusLoader="Orpheus-TTS Base Loader")

