from .base_llm_node import BaseLLMNode

class YiLLMNode(BaseLLMNode):
    """ComfyUI node for the Yi model family (Yi-6B, Yi-34B), optimized for
    bilingual Chinese/English chat.

    Extends the common LLM inputs from :class:`BaseLLMNode` with Yi-specific
    defaults (Chinese system prompt, 4K max output) and a choice of prompt
    template (ChatML-style or plain).
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Return the ComfyUI input schema for this node.

        Starts from the shared input schema and overrides/extends it with
        Yi-specific settings.
        """
        # Use cls (not a hard-coded BaseLLMNode) so subclasses that override
        # _common_input_types keep working.
        base_inputs = cls._common_input_types(
            "/workspace/ComfyUI/models/yi-6b-chat",
            "請介紹深度學習的基本概念和應用領域。"
        )
        # Yi 6B/34B tuning: models support a 32K context window; cap the
        # recommended maximum output at 4K tokens.
        base_inputs["required"]["max_new_tokens"] = ("INT", {"default": 512, "min": 1, "max": 4096})
        # Override the default system prompt with a Chinese one.
        base_inputs["required"]["system_prompt"] = (
            "STRING",
            {"multiline": True, "default": "你是一個AI助手，能夠提供準確和有幫助的回答。請用清晰、詳細的方式回應用戶的問題。"}
        )
        # Yi-specific prompt template selector (ChatML vs. plain concatenation).
        base_inputs["required"]["template_style"] = (
            ["yi_chatml", "yi_simple"],
            {"default": "yi_chatml"}
        )
        return base_inputs

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("generated_text",)
    FUNCTION = "infer"
    CATEGORY = "MyNodes/LLM/Yi"

    def _build_prompt(self, system_prompt: str, user_prompt: str, template_style: str) -> str:
        """Build the full prompt string for a Yi-series model.

        Args:
            system_prompt: System/instruction text placed before the user turn.
            user_prompt: The user's message.
            template_style: Either "yi_chatml" (ChatML-style tags, the Yi chat
                format) or "yi_simple" (plain concatenation). Unknown values
                fall back to "yi_chatml".

        Returns:
            The formatted prompt ending at the assistant turn, ready for
            generation.
        """
        # Build only the selected template instead of materializing all of
        # them; unknown styles fall back to the ChatML form.
        if template_style == "yi_simple":
            prompt = f"{system_prompt}\n\n{user_prompt}\n\n回答："
        else:
            prompt = (
                f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
                f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
                f"<|im_start|>assistant\n"
            )
        print(f"Using Yi template: {template_style}")
        return prompt

    def infer(self, prompt, system_prompt, model_id, template_style, max_new_tokens, temperature, seed,
              top_k=50, top_p=0.9, repetition_penalty=1.1, length_penalty=1.0,
              no_repeat_ngram_size=3, num_beams=1, early_stopping=False, do_sample=True):
        """Run text generation with a Yi model.

        Loads (or reuses) the model identified by ``model_id``, formats the
        prompt according to ``template_style``, and delegates generation to
        the base-class helper.

        Returns:
            A 1-tuple containing the generated text, or whatever
            ``_handle_error`` returns on failure (base-class error contract).
        """
        try:
            # Load (or fetch cached) model and tokenizer via the base class.
            model, tokenizer = self._load_model(model_id)

            # Assemble the Yi-formatted prompt.
            full_prompt = self._build_prompt(system_prompt, prompt, template_style)
            print(f"Yi prompt preview: {full_prompt[:100]}...")

            # Delegate generation; the positional 0.0 is an argument of
            # _generate between no_repeat_ngram_size and num_beams —
            # presumably a penalty/threshold default; confirm against the
            # base-class signature.
            result = self._generate(model, tokenizer, full_prompt, max_new_tokens, temperature, seed,
                                    top_k, top_p, repetition_penalty, length_penalty,
                                    no_repeat_ngram_size, 0.0, num_beams, do_sample, early_stopping)

            return (result,)

        except Exception as e:
            # Route all failures through the shared error handler so the node
            # reports errors consistently with sibling LLM nodes.
            return self._handle_error(e)