
"""
基础分析模板
"""
from abc import ABC, abstractmethod
from typing import Union
from pathlib import Path
from core.llm.template_parser import TemplateParser
from langchain_openai import ChatOpenAI
from json_repair import repair_json
from config import settings
from retry import retry


class BaseAnalyzer(ABC):
    """Base template for LLM-backed analyzers.

    Loads a prompt template from disk and configures a ChatOpenAI client;
    subclasses implement :meth:`invoke` with the concrete analysis logic.
    """

    def __init__(self, prompt_file: Union[str, Path]):
        """Load the prompt template and build the LLM client.

        Args:
            prompt_file: Path to a UTF-8 encoded prompt template file.
        """
        with open(prompt_file, 'r', encoding='utf-8') as fs:
            template_prompt = fs.read()
        self.template_prompt = TemplateParser(template_prompt)
        self.llm = ChatOpenAI(
            model=settings.LLM_MODEL,
            # Enable provider-side "thinking" mode only when configured.
            extra_body={"thinking": {"type": "enabled"}} if settings.USE_THINKING else None,
            temperature=0,  # deterministic output for analysis tasks
        )

    @retry(tries=3, delay=1)
    def generate(self, prompt: str) -> str:
        """Send ``prompt`` to the LLM and return the raw response text.

        Retries up to 3 times with a 1-second delay on any exception.
        """
        result = self.llm.invoke(prompt)
        return result.content

    def analyze(self, prompt: str):
        """Run the LLM on ``prompt`` and parse the output as JSON.

        Strips any ``<think>...</think>`` reasoning prefix emitted by
        thinking-enabled models, then repairs/parses the remainder with
        ``json_repair``.

        Returns:
            The parsed Python object (dict / list / ...) recovered from
            the model output.
        """
        output = self.generate(prompt)
        # Drop everything up to and including the closing think tag, if present.
        think_end_pos = output.find('</think>')
        if think_end_pos != -1:
            output = output[think_end_pos + len('</think>'):]
        data = repair_json(output, return_objects=True)
        return data

    @abstractmethod
    def invoke(self, **kwargs):
        """Perform the model-specific analysis (implemented by subclasses).

        NOTE: the previous ``@retry`` decorator was removed here — decorating
        an abstract method has no effect on subclass overrides, and the retry
        wrapper does not propagate ``__isabstractmethod__``, which could
        defeat the ABC instantiation check. Subclasses should apply ``@retry``
        on their own implementation if retries are needed (``generate`` is
        already retried).
        """
