"""LLM module using LangChain for survey generation."""
from typing import Any, List, Optional
import time

from langchain_core.language_models import BaseLLM
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.outputs import Generation, LLMResult
from zhipuai import ZhipuAI

from config import ZHIPUAI_API_KEY, DEFAULT_MODEL, TEMPERATURE, MAX_TOKENS

class SurveyLLM(BaseLLM):
    """Custom LLM class for survey generation using Zhipu AI.

    Wraps the ZhipuAI chat-completions API as a LangChain ``BaseLLM`` so it
    can be dropped into LangChain chains. Retries automatically when the
    provider reports its rate-limit error (code 1302).
    """

    # Model and sampling parameters, defaulted from config so deployments can
    # tune behavior without code changes.
    model_name: str = DEFAULT_MODEL
    temperature: float = TEMPERATURE
    max_tokens: int = MAX_TOKENS
    # Rate-limit retry policy: up to `max_retries` attempts, sleeping
    # `retry_delay` seconds between attempts.
    max_retries: int = 20
    retry_delay: int = 10

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization."""
        return "survey_llm"

    def _call_with_retry(self, client: ZhipuAI, messages: List[dict]) -> str:
        """Call the chat-completions API, retrying on rate-limit errors.

        Args:
            client: An initialized ZhipuAI client.
            messages: Chat messages in ``{"role": ..., "content": ...}`` form.

        Returns:
            The assistant message content of the first choice.

        Raises:
            RuntimeError: If all ``max_retries`` attempts hit the rate limit.
            Exception: Any non-rate-limit API error, re-raised unchanged.
        """
        for attempt in range(self.max_retries):
            try:
                response = client.chat.completions.create(
                    model=self.model_name,
                    messages=messages,
                    temperature=self.temperature,
                    max_tokens=self.max_tokens,
                    stream=False,
                )
                return response.choices[0].message.content
            except Exception as e:
                # The SDK surfaces rate limits as a generic exception whose
                # message embeds provider error code 1302; match on that.
                # NOTE(review): fragile string match — switch to a typed SDK
                # exception if/when zhipuai exposes one.
                if "code\":\"1302" in str(e) and attempt < self.max_retries - 1:
                    print(f"Rate limit hit, retrying in {self.retry_delay} seconds...")
                    time.sleep(self.retry_delay)
                    continue
                # Bare raise preserves the original traceback.
                raise
        raise RuntimeError("Max retries exceeded")

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Generate completions for the given prompts.

        Args:
            prompts: Prompt strings; each is sent as a separate chat request.
            stop: Stop sequences (accepted for interface compatibility; unused).
            run_manager: LangChain callback manager (unused).
            **kwargs: Extra keyword arguments (unused).

        Returns:
            An ``LLMResult`` with one generation per prompt, in input order.
        """
        client = ZhipuAI(api_key=ZHIPUAI_API_KEY)
        generations = []

        for prompt in prompts:
            messages = [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt}
            ]
            text = self._call_with_retry(client, messages)
            generations.append([Generation(text=text)])

        return LLMResult(generations=generations)

# Module-level singleton so importers share one configured LLM instance
# (configuration comes from class-level defaults sourced from config).
llm = SurveyLLM() 