# backend/models/anthropic_model.py
"""
Anthropic Claude模型实现
"""

from typing import AsyncGenerator
from langchain_anthropic import ChatAnthropic
from langchain.schema import BaseMessage
from .base_model import BaseAIModel, AIModelResponse
import logging

logger = logging.getLogger(__name__)

class AnthropicModel(BaseAIModel):
    """Anthropic Claude model adapter backed by LangChain's ``ChatAnthropic``.

    The underlying client is created lazily: both response methods call
    ``initialize()`` on first use if ``self.llm`` is not yet set.
    Assumes the base class provides ``self.model_name``, ``self.api_key``,
    ``self.provider`` and a falsy initial ``self.llm`` — TODO confirm.
    """

    def get_provider_name(self) -> str:
        """Return the human-readable provider name."""
        return "Anthropic"

    async def initialize(self) -> None:
        """Create the streaming ``ChatAnthropic`` client.

        Raises:
            Exception: re-raised after logging when client construction fails.
        """
        try:
            self.llm = ChatAnthropic(
                model=self.model_name,
                anthropic_api_key=self.api_key,
                # NOTE(review): temperature is hard-coded; consider making it
                # configurable via the base-class config if callers need it.
                temperature=0.7,
                streaming=True
            )
            logger.info(f"Anthropic模型 {self.model_name} 初始化成功")
        except Exception as e:
            logger.error(f"Anthropic模型初始化失败: {e}")
            raise

    async def generate_response(
        self, 
        messages: list[BaseMessage],
        **kwargs
    ) -> AIModelResponse:
        """Generate a complete (non-streaming) response.

        Args:
            messages: Conversation history to send to the model.
            **kwargs: Extra options forwarded to ``ChatAnthropic.ainvoke``
                (e.g. ``stop``). Previously accepted but silently ignored.

        Returns:
            AIModelResponse: model output on success; on failure, an error
            response whose metadata carries the exception text (never raises).
        """
        try:
            if not self.llm:
                await self.initialize()

            # Bug fix: forward caller-supplied kwargs instead of dropping them.
            response = await self.llm.ainvoke(messages, **kwargs)

            return AIModelResponse(
                content=response.content,
                model_name=self.model_name,
                provider=self.provider,
                metadata={
                    # Token usage, when the provider reports it; {} otherwise.
                    "usage": getattr(response, "response_metadata", {}).get("usage", {})
                }
            )
        except Exception as e:
            logger.error(f"Anthropic响应生成失败: {e}")
            # Deliberate best-effort: surface the failure as a response object
            # rather than raising, so callers always get an AIModelResponse.
            return AIModelResponse(
                content=f"抱歉，Claude模型响应失败: {str(e)}",
                model_name=self.model_name,
                provider=self.provider,
                metadata={"error": str(e)}
            )

    async def stream_response(
        self, 
        messages: list[BaseMessage],
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Stream the response as text chunks.

        Args:
            messages: Conversation history to send to the model.
            **kwargs: Extra options forwarded to ``ChatAnthropic.astream``.
                Previously accepted but silently ignored.

        Yields:
            str: Non-empty content chunks; on failure, a single error message
            (mirrors ``generate_response``'s never-raise contract).
        """
        try:
            if not self.llm:
                await self.initialize()

            # Bug fix: forward caller-supplied kwargs instead of dropping them.
            async for chunk in self.llm.astream(messages, **kwargs):
                # Skip empty chunks (e.g. metadata-only stream events).
                if chunk.content:
                    yield chunk.content

        except Exception as e:
            logger.error(f"Anthropic流式响应失败: {e}")
            yield f"抱歉，生成响应时出现错误: {str(e)}"

