"""
Dify工作流集成服务
"""
import json
import logging
from typing import Any, Dict, Generator, Optional, Union

import requests
from django.conf import settings

logger = logging.getLogger(__name__)


class DifyService:
    """Client for the Dify workflow chat-messages API.

    Configuration is read lazily from Django settings (``DIFY_API_KEY``,
    ``DIFY_BASE_URL``, ``DIFY_TIMEOUT``) with placeholder defaults so the
    module can be imported in unconfigured environments.
    """

    def __init__(self):
        # Placeholder defaults let a dev environment import and instantiate
        # this service without mandatory settings; real deployments must
        # override DIFY_API_KEY at minimum.
        self.api_key = getattr(settings, 'DIFY_API_KEY', 'app-your-dify-api-key')
        self.base_url = getattr(settings, 'DIFY_BASE_URL', 'https://api.dify.ai/v1')
        self.timeout = getattr(settings, 'DIFY_TIMEOUT', 30)

    def send_message(self,
                    query: str,
                    user: str,
                    conversation_id: Optional[str] = None,
                    inputs: Optional[Dict[str, Any]] = None,
                    response_mode: str = 'streaming') -> Union[Dict[str, Any], Generator[str, None, None]]:
        """Send a message to the Dify workflow.

        Args:
            query: The end user's question/input text.
            user: Caller-defined end-user identifier.
            conversation_id: Existing conversation to continue (optional).
            inputs: Workflow variable values (optional).
            response_mode: ``'streaming'`` or ``'blocking'``.

        Returns:
            In blocking mode, the decoded JSON response dict. In streaming
            mode, a generator yielding raw SSE ``data:`` payload strings.
            (The original annotation claimed ``Dict`` for both modes, which
            was incorrect for streaming.)

        Raises:
            requests.RequestException: On connection or HTTP-status errors.
        """
        url = f"{self.base_url}/chat-messages"

        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

        data: Dict[str, Any] = {
            'query': query,
            'user': user,
            'response_mode': response_mode,
            'inputs': inputs or {}
        }

        # Only include the key when continuing an existing conversation;
        # Dify starts a new conversation when it is absent.
        if conversation_id:
            data['conversation_id'] = conversation_id

        try:
            if response_mode == 'streaming':
                return self._handle_streaming_request(url, headers, data)
            return self._handle_blocking_request(url, headers, data)
        except Exception as e:
            logger.error(f"Dify API调用失败: {e}")
            raise

    def _handle_blocking_request(self, url: str, headers: Dict, data: Dict) -> Dict[str, Any]:
        """POST once and return the decoded JSON body (blocking mode)."""
        response = requests.post(url, headers=headers, json=data, timeout=self.timeout)
        response.raise_for_status()
        return response.json()

    def _handle_streaming_request(self, url: str, headers: Dict, data: Dict) -> Generator[str, None, None]:
        """Issue a streaming POST and return a generator of SSE payloads.

        Note: this is deliberately NOT a generator function. If it were,
        the POST and ``raise_for_status`` would be deferred until the first
        iteration, escaping the try/except in ``send_message``. Performing
        the request eagerly means connection/HTTP errors raise here, at
        call time, where the caller's error handling can see them.
        """
        response = requests.post(
            url,
            headers=headers,
            json=data,
            stream=True,
            timeout=self.timeout
        )
        response.raise_for_status()

        def _iter_sse_data() -> Generator[str, None, None]:
            # Server-Sent Events: each payload line looks like "data: {...}".
            for raw_line in response.iter_lines():
                if raw_line:
                    text = raw_line.decode('utf-8')
                    if text.startswith('data: '):
                        yield text[6:]  # strip the 'data: ' prefix

        return _iter_sse_data()

    def parse_streaming_message(self, message: str) -> Dict[str, Any]:
        """Parse one SSE payload string into a normalized message dict.

        Args:
            message: A single JSON payload from the stream (without the
                SSE ``data:`` prefix).

        Returns:
            A dict whose ``'type'`` key is one of ``'message'``,
            ``'message_end'``, ``'error'`` or ``'unknown'``; malformed JSON
            yields an ``'error'`` dict rather than raising.
        """
        try:
            data = json.loads(message)
        except json.JSONDecodeError as e:
            logger.error(f"解析流式消息失败: {e}, 消息内容: {message}")
            return {
                'type': 'error',
                'error': f'消息解析失败: {e}'
            }

        event = data.get('event', '')

        if event == 'message':
            return {
                'type': 'message',
                'content': data.get('answer', ''),
                'task_id': data.get('task_id'),
                'message_id': data.get('message_id'),
                'conversation_id': data.get('conversation_id'),
                'created_at': data.get('created_at')
            }
        if event == 'message_end':
            return {
                'type': 'message_end',
                'task_id': data.get('task_id'),
                'message_id': data.get('message_id'),
                'conversation_id': data.get('conversation_id'),
                'metadata': data.get('metadata'),
                'usage': data.get('usage')
            }
        if event == 'error':
            return {
                'type': 'error',
                'error': data.get('message', '未知错误'),
                'status': data.get('status'),
                'code': data.get('code')
            }
        return {
            'type': 'unknown',
            'data': data
        }

    def create_mock_response(self, query: str, conversation_id: Optional[str] = None) -> Dict[str, Any]:
        """Build a canned blocking-style response for testing.

        Args:
            query: The user's input; simple keyword matching selects one of
                several scripted interviewer replies.
            conversation_id: Reused if given, otherwise a fresh UUID.

        Returns:
            A dict shaped like a Dify ``message`` event.
        """
        import uuid
        from datetime import datetime

        # Keyword-based selection of a scripted reply (mock interviewer).
        if '开始' in query or '你好' in query:
            answer = "您好！欢迎来到面试练习环节。我是您的AI面试官，今天将为您进行一场模拟面试。请先简单介绍一下您自己，包括您的姓名、专业背景和求职意向。"
        elif '介绍' in query or '自己' in query:
            answer = "很好的自我介绍！接下来我想了解一下您的技术背景。请详细说说您最熟悉的编程语言或技术栈，以及您在这方面的项目经验。"
        elif '项目' in query or '经验' in query:
            answer = "听起来您有不错的项目经验。能否详细描述一个您认为最有挑战性的项目？包括您在其中遇到的技术难题以及是如何解决的？"
        elif '结束' in query or '完成' in query:
            answer = "感谢您参与本次面试练习！您表现得很不错。我会为您生成详细的面试反馈报告，包括优势分析和改进建议。祝您求职顺利！"
        else:
            answer = "这是一个很好的问题。在实际面试中，面试官可能会从多个角度来评估您的回答。请继续分享您的想法，我会根据您的回答给出相应的反馈。"

        return {
            'event': 'message',
            'task_id': str(uuid.uuid4()),
            'message_id': str(uuid.uuid4()),
            'conversation_id': conversation_id or str(uuid.uuid4()),
            'answer': answer,
            'created_at': int(datetime.now().timestamp())
        }

    def create_mock_streaming_response(self, query: str, conversation_id: Optional[str] = None) -> Generator[str, None, None]:
        """Yield a canned reply as a sequence of SSE-style JSON payloads.

        Args:
            query: The user's input (drives reply selection, see
                ``create_mock_response``).
            conversation_id: Reused if given, otherwise a fresh UUID.

        Yields:
            JSON strings: several ``message`` events carrying successive
            chunks of the answer, followed by one ``message_end`` event.
        """
        import time

        mock_response = self.create_mock_response(query, conversation_id)
        answer = mock_response['answer']

        # Emit the answer a few characters at a time to mimic streaming.
        chunk_size = 5
        for start in range(0, len(answer), chunk_size):
            chunk = answer[start:start + chunk_size]

            message_data = {
                'event': 'message',
                'task_id': mock_response['task_id'],
                'message_id': mock_response['message_id'],
                'conversation_id': mock_response['conversation_id'],
                'answer': chunk,
                'created_at': mock_response['created_at']
            }

            yield json.dumps(message_data)
            time.sleep(0.1)  # simulate network latency

        # Terminal event; "token" counts are character counts (mock only).
        end_message = {
            'event': 'message_end',
            'task_id': mock_response['task_id'],
            'message_id': mock_response['message_id'],
            'conversation_id': mock_response['conversation_id'],
            'metadata': {},
            'usage': {
                'total_tokens': len(answer),
                'prompt_tokens': len(query),
                'completion_tokens': len(answer)
            }
        }

        yield json.dumps(end_message)


# Module-level singleton so all importers share one configured client.
dify_service = DifyService()
