"""
强化学习服务
使用强化学习优化工作流决策
"""

import json
import logging
import pickle
import random
import re
from collections import defaultdict, deque
from datetime import datetime, timezone
from typing import Dict, Any, List, Tuple, Optional

import numpy as np

from src.utils.logging import get_logger

logger = get_logger(__name__)

class ReinforcementLearningService:
    """Tabular Q-learning service used to optimize workflow decisions.

    Maintains a Q-table over (state, action) pairs, selects actions with an
    epsilon-greedy policy, supports experience replay, and can persist the
    learned policy to disk with pickle.
    """

    def __init__(self, learning_rate=0.1, discount_factor=0.9, epsilon=0.1):
        """
        Initialize the reinforcement learning service.

        Args:
            learning_rate: learning rate (alpha) used in the Q-update rule
            discount_factor: discount factor (gamma) for future rewards
            epsilon: exploration probability of the epsilon-greedy policy
        """
        # Q-table: state-key -> action -> value. The outer factory is a
        # lambda, so save_model() converts to plain dicts before pickling.
        self.q_table = defaultdict(lambda: defaultdict(float))

        # Hyperparameters.
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.epsilon = epsilon

        # Experience replay buffer, bounded to the most recent 10k samples.
        self.experience_buffer = deque(maxlen=10000)
        # State-action history. FIX: bounded deque instead of the original
        # unbounded list, which leaked memory in a long-running process.
        self.state_action_history = deque(maxlen=10000)

        # Discrete action space (workflow-tuning decisions).
        self.action_space = [
            "use_multi_agent_workflow",
            "use_optimized_workflow",
            "use_streaming_workflow",
            "adjust_timeout",
            "increase_iterations",
            "decrease_iterations",
            "enable_multimodal",
            "disable_multimodal"
        ]

        # Feature names that make up the observed state.
        self.state_features = [
            "question_complexity",
            "question_length",
            "domain_type",
            "historical_success_rate",
            "user_preference",
            "content_type",
            "time_constraint"
        ]

        logger.info("强化学习服务初始化完成")

    def _state_to_key(self, state: Dict[str, Any]) -> str:
        """
        Convert a state dict into a deterministic string key for the Q-table.

        Args:
            state: state dictionary

        Returns:
            str: canonical JSON key (items sorted by feature name)
        """
        # Sort items so logically-equal states always map to the same key.
        # Key format is kept identical to the original so previously saved
        # models remain addressable.
        sorted_state = sorted(state.items(), key=lambda x: x[0])
        return json.dumps(sorted_state, sort_keys=True)

    def get_action(self, state: Dict[str, Any]) -> str:
        """
        Choose an action for the current state (epsilon-greedy policy).

        Args:
            state: current state

        Returns:
            str: the selected action
        """
        state_key = self._state_to_key(state)

        if random.random() < self.epsilon:
            # Explore: pick a uniformly random action.
            action = random.choice(self.action_space)
            logger.debug(f"探索性动作选择: {action}")
        else:
            # Exploit: pick the action with the highest Q-value.
            # FIX: use .get() so a pure read does not create an empty
            # Q-table entry (defaultdict indexing grew the table on every
            # lookup of an unseen state).
            q_values = self.q_table.get(state_key)
            if not q_values:
                # Unseen state: fall back to a random action.
                action = random.choice(self.action_space)
                logger.debug(f"新状态，随机动作选择: {action}")
            else:
                action = max(q_values, key=q_values.get)
                logger.debug(f"利用性动作选择: {action} (Q值: {q_values[action]})")

        # Record the (state, action) pair for later inspection.
        self.state_action_history.append((state_key, action))

        return action

    def update_q_value(self, state: Dict[str, Any], action: str,
                       reward: float, next_state: Dict[str, Any],
                       store_experience: bool = True) -> bool:
        """
        Apply the Q-learning update rule for one transition.

        Args:
            state: current state
            action: action that was executed
            reward: reward received
            next_state: resulting state
            store_experience: when True, also append the transition to the
                replay buffer. train_from_experience() passes False so that
                replayed samples are not re-inserted (the original code
                duplicated every replayed experience back into the buffer).

        Returns:
            bool: True on success, False if the update raised
        """
        try:
            state_key = self._state_to_key(state)
            next_state_key = self._state_to_key(next_state)

            # Current Q-value for (state, action).
            current_q = self.q_table[state_key][action]

            # Max Q over the next state's actions; .get() avoids
            # materializing an empty entry for never-updated next states.
            next_max_q = max(self.q_table.get(next_state_key, {}).values(), default=0.0)

            # Q-learning rule: Q(s,a) <- Q(s,a) + alpha*[r + gamma*max Q(s',a') - Q(s,a)]
            new_q = current_q + self.learning_rate * (reward + self.discount_factor * next_max_q - current_q)

            self.q_table[state_key][action] = new_q

            if store_experience:
                experience = {
                    'state': state,
                    'action': action,
                    'reward': reward,
                    'next_state': next_state,
                    # FIX: timezone-aware timestamp; datetime.utcnow() is
                    # deprecated and returns a naive datetime.
                    'timestamp': datetime.now(timezone.utc)
                }
                self.experience_buffer.append(experience)

            # FIX: original log strings contained garbled characters ("Q값").
            logger.debug(f"Q值更新: 状态={state_key[:50]}..., 动作={action}, 奖励={reward}, 新Q值={new_q}")
            return True

        except Exception as e:
            logger.error(f"Q值更新失败: {e}")
            return False

    def calculate_reward(self, execution_result: Dict[str, Any]) -> float:
        """
        Compute the reward for one execution result.

        Args:
            execution_result: execution outcome dict; recognized keys are
                'success', 'execution_time', 'quality_score',
                'user_feedback' (expected in [-1, 1]) and 'resource_usage'

        Returns:
            float: the combined reward
        """
        reward = 0.0

        # Success/failure base reward.
        if execution_result.get('success', False):
            reward += 10.0
        else:
            reward -= 5.0

        # Speed bonus: faster runs earn more, capped at 5.0.
        execution_time = execution_result.get('execution_time', 0)
        if execution_time > 0:
            time_reward = max(0, 5.0 - execution_time / 10.0)
            reward += time_reward

        # Quality bonus (weight 2.0).
        quality_score = execution_result.get('quality_score', 0)
        reward += quality_score * 2.0

        # User-feedback bonus (weight 3.0; feedback expected in [-1, 1]).
        user_feedback = execution_result.get('user_feedback', 0)
        reward += user_feedback * 3.0

        # Resource-usage penalty above 80% utilization.
        resource_usage = execution_result.get('resource_usage', 0)
        if resource_usage > 0.8:
            reward -= (resource_usage - 0.8) * 5.0

        logger.debug(f"奖励计算: 基础奖励={reward}, 执行结果={execution_result}")
        return reward

    def extract_state_features(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Extract state features from the request context.

        Args:
            context: contextual information about the request

        Returns:
            Dict[str, Any]: feature dictionary used as the RL state
        """
        state = {}

        # Question complexity (length-based, normalized to [0, 1]).
        question = context.get('question', '')
        question_lower = question.lower()  # hoisted: reused by several checks
        state['question_length'] = len(question)
        state['question_complexity'] = min(len(question) / 100.0, 1.0)

        # Domain type, inferred from keyword hits.
        domain_keywords = {
            'tech': ['技术', '编程', '开发', 'code', 'program', 'software', '算法', 'computer'],
            'business': ['商业', '市场', '营销', 'business', 'market', 'finance', '经济'],
            'science': ['科学', '研究', '实验', 'science', 'research', '物理', '化学', '生物'],
            'general': ['一般', '日常', '生活', 'general', 'life', '普通']
        }

        domain_scores = {
            domain: sum(1 for keyword in keywords if keyword in question_lower)
            for domain, keywords in domain_keywords.items()
        }

        # FIX: fall back to 'general' when no keyword matched at all; the
        # original max() arbitrarily returned the first domain ('tech') for
        # an all-zero score vector.
        best_domain = max(domain_scores, key=domain_scores.get)
        state['domain_type'] = best_domain if domain_scores[best_domain] > 0 else 'general'

        # Historical success rate (defaults to a neutral 0.5).
        state['historical_success_rate'] = context.get('historical_success_rate', 0.5)

        # User preference.
        state['user_preference'] = context.get('user_preference', 'balanced')

        # Content type.
        state['content_type'] = context.get('content_type', 'text')

        # Time constraint.
        state['time_constraint'] = context.get('time_constraint', 'normal')

        # Question type via the project NLP helper. Imported locally —
        # presumably to avoid a circular import at module load time.
        from src.utils.nlp_enhancement import NLPSupport
        nlp = NLPSupport()
        state['question_type'] = nlp.classify_question_type(question)

        # Number of extracted keywords.
        keywords = nlp.extract_keywords(question)
        state['keyword_count'] = len(keywords)

        # Whether the question contains digits (uses module-level `re`).
        state['has_numbers'] = bool(re.search(r'\d+', question))

        # Whether the question asks for a comparison.
        comparison_words = ['比较', '对比', '区别', '不同', '优于', '劣于', '相比', 'than', 'compare', 'versus', 'vs']
        state['is_comparison'] = any(word in question_lower for word in comparison_words)

        # Whether the question is time-related.
        time_words = ['时间', '日期', '现在', '当前', '最新', 'recent', 'current', 'now', 'today', 'tomorrow', 'yesterday']
        state['time_related'] = any(word in question_lower for word in time_words)

        return state

    def train_from_experience(self, batch_size: int = 32) -> bool:
        """
        Train on a random minibatch sampled from the replay buffer.

        Args:
            batch_size: number of experiences to replay

        Returns:
            bool: True on success (including the no-op when the buffer is
                still smaller than batch_size), False if training raised
        """
        try:
            if len(self.experience_buffer) < batch_size:
                # Not enough data yet; treat as a successful no-op.
                return True

            # Sample from a list snapshot: random.sample on a deque costs
            # O(n) per pick because deques lack O(1) random access.
            batch = random.sample(list(self.experience_buffer), batch_size)

            # Replay each transition. FIX: store_experience=False so replayed
            # samples are not appended back into the buffer (the original
            # duplicated every sampled experience, skewing future sampling).
            for experience in batch:
                self.update_q_value(
                    experience['state'],
                    experience['action'],
                    experience['reward'],
                    experience['next_state'],
                    store_experience=False
                )

            logger.info(f"从经验中训练完成，批次大小: {batch_size}")
            return True

        except Exception as e:
            logger.error(f"经验回放训练失败: {e}")
            return False

    def get_policy_stats(self) -> Dict[str, Any]:
        """
        Summarize the learned policy.

        Returns:
            Dict[str, Any]: Q-table size, number of stored experiences, and
                a histogram of actions taken across the replay buffer
        """
        # Count how often each action appears in the replay buffer.
        action_distribution = defaultdict(int)
        for experience in self.experience_buffer:
            action_distribution[experience['action']] += 1

        return {
            'q_table_size': len(self.q_table),
            'total_experiences': len(self.experience_buffer),
            # Plain dict so callers can serialize the stats directly.
            'action_distribution': dict(action_distribution)
        }

    def save_model(self, filepath: str) -> bool:
        """
        Persist the model (Q-table, replay buffer, hyperparameters) to disk.

        Args:
            filepath: destination file path

        Returns:
            bool: True on success, False if saving raised
        """
        try:
            model_data = {
                # Convert to plain nested dicts for pickling; load_model()
                # re-wraps them as defaultdicts on the way back in.
                'q_table': {state: dict(actions) for state, actions in self.q_table.items()},
                'experience_buffer': list(self.experience_buffer),
                'parameters': {
                    'learning_rate': self.learning_rate,
                    'discount_factor': self.discount_factor,
                    'epsilon': self.epsilon
                }
            }

            with open(filepath, 'wb') as f:
                pickle.dump(model_data, f)

            logger.info(f"模型保存成功: {filepath}")
            return True

        except Exception as e:
            logger.error(f"模型保存失败: {e}")
            return False

    def load_model(self, filepath: str) -> bool:
        """
        Load a previously saved model from disk.

        NOTE(review): pickle.load executes arbitrary code embedded in the
        file — only load model files from trusted locations.

        Args:
            filepath: source file path

        Returns:
            bool: True on success, False if loading raised
        """
        try:
            with open(filepath, 'rb') as f:
                model_data = pickle.load(f)

            # FIX: re-wrap inner mappings as defaultdict(float) so lookups of
            # unseen actions keep returning 0.0 after a reload (plain inner
            # dicts would raise KeyError in update_q_value).
            self.q_table = defaultdict(
                lambda: defaultdict(float),
                {state: defaultdict(float, actions)
                 for state, actions in model_data['q_table'].items()}
            )
            self.experience_buffer = deque(model_data['experience_buffer'], maxlen=10000)

            # Restore hyperparameters, keeping current values as fallback.
            params = model_data.get('parameters', {})
            self.learning_rate = params.get('learning_rate', self.learning_rate)
            self.discount_factor = params.get('discount_factor', self.discount_factor)
            self.epsilon = params.get('epsilon', self.epsilon)

            logger.info(f"模型加载成功: {filepath}")
            return True

        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            return False


# Module-level singleton instance shared by all callers.
reinforcement_learning_service = ReinforcementLearningService()


def get_reinforcement_learning_service() -> ReinforcementLearningService:
    """Return the module-level singleton ReinforcementLearningService."""
    return reinforcement_learning_service